├── .devcontainer └── devcontainer.json ├── .envrc ├── .git-blame-ignore-revs ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature.md ├── dependabot.yml ├── pull_request_template.md └── workflows │ ├── CI.yml │ ├── build_nix.yml │ └── documentation.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── build.rs ├── examples ├── duplex.rs ├── enumerate_alsa.rs ├── enumerate_coreaudio.rs ├── enumerate_pipewire.rs ├── enumerate_wasapi.rs ├── input.rs ├── loopback.rs ├── sine_wave.rs └── util │ ├── enumerate.rs │ ├── meter.rs │ ├── mod.rs │ └── sine.rs ├── flake.lock ├── flake.nix └── src ├── audio_buffer.rs ├── backends ├── alsa │ ├── device.rs │ ├── input.rs │ ├── mod.rs │ ├── output.rs │ ├── stream.rs │ └── triggerfd.rs ├── coreaudio.rs ├── mod.rs ├── pipewire │ ├── device.rs │ ├── driver.rs │ ├── error.rs │ ├── mod.rs │ ├── stream.rs │ └── utils.rs └── wasapi │ ├── device.rs │ ├── driver.rs │ ├── error.rs │ ├── mod.rs │ ├── prelude.rs │ ├── stream.rs │ └── util.rs ├── channel_map.rs ├── duplex.rs ├── lib.rs ├── prelude.rs └── timestamp.rs /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/rust 3 | { 4 | "name": "Rust", 5 | // Or use a Dockerfile or Docker Compose file. 
More info: https://containers.dev/guide/dockerfile 6 | "image": "mcr.microsoft.com/devcontainers/rust:1-1-bullseye", 7 | "features": { 8 | "ghcr.io/devcontainers/features/nix:1": { 9 | "extraNixConfig": "experimental-features = nix-command flakes" 10 | }, 11 | "ghcr.io/dhoeric/features/act:1": {} 12 | }, 13 | "workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/interflow,type=bind,consistency=delegated", 14 | "workspaceFolder": "/workspaces/interflow", 15 | 16 | // Use 'mounts' to make the cargo cache persistent in a Docker Volume. 17 | // "mounts": [ 18 | // { 19 | // "source": "devcontainer-cargo-cache-${devcontainerId}", 20 | // "target": "/usr/local/cargo", 21 | // "type": "volume" 22 | // } 23 | // ] 24 | 25 | // Features to add to the dev container. More info: https://containers.dev/features. 26 | // "features": {}, 27 | 28 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 29 | // "forwardPorts": [], 30 | 31 | // Use 'postCreateCommand' to run commands after the container is created. 32 | // "postCreateCommand": "rustc --version", 33 | 34 | // Configure tool-specific properties. 35 | // "customizations": {}, 36 | 37 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
38 | // "remoteUser": "root", 39 | "postCreateCommand": "sudo apt-get update && sudo apt-get install -y python3{,-pip,-venv} libasound2-dev libspa-0.2-dev libpipewire-0.3-dev && pip install --user pipx && pipx install pre-commit && pre-commit install" 40 | } 41 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | use flake 2 | -------------------------------------------------------------------------------- /.git-blame-ignore-revs: -------------------------------------------------------------------------------- 1 | # pre-commit initial run 2 | 6039951600079ee203e439a9435e3669c5f7cb86 3 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | src/backends/alsa @solarliner 2 | src/backends/wasapi @geom3trik 3 | src/backends/coreaudio.rs @solarliner 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Create a report to help us improve the project 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | --- 8 | 9 | ## Bug Description 10 | 11 | A clear and concise description of what the bug is. 12 | 13 | ## Steps To Reproduce 14 | 15 | Reproduction should be as minimal as possible, but in the worst case, a public repository reproducing the bug should be 16 | given. 17 | 18 | ## Expected Behavior 19 | 20 | A clear and concise description of what you expected to happen. 21 | 22 | ## Actual Behavior 23 | 24 | A clear and concise description of what actually happened. 25 | 26 | ## Environment 27 | 28 | - OS: [e.g., macOS Sonoma] 29 | - Audio driver (enable `info` logs): [e.g. 
CoreAudio] 30 | - Rust Version: [e.g., 1.85.0] 31 | - Project Version/Commit (see `Cargo.lock`): [e.g., v0.1.0 or commit hash] 32 | 33 | ## Additional Context 34 | 35 | Add any other context about the problem here. 36 | 37 | ## Possible Solution 38 | 39 | (Optional) Suggest a fix/reason for the bug. 40 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: feature 6 | assignees: '' 7 | --- 8 | 9 | ## Problem Statement 10 | 11 | A clear and concise description of what the problem is. 12 | Example: I'm always frustrated when [...] 13 | 14 | ## Proposed Solution 15 | 16 | A clear and concise description of what you want to happen. 17 | 18 | ## Alternative Solutions 19 | 20 | A clear and concise description of any alternative solutions or features you've considered. 21 | 22 | ## Example Usage 23 | 24 | Provide an example of how this feature would be used: 25 | ```rust 26 | // Your example code here 27 | ``` 28 | 29 | ## Additional Context 30 | 31 | Add any other context or screenshots about the feature request here. 32 | 33 | ## Checklist 34 | 35 | - [ ] I have searched for existing issues that may be similar 36 | - [ ] I have explained the feature's value proposition 37 | - [ ] I have provided concrete examples 38 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "github-actions" 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | - package-ecosystem: "cargo" 13 | directory: "/" # Location of package manifests 14 | schedule: 15 | interval: "daily" 16 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | Please include a summary of the change and which issue is fixed. Include relevant motivation and context. 4 | 5 | Fixes # (issue number) 6 | 7 | ## Type of Change 8 | 9 | Please delete options that are not relevant. 10 | 11 | - [ ] Bug fix (non-breaking change which fixes an issue) 12 | - [ ] New feature (non-breaking change which adds functionality) 13 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 14 | - [ ] Documentation update 15 | - [ ] Performance improvement 16 | - [ ] Code cleanup or refactor 17 | 18 | ## How Has This Been Tested? 19 | 20 | Please describe the tests you added or ran to verify your changes. Provide instructions so we can reproduce. Try to run 21 | on as many audio drivers as you can get your hands on. 22 | 23 | - [ ] Test A 24 | - [ ] Test B 25 | 26 | ## Checklist: 27 | 28 | - [ ] My code follows the style guidelines of this project 29 | - [ ] I have made corresponding changes to the documentation 30 | - [ ] My changes generate no new warnings 31 | - [ ] Wherever possible, I have added tests that prove my fix is effective or that my feature works. For changes that 32 | need to be validated manually (i.e. a new audio driver), use examples that can be run to easily validate them. 
33 | - [ ] New and existing unit tests pass locally with my changes 34 | - [ ] I have checked my code and corrected any misspellings 35 | 36 | ## Screenshots (if appropriate): 37 | 38 | ## Additional Notes: 39 | 40 | Add any additional notes about the PR here. 41 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | fmt: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | - name: Install dependencies (Linux) 18 | run: sudo apt install libasound2-dev libspa-0.2-dev libpipewire-0.3-dev 19 | - name: Install Rust 1.86 20 | uses: actions-rs/toolchain@v1 21 | with: 22 | toolchain: 1.86 23 | default: true 24 | override: true 25 | components: rustfmt 26 | - name: Run rustfmt 27 | run: cargo fmt -- --check 28 | clippy: 29 | strategy: 30 | matrix: 31 | os: [windows, ubuntu, macos] 32 | runs-on: ${{ matrix.os }}-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | - name: Install dependencies (Linux) 36 | if: ${{ matrix.os == 'ubuntu' }} 37 | run: sudo apt install libasound2-dev libspa-0.2-dev libpipewire-0.3-dev 38 | - name: Install Rust 1.86 39 | uses: actions-rs/toolchain@v1 40 | with: 41 | toolchain: 1.86 42 | default: true 43 | override: true 44 | components: clippy 45 | - uses: LoliGothick/clippy-check@master 46 | continue-on-error: true 47 | with: 48 | options: ${{ matrix.os == 'ubuntu' && '--features pipewire' || '' }} 49 | token: ${{ secrets.GITHUB_TOKEN }} 50 | test: 51 | strategy: 52 | fail-fast: false 53 | matrix: 54 | os: [ubuntu-latest, macos-latest, windows-latest] 55 | rust: [1.84, 1.85, 1.86] 56 | feature-pipewire: [false, true] 57 | exclude: 58 | - os: windows-latest 59 | feature-pipewire: true 60 | - os: macos-latest 61 | 
feature-pipewire: true 62 | runs-on: ${{ matrix.os }} 63 | steps: 64 | - uses: actions/checkout@v4 65 | - name: Install dependencies (Linux) 66 | if: ${{ matrix.os == 'ubuntu-latest' }} 67 | run: sudo apt install libasound2-dev libspa-0.2-dev libpipewire-0.3-dev 68 | - name: Install Rust ${{ matrix.rust }} 69 | uses: actions-rs/toolchain@v1 70 | with: 71 | toolchain: ${{ matrix.rust }} 72 | default: true 73 | override: true 74 | - name: Build 75 | run: cargo build --verbose ${{ matrix.feature-pipewire && '--features pipewire' || '' }} 76 | - name: Run tests 77 | run: cargo test --verbose ${{ matrix.feature-pipewire && '--features pipewire' || '' }} 78 | -------------------------------------------------------------------------------- /.github/workflows/build_nix.yml: -------------------------------------------------------------------------------- 1 | name: "Build legacy Nix package on Ubuntu" 2 | 3 | on: 4 | push: 5 | 6 | jobs: 7 | build: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - uses: cachix/install-nix-action@v31 12 | - name: Building package 13 | run: nix build 14 | -------------------------------------------------------------------------------- /.github/workflows/documentation.yml: -------------------------------------------------------------------------------- 1 | name: Documentation 2 | on: 3 | push: 4 | branches: [ main ] 5 | 6 | jobs: 7 | doc: 8 | name: Documentation 9 | runs-on: ubuntu-latest 10 | environment: 11 | name: github-pages 12 | url: ${{ steps.deployment.outputs.page_url }}/valib 13 | permissions: 14 | id-token: write 15 | pages: write 16 | steps: 17 | - name: Install dependencies 18 | run: | 19 | sudo apt-get update 20 | sudo apt-get install -y libasound2-dev libspa-0.2-dev libpipewire-0.3-dev 21 | - uses: actions/cache@v4 22 | with: 23 | path: | 24 | ~/.cargo/registry/index/ 25 | ~/.cargo/registry/cache/ 26 | ~/.cargo/git/db/ 27 | target/ 28 | key: ${{ matrix.name }}-${{ matrix.cross-target }} 29 | - uses: 
actions/checkout@v4 30 | - name: Fetch all git history 31 | run: git fetch --force --prune --tags --unshallow 32 | - name: Set up Rust toolchain 33 | uses: actions-rs/toolchain@v1 34 | with: 35 | toolchain: 1.86.0 36 | default: true 37 | - name: Build documentation 38 | run: cargo doc --all-features --no-deps 39 | - name: Fix permissions 40 | run: | 41 | chmod -c -R +rX "target/doc" | while read line; do 42 | echo "::warning title=Invalid file permissions automatically fixed::$line" 43 | done 44 | - name: Upload Pages artifact 45 | uses: actions/upload-pages-artifact@v3 46 | with: 47 | path: target/doc 48 | - name: Deploy to GitHub Pages 49 | id: deployment 50 | uses: actions/deploy-pages@v4 # or specific "vX.X.X" version tag for this action 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /.direnv 3 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v3.2.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: end-of-file-fixer 9 | - id: check-yaml 10 | - id: check-added-large-files 11 | - repo: https://github.com/doublify/pre-commit-rust 12 | rev: 6ef59f4af072d8ecd77e54f11e84536a0126cfa3 13 | hooks: 14 | - id: cargo-fmt 15 | - id: cargo-clippy 16 | args: [] 17 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | 
community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 
45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official email address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement on the [Rust Audio Discord](https://discord.gg/qxEJ7YmPZg), 63 | through DMs `@solarliner`, or, least preferred, through e-mail at `me at solarliner dot dev`. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series of 86 | actions.
87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or permanent 93 | ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within the 113 | community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.1, available at 119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 120 | 121 | Community Impact Guidelines were inspired by 122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 123 | 124 | For answers to common questions about this code of conduct, see the FAQ at 125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 126 | [https://www.contributor-covenant.org/translations][translations]. 
127 | 128 | [homepage]: https://www.contributor-covenant.org 129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 130 | [Mozilla CoC]: https://github.com/mozilla/diversity 131 | [FAQ]: https://www.contributor-covenant.org/faq 132 | [translations]: https://www.contributor-covenant.org/translations 133 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to `interflow` 2 | 3 | First off, thank you for considering contributing to this project! It's people like you who make this project better. 4 | 5 | ## Table of Contents 6 | 7 | - [Code of Conduct](#code-of-conduct) 8 | - [How Can I Contribute?](#how-can-i-contribute) 9 | - [Development Process](#development-process) 10 | - [Pull Requests](#pull-requests) 11 | - [Style Guidelines](#style-guidelines) 12 | 13 | ## Code of Conduct 14 | 15 | This project and everyone participating in it is governed by our Code of Conduct. By participating, you are expected to uphold this code. 16 | 17 | ## How Can I Contribute? 18 | 19 | ### Reporting Bugs 20 | 21 | Before creating bug reports, please check the issue list as you might find out that you don't need to create one. When you are creating a bug report, please include as many details as possible: 22 | 23 | * Use a clear and descriptive title 24 | * Describe the exact steps which reproduce the problem 25 | * Provide specific examples to demonstrate the steps 26 | * Describe the behavior you observed after following the steps 27 | * Explain which behavior you expected to see instead and why 28 | * Include any error messages 29 | 30 | ### Suggesting Enhancements 31 | 32 | Enhancement suggestions are tracked as GitHub issues. 
When creating an enhancement suggestion, please include: 33 | 34 | * A clear and descriptive title 35 | * A detailed description of the proposed functionality 36 | * Any possible implementation details 37 | * Why this enhancement would be useful 38 | 39 | ### Pull Requests 40 | 41 | Before starting, try to first open an issue, before starting your work on a PR. If an issue exists, make sure a PR isn't already opened for it. 42 | 43 | 1. Fork the repository 44 | 2. Clone your fork: `git clone https://github.com/solarliner/interflow` 45 | 3. Create a new branch: `git checkout -b your-branch-name`. Branch name should begin with `feature/`, `feat/`, `fix/` or `bugfix/` in order to create folders and organize the branches, 46 | and for this reason, they should be in lowercase (so that we don't end up with `fix` and `Fix` folder on Linux machines!) 47 | 4. Make sure to have the [prerequisites](#prerequisites) below 48 | 5. Make your changes 49 | 6. Run tests: `cargo test` 50 | 7. Submit a PR: the title should be descriptive of the changes in one sentence, and should have the `closes \#NNN` message that tells GitHub to automatically close the related 51 | issue, if there is one (there should, see above). 52 | 53 | **Open your PR early!** You can mark your PR as a draft to signal that it isn't yet ready for review. Having PRs opened early shows what is being worked on and reduces 54 | the duplication of effort. 55 | 56 | #### Prerequisites 57 | 58 | - Rust 1.85.0 or later 59 | - Any supported audio API (or the one you want to add) to be able to run the tests. 
60 | - `pre-commit` installed into the repository 61 | 62 | #### PR opening process 63 | 64 | * Fill in the required template 65 | * Do not include issue numbers in the PR title 66 | * Include screenshots and animated GIFs in your pull request whenever possible 67 | * Follow the Rust style guidelines 68 | * Include tests when adding new features 69 | * Update documentation when changing core functionality 70 | 71 | ## Development Process 72 | 73 | 1. Create a new branch from `main` 74 | 2. Make your changes 75 | 3. Write or update tests as needed 76 | 4. Update documentation as needed 77 | 5. Run `cargo fmt` to format code 78 | 6. Run `cargo clippy` to check for common mistakes 79 | 7. Run `cargo test` for unit tests, and run the examples to make sure they still work 80 | 8. Commit your changes using clear commit messages (see [Git Commit Message](#git-commit-messages)). 81 | 9. Push to your fork 82 | 10. Open a Pull Request 83 | 84 | ## Style Guidelines 85 | 86 | ### Git Commit Messages 87 | 88 | While not enforced, the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) is preferred. 89 | 90 | * Use the present tense ("Add feature" not "Added feature") 91 | * Use the imperative mood ("Move cursor to..." not "Moves cursor to...") 92 | * Limit the first line to 72 characters or fewer 93 | * Reference issues and pull requests liberally after the first line 94 | 95 | ### Rust Code Style 96 | 97 | * Follow the [Rust Style Guide](https://doc.rust-lang.org/1.0.0/style/style/README.html) 98 | * Run `cargo fmt` before committing 99 | * Use `cargo clippy` to catch common mistakes 100 | * Write documentation for public APIs 101 | * Include unit tests for new code 102 | 103 | ## Testing 104 | 105 | * Include tests for new features 106 | * Update tests for bug fixes 107 | * CI will ensure tests pass, however it is good practice to ensure all tests pass locally before pushing. 108 | Make sure to also check out the examples to make sure they work correctly. 
109 | * Write both unit tests and integration tests when applicable 110 | 111 | ## Documentation 112 | 113 | * Keep API documentation in code up to date 114 | * Add examples for new features 115 | 116 | ## Questions? 117 | 118 | Feel free to open an issue with your question or contact the maintainers directly. 119 | 120 | --- 121 | 122 | Thank you for contributing to this project! Your time and effort help make this project better for everyone. 123 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "interflow" 3 | version = "0.1.0" 4 | edition = "2021" 5 | rust-version = "1.80" 6 | license = "MIT" 7 | 8 | [dependencies] 9 | bitflags = "2.9.1" 10 | duplicate = "2.0.0" 11 | fixed-resample = "0.8.0" 12 | libspa = { version = "0.8.0", optional = true } 13 | libspa-sys = { version = "0.8.0", optional = true } 14 | log = { version = "0.4.27", features = ["kv"] } 15 | ndarray = "0.16.1" 16 | oneshot = "0.1.11" 17 | rtrb = "0.3.2" 18 | thiserror = "2.0.12" 19 | zerocopy = { version = "0.8.25", optional = true } 20 | 21 | [dev-dependencies] 22 | anyhow = "1.0.98" 23 | env_logger = "0.11.8" 24 | indicatif = "0.17.11" 25 | 26 | [build-dependencies] 27 | cfg_aliases = "0.2.1" 28 | 29 | [features] 30 | pipewire = ["dep:pipewire", "dep:libspa", "dep:libspa-sys", "dep:zerocopy"] 31 | 32 | [target.'cfg(any(target_os = "linux", target_os = "dragonfly", target_os = "freebsd", target_os = "netbsd"))'.dependencies] 33 | alsa = "0.9.1" 34 | libc = "0.2.172" 35 | nix = "0.30.0" 36 | pipewire = { version = "0.8.0", optional = true } 37 | 38 | [target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies] 39 | coreaudio-rs = "0.12.1" 40 | 41 | [target.'cfg(target_os = "windows")'.dependencies] 42 | windows = { version = "0.61.1", features = [ 43 | "Win32_Media_Audio", 44 | "Win32_Foundation", 45 | "Win32_Devices_Properties", 46 | 
"Win32_Media_KernelStreaming", 47 | "Win32_System_Com_StructuredStorage", 48 | "Win32_System_Threading", 49 | "Win32_Security", 50 | "Win32_System_SystemServices", 51 | "Win32_System_Variant", 52 | "Win32_Media_Multimedia", 53 | "Win32_UI_Shell_PropertiesSystem" 54 | ]} 55 | 56 | [[example]] 57 | name = "enumerate_alsa" 58 | path = "examples/enumerate_alsa.rs" 59 | 60 | [[example]] 61 | name = "enumerate_coreaudio" 62 | path = "examples/enumerate_coreaudio.rs" 63 | 64 | [[example]] 65 | name = "enumerate_wasapi" 66 | path = "examples/enumerate_wasapi.rs" 67 | 68 | [[example]] 69 | name = "enumerate_pipewire" 70 | path = "examples/enumerate_pipewire.rs" 71 | required-features = ["pipewire"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Nathan Graule 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Interflow 2 | 3 | [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md) 4 | ![GitHub branch check runs](https://img.shields.io/github/check-runs/SolarLiner/interflow/main) 5 | ![GitHub issue custom search in repo](https://img.shields.io/github/issues-search/SolarLiner/interflow?query=is%3Aissue%20state%3Aopen&label=issues) 6 | ![Discord](https://img.shields.io/discord/590254806208217089?label=RustAudio%20on%20Discord) 7 | 8 | Interflow is a Rust library that abstracts away platform-specific audio APIs 9 | and provides a unified, opinionated interface for audio applications. It aims 10 | to simplify the development of audio applications by offering seamless support 11 | for duplex audio with separate input and output devices, as well as sample rate 12 | and format conversion. 13 | 14 | ## Features 15 | 16 | - [x] Unified interface for platform-specific audio APIs. 17 | - [x] Support for duplex audio (simultaneous input and output). 18 | - [x] Separate input and output devices. 19 | - [ ] Sample rate conversion. 20 | - [x] Format conversion. 
21 | 22 | ## Supported drivers 23 | 24 | - [x] WASAPI 25 | - [ ] ASIO 26 | - [x] ALSA 27 | - [ ] PulseAudio 28 | - [x] PipeWire 29 | - [ ] JACK 30 | - [x] CoreAudio 31 | 32 | ## Getting Started 33 | 34 | ### Prerequisites 35 | 36 | Ensure you have the following installed on your system: 37 | 38 | - [Rust](https://www.rust-lang.org/tools/install) (1.84 and up supported) 39 | - Platform-specific audio development libraries: 40 | - **Windows**: Ensure you have the Windows SDK installed, and optionally the 41 | ASIO SDK if the `asio` feature is enabled. 42 | - **macOS**: Xcode and its command line tools should be installed. 43 | - **Linux**: Development libraries for ALSA (Advanced Linux Sound 44 | Architecture), PulseAudio, PipeWire, or JACK are only required if their 45 | relevant features are enabled (by default, only `alsa` is). 46 | 47 | ### Building 48 | 49 | `Interflow` uses `cargo` for dependency management and building. 50 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use cfg_aliases::cfg_aliases; 2 | 3 | fn main() { 4 | // Setup cfg aliases 5 | cfg_aliases! 
{ 6 | wasm: { any(target_os = "wasm32") }, 7 | os_alsa: { any(target_os = "linux", target_os = "dragonfly", target_os = "freebsd", 8 | target_os = "netbsd") }, 9 | os_coreaudio: { any (target_os = "macos", target_os = "ios") }, 10 | os_pipewire: { any(target_os = "linux", target_os = "dragonfly", target_os = "freebsd", target_os = "netbsd") }, 11 | os_wasapi: { target_os = "windows" }, 12 | unsupported: { not(any(os_alsa, os_coreaudio, os_wasapi)) } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/duplex.rs: -------------------------------------------------------------------------------- 1 | use crate::util::sine::SineWave; 2 | use anyhow::Result; 3 | use interflow::duplex::AudioDuplexCallback; 4 | use interflow::prelude::*; 5 | 6 | mod util; 7 | 8 | fn main() -> Result<()> { 9 | let input = default_input_device(); 10 | let output = default_output_device(); 11 | let mut input_config = input.default_input_config().unwrap(); 12 | input_config.buffer_size_range = (Some(128), Some(512)); 13 | let mut output_config = output.default_output_config().unwrap(); 14 | output_config.buffer_size_range = (Some(128), Some(512)); 15 | let duplex_config = DuplexStreamConfig::new(input_config, output_config); 16 | let stream = 17 | duplex::create_duplex_stream(input, output, RingMod::new(), duplex_config).unwrap(); 18 | println!("Press Enter to stop"); 19 | std::io::stdin().read_line(&mut String::new())?; 20 | stream.eject().unwrap(); 21 | Ok(()) 22 | } 23 | 24 | struct RingMod { 25 | carrier: SineWave, 26 | } 27 | 28 | impl RingMod { 29 | fn new() -> Self { 30 | Self { 31 | carrier: SineWave::new(440.0), 32 | } 33 | } 34 | } 35 | 36 | impl AudioDuplexCallback for RingMod { 37 | fn on_audio_data( 38 | &mut self, 39 | context: AudioCallbackContext, 40 | input: AudioInput, 41 | mut output: AudioOutput, 42 | ) { 43 | let sr = context.stream_config.samplerate as f32; 44 | for i in 0..output.buffer.num_samples() { 45 | let inp 
= input.buffer.get_frame(i)[0]; 46 | let c = self.carrier.next_sample(sr); 47 | output.buffer.set_mono(i, inp * c); 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /examples/enumerate_alsa.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | #[cfg(os_alsa)] 4 | fn main() -> Result<(), Box> { 5 | use crate::util::enumerate::enumerate_devices; 6 | use interflow::backends::alsa::AlsaDriver; 7 | 8 | env_logger::init(); 9 | 10 | enumerate_devices(AlsaDriver) 11 | } 12 | 13 | #[cfg(not(os_alsa))] 14 | fn main() { 15 | println!("ALSA driver is not available on this platform"); 16 | } 17 | -------------------------------------------------------------------------------- /examples/enumerate_coreaudio.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | #[cfg(os_coreaudio)] 4 | fn main() -> Result<(), Box> { 5 | use crate::util::enumerate::enumerate_devices; 6 | use interflow::backends::coreaudio::CoreAudioDriver; 7 | 8 | enumerate_devices(CoreAudioDriver) 9 | } 10 | 11 | #[cfg(not(os_coreaudio))] 12 | fn main() { 13 | println!("CoreAudio is not available on this platform"); 14 | } 15 | -------------------------------------------------------------------------------- /examples/enumerate_pipewire.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | #[cfg(feature = "pipewire")] 4 | fn main() -> Result<(), Box> { 5 | use crate::util::enumerate::enumerate_devices; 6 | use interflow::backends::pipewire::driver::PipewireDriver; 7 | env_logger::init(); 8 | enumerate_devices(PipewireDriver::new()?)?; 9 | Ok(()) 10 | } 11 | 12 | #[cfg(not(feature = "pipewire"))] 13 | fn main() { 14 | println!("Pipewire feature is not enabled"); 15 | } 16 | -------------------------------------------------------------------------------- /examples/enumerate_wasapi.rs: 
-------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | #[cfg(os_wasapi)] 4 | fn main() -> Result<(), Box> { 5 | use crate::util::enumerate::enumerate_devices; 6 | use interflow::backends::wasapi::WasapiDriver; 7 | enumerate_devices(WasapiDriver) 8 | } 9 | 10 | #[cfg(not(os_wasapi))] 11 | fn main() { 12 | println!("WASAPI driver is not available on this platform"); 13 | } 14 | -------------------------------------------------------------------------------- /examples/input.rs: -------------------------------------------------------------------------------- 1 | use crate::util::meter::PeakMeter; 2 | use crate::util::AtomicF32; 3 | use anyhow::Result; 4 | use interflow::prelude::*; 5 | use std::sync::Arc; 6 | 7 | mod util; 8 | 9 | fn main() -> Result<()> { 10 | env_logger::init(); 11 | 12 | let device = default_input_device(); 13 | let value = Arc::new(AtomicF32::new(0.)); 14 | let stream = device 15 | .default_input_stream(RmsMeter::new(value.clone())) 16 | .unwrap(); 17 | util::display_peakmeter(value)?; 18 | stream.eject().unwrap(); 19 | Ok(()) 20 | } 21 | 22 | struct RmsMeter { 23 | value: Arc, 24 | meter: Option, 25 | } 26 | 27 | impl RmsMeter { 28 | fn new(value: Arc) -> Self { 29 | Self { meter: None, value } 30 | } 31 | } 32 | 33 | impl AudioInputCallback for RmsMeter { 34 | fn on_input_data(&mut self, context: AudioCallbackContext, input: AudioInput) { 35 | let meter = self 36 | .meter 37 | .get_or_insert_with(|| PeakMeter::new(context.stream_config.samplerate as f32, 15.0)); 38 | meter.set_samplerate(context.stream_config.samplerate as f32); 39 | meter.process_buffer(input.buffer.as_ref()); 40 | self.value 41 | .store(meter.value(), std::sync::atomic::Ordering::Relaxed); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/loopback.rs: -------------------------------------------------------------------------------- 1 | use crate::util::meter::PeakMeter; 2 
| use crate::util::AtomicF32; 3 | use anyhow::Result; 4 | use interflow::prelude::*; 5 | use std::sync::Arc; 6 | mod util; 7 | 8 | fn main() -> Result<()> { 9 | env_logger::init(); 10 | 11 | let input = default_input_device(); 12 | let output = default_output_device(); 13 | let mut input_config = input.default_input_config().unwrap(); 14 | input_config.buffer_size_range = (Some(128), Some(512)); 15 | let mut output_config = output.default_output_config().unwrap(); 16 | output_config.buffer_size_range = (Some(128), Some(512)); 17 | input_config.channels = 0b01; 18 | output_config.channels = 0b11; 19 | let value = Arc::new(AtomicF32::new(0.)); 20 | let config = DuplexStreamConfig::new(input_config, output_config); 21 | let stream = 22 | create_duplex_stream(input, output, Loopback::new(44100., value.clone()), config).unwrap(); 23 | util::display_peakmeter(value)?; 24 | stream.eject().unwrap(); 25 | Ok(()) 26 | } 27 | 28 | struct Loopback { 29 | meter: PeakMeter, 30 | value: Arc, 31 | } 32 | 33 | impl Loopback { 34 | fn new(samplerate: f32, value: Arc) -> Self { 35 | Self { 36 | meter: PeakMeter::new(samplerate, 15.0), 37 | value, 38 | } 39 | } 40 | } 41 | 42 | impl AudioDuplexCallback for Loopback { 43 | fn on_audio_data( 44 | &mut self, 45 | context: AudioCallbackContext, 46 | input: AudioInput, 47 | mut output: AudioOutput, 48 | ) { 49 | self.meter 50 | .set_samplerate(context.stream_config.samplerate as f32); 51 | let rms = self.meter.process_buffer(input.buffer.as_ref()); 52 | self.value.store(rms, std::sync::atomic::Ordering::Relaxed); 53 | output.buffer.as_interleaved_mut().fill(0.0); 54 | output.buffer.mix(input.buffer.as_ref(), 1.0); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /examples/sine_wave.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use interflow::prelude::*; 3 | use util::sine::SineWave; 4 | 5 | mod util; 6 | 7 | fn main() -> 
Result<()> { 8 | env_logger::init(); 9 | 10 | let device = default_output_device(); 11 | println!("Using device {}", device.name()); 12 | let stream = device.default_output_stream(SineWave::new(440.0)).unwrap(); 13 | println!("Press Enter to stop"); 14 | std::io::stdin().read_line(&mut String::new())?; 15 | stream.eject().unwrap(); 16 | Ok(()) 17 | } 18 | -------------------------------------------------------------------------------- /examples/util/enumerate.rs: -------------------------------------------------------------------------------- 1 | use interflow::{AudioDevice, AudioDriver, DeviceType}; 2 | use std::error::Error; 3 | 4 | pub fn enumerate_devices(driver: Driver) -> Result<(), Box> 5 | where 6 | ::Error: 'static, 7 | { 8 | eprintln!("Driver name : {}", Driver::DISPLAY_NAME); 9 | eprintln!("Driver version: {}", driver.version()?); 10 | eprintln!("Default device"); 11 | for (s, device_type) in [("Input", DeviceType::INPUT), ("Output", DeviceType::OUTPUT)] { 12 | let device_type = device_type | DeviceType::PHYSICAL; 13 | eprint!("\t{s}:\t"); 14 | if let Some(device) = driver.default_device(device_type)? { 15 | eprintln!("{}", device.name()); 16 | } else { 17 | eprintln!("None"); 18 | } 19 | } 20 | 21 | eprintln!("All devices"); 22 | for device in driver.list_devices()? { 23 | eprintln!("\t{} ({:?})", device.name(), device.device_type()); 24 | } 25 | Ok(()) 26 | } 27 | -------------------------------------------------------------------------------- /examples/util/meter.rs: -------------------------------------------------------------------------------- 1 | use interflow::audio_buffer::AudioRef; 2 | 3 | #[derive(Debug, Copy, Clone)] 4 | pub struct PeakMeter { 5 | last_out: f32, 6 | decay: f32, 7 | dt: f32, 8 | } 9 | 10 | impl PeakMeter { 11 | pub fn new(samplerate: f32, decay: f32) -> Self { 12 | Self { 13 | last_out: 0., 14 | decay, 15 | dt: 1. / samplerate, 16 | } 17 | } 18 | 19 | pub fn samplerate(&self) -> f32 { 20 | 1. 
/ self.dt 21 | } 22 | 23 | pub fn set_samplerate(&mut self, samplerate: f32) { 24 | self.dt = 1. / samplerate; 25 | } 26 | 27 | pub fn decay(&self) -> f32 { 28 | self.decay 29 | } 30 | 31 | pub fn set_decay(&mut self, decay: f32) { 32 | self.decay = decay; 33 | } 34 | 35 | pub fn value(&self) -> f32 { 36 | self.last_out 37 | } 38 | pub fn process(&mut self, sample: f32) -> f32 { 39 | let k = f32::exp(-self.decay * self.dt); 40 | self.last_out = (k * sample).max(self.last_out); 41 | self.last_out 42 | } 43 | 44 | pub fn process_buffer(&mut self, buffer: AudioRef) -> f32 { 45 | let buffer_duration = buffer.num_samples() as f32 * self.dt; 46 | let peak_lin = buffer 47 | .channels() 48 | .flat_map(|ch| ch.iter().copied().max_by(f32::total_cmp)) 49 | .max_by(f32::total_cmp) 50 | .unwrap_or(0.); 51 | self.last_out = peak_lin.max(self.last_out * f32::exp(-self.decay * buffer_duration)); 52 | self.last_out 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /examples/util/mod.rs: -------------------------------------------------------------------------------- 1 | use indicatif::{ProgressBar, ProgressStyle}; 2 | use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; 3 | use std::sync::Arc; 4 | use std::thread; 5 | 6 | pub mod enumerate; 7 | pub mod meter; 8 | pub mod sine; 9 | 10 | #[derive(Debug)] 11 | #[repr(transparent)] 12 | pub struct AtomicF32(AtomicU32); 13 | 14 | impl AtomicF32 { 15 | pub fn new(value: f32) -> Self { 16 | Self(AtomicU32::new(value.to_bits())) 17 | } 18 | 19 | pub fn load(&self, ordering: Ordering) -> f32 { 20 | f32::from_bits(self.0.load(ordering)) 21 | } 22 | 23 | pub fn store(&self, value: f32, ordering: Ordering) { 24 | self.0.store(value.to_bits(), ordering); 25 | } 26 | } 27 | 28 | pub fn display_peakmeter(value: Arc) -> anyhow::Result<()> { 29 | println!("Press Enter to stop"); 30 | let quit = Arc::new(AtomicBool::new(false)); 31 | let handle = thread::spawn({ 32 | let quit = quit.clone(); 33 
| move || { 34 | let progress = ProgressBar::new(100).with_style( 35 | ProgressStyle::default_bar() 36 | .template("{bar:40.green} {msg}") 37 | .unwrap(), 38 | ); 39 | while !quit.load(Ordering::Relaxed) { 40 | let peak_db = 20. * value.load(Ordering::Relaxed).log10(); 41 | let pc = normalize(-60., 6., peak_db); 42 | let pos = if let Some(len) = progress.length() { 43 | pc * len as f32 44 | } else { 45 | progress.set_length(100); 46 | 100. * pc 47 | }; 48 | progress.set_position(pos as _); 49 | progress.set_message(format!("Peak: {peak_db:2.1} dB")); 50 | thread::sleep(std::time::Duration::from_millis(100)); 51 | } 52 | } 53 | }); 54 | thread::spawn(move || { 55 | std::io::stdin().read_line(&mut String::new()).unwrap(); 56 | quit.store(true, std::sync::atomic::Ordering::Relaxed); 57 | }); 58 | handle.join().unwrap(); 59 | Ok(()) 60 | } 61 | 62 | pub fn normalize(min: f32, max: f32, value: f32) -> f32 { 63 | let range = max - min; 64 | (value - min) / range 65 | } 66 | -------------------------------------------------------------------------------- /examples/util/sine.rs: -------------------------------------------------------------------------------- 1 | use interflow::{AudioCallbackContext, AudioOutput, AudioOutputCallback}; 2 | use std::f32::consts::TAU; 3 | 4 | pub struct SineWave { 5 | pub frequency: f32, 6 | pub phase: f32, 7 | } 8 | 9 | impl AudioOutputCallback for SineWave { 10 | fn on_output_data(&mut self, context: AudioCallbackContext, mut output: AudioOutput) { 11 | eprintln!( 12 | "Callback called, timestamp: {:2.3} s", 13 | context.timestamp.as_seconds() 14 | ); 15 | let sr = context.timestamp.samplerate as f32; 16 | for i in 0..output.buffer.num_samples() { 17 | output.buffer.set_mono(i, self.next_sample(sr)); 18 | } 19 | // Reduce amplitude to not blow up speakers and ears 20 | output.buffer.change_amplitude(0.125); 21 | } 22 | } 23 | 24 | impl SineWave { 25 | pub fn new(frequency: f32) -> Self { 26 | Self { 27 | frequency, 28 | phase: 0.0, 29 | } 30 
| } 31 | 32 | pub fn next_sample(&mut self, samplerate: f32) -> f32 { 33 | let step = samplerate.recip() * self.frequency; 34 | let y = (TAU * self.phase).sin(); 35 | self.phase += step; 36 | if self.phase > 1. { 37 | self.phase -= 1.; 38 | } 39 | y 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "naersk": { 4 | "inputs": { 5 | "nixpkgs": "nixpkgs" 6 | }, 7 | "locked": { 8 | "lastModified": 1739824009, 9 | "narHash": "sha256-fcNrCMUWVLMG3gKC5M9CBqVOAnJtyRvGPxptQFl5mVg=", 10 | "owner": "nix-community", 11 | "repo": "naersk", 12 | "rev": "e5130d37369bfa600144c2424270c96f0ef0e11d", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "nix-community", 17 | "ref": "master", 18 | "repo": "naersk", 19 | "type": "github" 20 | } 21 | }, 22 | "nixpkgs": { 23 | "locked": { 24 | "lastModified": 1742738698, 25 | "narHash": "sha256-KCtAXWwQs03JmEhP4ss59QVzT+rHZkhQO85KjNy8Crc=", 26 | "owner": "NixOS", 27 | "repo": "nixpkgs", 28 | "rev": "f3a2a0601e9669a6e38af25b46ce6c4563bcb6da", 29 | "type": "github" 30 | }, 31 | "original": { 32 | "id": "nixpkgs", 33 | "type": "indirect" 34 | } 35 | }, 36 | "nixpkgs_2": { 37 | "locked": { 38 | "lastModified": 1742738698, 39 | "narHash": "sha256-KCtAXWwQs03JmEhP4ss59QVzT+rHZkhQO85KjNy8Crc=", 40 | "owner": "NixOS", 41 | "repo": "nixpkgs", 42 | "rev": "f3a2a0601e9669a6e38af25b46ce6c4563bcb6da", 43 | "type": "github" 44 | }, 45 | "original": { 46 | "owner": "NixOS", 47 | "ref": "nixpkgs-unstable", 48 | "repo": "nixpkgs", 49 | "type": "github" 50 | } 51 | }, 52 | "root": { 53 | "inputs": { 54 | "naersk": "naersk", 55 | "nixpkgs": "nixpkgs_2", 56 | "utils": "utils" 57 | } 58 | }, 59 | "systems": { 60 | "locked": { 61 | "lastModified": 1681028828, 62 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 63 | "owner": "nix-systems", 64 | "repo": "default", 65 | 
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 66 | "type": "github" 67 | }, 68 | "original": { 69 | "owner": "nix-systems", 70 | "repo": "default", 71 | "type": "github" 72 | } 73 | }, 74 | "utils": { 75 | "inputs": { 76 | "systems": "systems" 77 | }, 78 | "locked": { 79 | "lastModified": 1731533236, 80 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 81 | "owner": "numtide", 82 | "repo": "flake-utils", 83 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 84 | "type": "github" 85 | }, 86 | "original": { 87 | "owner": "numtide", 88 | "repo": "flake-utils", 89 | "type": "github" 90 | } 91 | } 92 | }, 93 | "root": "root", 94 | "version": 7 95 | } 96 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs = { 3 | naersk.url = "github:nix-community/naersk/master"; 4 | nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; 5 | utils.url = "github:numtide/flake-utils"; 6 | }; 7 | 8 | outputs = { self, nixpkgs, utils, naersk }: 9 | utils.lib.eachDefaultSystem (system: 10 | let 11 | pkgs = import nixpkgs { inherit system; }; 12 | naersk-lib = pkgs.callPackage naersk { stdenv = pkgs.clangStdenv; }; 13 | nativeBuildInputs = with pkgs; [pkg-config]; 14 | buildInputs = with pkgs; [clangStdenv.cc.libc jack2] ++ pkgs.lib.optionals pkgs.stdenv.isLinux [alsa-lib pipewire]; 15 | LIBCLANG_PATH = with pkgs; "${llvmPackages.libclang.lib}/lib"; 16 | in 17 | { 18 | packages = rec { 19 | interflow = naersk-lib.buildPackage { 20 | pname = "interflow"; 21 | version = "0.1.0"; 22 | src = ./.; 23 | inherit nativeBuildInputs buildInputs LIBCLANG_PATH; 24 | }; 25 | default = interflow; 26 | }; 27 | devShells.default = pkgs.clangStdenv.mkDerivation { 28 | name = "interflow-devshell"; 29 | buildInputs = buildInputs ++ nativeBuildInputs; 30 | inherit LIBCLANG_PATH; 31 | }; 32 | } 33 | ); 34 | } 35 | 
-------------------------------------------------------------------------------- /src/audio_buffer.rs: -------------------------------------------------------------------------------- 1 | //! Audio buffer types and traits for audio data manipulation. 2 | //! 3 | //! This module provides different types of audio buffers optimized for various use cases: 4 | //! 5 | //! - [`AudioBuffer`]: Owned buffer type for standard audio processing 6 | //! - [`AudioRef`]: Immutable reference buffer for reading audio data 7 | //! - [`AudioMut`]: Mutable reference buffer for modifying audio data 8 | //! - [`AudioShared`]: Arc-backed shared buffer for multithreaded access 9 | //! - [`AudioCow`]: Copy-on-write buffer (avoid in audio callbacks) 10 | //! 11 | //! The buffers support both interleaved and non-interleaved data formats and provide 12 | //! convenient methods for: 13 | //! 14 | //! - Accessing individual channels and frames 15 | //! - Slicing and subsetting audio data 16 | //! - Computing audio metrics like RMS 17 | //! - Mixing and amplitude adjustments 18 | //! - Converting between different sample formats 19 | //! 20 | //! The buffers are built on top of ndarray for efficient multidimensional array operations. 21 | 22 | use ndarray::{ 23 | s, Array0, ArrayBase, ArrayView1, ArrayView2, ArrayViewMut1, ArrayViewMut2, AsArray, CowRepr, 24 | Data, DataMut, DataOwned, Ix1, Ix2, OwnedArcRepr, OwnedRepr, RawData, RawDataClone, ViewRepr, 25 | }; 26 | use std::collections::Bound; 27 | use std::fmt; 28 | use std::fmt::Formatter; 29 | use std::ops::{AddAssign, RangeBounds}; 30 | 31 | /// Owned audio buffer type. 32 | pub type AudioBuffer = AudioBufferBase>; 33 | /// Immutably referenced audio buffer type. 34 | pub type AudioRef<'a, T> = AudioBufferBase>; 35 | /// Mutably referenced audio buffer type. 36 | pub type AudioMut<'a, T> = AudioBufferBase>; 37 | /// Arc-backed shared audio buffer type. 38 | pub type AudioShared = AudioBufferBase>; 39 | /// Copy-on-write audio buffer type. 
Should not be used within audio callbacks, as the copy will 40 | /// introduce allocations. 41 | pub type AudioCow<'a, T> = AudioBufferBase>; 42 | 43 | type Storage = ArrayBase; 44 | 45 | /// Audio buffer type, which backs all audio data interfacing with user code. 46 | /// 47 | /// This type is made to make manipulation of audio data easier, and is agnostic in its storage 48 | /// representation, meaning that it can work with both interleaved and non-interleaved data. 49 | /// 50 | /// Audio is stored as "row-per-channel" 51 | pub struct AudioBufferBase { 52 | storage: Storage, 53 | } 54 | 55 | impl fmt::Debug for AudioBufferBase { 56 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 57 | f.debug_struct("AudioBufferBase") 58 | .field( 59 | "storage", 60 | &format!( 61 | "[{}x{} buffer of {}]", 62 | self.storage.nrows(), 63 | self.storage.ncols(), 64 | std::any::type_name::() 65 | ), 66 | ) 67 | .finish_non_exhaustive() 68 | } 69 | } 70 | 71 | impl Clone for AudioBufferBase { 72 | fn clone(&self) -> Self { 73 | Self { 74 | storage: self.storage.clone(), 75 | } 76 | } 77 | } 78 | 79 | impl Copy for AudioBufferBase {} 80 | 81 | impl Default for AudioBufferBase { 82 | fn default() -> Self { 83 | Self { 84 | storage: ArrayBase::from_shape_fn((0, 0), |(_, _)| unreachable!()), 85 | } 86 | } 87 | } 88 | 89 | impl> PartialEq> for AudioBufferBase 90 | where 91 | S::Elem: PartialEq, 92 | { 93 | fn eq(&self, other: &AudioBufferBase) -> bool { 94 | self.storage.shape() == other.storage.shape() 95 | && self.storage.iter().eq(other.storage.iter()) 96 | } 97 | 98 | // Explicitely implementing `fn ne` may yield better performance with the shortcircuiting of the or operator 99 | #[allow(clippy::partialeq_ne_impl)] 100 | fn ne(&self, other: &AudioBufferBase) -> bool { 101 | self.storage.shape() != other.storage.shape() 102 | || self.storage.iter().ne(other.storage.iter()) 103 | } 104 | } 105 | 106 | impl Eq for AudioBufferBase 107 | where 108 | Self: PartialEq, 109 | S::Elem: 
Eq, 110 | { 111 | } 112 | 113 | impl AudioBufferBase { 114 | /// Number of samples present in this buffer. 115 | pub fn num_samples(&self) -> usize { 116 | self.storage.ncols() 117 | } 118 | 119 | /// Number of channels present in this buffer. 120 | pub fn num_channels(&self) -> usize { 121 | self.storage.nrows() 122 | } 123 | } 124 | 125 | impl AudioBufferBase { 126 | /// Return an immutable audio buffer view, sharing the data with this buffer. 127 | pub fn as_ref(&self) -> AudioRef { 128 | AudioRef { 129 | storage: self.storage.view(), 130 | } 131 | } 132 | 133 | /// Slice the contents of this audio buffer, returning an immutable view of this buffer 134 | /// containing only the audio samples at indices within the provided range. 135 | pub fn slice(&self, range: impl RangeBounds) -> AudioRef { 136 | let start = match range.start_bound() { 137 | Bound::Included(i) => *i, 138 | Bound::Excluded(i) => *i + 1, 139 | Bound::Unbounded => 0, 140 | }; 141 | let end = match range.end_bound() { 142 | Bound::Included(i) => *i - 1, 143 | Bound::Excluded(i) => *i, 144 | Bound::Unbounded => self.num_samples(), 145 | }; 146 | let storage = self.storage.slice(s![.., start..end]); 147 | AudioRef { storage } 148 | } 149 | 150 | /// Iterate over non-overlapping chunks of this audio buffer. 151 | pub fn chunks(&self, size: usize) -> impl Iterator> { 152 | let mut i = 0; 153 | std::iter::from_fn(move || { 154 | if i >= self.num_samples() { 155 | return None; 156 | } 157 | let range = i..(i + size).min(self.num_samples()); 158 | i += size; 159 | Some(self.slice(range)) 160 | }) 161 | } 162 | 163 | /// Iterate over non-overlapping chunks of this audio buffer. If the last chunk has a smaller length than the 164 | /// requested size, it will not be yielded. 
165 | pub fn chunks_exact(&self, size: usize) -> impl Iterator> { 166 | let mut i = 0; 167 | std::iter::from_fn(move || { 168 | if i + size >= self.num_samples() { 169 | return None; 170 | } 171 | let range = i..i + size; 172 | i += size; 173 | Some(self.slice(range)) 174 | }) 175 | } 176 | 177 | /// Iterate over overlapping windows of this audio buffer. 178 | /// 179 | /// # Arguments 180 | /// 181 | /// - `size`: Size of the window 182 | pub fn windows(&self, size: usize) -> impl Iterator> { 183 | let mut i = 0; 184 | std::iter::from_fn(move || { 185 | if i + size >= self.num_samples() { 186 | return None; 187 | } 188 | let range = i..(i + size).min(self.num_samples()); 189 | i += 1; 190 | Some(self.slice(range)) 191 | }) 192 | } 193 | 194 | /// Return an immutable view of a single channel. Panics when the requested channel does not 195 | /// exist. 196 | pub fn get_channel(&self, channel: usize) -> ArrayView1 { 197 | self.storage.row(channel) 198 | } 199 | 200 | /// Return an iterator of immutable views of the channels present in this audio buffer. 201 | pub fn channels(&self) -> impl '_ + Iterator> { 202 | self.storage.rows().into_iter() 203 | } 204 | 205 | /// Get a single frame, that is all channels at the specified sample index. Panics when the 206 | /// sample is out of range. 207 | pub fn get_frame(&self, sample: usize) -> ArrayView1 { 208 | self.storage.column(sample) 209 | } 210 | 211 | /// Return an immutable interleaved 2-D array view, where samples are in rows and channels are 212 | /// in columns. 213 | pub fn as_interleaved(&self) -> ArrayView2 { 214 | self.storage.t() 215 | } 216 | 217 | /// Copies this audio buffer to another, giving you a unique owned buffer in the end. 218 | /// 219 | /// Not realtime-safe. 
220 | pub fn to_owned(&self) -> AudioBuffer 221 | where 222 | S::Elem: Clone, 223 | { 224 | AudioBuffer { 225 | storage: self.storage.to_owned(), 226 | } 227 | } 228 | 229 | /// Copies audio data in this buffer to the provided interleaved buffer. The `output` buffer 230 | /// must represent an interleaved buffer with the same number of channels and same number of 231 | /// samples. 232 | #[must_use] 233 | pub fn copy_into_interleaved(&self, output: &mut [S::Elem]) -> bool 234 | where 235 | S::Elem: Copy, 236 | { 237 | if output.len() != self.storage.len() { 238 | return false; 239 | } 240 | 241 | for (inp, out) in self.as_interleaved().iter().zip(output.iter_mut()) { 242 | *out = *inp; 243 | } 244 | true 245 | } 246 | } 247 | 248 | impl AudioBufferBase { 249 | /// Return a mutable audio buffer view. 250 | pub fn as_mut(&mut self) -> AudioMut { 251 | AudioMut { 252 | storage: self.storage.view_mut(), 253 | } 254 | } 255 | 256 | /// Slice the contents of this audio buffer, returning a mutable view of this buffer 257 | /// containing only the audio samples at indices within the provided range. 258 | pub fn slice_mut(&mut self, range: impl RangeBounds) -> AudioMut { 259 | let start = match range.start_bound() { 260 | Bound::Included(i) => *i, 261 | Bound::Excluded(i) => *i + 1, 262 | Bound::Unbounded => 0, 263 | }; 264 | let end = match range.end_bound() { 265 | Bound::Included(i) => *i - 1, 266 | Bound::Excluded(i) => *i, 267 | Bound::Unbounded => self.num_samples(), 268 | }; 269 | let storage = self.storage.slice_mut(s![.., start..end]); 270 | AudioMut { storage } 271 | } 272 | 273 | /// Return a mutable view of a single channel. Panics when the requested channel does not 274 | /// exist. 275 | pub fn get_channel_mut(&mut self, channel: usize) -> ArrayViewMut1 { 276 | self.storage.row_mut(channel) 277 | } 278 | 279 | /// Return an iterator of mutable views of the channels present in this audio buffer. 
280 | pub fn channels_mut(&mut self) -> impl '_ + Iterator> { 281 | self.storage.rows_mut().into_iter() 282 | } 283 | /// Return a mutable interleaved 2-D array view, where samples are in rows and channels are in 284 | /// columns. 285 | pub fn as_interleaved_mut(&mut self) -> ArrayViewMut2 { 286 | self.storage.view_mut().reversed_axes() 287 | } 288 | 289 | /// Copies audio data into this buffer from the provided interleaved audio buffer. The `output` buffer 290 | /// must represent an interleaved buffer with the same number of channels and same number of 291 | /// samples. 292 | #[must_use] 293 | pub fn copy_from_interleaved(&mut self, input: &[S::Elem]) -> bool 294 | where 295 | S::Elem: Copy, 296 | { 297 | if input.len() != self.storage.len() { 298 | return false; 299 | } 300 | 301 | for (out, inp) in self.as_interleaved_mut().iter_mut().zip(input.iter()) { 302 | *out = *inp; 303 | } 304 | true 305 | } 306 | } 307 | 308 | impl AudioBufferBase { 309 | /// Create a new audio buffer with the provided number of channels and sample size, filling 310 | /// it with the provided fill function. 311 | /// 312 | /// Not realtime-safe. 313 | pub fn fill_with( 314 | channels: usize, 315 | sample_size: usize, 316 | fill: impl Fn(usize, usize) -> S::Elem, 317 | ) -> Self { 318 | let storage = Storage::from_shape_fn((channels, sample_size), |(ch, i)| fill(ch, i)); 319 | Self { storage } 320 | } 321 | 322 | /// Create a new audio buffer with the provided number of channels and sample size, filling 323 | /// it with the provided value. 324 | pub fn fill(channels: usize, sample_size: usize, value: S::Elem) -> Self 325 | where 326 | S::Elem: Copy, 327 | { 328 | Self::fill_with(channels, sample_size, |_, _| value) 329 | } 330 | 331 | /// Create a new audio buffer with the provided number of channels and sample size, filling 332 | /// it with the [`Default`] value of that type. 
333 | pub fn defaulted(channels: usize, sample_size: usize) -> Self 334 | where 335 | S::Elem: Default, 336 | { 337 | Self::fill_with(channels, sample_size, |_, _| S::Elem::default()) 338 | } 339 | } 340 | 341 | impl<'a, T: 'a> AudioRef<'a, T> 342 | where 343 | ViewRepr<&'a T>: Sized, 344 | { 345 | /// Create an audio buffer reference from interleaved data. This does *not* copy the data, 346 | /// but creates a view over it, so that it can be accessed as any other audio buffer. 347 | pub fn from_interleaved(data: &'a [T], channels: usize) -> Option { 348 | let buffer_size = data.len() / channels; 349 | let raw = ArrayView2::from_shape((buffer_size, channels), data).ok()?; 350 | let storage = raw.reversed_axes(); 351 | Some(Self { storage }) 352 | } 353 | 354 | pub fn from_noninterleaved(data: &'a [T], channels: usize) -> Option { 355 | let buffer_size = data.len() / channels; 356 | let storage = ArrayView2::from_shape((channels, buffer_size), data).ok()?; 357 | Some(Self { storage }) 358 | } 359 | } 360 | 361 | impl<'a, T: 'a> AudioMut<'a, T> { 362 | /// Create an audio buffer mutable reference from interleaved data. This does *not* copy the 363 | /// data, but creates a view over it, so that it can be accessed as any other audio buffer. 364 | /// 365 | /// Writes to the resulting buffer directly map to the provided slice, and asking an 366 | /// interleaved view out of the resulting buffer (with [`AudioBufferBase::as_interleaved`]) 367 | /// means the same slice is returned. This makes for efficient copying between different 368 | /// interleaved buffers, even though a non-interleaved interface. 
369 | pub fn from_interleaved_mut(data: &'a mut [T], channels: usize) -> Option { 370 | let buffer_size = data.len() / channels; 371 | let raw = ArrayViewMut2::from_shape((buffer_size, channels), data).ok()?; 372 | let storage = raw.reversed_axes(); 373 | Some(Self { storage }) 374 | } 375 | 376 | pub fn from_noninterleaved_mut(data: &'a mut [T], channels: usize) -> Option { 377 | let buffer_size = data.len() / channels; 378 | let storage = ArrayViewMut2::from_shape((channels, buffer_size), data).ok()?; 379 | Some(Self { storage }) 380 | } 381 | } 382 | 383 | impl AudioBufferBase 384 | where 385 | S::Elem: Clone, 386 | { 387 | /// Returns a mutable view over each channel of the frame at the given index. 388 | /// 389 | /// # Arguments 390 | /// 391 | /// * `sample`: Sample index for the frame to return. 392 | /// 393 | /// # Panics 394 | /// 395 | /// Panics if the sample index is out of range. 396 | /// 397 | /// returns: ArrayBase::Elem>, Dim<[usize; 1]>> 398 | pub fn get_frame_mut(&mut self, sample: usize) -> ArrayViewMut1 { 399 | self.storage.column_mut(sample) 400 | } 401 | 402 | /// Sets audio data of a single frame, that is all channels at the specified sample index. 403 | /// Panics when the sample is out of range. 404 | pub fn set_frame<'a>(&mut self, sample: usize, data: impl AsArray<'a, S::Elem, Ix1>) 405 | where 406 | S::Elem: 'a, 407 | { 408 | let column = self.storage.column_mut(sample); 409 | data.into().assign_to(column); 410 | } 411 | 412 | /// Sets audio data of a single sample, copying the provided value to each channel at that 413 | /// sample index. Panics when the sample index is out of range. 414 | pub fn set_mono(&mut self, i: usize, value: S::Elem) { 415 | Array0::from_elem([], value) 416 | .broadcast((self.num_channels(),)) 417 | .unwrap() 418 | .assign_to(self.storage.column_mut(i)) 419 | } 420 | } 421 | 422 | /// Trait for sample types. Typical sample types can be `i32`, `f32`, etc. but more can be 423 | /// implemented downstream. 
424 | pub trait Sample: Copy { 425 | /// Floating-point type which can fit all or a big majority of this type's values. 426 | /// This type is the type used in float conversions, as well as the type of the amplitude in 427 | /// buffer amplitude operations. 428 | type Float: Copy; 429 | /// Zero value for this sample. This is *not specifically* the numerical zero of the type, 430 | /// but the value for which the amplitude of the stream is zero. Unsigned types are an 431 | /// example for which the two are different. 432 | const ZERO: Self; 433 | 434 | /// Construct a sample of this type from the corresponding float signal value. 435 | fn from_float(f: Self::Float) -> Self; 436 | 437 | /// Compute the RMS value out of an iterator of this type. 438 | fn rms(it: impl Iterator) -> Self::Float; 439 | 440 | /// Convert this value into its floating point equivalent. 441 | fn into_float(self) -> Self::Float; 442 | 443 | /// Change the "amplitude" of this value, ie. absolute values less than one will bring the 444 | /// value closer to [`Self::ZERO`], whereas absolute values above one will move the value 445 | /// further away. 
fn change_amplitude(&mut self, amp: Self::Float);
}

#[duplicate::duplicate_item(
    ty fty;
    [i8] [f32];
    [i16] [f32];
    [i32] [f32];
    [i64] [f64];
)]
impl Sample for ty {
    type Float = fty;
    const ZERO: Self = 0;

    fn from_float(f: Self::Float) -> Self {
        (f * ty::MAX as fty) as ty
    }
    fn rms(it: impl Iterator) -> Self::Float {
        // Streaming mean of the squared samples. `i` counts how many samples
        // have already been folded into the accumulator `a`. It must start at
        // 1.0: `Iterator::reduce` seeds the accumulator with the *first*
        // element, which is already a mean over one sample. Starting at 0.0
        // gives the first sample zero weight and skews every subsequent
        // running average.
        let mut i = 1.0;
        it.map(|t| t.into_float().powi(2))
            .reduce(|a, b| {
                let res = a * i / (i + 1.0) + b / (i + 1.0);
                i += 1.0;
                res
            })
            .unwrap_or(0.0)
            .sqrt()
    }

    fn into_float(self) -> Self::Float {
        self as fty / ty::MAX as fty
    }
    fn change_amplitude(&mut self, amp: Self::Float) {
        *self = ((*self as fty) * amp) as Self;
    }
}

#[duplicate::duplicate_item(
    ty fty;
    [u8] [f32];
    [u16] [f32];
    [u32] [f32];
    [u64] [f64];
)]
impl Sample for ty {
    type Float = fty;
    // The zero-amplitude point of an unsigned sample type is the midpoint of
    // its range, not the numeric zero.
    const ZERO: Self = 1 + Self::MAX / 2;

    fn from_float(f: Self::Float) -> Self {
        ((f * 0.5 + 0.5) * (Self::MAX as Self::Float + 1.0)) as Self
    }

    fn rms(it: impl Iterator) -> Self::Float {
        // Same streaming mean-of-squares as the signed implementation in this
        // file; `i` starts at 1.0 because the accumulator is seeded with the
        // first element.
        let mut i = 1.0;
        it.map(|t| t.into_float().powi(2))
            .reduce(|a, b| {
                let res = a * i / (i + 1.0) + b / (i + 1.0);
                i += 1.0;
                res
            })
            .unwrap_or(0.0)
            .sqrt()
    }

    fn into_float(self) -> Self::Float {
        let t = self as Self::Float / (Self::MAX as Self::Float + 1.0);
        t * 2.0 - 1.0
    }

    fn change_amplitude(&mut self, amp: Self::Float) {
        let f = Self::into_float(*self) * amp;
        *self = Self::from_float(f)
    }
}

#[duplicate::duplicate_item(
    ty;
    [f32];
    [f64];
)]
impl Sample for ty {
    type Float = Self;
    const ZERO: Self = 0.0;

    fn from_float(f: Self::Float)
-> Self {
        f
    }

    fn rms(it: impl Iterator) -> Self::Float {
        // Streaming mean of the squared samples. `i` counts how many samples
        // have already been folded into the accumulator `a`. It must start at
        // 1.0: `Iterator::reduce` seeds the accumulator with the *first*
        // element, which is already a mean over one sample. Starting at 0.0
        // gives the first sample zero weight and skews every subsequent
        // running average.
        let mut i = 1.0;
        it.map(|t| t.powi(2))
            .reduce(|a, b| {
                let res = a * i / (i + 1.0) + b / (i + 1.0);
                i += 1.0;
                res
            })
            .unwrap_or(0.0)
            .sqrt()
    }

    fn into_float(self) -> Self::Float {
        self
    }

    fn change_amplitude(&mut self, amp: Self::Float) {
        *self *= amp;
    }
}

impl AudioBuffer {
    /// Construct a zeroed buffer with the provided channels and sample size.
    ///
    /// Not realtime-safe.
    pub fn zeroed(channels: usize, sample_size: usize) -> Self {
        Self::fill(channels, sample_size, T::ZERO)
    }
}

impl AudioBufferBase
where
    S::Elem: Sample,
{
    /// Compute the RMS (Root Mean Square) value of this entire buffer, all channels considered
    /// equally. The result is given in terms of linear amplitude values, as a float determined by
    /// [`S::Float`].
    ///
    /// You can convert the result to decibels with the formula `20. * rms.log10()`.
    pub fn rms(&self) -> ::Float {
        S::Elem::rms(self.storage.iter().copied())
    }

    /// Compute the RMS (Root Mean Square) value of this entire buffer for a single channel. The
    /// result is given in terms of linear amplitude values, as a float determined by [`S::Float`].
    ///
    /// You can convert the result to decibels with the formula `20. * rms.log10()`.
    pub fn channel_rms(&self, channel: usize) -> ::Float {
        S::Elem::rms(self.storage.column(channel).iter().copied())
    }
}

impl> AudioBufferBase {
    /// Change the amplitude of this buffer by the provided amplitude.
    ///
    /// See [`Sample::change_amplitude`] for more details.
590 | pub fn change_amplitude(&mut self, amplitude: ::Float) { 591 | for s in self.storage.iter_mut() { 592 | s.change_amplitude(amplitude); 593 | } 594 | } 595 | 596 | /// Mix a buffer into this buffer at the specified amplitude. The audio will be mixed into 597 | /// this buffer as a result, and the other buffer's amplitude will be changed similarly to 598 | /// applying [`Self::change_amplitude`] first. 599 | pub fn mix(&mut self, other: AudioRef, other_amplitude: ::Float) 600 | where 601 | S::Elem: AddAssign, 602 | { 603 | for (mut ch_a, ch_b) in self.channels_mut().zip(other.channels()) { 604 | for (a, b) in ch_a.iter_mut().zip(ch_b) { 605 | let mut b = *b; 606 | b.change_amplitude(other_amplitude); 607 | *a += b; 608 | } 609 | } 610 | } 611 | } 612 | 613 | #[cfg(test)] 614 | mod tests { 615 | use super::*; 616 | use ndarray::ArrayView1; 617 | 618 | fn create_test_buffer() -> AudioBuffer { 619 | AudioBuffer::fill_with(2, 4, |ch, i| (ch * 4 + i) as f32) 620 | } 621 | 622 | #[test] 623 | fn test_buffer_creation() { 624 | let buf = create_test_buffer(); 625 | assert_eq!(buf.num_channels(), 2); 626 | assert_eq!(buf.num_samples(), 4); 627 | 628 | // Verify sample values 629 | assert_eq!(buf.get_channel(0).to_vec(), vec![0.0, 1.0, 2.0, 3.0]); 630 | assert_eq!(buf.get_channel(1).to_vec(), vec![4.0, 5.0, 6.0, 7.0]); 631 | } 632 | 633 | #[test] 634 | fn test_buffer_views() { 635 | let mut buf = create_test_buffer(); 636 | 637 | // Test immutable slice 638 | let slice = buf.slice(1..3); 639 | assert_eq!(slice.num_samples(), 2); 640 | assert_eq!(slice.get_channel(0).to_vec(), vec![1.0, 2.0]); 641 | 642 | // Test mutable slice 643 | let mut slice = buf.slice_mut(1..3); 644 | slice.get_channel_mut(0).fill(10.0); 645 | assert_eq!(buf.get_channel(0).to_vec(), vec![0.0, 10.0, 10.0, 3.0]); 646 | } 647 | 648 | #[test] 649 | fn test_sample_conversions() { 650 | // Test i16 <-> f32 conversion 651 | assert_eq!(i16::from_float(0.5), 16383); 652 | assert!((i16::MAX.into_float() - 
1.0).abs() < f32::EPSILON); 653 | 654 | // Test u8 <-> f32 conversion 655 | assert_eq!(u8::from_float(0.0), 128); 656 | assert!((u8::ZERO.into_float()).abs() < f32::EPSILON); 657 | } 658 | 659 | #[test] 660 | fn test_rms() { 661 | let mut buf = AudioBuffer::::zeroed(1, 4); 662 | buf.get_channel_mut(0) 663 | .assign(&ArrayView1::from(&[1.0, -1.0, 1.0, -1.0])); 664 | let rms = buf.rms(); 665 | assert!((rms - 1.0).abs() < f32::EPSILON, "RMS is incorrect: {rms}"); 666 | } 667 | 668 | #[test] 669 | fn test_mixing() { 670 | let mut buf1 = AudioBuffer::::fill(1, 4, 1.0); 671 | let buf2 = AudioBuffer::::fill(1, 4, 0.5); 672 | 673 | buf1.mix(buf2.as_ref(), 2.0); 674 | assert_eq!(buf1.get_channel(0).to_vec(), vec![2.0, 2.0, 2.0, 2.0]); 675 | } 676 | 677 | #[test] 678 | fn test_interleaved() { 679 | let data = vec![1.0f32, 2.0, 3.0, 4.0]; 680 | let buf = AudioRef::from_interleaved(&data, 2).unwrap(); 681 | 682 | assert_eq!(buf.num_channels(), 2); 683 | assert_eq!(buf.num_samples(), 2); 684 | assert_eq!(buf.get_channel(0).to_vec(), vec![1.0, 3.0]); 685 | assert_eq!(buf.get_channel(1).to_vec(), vec![2.0, 4.0]); 686 | 687 | let mut out = vec![0.0f32; 4]; 688 | assert!(buf.copy_into_interleaved(&mut out)); 689 | assert_eq!(out, data); 690 | } 691 | } 692 | -------------------------------------------------------------------------------- /src/backends/alsa/device.rs: -------------------------------------------------------------------------------- 1 | use crate::backends::alsa::stream::AlsaStream; 2 | use crate::backends::alsa::AlsaError; 3 | use crate::{ 4 | AudioDevice, AudioInputCallback, AudioInputDevice, AudioOutputCallback, AudioOutputDevice, 5 | Channel, DeviceType, StreamConfig, 6 | }; 7 | use alsa::{pcm, PCM}; 8 | use std::borrow::Cow; 9 | use std::fmt; 10 | use std::rc::Rc; 11 | 12 | /// Type of ALSA devices. 
13 | #[derive(Clone)] 14 | pub struct AlsaDevice { 15 | pub(super) pcm: Rc, 16 | pub(super) name: String, 17 | pub(super) direction: alsa::Direction, 18 | } 19 | 20 | impl fmt::Debug for AlsaDevice { 21 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 22 | f.debug_struct("AlsaDevice") 23 | .field("name", &self.name) 24 | .field("direction", &format!("{:?}", self.direction)) 25 | .finish_non_exhaustive() 26 | } 27 | } 28 | 29 | impl AudioDevice for AlsaDevice { 30 | type Error = AlsaError; 31 | 32 | fn name(&self) -> Cow { 33 | Cow::Borrowed(self.name.as_str()) 34 | } 35 | 36 | fn device_type(&self) -> DeviceType { 37 | match self.direction { 38 | alsa::Direction::Capture => DeviceType::PHYSICAL | DeviceType::INPUT, 39 | alsa::Direction::Playback => DeviceType::PHYSICAL | DeviceType::OUTPUT, 40 | } 41 | } 42 | 43 | fn channel_map(&self) -> impl IntoIterator { 44 | [] 45 | } 46 | 47 | fn is_config_supported(&self, config: &StreamConfig) -> bool { 48 | self.get_hwp(config) 49 | .inspect_err(|err| { 50 | log::debug!("{config:#?}"); 51 | log::debug!("Configuration unsupported: {err}"); 52 | }) 53 | .is_ok() 54 | } 55 | 56 | fn enumerate_configurations(&self) -> Option> { 57 | log::info!("TODO: enumerate configurations"); 58 | None::<[StreamConfig; 0]> 59 | } 60 | } 61 | 62 | impl AudioInputDevice for AlsaDevice { 63 | type StreamHandle = AlsaStream; 64 | 65 | fn default_input_config(&self) -> Result { 66 | self.default_config() 67 | } 68 | 69 | fn create_input_stream( 70 | &self, 71 | stream_config: StreamConfig, 72 | callback: Callback, 73 | ) -> Result, Self::Error> { 74 | AlsaStream::new_input(self.name.clone(), stream_config, callback) 75 | } 76 | } 77 | 78 | impl AudioOutputDevice for AlsaDevice { 79 | type StreamHandle = AlsaStream; 80 | 81 | fn default_output_config(&self) -> Result { 82 | self.default_config() 83 | } 84 | 85 | fn create_output_stream( 86 | &self, 87 | stream_config: StreamConfig, 88 | callback: Callback, 89 | ) -> Result, 
Self::Error> { 90 | AlsaStream::new_output(self.name.clone(), stream_config, callback) 91 | } 92 | } 93 | 94 | impl AlsaDevice { 95 | /// Shortcut constructor for getting ALSA devices directly. 96 | pub fn default_device(direction: alsa::Direction) -> Result, alsa::Error> { 97 | let pcm = Rc::new(PCM::new("default", direction, true)?); 98 | Ok(Some(Self { 99 | pcm, 100 | direction, 101 | name: "default".to_string(), 102 | })) 103 | } 104 | 105 | pub(super) fn new(name: &str, direction: alsa::Direction) -> Result { 106 | let pcm = PCM::new(name, direction, true)?; 107 | let pcm = Rc::new(pcm); 108 | Ok(Self { 109 | name: name.to_string(), 110 | direction, 111 | pcm, 112 | }) 113 | } 114 | 115 | fn get_hwp(&self, config: &StreamConfig) -> Result { 116 | let hwp = pcm::HwParams::any(&self.pcm)?; 117 | hwp.set_channels(config.channels as _)?; 118 | hwp.set_rate(config.samplerate as _, alsa::ValueOr::Nearest)?; 119 | if let Some(min) = config.buffer_size_range.0 { 120 | hwp.set_buffer_size_min(min as _)?; 121 | } 122 | if let Some(max) = config.buffer_size_range.1 { 123 | hwp.set_buffer_size_max(max as _)?; 124 | } 125 | hwp.set_format(pcm::Format::float())?; 126 | hwp.set_access(pcm::Access::RWInterleaved)?; 127 | Ok(hwp) 128 | } 129 | 130 | pub(super) fn apply_config( 131 | &self, 132 | config: &StreamConfig, 133 | ) -> Result<(pcm::HwParams, pcm::SwParams, pcm::IO), alsa::Error> { 134 | let hwp = self.get_hwp(config)?; 135 | self.pcm.hw_params(&hwp)?; 136 | let io = self.pcm.io_f32()?; 137 | let hwp = self.pcm.hw_params_current()?; 138 | let swp = self.pcm.sw_params_current()?; 139 | 140 | log::debug!("Apply config: hwp {hwp:#?}"); 141 | 142 | swp.set_start_threshold(hwp.get_buffer_size()?)?; 143 | self.pcm.sw_params(&swp)?; 144 | log::debug!("Apply config: swp {swp:#?}"); 145 | 146 | Ok((hwp, swp, io)) 147 | } 148 | 149 | fn default_config(&self) -> Result { 150 | let samplerate = 48e3; // Default ALSA sample rate 151 | let channel_count = 2; // Stereo stream 152 | 
let channels = (1 << channel_count) - 1; 153 | Ok(StreamConfig { 154 | samplerate: samplerate as _, 155 | channels, 156 | buffer_size_range: (None, None), 157 | exclusive: false, 158 | }) 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /src/backends/alsa/input.rs: -------------------------------------------------------------------------------- 1 | use crate::audio_buffer::AudioRef; 2 | use crate::backends::alsa::stream::AlsaStream; 3 | use crate::backends::alsa::AlsaError; 4 | use crate::prelude::alsa::device::AlsaDevice; 5 | use crate::{AudioCallbackContext, AudioInput, AudioInputCallback, StreamConfig}; 6 | 7 | impl AlsaStream { 8 | pub(super) fn new_input( 9 | name: String, 10 | stream_config: StreamConfig, 11 | callback: Callback, 12 | ) -> Result { 13 | Self::new_generic( 14 | stream_config, 15 | move || AlsaDevice::new(&name, alsa::Direction::Capture), 16 | callback, 17 | move |ctx, recover| { 18 | if let Err(err) = ctx.io.readi(&mut ctx.buffer[..]) { 19 | recover(err)?; 20 | } 21 | let buffer = AudioRef::from_interleaved(ctx.buffer, ctx.num_channels).unwrap(); 22 | let context = AudioCallbackContext { 23 | stream_config: *ctx.config, 24 | timestamp: *ctx.timestamp, 25 | }; 26 | let input = AudioInput { 27 | buffer, 28 | timestamp: *ctx.timestamp, 29 | }; 30 | ctx.callback.on_input_data(context, input); 31 | *ctx.timestamp += ctx.num_frames as u64; 32 | Ok(()) 33 | }, 34 | ) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/backends/alsa/mod.rs: -------------------------------------------------------------------------------- 1 | //! # ALSA backend 2 | //! 3 | //! ALSA is a generally available driver for Linux and BSD systems. It is the oldest of the Linux 4 | //! drivers supported in this library, and as such makes it a good fallback driver. Newer drivers 5 | //! 
(PulseAudio, PipeWire) offer ALSA-compatible APIs so that older software can still access the 6 | //! audio devices through them. 7 | 8 | use crate::{AudioDriver, DeviceType}; 9 | use alsa::device_name::HintIter; 10 | use device::AlsaDevice; 11 | use std::borrow::Cow; 12 | use thiserror::Error; 13 | 14 | mod device; 15 | mod input; 16 | mod output; 17 | mod stream; 18 | mod triggerfd; 19 | 20 | /// Type of errors from using the ALSA backend. 21 | #[derive(Debug, Error)] 22 | #[error("ALSA error: ")] 23 | pub enum AlsaError { 24 | /// Error originates from ALSA itself. 25 | #[error("{0}")] 26 | BackendError(#[from] alsa::Error), 27 | /// Error originates from I/O operations. 28 | #[error("I/O error: {0}")] 29 | IoError(#[from] nix::Error), 30 | } 31 | 32 | /// ALSA driver type. ALSA is statically available without client configuration, therefore this type 33 | /// is zero-sized. 34 | #[derive(Debug, Clone, Default)] 35 | pub struct AlsaDriver; 36 | 37 | impl AudioDriver for AlsaDriver { 38 | type Error = AlsaError; 39 | type Device = AlsaDevice; 40 | 41 | const DISPLAY_NAME: &'static str = "ALSA"; 42 | 43 | fn version(&self) -> Result, Self::Error> { 44 | Ok(Cow::Borrowed("unknown")) 45 | } 46 | 47 | fn default_device(&self, device_type: DeviceType) -> Result, Self::Error> { 48 | let direction = match device_type { 49 | _ if device_type.is_input() => alsa::Direction::Capture, 50 | _ if device_type.is_output() => alsa::Direction::Playback, 51 | _ => return Ok(None), 52 | }; 53 | Ok(AlsaDevice::default_device(direction)?) 54 | } 55 | 56 | fn list_devices(&self) -> Result, Self::Error> { 57 | Ok(HintIter::new(None, c"pcm")? 
58 | .filter_map(|hint| AlsaDevice::new(hint.name.as_ref()?, hint.direction?).ok())) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/backends/alsa/output.rs: -------------------------------------------------------------------------------- 1 | use crate::audio_buffer::AudioMut; 2 | use crate::backends::alsa::stream::AlsaStream; 3 | use crate::backends::alsa::AlsaError; 4 | use crate::prelude::alsa::device::AlsaDevice; 5 | use crate::{AudioCallbackContext, AudioOutput, AudioOutputCallback, StreamConfig}; 6 | 7 | impl AlsaStream { 8 | pub(super) fn new_output( 9 | name: String, 10 | stream_config: StreamConfig, 11 | callback: Callback, 12 | ) -> Result { 13 | Self::new_generic( 14 | stream_config, 15 | move || AlsaDevice::new(&name, alsa::Direction::Playback), 16 | callback, 17 | move |ctx, recover| { 18 | let context = AudioCallbackContext { 19 | stream_config, 20 | timestamp: *ctx.timestamp, 21 | }; 22 | let input = AudioOutput { 23 | buffer: AudioMut::from_interleaved_mut(&mut ctx.buffer[..], ctx.num_channels) 24 | .unwrap(), 25 | timestamp: *ctx.timestamp, 26 | }; 27 | ctx.callback.on_output_data(context, input); 28 | *ctx.timestamp += ctx.num_frames as u64; 29 | if let Err(err) = ctx.io.writei(&ctx.buffer[..]) { 30 | recover(err)?; 31 | } 32 | Ok(()) 33 | }, 34 | ) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/backends/alsa/stream.rs: -------------------------------------------------------------------------------- 1 | use crate::backends::alsa::device::AlsaDevice; 2 | use crate::backends::alsa::{triggerfd, AlsaError}; 3 | use crate::channel_map::{Bitset, ChannelMap32}; 4 | use crate::timestamp::Timestamp; 5 | use crate::{AudioStreamHandle, StreamConfig}; 6 | use alsa::pcm; 7 | use alsa::PollDescriptors; 8 | use std::sync::Arc; 9 | use std::thread::JoinHandle; 10 | use std::time::Duration; 11 | 12 | /// Type of ALSA streams. 
13 | /// 14 | /// The audio stream implementation relies on the synchronous API for now, as the [`alsa`] crate 15 | /// does not seem to wrap the asynchronous API as of now. A separate I/O thread is spawned when 16 | /// creating a stream, and is stopped when caling [`AudioInputDevice::eject`] / 17 | /// [`AudioOutputDevice::eject`]. 18 | pub struct AlsaStream { 19 | pub(super) eject_trigger: Arc, 20 | pub(super) join_handle: JoinHandle>, 21 | } 22 | 23 | impl AudioStreamHandle for AlsaStream { 24 | type Error = AlsaError; 25 | 26 | fn eject(self) -> Result { 27 | self.eject_trigger.trigger()?; 28 | self.join_handle.join().unwrap() 29 | } 30 | } 31 | 32 | impl AlsaStream { 33 | pub(super) fn new_generic( 34 | stream_config: StreamConfig, 35 | device: impl 'static + Send + FnOnce() -> Result, 36 | mut callback: Callback, 37 | loop_callback: impl 'static 38 | + Send 39 | + Fn( 40 | StreamContext, 41 | &dyn Fn(alsa::Error) -> Result<(), alsa::Error>, 42 | ) -> Result<(), alsa::Error>, 43 | ) -> Result { 44 | let (tx, rx) = triggerfd::trigger()?; 45 | let join_handle = std::thread::spawn({ 46 | move || { 47 | let device = device()?; 48 | let recover = |err| device.pcm.try_recover(err, true); 49 | let mut poll_descriptors = { 50 | let mut buf = vec![rx.as_pollfd()]; 51 | let num_descriptors = device.pcm.count(); 52 | buf.extend( 53 | std::iter::repeat(libc::pollfd { 54 | fd: 0, 55 | events: 0, 56 | revents: 0, 57 | }) 58 | .take(num_descriptors), 59 | ); 60 | buf 61 | }; 62 | let (hwp, _, io) = device.apply_config(&stream_config)?; 63 | let (_, period_size) = device.pcm.get_params()?; 64 | let period_size = period_size as usize; 65 | log::info!("Period size : {period_size}"); 66 | let num_channels = hwp.get_channels()? as usize; 67 | log::info!("Num channels: {num_channels}"); 68 | let samplerate = hwp.get_rate()? 
as f64;
                log::info!("Sample rate : {samplerate}");
                let stream_config = StreamConfig {
                    samplerate,
                    channels: ChannelMap32::default()
                        .with_indices(std::iter::repeat(1).take(num_channels)),
                    buffer_size_range: (Some(period_size), Some(period_size)),
                    exclusive: false,
                };
                let mut timestamp = Timestamp::new(samplerate);
                let mut buffer = vec![0f32; period_size * num_channels];
                // Duration of one period, in *seconds*.
                let latency = period_size as f64 / samplerate;
                device.pcm.prepare()?;
                if device.pcm.state() != pcm::State::Running {
                    log::info!("Device not already started, starting now");
                    device.pcm.start()?;
                }
                let _try = || loop {
                    let frames = device.pcm.avail_update()? as usize;
                    if frames == 0 {
                        // poll(2) takes its timeout in *milliseconds*, while `latency`
                        // is in seconds. Rounding the seconds value directly truncates
                        // any realistic period (~0.005-0.05 s) to a 0 ms timeout,
                        // which turns this idle wait into a hot busy-spin. Convert to
                        // milliseconds, and keep at least 1 ms so the timeout never
                        // degenerates into a non-blocking poll.
                        let timeout_ms = ((latency * 1e3).round() as i32).max(1);
                        // Only the eject trigger fd requests POLLIN (the PCM
                        // placeholder descriptors have `events: 0`), so any positive
                        // result means an eject was requested.
                        if alsa::poll::poll(&mut poll_descriptors, timeout_ms)? > 0 {
                            log::debug!("Eject requested, returning ownership of callback");
                            break Ok(callback);
                        }
                        continue;
                    }

                    log::debug!("Frames available: {frames}");
                    // Never process more than one period at a time; `buffer` is only
                    // sized for a single period.
                    let frames = std::cmp::min(frames, period_size);
                    let len = frames * num_channels;

                    loop_callback(
                        StreamContext {
                            config: &stream_config,
                            timestamp: &mut timestamp,
                            io: &io,
                            num_channels,
                            num_frames: frames,
                            buffer: &mut buffer[..len],
                            callback: &mut callback,
                        },
                        &recover,
                    )?;

                    // React to device state changes after each period: resume (or
                    // re-prepare) a suspended device, back off while paused.
                    match device.pcm.state() {
                        pcm::State::Suspended => {
                            if hwp.can_resume() {
                                device.pcm.resume()?;
                            } else {
                                device.pcm.prepare()?;
                            }
                        }
                        pcm::State::Paused => std::thread::sleep(Duration::from_secs(1)),
                        _ => {}
                    }
                };
                _try()
            }
        });
        Ok(Self {
            eject_trigger: Arc::new(tx),
            join_handle,
        })
    }
}

/// Borrowed per-iteration state handed to the input/output loop callbacks.
pub(super) struct StreamContext<'a, Callback: 'a> {
    pub(super) config: &'a StreamConfig,
    pub(super) timestamp: &'a mut Timestamp,
    pub(super) io: &'a
pcm::IO<'a, f32>,
    pub(super) num_channels: usize,
    pub(super) num_frames: usize,
    pub(super) buffer: &'a mut [f32],
    pub(super) callback: &'a mut Callback,
}
-------------------------------------------------------------------------------- /src/backends/alsa/triggerfd.rs: --------------------------------------------------------------------------------
/// Create a connected (sender, receiver) pair backed by an anonymous pipe, used to wake up the
/// ALSA I/O thread and request it to shut down.
pub fn trigger() -> Result<(Sender, Receiver), nix::Error> {
    let mut fds = [0; 2];
    // SAFETY: `fds` is a valid, writable array of two ints, as required by pipe(2).
    let ret = unsafe { libc::pipe(fds.as_mut_ptr()) };
    nix::Error::result(ret)?;
    // pipe(2) fills the array as [read end, write end]. The `Sender` *writes* into the pipe, so
    // it must own the write end; the `Receiver` polls/reads, so it owns the read end. Handing
    // the read end to the Sender would make every `trigger()` fail with EBADF and the eject
    // request would never reach the I/O thread.
    let [read, write] = fds;
    Ok((Sender(write), Receiver(read)))
}

/// Write half of the trigger pair; owns the pipe's write end.
#[derive(Debug, Eq, PartialEq)]
#[repr(transparent)]
pub struct Sender(libc::c_int);

// SAFETY: the wrapped value is just an integer file descriptor; writing to a pipe is safe from
// any thread.
unsafe impl Send for Sender {}
unsafe impl Sync for Sender {}

impl Drop for Sender {
    fn drop(&mut self) {
        // Best-effort close; Drop has no way to report failure.
        unsafe { libc::close(self.0) };
    }
}

impl Sender {
    /// Signal the receiver by writing a single 8-byte value into the pipe.
    pub fn trigger(&self) -> Result<(), nix::Error> {
        let buf = 1u64;
        let size = size_of_val(&buf);
        let buf = std::ptr::from_ref(&buf).cast();
        // SAFETY: `buf` points at a live u64 of exactly `size` bytes.
        let ret = unsafe { libc::write(self.0, buf, size) };
        match ret {
            // write(2) returns the number of bytes written; anything but the full 8 bytes is
            // treated as an error.
            8 => Ok(()),
            _ => Err(nix::Error::last()),
        }
    }
}

/// Read half of the trigger pair; owns the pipe's read end, polled by the I/O thread.
#[derive(Debug, Eq, PartialEq)]
#[repr(transparent)]
pub struct Receiver(libc::c_int);

// SAFETY: the wrapped value is just an integer file descriptor.
unsafe impl Send for Receiver {}
unsafe impl Sync for Receiver {}

impl Drop for Receiver {
    fn drop(&mut self) {
        // Best-effort close; Drop has no way to report failure.
        unsafe { libc::close(self.0) };
    }
}

impl Receiver {
    /// Expose the read end as a pollfd watching for readability (i.e. a pending trigger).
    pub fn as_pollfd(&self) -> libc::pollfd {
        libc::pollfd {
            fd: self.0,
            events: libc::POLLIN,
            revents: 0,
        }
    }
}
-------------------------------------------------------------------------------- /src/backends/coreaudio.rs: --------------------------------------------------------------------------------
//! # CoreAudio backend
//!
//!
CoreAudio is the audio backend for macOS and iOS devices. 4 | 5 | use std::borrow::Cow; 6 | use std::convert::Infallible; 7 | 8 | use coreaudio::audio_unit::audio_format::LinearPcmFlags; 9 | use coreaudio::audio_unit::macos_helpers::{ 10 | audio_unit_from_device_id, get_audio_device_ids_for_scope, get_default_device_id, 11 | get_device_name, get_supported_physical_stream_formats, 12 | }; 13 | use coreaudio::audio_unit::render_callback::{data, Args}; 14 | use coreaudio::audio_unit::{AudioUnit, Element, SampleFormat, Scope, StreamFormat}; 15 | use coreaudio::sys::{ 16 | kAudioUnitProperty_SampleRate, kAudioUnitProperty_StreamFormat, AudioDeviceID, 17 | }; 18 | use thiserror::Error; 19 | 20 | use crate::audio_buffer::{AudioBuffer, Sample}; 21 | use crate::channel_map::Bitset; 22 | use crate::prelude::ChannelMap32; 23 | use crate::timestamp::Timestamp; 24 | use crate::{ 25 | AudioCallbackContext, AudioDevice, AudioDriver, AudioInput, AudioInputCallback, 26 | AudioInputDevice, AudioOutput, AudioOutputCallback, AudioOutputDevice, AudioStreamHandle, 27 | Channel, DeviceType, SendEverywhereButOnWeb, StreamConfig, 28 | }; 29 | 30 | /// Type of errors from the CoreAudio backend 31 | #[derive(Debug, Error)] 32 | #[error("CoreAudio error:")] 33 | pub enum CoreAudioError { 34 | /// Error originating from CoreAudio 35 | #[error("{0}")] 36 | BackendError(#[from] coreaudio::Error), 37 | /// The scope given to an audio device is invalid. 38 | #[error("Invalid scope {0:?}")] 39 | InvalidScope(Scope), 40 | } 41 | 42 | /// The CoreAudio driver. 
43 | #[derive(Debug, Copy, Clone)] 44 | pub struct CoreAudioDriver; 45 | 46 | impl AudioDriver for CoreAudioDriver { 47 | type Error = CoreAudioError; 48 | type Device = CoreAudioDevice; 49 | const DISPLAY_NAME: &'static str = "CoreAudio"; 50 | 51 | fn version(&self) -> Result, Self::Error> { 52 | Ok(Cow::Borrowed("unknown")) 53 | } 54 | 55 | fn default_device(&self, device_type: DeviceType) -> Result, Self::Error> { 56 | let Some(device_id) = get_default_device_id(device_type.is_input()) else { 57 | return Ok(None); 58 | }; 59 | Ok(Some(CoreAudioDevice::from_id( 60 | device_id, 61 | device_type.is_input(), 62 | )?)) 63 | } 64 | 65 | fn list_devices(&self) -> Result, Self::Error> { 66 | let per_scope = [Scope::Input, Scope::Output] 67 | .into_iter() 68 | .map(|scope| { 69 | let audio_ids = get_audio_device_ids_for_scope(scope)?; 70 | audio_ids 71 | .into_iter() 72 | .map(|id| CoreAudioDevice::from_id(id, matches!(scope, Scope::Input))) 73 | .collect::, _>>() 74 | }) 75 | .collect::, _>>()?; 76 | Ok(per_scope.into_iter().flatten()) 77 | } 78 | } 79 | 80 | /// Type of devices available from the CoreAudio driver. 
81 | #[derive(Debug, Clone, Copy)] 82 | pub struct CoreAudioDevice { 83 | device_id: AudioDeviceID, 84 | device_type: DeviceType, 85 | } 86 | 87 | impl CoreAudioDevice { 88 | fn from_id(device_id: AudioDeviceID, is_input: bool) -> Result { 89 | let is_output = !is_input; // TODO: Interact with CoreAudio directly to be able to work with duplex devices 90 | let is_default = get_default_device_id(true) == Some(device_id) 91 | || get_default_device_id(false) == Some(device_id); 92 | let mut device_type = DeviceType::empty(); 93 | device_type.set(DeviceType::INPUT, is_input); 94 | device_type.set(DeviceType::OUTPUT, is_output); 95 | device_type.set(DeviceType::DEFAULT, is_default); 96 | Ok(Self { 97 | device_id, 98 | device_type, 99 | }) 100 | } 101 | } 102 | 103 | impl AudioDevice for CoreAudioDevice { 104 | type Error = CoreAudioError; 105 | 106 | fn name(&self) -> Cow { 107 | match get_device_name(self.device_id) { 108 | Ok(std) => Cow::Owned(std), 109 | Err(err) => { 110 | eprintln!("Cannot get audio device name: {err}"); 111 | Cow::Borrowed("") 112 | } 113 | } 114 | } 115 | 116 | fn device_type(&self) -> DeviceType { 117 | self.device_type 118 | } 119 | 120 | fn channel_map(&self) -> impl IntoIterator { 121 | let is_input = matches!(self.device_type, DeviceType::INPUT); 122 | let channels = match audio_unit_from_device_id(self.device_id, is_input) { 123 | Err(err) => { 124 | eprintln!("CoreAudio error getting audio unit: {err}"); 125 | 0 126 | } 127 | Ok(audio_unit) => { 128 | let stream_format = if is_input { 129 | audio_unit.input_stream_format().unwrap() 130 | } else { 131 | audio_unit.output_stream_format().unwrap() 132 | }; 133 | stream_format.channels as usize 134 | } 135 | }; 136 | (0..channels).map(|ch| Channel { 137 | index: ch, 138 | name: Cow::Owned(format!("Channel {}", ch)), 139 | }) 140 | } 141 | 142 | fn is_config_supported(&self, _config: &StreamConfig) -> bool { 143 | true 144 | } 145 | 146 | fn enumerate_configurations(&self) -> Option> { 147 | 
const TYPICAL_SAMPLERATES: [f64; 5] = [44100., 48000., 96000., 128000., 192000.]; 148 | let supported_list = get_supported_physical_stream_formats(self.device_id) 149 | .inspect_err(|err| eprintln!("Error getting stream formats: {err}")) 150 | .ok()?; 151 | Some(supported_list.into_iter().flat_map(|asbd| { 152 | let samplerate_range = asbd.mSampleRateRange.mMinimum..asbd.mSampleRateRange.mMaximum; 153 | TYPICAL_SAMPLERATES 154 | .iter() 155 | .copied() 156 | .filter(move |sr| samplerate_range.contains(sr)) 157 | .flat_map(move |sr| { 158 | [false, true] 159 | .into_iter() 160 | .map(move |exclusive| (sr, exclusive)) 161 | }) 162 | .map(move |(samplerate, exclusive)| { 163 | let channels = 1 << (asbd.mFormat.mChannelsPerFrame - 1); 164 | StreamConfig { 165 | samplerate, 166 | channels, 167 | buffer_size_range: (None, None), 168 | exclusive, 169 | } 170 | }) 171 | })) 172 | } 173 | } 174 | 175 | fn input_stream_format(sample_rate: f64, channels: ChannelMap32) -> StreamFormat { 176 | StreamFormat { 177 | sample_rate, 178 | sample_format: SampleFormat::I16, 179 | flags: LinearPcmFlags::IS_SIGNED_INTEGER, 180 | channels: channels.count() as _, 181 | } 182 | } 183 | 184 | impl AudioInputDevice for CoreAudioDevice { 185 | type StreamHandle = CoreAudioStream; 186 | 187 | fn default_input_config(&self) -> Result { 188 | let audio_unit = audio_unit_from_device_id(self.device_id, true)?; 189 | let samplerate = audio_unit.get_property::( 190 | kAudioUnitProperty_SampleRate, 191 | Scope::Input, 192 | Element::Input, 193 | )?; 194 | Ok(StreamConfig { 195 | channels: 0b11, 196 | samplerate, 197 | buffer_size_range: (None, None), 198 | exclusive: false, 199 | }) 200 | } 201 | 202 | fn create_input_stream( 203 | &self, 204 | stream_config: StreamConfig, 205 | callback: Callback, 206 | ) -> Result, Self::Error> { 207 | CoreAudioStream::new_input(self.device_id, stream_config, callback) 208 | } 209 | } 210 | 211 | fn output_stream_format(sample_rate: f64, channels: ChannelMap32) -> 
StreamFormat { 212 | StreamFormat { 213 | sample_rate, 214 | sample_format: SampleFormat::F32, 215 | flags: LinearPcmFlags::IS_NON_INTERLEAVED | LinearPcmFlags::IS_FLOAT, 216 | channels, 217 | } 218 | } 219 | 220 | impl AudioOutputDevice for CoreAudioDevice { 221 | type StreamHandle = CoreAudioStream; 222 | 223 | fn default_output_config(&self) -> Result { 224 | let audio_unit = audio_unit_from_device_id(self.device_id, false)?; 225 | let samplerate = audio_unit.sample_rate()?; 226 | Ok(StreamConfig { 227 | samplerate, 228 | buffer_size_range: (None, None), 229 | channels: 0b11, 230 | exclusive: false, 231 | }) 232 | } 233 | 234 | fn create_output_stream( 235 | &self, 236 | stream_config: StreamConfig, 237 | callback: Callback, 238 | ) -> Result, Self::Error> { 239 | CoreAudioStream::new_output(self.device_id, stream_config, callback) 240 | } 241 | } 242 | 243 | /// Stream type created by opening up a stream on a [`CoreAudioDevice`]. 244 | pub struct CoreAudioStream { 245 | audio_unit: AudioUnit, 246 | callback_retrieve: oneshot::Sender>, 247 | } 248 | 249 | impl AudioStreamHandle for CoreAudioStream { 250 | type Error = Infallible; 251 | 252 | fn eject(mut self) -> Result { 253 | let (tx, rx) = oneshot::channel(); 254 | self.callback_retrieve.send(tx).unwrap(); 255 | let callback = rx.recv().unwrap(); 256 | self.audio_unit.free_input_callback(); 257 | self.audio_unit.free_render_callback(); 258 | Ok(callback) 259 | } 260 | } 261 | 262 | impl CoreAudioStream { 263 | fn new_input( 264 | device_id: AudioDeviceID, 265 | stream_config: StreamConfig, 266 | callback: Callback, 267 | ) -> Result { 268 | let mut audio_unit = audio_unit_from_device_id(device_id, true)?; 269 | let asbd = input_stream_format(stream_config.samplerate, stream_config.channels).to_asbd(); 270 | audio_unit.set_property( 271 | kAudioUnitProperty_StreamFormat, 272 | Scope::Output, 273 | Element::Input, 274 | Some(&asbd), 275 | )?; 276 | let mut buffer = AudioBuffer::zeroed( 277 | 
stream_config.channels.count(), 278 | stream_config.samplerate as _, 279 | ); 280 | 281 | // Set up the callback retrieval process, without needing to make the callback `Sync` 282 | let (tx, rx) = oneshot::channel::>(); 283 | let mut callback = Some(callback); 284 | audio_unit.set_input_callback(move |args: Args>| { 285 | if let Ok(sender) = rx.try_recv() { 286 | sender.send(callback.take().unwrap()).unwrap(); 287 | return Err(()); 288 | } 289 | let mut buffer = buffer.slice_mut(..args.num_frames); 290 | for (out, inp) in buffer 291 | .as_interleaved_mut() 292 | .iter_mut() 293 | .zip(args.data.buffer.iter()) 294 | { 295 | *out = inp.into_float(); 296 | } 297 | let timestamp = 298 | Timestamp::from_count(stream_config.samplerate, args.time_stamp.mSampleTime as _); 299 | let input = AudioInput { 300 | buffer: buffer.as_ref(), 301 | timestamp, 302 | }; 303 | if let Some(callback) = &mut callback { 304 | callback.on_input_data( 305 | AudioCallbackContext { 306 | stream_config, 307 | timestamp, 308 | }, 309 | input, 310 | ); 311 | } 312 | Ok(()) 313 | })?; 314 | audio_unit.start()?; 315 | Ok(Self { 316 | audio_unit, 317 | callback_retrieve: tx, 318 | }) 319 | } 320 | } 321 | 322 | impl CoreAudioStream { 323 | fn new_output( 324 | device_id: AudioDeviceID, 325 | stream_config: StreamConfig, 326 | callback: Callback, 327 | ) -> Result { 328 | let mut audio_unit = audio_unit_from_device_id(device_id, false)?; 329 | let asbd = output_stream_format(stream_config.samplerate, stream_config.channels).to_asbd(); 330 | audio_unit.set_property( 331 | kAudioUnitProperty_StreamFormat, 332 | Scope::Input, 333 | Element::Output, 334 | Some(&asbd), 335 | )?; 336 | let mut buffer = AudioBuffer::zeroed( 337 | stream_config.channels.count(), 338 | stream_config.samplerate as _, 339 | ); 340 | 341 | // Set up the callback retrieval process, without needing to make the callback `Sync` 342 | let (tx, rx) = oneshot::channel::>(); 343 | let mut callback = Some(callback); 344 | 
audio_unit.set_render_callback(move |mut args: Args>| { 345 | if let Ok(sender) = rx.try_recv() { 346 | sender.send(callback.take().unwrap()).unwrap(); 347 | return Err(()); 348 | } 349 | let mut buffer = buffer.slice_mut(..args.num_frames); 350 | let timestamp = 351 | Timestamp::from_count(stream_config.samplerate, args.time_stamp.mSampleTime as _); 352 | let output = AudioOutput { 353 | buffer: buffer.as_mut(), 354 | timestamp, 355 | }; 356 | if let Some(callback) = &mut callback { 357 | callback.on_output_data( 358 | AudioCallbackContext { 359 | stream_config, 360 | timestamp, 361 | }, 362 | output, 363 | ); 364 | for (output, inner) in args.data.channels_mut().zip(buffer.channels()) { 365 | output.copy_from_slice(inner.as_slice().unwrap()); 366 | } 367 | } 368 | Ok(()) 369 | })?; 370 | audio_unit.start()?; 371 | Ok(Self { 372 | audio_unit, 373 | callback_retrieve: tx, 374 | }) 375 | } 376 | } 377 | -------------------------------------------------------------------------------- /src/backends/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Backends 2 | //! 3 | //! Home of the various backends supported by the library. 4 | //! 5 | //! Each backend is provided in its own submodule. Types should be public so that the user isn't 6 | //! limited to going through the main API if they want to choose a specific backend. 7 | 8 | use crate::{AudioDriver, AudioInputDevice, AudioOutputDevice, DeviceType}; 9 | 10 | #[cfg(unsupported)] 11 | compile_error!("Unsupported platform (supports ALSA, CoreAudio, and WASAPI)"); 12 | 13 | #[cfg(os_alsa)] 14 | pub mod alsa; 15 | 16 | #[cfg(os_coreaudio)] 17 | pub mod coreaudio; 18 | 19 | #[cfg(os_wasapi)] 20 | pub mod wasapi; 21 | 22 | #[cfg(all(os_pipewire, feature = "pipewire"))] 23 | pub mod pipewire; 24 | 25 | /// Returns the default driver. 26 | /// 27 | /// "Default" here means that it is a supported driver that is available on the platform. 
28 | /// 29 | /// The signature makes it unfortunately impossible to do runtime selection, and could change in 30 | /// the future to make it possible. Until now, the "default" driver is the lowest common 31 | /// denominator. 32 | /// 33 | /// Selects the following driver depending on platform: 34 | /// 35 | /// | **Platform** | **Driver** | 36 | /// |:------------:|:---------------------------:| 37 | /// | Linux | Pipewire (if enabled), ALSA | 38 | /// | macOS | CoreAudio | 39 | /// | Windows | WASAPI | 40 | #[cfg(any(os_alsa, os_coreaudio, os_wasapi))] 41 | #[allow(clippy::needless_return)] 42 | pub fn default_driver() -> impl AudioDriver { 43 | #[cfg(all(os_pipewire, feature = "pipewire"))] 44 | return pipewire::driver::PipewireDriver::new().unwrap(); 45 | #[cfg(all(not(all(os_pipewire, feature = "pipewire")), os_alsa))] 46 | return alsa::AlsaDriver; 47 | #[cfg(os_coreaudio)] 48 | return coreaudio::CoreAudioDriver; 49 | #[cfg(os_wasapi)] 50 | return wasapi::WasapiDriver; 51 | } 52 | 53 | /// Returns the default input device for the given audio driver. 54 | /// 55 | /// The default device is usually the one the user has selected in its system settings. 56 | pub fn default_input_device_from(driver: &Driver) -> Driver::Device 57 | where 58 | Driver::Device: AudioInputDevice, 59 | { 60 | driver 61 | .default_device(DeviceType::PHYSICAL | DeviceType::INPUT) 62 | .expect("Audio driver error") 63 | .expect("No default device found") 64 | } 65 | 66 | /// Default input device from the default driver for this platform. 67 | /// 68 | /// "Default" here means both in terms of platform support but also can include runtime selection. 69 | /// Therefore, it is better to use this method directly rather than first getting the default 70 | /// driver from [`default_driver`]. 
71 | #[cfg(any(feature = "pipewire", os_alsa, os_coreaudio, os_wasapi))] 72 | #[allow(clippy::needless_return)] 73 | pub fn default_input_device() -> impl AudioInputDevice { 74 | #[cfg(all(os_pipewire, feature = "pipewire"))] 75 | return default_input_device_from(&pipewire::driver::PipewireDriver::new().unwrap()); 76 | #[cfg(all(not(all(os_pipewire, feature = "pipewire")), os_alsa))] 77 | return default_input_device_from(&alsa::AlsaDriver); 78 | #[cfg(os_coreaudio)] 79 | return default_input_device_from(&coreaudio::CoreAudioDriver); 80 | #[cfg(os_wasapi)] 81 | return default_input_device_from(&wasapi::WasapiDriver); 82 | } 83 | 84 | /// Returns the default input device for the given audio driver. 85 | /// 86 | /// The default device is usually the one the user has selected in its system settings. 87 | pub fn default_output_device_from(driver: &Driver) -> Driver::Device 88 | where 89 | Driver::Device: AudioOutputDevice, 90 | { 91 | driver 92 | .default_device(DeviceType::PHYSICAL | DeviceType::OUTPUT) 93 | .expect("Audio driver error") 94 | .expect("No default device found") 95 | } 96 | 97 | /// Default output device from the default driver for this platform. 98 | /// 99 | /// "Default" here means both in terms of platform support but also can include runtime selection. 100 | /// Therefore, it is better to use this method directly rather than first getting the default 101 | /// driver from [`default_driver`]. 
102 | #[cfg(any(os_alsa, os_coreaudio, os_wasapi, feature = "pipewire"))] 103 | #[allow(clippy::needless_return)] 104 | pub fn default_output_device() -> impl AudioOutputDevice { 105 | #[cfg(all(os_pipewire, feature = "pipewire"))] 106 | return default_output_device_from(&pipewire::driver::PipewireDriver::new().unwrap()); 107 | #[cfg(all(not(all(os_pipewire, feature = "pipewire")), os_alsa))] 108 | return default_output_device_from(&alsa::AlsaDriver); 109 | #[cfg(os_coreaudio)] 110 | return default_output_device_from(&coreaudio::CoreAudioDriver); 111 | #[cfg(os_wasapi)] 112 | return default_output_device_from(&wasapi::WasapiDriver); 113 | } 114 | -------------------------------------------------------------------------------- /src/backends/pipewire/device.rs: -------------------------------------------------------------------------------- 1 | use super::stream::StreamHandle; 2 | use crate::backends::pipewire::error::PipewireError; 3 | use crate::{ 4 | AudioDevice, AudioInputCallback, AudioInputDevice, AudioOutputCallback, AudioOutputDevice, 5 | Channel, DeviceType, SendEverywhereButOnWeb, StreamConfig, 6 | }; 7 | use pipewire::context::Context; 8 | use pipewire::main_loop::MainLoop; 9 | use std::borrow::Cow; 10 | use std::cell::{Cell, RefCell}; 11 | use std::rc::Rc; 12 | 13 | pub struct PipewireDevice { 14 | pub(super) target_node: Option, 15 | pub device_type: DeviceType, 16 | pub stream_name: Cow<'static, str>, 17 | } 18 | 19 | impl AudioDevice for PipewireDevice { 20 | type Error = PipewireError; 21 | 22 | fn name(&self) -> Cow { 23 | let Some(node_id) = self.target_node else { 24 | return Cow::Borrowed("Default"); 25 | }; 26 | match get_node_name(node_id) { 27 | Ok(Some(name)) => Cow::Owned(name), 28 | Ok(None) => Cow::Borrowed("Unknown"), 29 | Err(e) => { 30 | log::error!("Failed to get device name: {}", e); 31 | Cow::Borrowed("Error") 32 | } 33 | } 34 | } 35 | 36 | fn device_type(&self) -> DeviceType { 37 | self.device_type 38 | } 39 | 40 | fn 
channel_map(&self) -> impl IntoIterator { 41 | [] 42 | } 43 | 44 | fn is_config_supported(&self, _config: &StreamConfig) -> bool { 45 | true 46 | } 47 | 48 | fn enumerate_configurations(&self) -> Option> { 49 | Some([]) 50 | } 51 | } 52 | 53 | impl AudioInputDevice for PipewireDevice { 54 | type StreamHandle = StreamHandle; 55 | 56 | fn default_input_config(&self) -> Result { 57 | Ok(StreamConfig { 58 | samplerate: 48000.0, 59 | channels: 0b11, 60 | exclusive: false, 61 | buffer_size_range: (None, None), 62 | }) 63 | } 64 | 65 | fn create_input_stream( 66 | &self, 67 | stream_config: StreamConfig, 68 | callback: Callback, 69 | ) -> Result, Self::Error> { 70 | StreamHandle::new_input(&self.stream_name, stream_config, callback) 71 | } 72 | } 73 | 74 | impl AudioOutputDevice for PipewireDevice { 75 | type StreamHandle = StreamHandle; 76 | 77 | fn default_output_config(&self) -> Result { 78 | Ok(StreamConfig { 79 | samplerate: 48000.0, 80 | channels: 0b11, 81 | exclusive: false, 82 | buffer_size_range: (None, None), 83 | }) 84 | } 85 | 86 | fn create_output_stream( 87 | &self, 88 | stream_config: StreamConfig, 89 | callback: Callback, 90 | ) -> Result, Self::Error> { 91 | StreamHandle::new_output(&self.stream_name, stream_config, callback) 92 | } 93 | } 94 | 95 | impl PipewireDevice { 96 | pub fn with_stream_name(&mut self, name: impl Into>) { 97 | self.stream_name = name.into(); 98 | } 99 | } 100 | 101 | fn get_node_name(node_id: u32) -> Result, PipewireError> { 102 | let mainloop = MainLoop::new(None)?; 103 | let context = Context::new(&mainloop)?; 104 | let core = context.connect(None)?; 105 | let registry = core.get_registry()?; 106 | 107 | // To comply with Rust's safety rules, we wrap this variable in an `Rc` and a `Cell`. 108 | let done = Rc::new(Cell::new(false)); 109 | 110 | // Create new reference for each variable so that they can be moved into the closure. 
111 | let done_clone = done.clone(); 112 | let loop_clone = mainloop.clone(); 113 | 114 | // Trigger the sync event. The server's answer won't be processed until we start the main loop, 115 | // so we can safely do this before setting up a callback. This lets us avoid using a Cell. 116 | let pending = core.sync(0)?; 117 | 118 | let _listener_core = core 119 | .add_listener_local() 120 | .done(move |id, seq| { 121 | log::debug!("[Core/Done] id: {id} seq: {}", seq.seq()); 122 | if id == pipewire::core::PW_ID_CORE && seq == pending { 123 | done_clone.set(true); 124 | loop_clone.quit(); 125 | } 126 | }) 127 | .register(); 128 | 129 | let data = Rc::new(RefCell::new(None)); 130 | let _listener_reg = registry 131 | .add_listener_local() 132 | .global({ 133 | let data = data.clone(); 134 | move |global| { 135 | if node_id == global.id { 136 | if let Some(props) = global.props { 137 | if let Some(name) = props.get("node.name") { 138 | data.borrow_mut().replace(name.to_string()); 139 | } 140 | } 141 | } 142 | } 143 | }) 144 | .register(); 145 | 146 | while !done.get() { 147 | mainloop.run(); 148 | } 149 | drop(_listener_core); 150 | drop(_listener_reg); 151 | Ok(Rc::into_inner(data).unwrap().into_inner()) 152 | } 153 | -------------------------------------------------------------------------------- /src/backends/pipewire/driver.rs: -------------------------------------------------------------------------------- 1 | use super::error::PipewireError; 2 | use crate::backends::pipewire::device::PipewireDevice; 3 | use crate::backends::pipewire::utils; 4 | use crate::{AudioDriver, DeviceType}; 5 | use std::borrow::Cow; 6 | use std::marker::PhantomData; 7 | 8 | pub struct PipewireDriver { 9 | __init: PhantomData<()>, 10 | } 11 | 12 | impl AudioDriver for PipewireDriver { 13 | type Error = PipewireError; 14 | type Device = PipewireDevice; 15 | const DISPLAY_NAME: &'static str = "Pipewire"; 16 | 17 | fn version(&self) -> Result, Self::Error> { 18 | // TODO: Figure out how to get 
version 19 | Ok(Cow::Borrowed("unkonwn")) 20 | } 21 | 22 | fn default_device(&self, device_type: DeviceType) -> Result, Self::Error> { 23 | Ok(Some(PipewireDevice { 24 | target_node: None, 25 | device_type, 26 | stream_name: Cow::Borrowed("Interflow stream"), 27 | })) 28 | } 29 | 30 | fn list_devices(&self) -> Result, Self::Error> { 31 | Ok(utils::get_devices()? 32 | .into_iter() 33 | .map(|(id, device_type)| PipewireDevice { 34 | target_node: Some(id), 35 | device_type, 36 | stream_name: Cow::Borrowed("Interflow stream"), 37 | })) 38 | } 39 | } 40 | 41 | impl PipewireDriver { 42 | /// Initialize the Pipewire driver. 43 | pub fn new() -> Result { 44 | pipewire::init(); 45 | Ok(Self { 46 | __init: PhantomData, 47 | }) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/backends/pipewire/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | #[derive(Debug, Error)] 4 | pub enum PipewireError { 5 | #[error("Pipewire error: {0}")] 6 | BackendError(#[from] pipewire::Error), 7 | #[error("Cannot create Pipewire stream: {0}")] 8 | GenError(#[from] libspa::pod::serialize::GenError), 9 | } 10 | -------------------------------------------------------------------------------- /src/backends/pipewire/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod device; 2 | pub mod driver; 3 | pub mod error; 4 | pub mod stream; 5 | mod utils; 6 | -------------------------------------------------------------------------------- /src/backends/pipewire/stream.rs: -------------------------------------------------------------------------------- 1 | use crate::audio_buffer::{AudioMut, AudioRef}; 2 | use crate::backends::pipewire::error::PipewireError; 3 | use crate::channel_map::Bitset; 4 | use crate::timestamp::Timestamp; 5 | use crate::{ 6 | AudioCallbackContext, AudioInput, AudioInputCallback, AudioOutput, 
AudioOutputCallback, 7 | AudioStreamHandle, StreamConfig, 8 | }; 9 | use libspa::buffer::Data; 10 | use libspa::param::audio::{AudioFormat, AudioInfoRaw}; 11 | use libspa::pod::Pod; 12 | use libspa_sys::{SPA_PARAM_EnumFormat, SPA_TYPE_OBJECT_Format}; 13 | use pipewire::context::Context; 14 | use pipewire::keys; 15 | use pipewire::main_loop::{MainLoop, WeakMainLoop}; 16 | use pipewire::properties::properties; 17 | use pipewire::stream::{Stream, StreamFlags}; 18 | use std::fmt; 19 | use std::fmt::Formatter; 20 | use std::thread::JoinHandle; 21 | 22 | enum StreamCommands { 23 | ReceiveCallback(Callback), 24 | Eject(oneshot::Sender), 25 | } 26 | 27 | impl fmt::Debug for StreamCommands { 28 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 29 | match self { 30 | Self::ReceiveCallback(_) => write!(f, "ReceiveCallback"), 31 | Self::Eject(_) => write!(f, "Eject"), 32 | } 33 | } 34 | } 35 | 36 | struct StreamInner { 37 | commands: rtrb::Consumer>, 38 | scratch_buffer: Box<[f32]>, 39 | callback: Option, 40 | config: StreamConfig, 41 | timestamp: Timestamp, 42 | loop_ref: WeakMainLoop, 43 | } 44 | 45 | impl StreamInner { 46 | fn handle_command(&mut self, command: StreamCommands) { 47 | log::debug!("Handling command: {command:?}"); 48 | match command { 49 | StreamCommands::ReceiveCallback(callback) => { 50 | debug_assert!(self.callback.is_none()); 51 | self.callback = Some(callback); 52 | } 53 | StreamCommands::Eject(reply) => { 54 | if let Some(callback) = self.callback.take() { 55 | reply.send(callback).unwrap(); 56 | if let Some(loop_ref) = self.loop_ref.upgrade() { 57 | loop_ref.quit(); 58 | } 59 | } 60 | } 61 | } 62 | } 63 | 64 | fn handle_commands(&mut self) { 65 | while let Ok(command) = self.commands.pop() { 66 | self.handle_command(command); 67 | } 68 | } 69 | 70 | fn ejected(&self) -> bool { 71 | self.callback.is_none() 72 | } 73 | } 74 | 75 | impl StreamInner { 76 | fn process_output(&mut self, channels: usize, frames: usize) -> usize { 77 | let buffer = 
AudioMut::from_noninterleaved_mut( 78 | &mut self.scratch_buffer[..channels * frames], 79 | channels, 80 | ) 81 | .unwrap(); 82 | if let Some(callback) = self.callback.as_mut() { 83 | let context = AudioCallbackContext { 84 | stream_config: self.config, 85 | timestamp: self.timestamp, 86 | }; 87 | let num_frames = buffer.num_samples(); 88 | let output = AudioOutput { 89 | buffer, 90 | timestamp: self.timestamp, 91 | }; 92 | callback.on_output_data(context, output); 93 | self.timestamp += num_frames as u64; 94 | num_frames 95 | } else { 96 | 0 97 | } 98 | } 99 | } 100 | 101 | impl StreamInner { 102 | fn process_input(&mut self, channels: usize, frames: usize) -> usize { 103 | let buffer = 104 | AudioRef::from_noninterleaved(&self.scratch_buffer[..channels * frames], channels) 105 | .unwrap(); 106 | if let Some(callback) = self.callback.as_mut() { 107 | let context = AudioCallbackContext { 108 | stream_config: self.config, 109 | timestamp: self.timestamp, 110 | }; 111 | let num_frames = buffer.num_samples(); 112 | let input = AudioInput { 113 | buffer, 114 | timestamp: self.timestamp, 115 | }; 116 | callback.on_input_data(context, input); 117 | self.timestamp += num_frames as u64; 118 | num_frames 119 | } else { 120 | 0 121 | } 122 | } 123 | } 124 | 125 | pub struct StreamHandle { 126 | commands: rtrb::Producer>, 127 | handle: JoinHandle>, 128 | } 129 | 130 | impl AudioStreamHandle for StreamHandle { 131 | type Error = PipewireError; 132 | 133 | fn eject(mut self) -> Result { 134 | log::info!("Ejecting stream"); 135 | let (tx, rx) = oneshot::channel(); 136 | self.commands 137 | .push(StreamCommands::Eject(tx)) 138 | .expect("Command buffer overflow"); 139 | self.handle.join().unwrap()?; 140 | Ok(rx.recv().unwrap()) 141 | } 142 | } 143 | 144 | impl StreamHandle { 145 | fn create_stream( 146 | name: String, 147 | mut config: StreamConfig, 148 | callback: Callback, 149 | direction: pipewire::spa::utils::Direction, 150 | process_frames: impl Fn(&mut [Data], &mut 
StreamInner, usize, usize) -> usize 151 | + Send 152 | + 'static, 153 | ) -> Result { 154 | let (mut tx, rx) = rtrb::RingBuffer::new(16); 155 | let handle = std::thread::spawn(move || { 156 | let main_loop = MainLoop::new(None)?; 157 | let context = Context::new(&main_loop)?; 158 | let core = context.connect(None)?; 159 | 160 | let channels = config.channels.count(); 161 | let channels_str = channels.to_string(); 162 | let stream = Stream::new( 163 | &core, 164 | &name, 165 | properties! { 166 | *keys::MEDIA_TYPE => "Audio", 167 | *keys::MEDIA_ROLE => "Music", 168 | *keys::MEDIA_CATEGORY => get_category(direction), 169 | *keys::AUDIO_CHANNELS => channels_str, 170 | }, 171 | )?; 172 | config.samplerate = config.samplerate.round(); 173 | let _listener = stream 174 | .add_local_listener_with_user_data(StreamInner { 175 | callback: None, 176 | commands: rx, 177 | scratch_buffer: vec![0.0; MAX_FRAMES * channels].into_boxed_slice(), 178 | loop_ref: main_loop.downgrade(), 179 | config, 180 | timestamp: Timestamp::new(config.samplerate), 181 | }) 182 | .process(move |stream, inner| { 183 | log::debug!("Processing stream"); 184 | inner.handle_commands(); 185 | if inner.ejected() { 186 | return; 187 | } 188 | if let Some(mut buffer) = stream.dequeue_buffer() { 189 | let datas = buffer.datas_mut(); 190 | log::debug!("Datas: len={}", datas.len()); 191 | let Some(min_frames) = datas 192 | .iter_mut() 193 | .filter_map(|d| d.data().map(|d| d.len() / size_of::())) 194 | .min() 195 | else { 196 | log::warn!("No datas available"); 197 | return; 198 | }; 199 | let frames = min_frames.min(MAX_FRAMES); 200 | 201 | let frames = process_frames(datas, inner, channels, frames); 202 | 203 | for data in datas.iter_mut() { 204 | let chunk = data.chunk_mut(); 205 | *chunk.offset_mut() = 0; 206 | *chunk.stride_mut() = size_of::() as _; 207 | *chunk.size_mut() = (size_of::() * frames) as _; 208 | } 209 | } else { 210 | log::warn!("No buffer available"); 211 | } 212 | }) 213 | .register()?; 214 
| let values = pipewire::spa::pod::serialize::PodSerializer::serialize( 215 | std::io::Cursor::new(Vec::new()), 216 | &pipewire::spa::pod::Value::Object(pipewire::spa::pod::Object { 217 | type_: SPA_TYPE_OBJECT_Format, 218 | id: SPA_PARAM_EnumFormat, 219 | properties: { 220 | let mut info = AudioInfoRaw::new(); 221 | info.set_format(AudioFormat::F32P); 222 | info.set_rate(config.samplerate as u32); 223 | info.set_channels(channels as u32); 224 | info.into() 225 | }, 226 | }), 227 | )? 228 | .0 229 | .into_inner(); 230 | let mut params = [Pod::from_bytes(&values).unwrap()]; 231 | stream.connect( 232 | direction, 233 | None, 234 | StreamFlags::AUTOCONNECT | StreamFlags::MAP_BUFFERS | StreamFlags::RT_PROCESS, 235 | &mut params, 236 | )?; 237 | log::debug!("Starting Pipewire main loop"); 238 | main_loop.run(); 239 | Ok::<_, PipewireError>(()) 240 | }); 241 | log::debug!("Sending callback to stream"); 242 | tx.push(StreamCommands::ReceiveCallback(callback)).unwrap(); 243 | Ok(Self { 244 | commands: tx, 245 | handle, 246 | }) 247 | } 248 | } 249 | 250 | impl StreamHandle { 251 | /// Create an input Pipewire stream 252 | pub fn new_input( 253 | name: impl ToString, 254 | config: StreamConfig, 255 | callback: Callback, 256 | ) -> Result { 257 | Self::create_stream( 258 | name.to_string(), 259 | config, 260 | callback, 261 | pipewire::spa::utils::Direction::Input, 262 | |datas, inner, channels, frames| { 263 | for (i, data) in datas.iter_mut().enumerate() { 264 | if let Some(data) = data.data() { 265 | let slice: &[f32] = zerocopy::FromBytes::ref_from_bytes(data) 266 | .inspect_err(|e| log::error!("Cannot cast to f32 slice: {e}")) 267 | .unwrap(); 268 | let target = &mut inner.scratch_buffer[i * frames..][..frames]; 269 | target.copy_from_slice(&slice[..frames]); 270 | } 271 | } 272 | inner.process_input(channels, frames) 273 | }, 274 | ) 275 | } 276 | } 277 | 278 | const MAX_FRAMES: usize = 8192; 279 | 280 | fn get_category(direction: pipewire::spa::utils::Direction) -> 
&'static str { 281 | match direction { 282 | pipewire::spa::utils::Direction::Input => "Capture", 283 | pipewire::spa::utils::Direction::Output => "Playback", 284 | x => unreachable!("Unexpected direction: 0x{:X}", x.as_raw()), 285 | } 286 | } 287 | 288 | impl StreamHandle { 289 | /// Create an output Pipewire stream 290 | pub fn new_output( 291 | name: impl ToString, 292 | config: StreamConfig, 293 | callback: Callback, 294 | ) -> Result { 295 | Self::create_stream( 296 | name.to_string(), 297 | config, 298 | callback, 299 | pipewire::spa::utils::Direction::Output, 300 | |datas, inner, channels, frames| { 301 | let frames = inner.process_output(channels, frames); 302 | for (i, data) in datas.iter_mut().enumerate() { 303 | let processed_slice = &inner.scratch_buffer[i * frames..][..frames]; 304 | if let Some(data) = data.data() { 305 | let slice: &mut [f32] = zerocopy::FromBytes::mut_from_bytes(data) 306 | .inspect_err(|e| log::error!("Cannot cast to f32 slice: {e}")) 307 | .unwrap(); 308 | slice[..frames].copy_from_slice(processed_slice); 309 | } 310 | } 311 | frames 312 | }, 313 | ) 314 | } 315 | } 316 | -------------------------------------------------------------------------------- /src/backends/pipewire/utils.rs: -------------------------------------------------------------------------------- 1 | use crate::backends::pipewire::error::PipewireError; 2 | use crate::DeviceType; 3 | use libspa::utils::dict::DictRef; 4 | use pipewire::context::Context; 5 | use pipewire::main_loop::MainLoop; 6 | use pipewire::registry::GlobalObject; 7 | use std::cell::{Cell, RefCell}; 8 | use std::rc::Rc; 9 | 10 | fn get_device_type(object: &GlobalObject<&DictRef>) -> Option { 11 | fn is_input(media_class: &str) -> bool { 12 | let str = media_class.trim().to_lowercase(); 13 | str == "audio/source" 14 | } 15 | 16 | fn is_output(str: &str) -> bool { 17 | let str = str.trim().to_lowercase(); 18 | str == "audio/sink" 19 | } 20 | 21 | let media_class = object.props?.get("media.class")?; 
22 | let mut device_type = DeviceType::empty(); 23 | device_type.set(DeviceType::INPUT, is_input(media_class)); 24 | device_type.set(DeviceType::OUTPUT, is_output(media_class)); 25 | Some(device_type) 26 | } 27 | 28 | pub fn get_devices() -> Result, PipewireError> { 29 | let mainloop = MainLoop::new(None)?; 30 | let context = Context::new(&mainloop)?; 31 | let core = context.connect(None)?; 32 | let registry = core.get_registry()?; 33 | 34 | // To comply with Rust's safety rules, we wrap this variable in an `Rc` and a `Cell`. 35 | let done = Rc::new(Cell::new(false)); 36 | 37 | // Create new reference for each variable so that they can be moved into the closure. 38 | let done_clone = done.clone(); 39 | let loop_clone = mainloop.clone(); 40 | 41 | // Trigger the sync event. The server's answer won't be processed until we start the main loop, 42 | // so we can safely do this before setting up a callback. This lets us avoid using a Cell. 43 | let pending = core.sync(0)?; 44 | 45 | let _listener_core = core 46 | .add_listener_local() 47 | .done(move |id, seq| { 48 | log::debug!("[Core/Done] id: {id} seq: {}", seq.seq()); 49 | if id == pipewire::core::PW_ID_CORE && seq == pending { 50 | done_clone.set(true); 51 | loop_clone.quit(); 52 | } 53 | }) 54 | .register(); 55 | 56 | let data = Rc::new(RefCell::new(Vec::new())); 57 | let _listener_reg = registry 58 | .add_listener_local() 59 | .global({ 60 | let data = data.clone(); 61 | move |global| { 62 | log::debug!( 63 | "object: id:{} type:{}/{}", 64 | global.id, 65 | global.type_, 66 | global.version 67 | ); 68 | if let Some(device_type) = get_device_type(global) { 69 | data.borrow_mut().push((global.id, device_type)); 70 | } 71 | } 72 | }) 73 | .register(); 74 | 75 | while !done.get() { 76 | mainloop.run(); 77 | } 78 | drop(_listener_core); 79 | drop(_listener_reg); 80 | Ok(Rc::into_inner(data).unwrap().into_inner()) 81 | } 82 | -------------------------------------------------------------------------------- 
/src/backends/wasapi/device.rs: -------------------------------------------------------------------------------- 1 | use super::{error, stream}; 2 | use crate::backends::wasapi::stream::WasapiStream; 3 | use crate::channel_map::Bitset; 4 | use crate::prelude::wasapi::util::WasapiMMDevice; 5 | use crate::{ 6 | AudioDevice, AudioInputCallback, AudioInputDevice, AudioOutputCallback, AudioOutputDevice, 7 | Channel, DeviceType, StreamConfig, 8 | }; 9 | use std::borrow::Cow; 10 | use windows::Win32::Media::Audio; 11 | 12 | /// Type of devices available from the WASAPI driver. 13 | #[derive(Debug, Clone)] 14 | pub struct WasapiDevice { 15 | device: WasapiMMDevice, 16 | device_type: DeviceType, 17 | } 18 | 19 | impl WasapiDevice { 20 | pub(crate) fn new(device: Audio::IMMDevice, device_type: DeviceType) -> Self { 21 | WasapiDevice { 22 | device: WasapiMMDevice::new(device), 23 | device_type, 24 | } 25 | } 26 | } 27 | 28 | impl AudioDevice for WasapiDevice { 29 | type Error = error::WasapiError; 30 | 31 | fn name(&self) -> Cow { 32 | match self.device.name() { 33 | Some(std) => Cow::Owned(std), 34 | None => { 35 | eprintln!("Cannot get audio device name"); 36 | Cow::Borrowed("") 37 | } 38 | } 39 | } 40 | 41 | fn device_type(&self) -> DeviceType { 42 | self.device_type 43 | } 44 | 45 | fn channel_map(&self) -> impl IntoIterator { 46 | [] 47 | } 48 | 49 | fn is_config_supported(&self, config: &StreamConfig) -> bool { 50 | self.device_type.contains(DeviceType::OUTPUT) 51 | && stream::is_output_config_supported(self.device.clone(), config) 52 | } 53 | 54 | fn enumerate_configurations(&self) -> Option> { 55 | None::<[StreamConfig; 0]> 56 | } 57 | } 58 | 59 | impl AudioInputDevice for WasapiDevice { 60 | type StreamHandle = WasapiStream; 61 | 62 | fn default_input_config(&self) -> Result { 63 | let audio_client = self.device.activate::()?; 64 | let format = unsafe { audio_client.GetMixFormat()?.read_unaligned() }; 65 | let frame_size = unsafe { audio_client.GetBufferSize() } 66 | 
.map(|i| i as usize) 67 | .ok(); 68 | Ok(StreamConfig { 69 | channels: 0u32.with_indices(0..format.nChannels as _), 70 | exclusive: false, 71 | samplerate: format.nSamplesPerSec as _, 72 | buffer_size_range: (frame_size, frame_size), 73 | }) 74 | } 75 | 76 | fn create_input_stream( 77 | &self, 78 | stream_config: StreamConfig, 79 | callback: Callback, 80 | ) -> Result, Self::Error> { 81 | Ok(WasapiStream::new_input( 82 | self.device.clone(), 83 | stream_config, 84 | callback, 85 | )) 86 | } 87 | } 88 | 89 | impl AudioOutputDevice for WasapiDevice { 90 | type StreamHandle = WasapiStream; 91 | 92 | fn default_output_config(&self) -> Result { 93 | let audio_client = self.device.activate::()?; 94 | let format = unsafe { audio_client.GetMixFormat()?.read_unaligned() }; 95 | let frame_size = unsafe { audio_client.GetBufferSize() } 96 | .map(|i| i as usize) 97 | .ok(); 98 | Ok(StreamConfig { 99 | channels: 0u32.with_indices(0..format.nChannels as _), 100 | exclusive: false, 101 | samplerate: format.nSamplesPerSec as _, 102 | buffer_size_range: (frame_size, frame_size), 103 | }) 104 | } 105 | 106 | fn create_output_stream( 107 | &self, 108 | stream_config: StreamConfig, 109 | callback: Callback, 110 | ) -> Result, Self::Error> { 111 | Ok(WasapiStream::new_output( 112 | self.device.clone(), 113 | stream_config, 114 | callback, 115 | )) 116 | } 117 | } 118 | 119 | /// An iterable collection WASAPI devices. 
120 | pub struct WasapiDeviceList { 121 | pub(crate) collection: Audio::IMMDeviceCollection, 122 | pub(crate) total_count: u32, 123 | pub(crate) next_item: u32, 124 | pub(crate) device_type: DeviceType, 125 | } 126 | 127 | unsafe impl Send for WasapiDeviceList {} 128 | 129 | unsafe impl Sync for WasapiDeviceList {} 130 | 131 | impl Iterator for WasapiDeviceList { 132 | type Item = WasapiDevice; 133 | 134 | fn next(&mut self) -> Option { 135 | if self.next_item >= self.total_count { 136 | return None; 137 | } 138 | 139 | unsafe { 140 | let device = self.collection.Item(self.next_item).unwrap(); 141 | self.next_item += 1; 142 | Some(WasapiDevice::new(device, self.device_type)) 143 | } 144 | } 145 | 146 | fn size_hint(&self) -> (usize, Option) { 147 | let rest = (self.total_count - self.next_item) as usize; 148 | (rest, Some(rest)) 149 | } 150 | } 151 | 152 | impl ExactSizeIterator for WasapiDeviceList {} 153 | -------------------------------------------------------------------------------- /src/backends/wasapi/driver.rs: -------------------------------------------------------------------------------- 1 | use crate::backends::wasapi::device::{WasapiDevice, WasapiDeviceList}; 2 | use bitflags::bitflags_match; 3 | use std::borrow::Cow; 4 | use std::sync::OnceLock; 5 | use windows::Win32::Media::Audio; 6 | use windows::Win32::System::Com; 7 | 8 | use super::{error, util}; 9 | 10 | use crate::{AudioDriver, DeviceType}; 11 | 12 | /// The WASAPI driver. 
/// The WASAPI audio driver, giving access to devices managed by the Windows
/// Audio Session API.
#[derive(Debug, Clone, Default)]
pub struct WasapiDriver;

impl AudioDriver for WasapiDriver {
    type Error = error::WasapiError;
    type Device = WasapiDevice;

    const DISPLAY_NAME: &'static str = "WASAPI";

    /// WASAPI does not expose a version through its API; report a constant
    /// placeholder instead.
    fn version(&self) -> Result<Cow<str>, Self::Error> {
        Ok(Cow::Borrowed("unknown"))
    }

    fn default_device(&self, device_type: DeviceType) -> Result<Option<Self::Device>, Self::Error> {
        audio_device_enumerator().get_default_device(device_type)
    }

    fn list_devices(&self) -> Result<impl IntoIterator<Item = Self::Device>, Self::Error> {
        audio_device_enumerator().get_device_list()
    }
}

/// Returns the process-wide WASAPI device enumerator, creating it on first
/// use. COM is initialized for the calling thread as a side effect.
pub fn audio_device_enumerator() -> &'static AudioDeviceEnumerator {
    ENUMERATOR.get_or_init(|| {
        // Make sure COM is initialised before touching the MMDevice API.
        util::com_initializer();

        unsafe {
            let enumerator = Com::CoCreateInstance::<_, Audio::IMMDeviceEnumerator>(
                &Audio::MMDeviceEnumerator,
                None,
                Com::CLSCTX_ALL,
            )
            .unwrap();

            AudioDeviceEnumerator(enumerator)
        }
    })
}

static ENUMERATOR: OnceLock<AudioDeviceEnumerator> = OnceLock::new();

/// Send/Sync wrapper around `IMMDeviceEnumerator`.
pub struct AudioDeviceEnumerator(Audio::IMMDeviceEnumerator);

impl AudioDeviceEnumerator {
    // Returns the default device for the requested device type, or `None`
    // when the type does not map onto a WASAPI data-flow direction.
    fn get_default_device(
        &self,
        device_type: DeviceType,
    ) -> Result<Option<WasapiDevice>, error::WasapiError> {
        let data_flow = bitflags_match!(device_type, {
            DeviceType::INPUT | DeviceType::PHYSICAL => Some(Audio::eCapture),
            DeviceType::OUTPUT | DeviceType::PHYSICAL => Some(Audio::eRender),
            _ => None,
        });

        data_flow.map_or(Ok(None), |flow| unsafe {
            let device = self.0.GetDefaultAudioEndpoint(flow, Audio::eConsole)?;
            Ok(Some(WasapiDevice::new(device, device_type)))
        })
    }

    // Returns a chained iterator of output and input devices.
77 | fn get_device_list( 78 | &self, 79 | ) -> Result, error::WasapiError> { 80 | // Create separate collections for output and input devices and then chain them. 81 | unsafe { 82 | let output_collection = self 83 | .0 84 | .EnumAudioEndpoints(Audio::eRender, Audio::DEVICE_STATE_ACTIVE)?; 85 | 86 | let count = output_collection.GetCount()?; 87 | 88 | let output_device_list = WasapiDeviceList { 89 | collection: output_collection, 90 | total_count: count, 91 | next_item: 0, 92 | device_type: DeviceType::OUTPUT, 93 | }; 94 | 95 | let input_collection = self 96 | .0 97 | .EnumAudioEndpoints(Audio::eCapture, Audio::DEVICE_STATE_ACTIVE)?; 98 | 99 | let count = input_collection.GetCount()?; 100 | 101 | let input_device_list = WasapiDeviceList { 102 | collection: input_collection, 103 | total_count: count, 104 | next_item: 0, 105 | device_type: DeviceType::INPUT, 106 | }; 107 | 108 | Ok(output_device_list.chain(input_device_list)) 109 | } 110 | } 111 | } 112 | 113 | unsafe impl Send for AudioDeviceEnumerator {} 114 | 115 | unsafe impl Sync for AudioDeviceEnumerator {} 116 | -------------------------------------------------------------------------------- /src/backends/wasapi/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | /// Type of errors from the WASAPI backend. 4 | #[derive(Debug, Error)] 5 | #[error("WASAPI error: ")] 6 | pub enum WasapiError { 7 | /// Error originating from WASAPI. 
8 | #[error("{} (code {})", .0.message(), .0.code())] 9 | BackendError(#[from] windows::core::Error), 10 | /// Requested WASAPI device configuration is not available 11 | #[error("Configuration not available")] 12 | ConfigurationNotAvailable, 13 | /// Windows Foundation error 14 | #[error("Win32 error: {0}")] 15 | FoundationError(String), 16 | } 17 | -------------------------------------------------------------------------------- /src/backends/wasapi/mod.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | mod error; 4 | 5 | mod device; 6 | pub(crate) mod driver; 7 | pub mod prelude; 8 | mod stream; 9 | 10 | pub use prelude::*; 11 | -------------------------------------------------------------------------------- /src/backends/wasapi/prelude.rs: -------------------------------------------------------------------------------- 1 | pub use super::{ 2 | device::WasapiDevice, driver::WasapiDriver, error::WasapiError, stream::WasapiStream, 3 | }; 4 | -------------------------------------------------------------------------------- /src/backends/wasapi/stream.rs: -------------------------------------------------------------------------------- 1 | use super::error; 2 | use crate::audio_buffer::AudioMut; 3 | use crate::backends::wasapi::util::WasapiMMDevice; 4 | use crate::channel_map::Bitset; 5 | use crate::prelude::{AudioRef, Timestamp}; 6 | use crate::{ 7 | AudioCallbackContext, AudioInput, AudioInputCallback, AudioOutput, AudioOutputCallback, 8 | AudioStreamHandle, StreamConfig, 9 | }; 10 | use duplicate::duplicate_item; 11 | use std::marker::PhantomData; 12 | use std::ptr::NonNull; 13 | use std::sync::atomic::{AtomicBool, Ordering}; 14 | use std::sync::Arc; 15 | use std::thread::JoinHandle; 16 | use std::time::Duration; 17 | use std::{ops, ptr, slice}; 18 | use windows::core::imp::CoTaskMemFree; 19 | use windows::core::Interface; 20 | use windows::Win32::Foundation; 21 | use windows::Win32::Foundation::{CloseHandle, 
HANDLE}; 22 | use windows::Win32::Media::{Audio, KernelStreaming, Multimedia}; 23 | use windows::Win32::System::Threading; 24 | 25 | type EjectSignal = Arc; 26 | 27 | #[duplicate_item( 28 | name ty; 29 | [AudioCaptureBuffer] [IAudioCaptureClient]; 30 | [AudioRenderBuffer] [IAudioRenderClient]; 31 | )] 32 | struct name<'a, T> { 33 | interface: &'a Audio::ty, 34 | data: NonNull, 35 | frame_size: usize, 36 | channels: usize, 37 | __type: PhantomData, 38 | } 39 | 40 | #[duplicate_item( 41 | name; 42 | [AudioCaptureBuffer]; 43 | [AudioRenderBuffer]; 44 | )] 45 | impl<'a, T> ops::Deref for name<'a, T> { 46 | type Target = [T]; 47 | 48 | fn deref(&self) -> &Self::Target { 49 | unsafe { slice::from_raw_parts(self.data.cast().as_ptr(), self.channels * self.frame_size) } 50 | } 51 | } 52 | 53 | #[duplicate_item( 54 | name; 55 | [AudioCaptureBuffer]; 56 | [AudioRenderBuffer]; 57 | )] 58 | impl<'a, T> ops::DerefMut for name<'a, T> { 59 | fn deref_mut(&mut self) -> &mut Self::Target { 60 | unsafe { 61 | slice::from_raw_parts_mut(self.data.cast().as_ptr(), self.channels * self.frame_size) 62 | } 63 | } 64 | } 65 | 66 | impl Drop for AudioCaptureBuffer<'_, T> { 67 | fn drop(&mut self) { 68 | unsafe { self.interface.ReleaseBuffer(self.frame_size as _).unwrap() }; 69 | } 70 | } 71 | 72 | impl Drop for AudioRenderBuffer<'_, T> { 73 | fn drop(&mut self) { 74 | unsafe { 75 | self.interface 76 | .ReleaseBuffer(self.frame_size as _, 0) 77 | .unwrap(); 78 | } 79 | } 80 | } 81 | 82 | impl<'a, T> AudioRenderBuffer<'a, T> { 83 | fn from_client( 84 | render_client: &'a Audio::IAudioRenderClient, 85 | channels: usize, 86 | frame_size: usize, 87 | ) -> Result { 88 | let data = NonNull::new(unsafe { render_client.GetBuffer(frame_size as _) }?) 
89 | .expect("Audio buffer data is null"); 90 | Ok(Self { 91 | interface: render_client, 92 | data, 93 | frame_size, 94 | channels, 95 | __type: PhantomData, 96 | }) 97 | } 98 | } 99 | impl<'a, T> AudioCaptureBuffer<'a, T> { 100 | fn from_client( 101 | capture_client: &'a Audio::IAudioCaptureClient, 102 | channels: usize, 103 | ) -> Result, error::WasapiError> { 104 | let mut buf_ptr = ptr::null_mut(); 105 | let mut frame_size = 0; 106 | let mut flags = 0; 107 | unsafe { capture_client.GetBuffer(&mut buf_ptr, &mut frame_size, &mut flags, None, None) }?; 108 | let Some(data) = NonNull::new(buf_ptr as _) else { 109 | return Ok(None); 110 | }; 111 | Ok(Some(Self { 112 | interface: capture_client, 113 | data, 114 | frame_size: frame_size as _, 115 | channels, 116 | __type: PhantomData, 117 | })) 118 | } 119 | } 120 | 121 | struct AudioThread { 122 | audio_client: Audio::IAudioClient, 123 | interface: Interface, 124 | audio_clock: Audio::IAudioClock, 125 | stream_config: StreamConfig, 126 | eject_signal: EjectSignal, 127 | frame_size: usize, 128 | callback: Callback, 129 | event_handle: HANDLE, 130 | clock_start: Duration, 131 | } 132 | 133 | impl AudioThread { 134 | fn finalize(self) -> Result { 135 | if !self.event_handle.is_invalid() { 136 | unsafe { CloseHandle(self.event_handle) }?; 137 | } 138 | let _ = unsafe { 139 | self.audio_client 140 | .Stop() 141 | .inspect_err(|err| eprintln!("Cannot stop audio thread: {err}")) 142 | }; 143 | Ok(self.callback) 144 | } 145 | } 146 | 147 | impl AudioThread { 148 | fn new( 149 | device: WasapiMMDevice, 150 | eject_signal: EjectSignal, 151 | mut stream_config: StreamConfig, 152 | callback: Callback, 153 | ) -> Result { 154 | unsafe { 155 | let audio_client: Audio::IAudioClient = device.activate()?; 156 | let sharemode = if stream_config.exclusive { 157 | Audio::AUDCLNT_SHAREMODE_EXCLUSIVE 158 | } else { 159 | Audio::AUDCLNT_SHAREMODE_SHARED 160 | }; 161 | let format = { 162 | let mut format = 
config_to_waveformatextensible(&stream_config); 163 | let mut actual_format = ptr::null_mut(); 164 | audio_client 165 | .IsFormatSupported( 166 | sharemode, 167 | &format.Format, 168 | (!stream_config.exclusive).then_some(&mut actual_format), 169 | ) 170 | .ok()?; 171 | if !stream_config.exclusive { 172 | assert!(!actual_format.is_null()); 173 | format.Format = actual_format.read_unaligned(); 174 | CoTaskMemFree(actual_format.cast()); 175 | let sample_rate = format.Format.nSamplesPerSec; 176 | stream_config.channels = 0u32.with_indices(0..format.Format.nChannels as _); 177 | stream_config.samplerate = sample_rate as _; 178 | } 179 | format 180 | }; 181 | let frame_size = stream_config 182 | .buffer_size_range 183 | .0 184 | .or(stream_config.buffer_size_range.1); 185 | let buffer_duration = frame_size 186 | .map(|frame_size| { 187 | buffer_size_to_duration(frame_size, stream_config.samplerate as _) 188 | }) 189 | .unwrap_or(0); 190 | audio_client.Initialize( 191 | sharemode, 192 | Audio::AUDCLNT_STREAMFLAGS_EVENTCALLBACK 193 | | Audio::AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM, 194 | buffer_duration, 195 | 0, 196 | &format.Format, 197 | None, 198 | )?; 199 | let buffer_size = audio_client.GetBufferSize()? 
as usize; 200 | let event_handle = { 201 | let event_handle = 202 | Threading::CreateEventA(None, false, false, windows::core::PCSTR(ptr::null()))?; 203 | audio_client.SetEventHandle(event_handle)?; 204 | event_handle 205 | }; 206 | let interface = audio_client.GetService::()?; 207 | let audio_clock = audio_client.GetService::()?; 208 | let frame_size = buffer_size; 209 | Ok(Self { 210 | audio_client, 211 | interface, 212 | audio_clock, 213 | event_handle, 214 | frame_size, 215 | eject_signal, 216 | stream_config: StreamConfig { 217 | buffer_size_range: (Some(frame_size), Some(frame_size)), 218 | ..stream_config 219 | }, 220 | clock_start: Duration::ZERO, 221 | callback, 222 | }) 223 | } 224 | } 225 | 226 | fn await_frame(&mut self) -> Result<(), error::WasapiError> { 227 | let _ = unsafe { 228 | let result = Threading::WaitForSingleObject(self.event_handle, Threading::INFINITE); 229 | if result == Foundation::WAIT_FAILED { 230 | let err = Foundation::GetLastError(); 231 | let description = format!("Waiting for event handle failed: {:?}", err); 232 | return Err(error::WasapiError::FoundationError(description)); 233 | } 234 | result 235 | }; 236 | Ok(()) 237 | } 238 | 239 | fn output_timestamp(&self) -> Result { 240 | let clock = stream_instant(&self.audio_clock)?; 241 | let diff = clock - self.clock_start; 242 | Ok(Timestamp::from_duration( 243 | self.stream_config.samplerate, 244 | diff, 245 | )) 246 | } 247 | } 248 | 249 | impl AudioThread { 250 | fn run(mut self) -> Result { 251 | set_thread_priority(); 252 | unsafe { 253 | self.audio_client.Start()?; 254 | } 255 | self.clock_start = stream_instant(&self.audio_clock)?; 256 | loop { 257 | if self.eject_signal.load(Ordering::Relaxed) { 258 | break self.finalize(); 259 | } 260 | self.await_frame()?; 261 | self.process()?; 262 | } 263 | .inspect_err(|err| eprintln!("Render thread process error: {err}")) 264 | } 265 | 266 | fn process(&mut self) -> Result<(), error::WasapiError> { 267 | let frames_available = unsafe 
{ self.interface.GetNextPacketSize()? as usize }; 268 | if frames_available == 0 { 269 | return Ok(()); 270 | } 271 | let Some(mut buffer) = AudioCaptureBuffer::::from_client( 272 | &self.interface, 273 | self.stream_config.channels.count(), 274 | )? 275 | else { 276 | eprintln!("Null buffer from WASAPI"); 277 | return Ok(()); 278 | }; 279 | let timestamp = self.output_timestamp()?; 280 | let context = AudioCallbackContext { 281 | stream_config: self.stream_config, 282 | timestamp, 283 | }; 284 | let buffer = 285 | AudioRef::from_interleaved(&mut buffer, self.stream_config.channels.count()).unwrap(); 286 | let output = AudioInput { timestamp, buffer }; 287 | self.callback.on_input_data(context, output); 288 | Ok(()) 289 | } 290 | } 291 | 292 | impl AudioThread { 293 | fn run(mut self) -> Result { 294 | set_thread_priority(); 295 | unsafe { 296 | self.audio_client.Start()?; 297 | } 298 | self.clock_start = stream_instant(&self.audio_clock)?; 299 | loop { 300 | if self.eject_signal.load(Ordering::Relaxed) { 301 | break self.finalize(); 302 | } 303 | self.await_frame()?; 304 | self.process()?; 305 | } 306 | .inspect_err(|err| eprintln!("Render thread process error: {err}")) 307 | } 308 | 309 | fn process(&mut self) -> Result<(), error::WasapiError> { 310 | let frames_available = unsafe { 311 | let padding = self.audio_client.GetCurrentPadding()? 
as usize; 312 | self.frame_size - padding 313 | }; 314 | if frames_available == 0 { 315 | return Ok(()); 316 | } 317 | let frames_requested = if let Some(max_frames) = self.stream_config.buffer_size_range.1 { 318 | frames_available.min(max_frames) 319 | } else { 320 | frames_available 321 | }; 322 | let mut buffer = AudioRenderBuffer::::from_client( 323 | &self.interface, 324 | self.stream_config.channels.count(), 325 | frames_requested, 326 | )?; 327 | let timestamp = self.output_timestamp()?; 328 | let context = AudioCallbackContext { 329 | stream_config: self.stream_config, 330 | timestamp, 331 | }; 332 | let buffer = 333 | AudioMut::from_interleaved_mut(&mut buffer, self.stream_config.channels.count()) 334 | .unwrap(); 335 | let output = AudioOutput { timestamp, buffer }; 336 | self.callback.on_output_data(context, output); 337 | Ok(()) 338 | } 339 | } 340 | 341 | /// Type representing a WASAPI audio stream. 342 | pub struct WasapiStream { 343 | join_handle: JoinHandle>, 344 | eject_signal: EjectSignal, 345 | } 346 | 347 | impl AudioStreamHandle for WasapiStream { 348 | type Error = error::WasapiError; 349 | 350 | fn eject(self) -> Result { 351 | self.eject_signal.store(true, Ordering::Relaxed); 352 | self.join_handle 353 | .join() 354 | .expect("Audio output thread panicked") 355 | } 356 | } 357 | 358 | impl WasapiStream { 359 | pub(crate) fn new_input( 360 | device: WasapiMMDevice, 361 | stream_config: StreamConfig, 362 | callback: Callback, 363 | ) -> Self { 364 | let eject_signal = EjectSignal::default(); 365 | let join_handle = std::thread::Builder::new() 366 | .name("interflow_wasapi_output_stream".to_string()) 367 | .spawn({ 368 | let eject_signal = eject_signal.clone(); 369 | move || { 370 | let inner: AudioThread = 371 | AudioThread::new(device, eject_signal, stream_config, callback) 372 | .inspect_err(|err| { 373 | eprintln!("Failed to create render thread: {err}") 374 | })?; 375 | inner.run() 376 | } 377 | }) 378 | .expect("Cannot spawn audio output 
thread");
        Self {
            join_handle,
            eject_signal,
        }
    }
}

impl<Callback: AudioOutputCallback> WasapiStream<Callback> {
    /// Spawns the dedicated audio thread driving an output (render) stream on
    /// the given device, returning a handle that can later eject the stream.
    pub(crate) fn new_output(
        device: WasapiMMDevice,
        stream_config: StreamConfig,
        callback: Callback,
    ) -> Self {
        let eject_signal = EjectSignal::default();
        let join_handle = std::thread::Builder::new()
            .name("interflow_wasapi_output_stream".to_string())
            .spawn({
                let eject_signal = eject_signal.clone();
                move || {
                    let inner: AudioThread<Callback> =
                        AudioThread::new(device, eject_signal, stream_config, callback)
                            .inspect_err(|err| {
                                eprintln!("Failed to create render thread: {err}")
                            })?;
                    inner.run()
                }
            })
            .expect("Cannot spawn audio output thread");
        Self {
            join_handle,
            eject_signal,
        }
    }
}

/// Raises the calling thread's priority to time-critical, as is customary for
/// real-time audio threads. Failure is deliberately ignored: audio still runs,
/// just with weaker scheduling guarantees.
fn set_thread_priority() {
    unsafe {
        // BUG FIX: `SetThreadPriority` expects a thread *handle*, not a thread
        // ID. The previous code cast the result of `GetCurrentThreadId()` into
        // a `HANDLE`, which is not a valid handle and made the call a no-op at
        // best. `GetCurrentThread()` returns the proper pseudo-handle for the
        // calling thread.
        let _ = Threading::SetThreadPriority(
            Threading::GetCurrentThread(),
            Threading::THREAD_PRIORITY_TIME_CRITICAL,
        );
    }
}

/// Converts a buffer size in frames to a duration in the 100-nanosecond
/// `REFERENCE_TIME` units that WASAPI's `IAudioClient::Initialize` expects.
pub fn buffer_size_to_duration(buffer_size: usize, sample_rate: u32) -> i64 {
    // BUG FIX: the previous form `(buffer_size / sample_rate) * 10_000_000`
    // performed the integer division first, truncating to 0 for any buffer
    // shorter than one second (i.e. virtually every realistic buffer size).
    // Multiplying first preserves sub-second precision.
    buffer_size as i64 * (1_000_000_000 / 100) / sample_rate as i64
}

/// Reads the audio clock's current position as a duration since an arbitrary
/// epoch, using the QPC timestamp reported alongside the device position.
fn stream_instant(audio_clock: &Audio::IAudioClock) -> Result<Duration, error::WasapiError> {
    let mut position: u64 = 0;
    let mut qpc_position: u64 = 0;
    unsafe {
        audio_clock.GetPosition(&mut position, Some(&mut qpc_position))?;
    };
    // The `qpc_position` is in 100 nanosecond units. Convert it to nanoseconds.
436 | let qpc_nanos = qpc_position * 100; 437 | let instant = Duration::from_nanos(qpc_nanos); 438 | Ok(instant) 439 | } 440 | 441 | pub(crate) fn config_to_waveformatextensible(config: &StreamConfig) -> Audio::WAVEFORMATEXTENSIBLE { 442 | let format_tag = KernelStreaming::WAVE_FORMAT_EXTENSIBLE; 443 | let channels = config.channels as u16; 444 | let sample_rate = config.samplerate as u32; 445 | let sample_bytes = size_of::() as u16; 446 | let avg_bytes_per_sec = u32::from(channels) * sample_rate * u32::from(sample_bytes); 447 | let block_align = channels * sample_bytes; 448 | let bits_per_sample = 8 * sample_bytes; 449 | 450 | let cb_size = { 451 | let extensible_size = size_of::(); 452 | let ex_size = size_of::(); 453 | (extensible_size - ex_size) as u16 454 | }; 455 | 456 | let waveformatex = Audio::WAVEFORMATEX { 457 | wFormatTag: format_tag as u16, 458 | nChannels: channels, 459 | nSamplesPerSec: sample_rate, 460 | nAvgBytesPerSec: avg_bytes_per_sec, 461 | nBlockAlign: block_align, 462 | wBitsPerSample: bits_per_sample, 463 | cbSize: cb_size, 464 | }; 465 | 466 | let channel_mask = KernelStreaming::KSAUDIO_SPEAKER_DIRECTOUT; 467 | 468 | let sub_format = Multimedia::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; 469 | 470 | let waveformatextensible = Audio::WAVEFORMATEXTENSIBLE { 471 | Format: waveformatex, 472 | Samples: Audio::WAVEFORMATEXTENSIBLE_0 { 473 | wSamplesPerBlock: bits_per_sample, 474 | }, 475 | dwChannelMask: channel_mask, 476 | SubFormat: sub_format, 477 | }; 478 | 479 | waveformatextensible 480 | } 481 | 482 | pub(crate) fn is_output_config_supported( 483 | device: WasapiMMDevice, 484 | stream_config: &StreamConfig, 485 | ) -> bool { 486 | let mut try_ = || unsafe { 487 | let audio_client: Audio::IAudioClient = device.activate()?; 488 | let sharemode = if stream_config.exclusive { 489 | Audio::AUDCLNT_SHAREMODE_EXCLUSIVE 490 | } else { 491 | Audio::AUDCLNT_SHAREMODE_SHARED 492 | }; 493 | let mut format = config_to_waveformatextensible(&stream_config); 494 | 
let mut actual_format = ptr::null_mut(); 495 | audio_client 496 | .IsFormatSupported( 497 | sharemode, 498 | &format.Format, 499 | (!stream_config.exclusive).then_some(&mut actual_format), 500 | ) 501 | .ok()?; 502 | if !stream_config.exclusive { 503 | assert!(!actual_format.is_null()); 504 | format.Format = actual_format.read_unaligned(); 505 | CoTaskMemFree(actual_format.cast()); 506 | let sample_rate = format.Format.nSamplesPerSec; 507 | let new_channels = 0u32.with_indices(0..format.Format.nChannels as _); 508 | let new_samplerate = sample_rate as f64; 509 | if stream_config.samplerate != new_samplerate 510 | || stream_config.channels.count() != new_channels.count() 511 | { 512 | return Ok(false); 513 | } 514 | } 515 | Ok::<_, error::WasapiError>(true) 516 | }; 517 | try_() 518 | .inspect_err(|err| eprintln!("Error while checking configuration is valid: {err}")) 519 | .unwrap_or(false) 520 | } 521 | -------------------------------------------------------------------------------- /src/backends/wasapi/util.rs: -------------------------------------------------------------------------------- 1 | use crate::prelude::wasapi::error; 2 | use std::ffi::OsString; 3 | use std::marker::PhantomData; 4 | use std::os::windows::ffi::OsStringExt; 5 | use windows::core::Interface; 6 | use windows::Win32::Devices::Properties; 7 | use windows::Win32::Foundation::RPC_E_CHANGED_MODE; 8 | use windows::Win32::Media::Audio; 9 | use windows::Win32::System::Com; 10 | use windows::Win32::System::Com::{ 11 | CoInitializeEx, CoUninitialize, StructuredStorage, COINIT_APARTMENTTHREADED, STGM_READ, 12 | }; 13 | use windows::Win32::System::Variant::VT_LPWSTR; 14 | 15 | thread_local!(static COM_INITIALIZER: ComInitializer = { 16 | unsafe { 17 | // Try to initialize COM with STA by default to avoid compatibility issues with the ASIO 18 | // backend (where CoInitialize() is called by the ASIO SDK) or winit (where drag and drop 19 | // requires STA). 
20 | // This call can fail with RPC_E_CHANGED_MODE if another library initialized COM with MTA. 21 | // That's OK though since COM ensures thread-safety/compatibility through marshalling when 22 | // necessary. 23 | let result = CoInitializeEx(None, COINIT_APARTMENTTHREADED); 24 | if result.is_ok() || result == RPC_E_CHANGED_MODE { 25 | ComInitializer { 26 | result, 27 | _ptr: PhantomData, 28 | } 29 | } else { 30 | // COM initialization failed in another way, something is really wrong. 31 | panic!( 32 | "Failed to initialize COM: {}", 33 | std::io::Error::from_raw_os_error(result.0) 34 | ); 35 | } 36 | } 37 | }); 38 | 39 | /// RAII object that guards the fact that COM is initialized. 40 | /// 41 | // We store a raw pointer because it's the only way at the moment to remove `Send`/`Sync` from the 42 | // object. 43 | struct ComInitializer { 44 | result: windows::core::HRESULT, 45 | _ptr: PhantomData<*mut ()>, 46 | } 47 | 48 | impl Drop for ComInitializer { 49 | #[inline] 50 | fn drop(&mut self) { 51 | // Need to avoid calling CoUninitialize() if CoInitializeEx failed since it may have 52 | // returned RPC_E_MODE_CHANGED - which is OK, see above. 53 | if self.result.is_ok() { 54 | unsafe { CoUninitialize() }; 55 | } 56 | } 57 | } 58 | 59 | /// Ensures that COM is initialized in this thread. 
#[inline]
pub fn com_initializer() {
    COM_INITIALIZER.with(|_| {});
}

/// Wrapper around `IMMDevice` that can be sent across threads.
#[derive(Debug, Clone)]
pub struct WasapiMMDevice(Audio::IMMDevice);

unsafe impl Send for WasapiMMDevice {}

impl WasapiMMDevice {
    pub(crate) fn new(device: Audio::IMMDevice) -> Self {
        Self(device)
    }

    /// Activates the device, returning the requested COM interface `T`.
    pub(crate) fn activate<T: Interface>(&self) -> Result<T, error::WasapiError> {
        unsafe {
            self.0
                .Activate::<T>(Com::CLSCTX_ALL, None)
                .map_err(error::WasapiError::BackendError)
        }
    }

    /// Returns the device's human-readable name, if one can be read from its
    /// property store.
    pub(crate) fn name(&self) -> Option<String> {
        get_device_name(&self.0)
    }
}

/// Reads a human-readable name for `device` from its property store, trying
/// the endpoint friendly name, then the interface friendly name, then the
/// device description.
fn get_device_name(device: &Audio::IMMDevice) -> Option<String> {
    unsafe {
        // Open the device's property store.
        let property_store = device
            .OpenPropertyStore(STGM_READ)
            .expect("could not open property store");

        // Get the endpoint's friendly-name property, else the interface's
        // friendly-name, else the device description.
        let mut property_value = property_store
            .GetValue(&Properties::DEVPKEY_Device_FriendlyName as *const _ as *const _)
            .or(property_store.GetValue(
                &Properties::DEVPKEY_DeviceInterface_FriendlyName as *const _ as *const _,
            ))
            .or(property_store
                .GetValue(&Properties::DEVPKEY_Device_DeviceDesc as *const _ as *const _))
            .ok()?;

        let prop_variant = &property_value.Anonymous.Anonymous;

        // Read the friendly-name from the union data field, expecting a *const u16.
        if prop_variant.vt != VT_LPWSTR {
            return None;
        }

        let ptr_utf16 = *(&prop_variant.Anonymous as *const _ as *const *const u16);

        // Find the length of the (NUL-terminated UTF-16) friendly name.
        let mut len = 0;
        while *ptr_utf16.offset(len) != 0 {
            len += 1;
        }

        // Convert to a string.
121 | let name_slice = std::slice::from_raw_parts(ptr_utf16, len as usize); 122 | let name_os_string: OsString = OsStringExt::from_wide(name_slice); 123 | let name = name_os_string 124 | .into_string() 125 | .unwrap_or_else(|os_string| os_string.to_string_lossy().into()); 126 | 127 | // Clean up. 128 | StructuredStorage::PropVariantClear(&mut property_value).ok()?; 129 | 130 | Some(name) 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /src/channel_map.rs: -------------------------------------------------------------------------------- 1 | //! This module provides functionality for working with bitsets and channel mapping. 2 | //! 3 | //! A bitset is a data structure that efficiently stores a set of boolean values using bits. 4 | //! Each bit represents a boolean state (true/false) for a specific index or channel. 5 | //! 6 | //! The module includes: 7 | //! - Generic `Bitset` trait for types that can represent sets of boolean values 8 | //! - `CreateBitset` trait for constructing bitsets from indices 9 | //! - Implementations for standard unsigned integer types (u8, u16, u32, u64, u128) 10 | //! - Slice-based implementation for working with arrays of bitsets 11 | //! - Type aliases for common channel map sizes (32, 64, and 128 bits) 12 | //! 13 | //! # Example 14 | //! 15 | //! ``` 16 | //! use interflow::channel_map::Bitset; 17 | //! 18 | //! let mut map = 0u32; 19 | //! map.set_index(0, true); 20 | //! map.set_index(5, true); 21 | //! assert!(map.get_index(0)); 22 | //! assert!(map.get_index(5)); 23 | //! assert!(!map.get_index(1)); 24 | //! ``` 25 | 26 | use core::panic; 27 | 28 | /// Trait for types which can represent bitsets. 29 | /// 30 | /// A bit set is a type which encodes a boolean value, functioning similarly in principle to a 31 | /// `HashSet`. 32 | pub trait Bitset: Sized { 33 | /// Return the capacity of this bitset, that is, how many indices can be used with this type. 
34 | fn capacity(&self) -> usize; 35 | 36 | /// Get the value for a specific index. Implementations should panic when this value is out 37 | /// of range. 38 | fn get_index(&self, index: usize) -> bool; 39 | 40 | /// Sets the value for a specific index. Implementations should panic when this value is out 41 | /// of range. 42 | fn set_index(&mut self, index: usize, value: bool); 43 | 44 | /// Returns an iterator of indices for which the value has been set `true`. 45 | fn indices(&self) -> impl IntoIterator { 46 | (0..self.capacity()).filter(|i| self.get_index(*i)) 47 | } 48 | /// Count the number of `true` elements in this bit set. 49 | fn count(&self) -> usize { 50 | self.indices().into_iter().count() 51 | } 52 | 53 | /// Builder-like method for setting a value at a specific index. 54 | fn with_index(&mut self, index: usize, value: bool) -> &mut Self { 55 | self.set_index(index, value); 56 | self 57 | } 58 | /// Builder-like method for setting all provided indices to `. 59 | fn with_indices(mut self, indices: impl IntoIterator) -> Self { 60 | for ix in indices { 61 | self.set_index(ix, true); 62 | } 63 | self 64 | } 65 | } 66 | 67 | /// Trait for bitsets that can be created from indices 68 | pub trait CreateBitset: Bitset { 69 | /// Create a [`Self`] from the given indices 70 | /// 71 | /// # Arguments 72 | /// 73 | /// - `indices`: [`IntoIterator`] implementation that returns [`usize`] values corresponding to the indices to 74 | /// set in the bitset. 
75 | fn from_indices(indices: impl IntoIterator) -> Self; 76 | } 77 | 78 | #[duplicate::duplicate_item( 79 | ty; 80 | [u8]; 81 | [u16]; 82 | [u32]; 83 | [u64]; 84 | [u128]; 85 | )] 86 | impl Bitset for ty { 87 | fn capacity(&self) -> usize { 88 | ty::BITS as usize 89 | } 90 | 91 | fn get_index(&self, index: usize) -> bool { 92 | let mask = 1 << index; 93 | self & mask > 0 94 | } 95 | 96 | fn set_index(&mut self, index: usize, value: bool) { 97 | let mask = 1 << index; 98 | if value { 99 | *self |= mask; 100 | } else { 101 | *self &= !mask; 102 | } 103 | } 104 | 105 | fn count(&self) -> usize { 106 | self.count_ones() as _ 107 | } 108 | } 109 | 110 | #[duplicate::duplicate_item( 111 | ty; 112 | [u8]; 113 | [u16]; 114 | [u32]; 115 | [u64]; 116 | [u128]; 117 | )] 118 | impl CreateBitset for ty { 119 | fn from_indices(indices: impl IntoIterator) -> Self { 120 | indices 121 | .into_iter() 122 | .inspect(|x| assert!(*x < Self::BITS as usize, "Index out of range")) 123 | .fold(0, |acc, ix| acc | (1 << ix)) 124 | } 125 | } 126 | 127 | fn get_inner_bitset_at(arr: &[T], mut index: usize) -> Option<(usize, usize)> { 128 | arr.iter().enumerate().find_map({ 129 | move |(i, b)| match index.checked_sub(b.capacity()) { 130 | None => Some((i, index)), 131 | Some(v) => { 132 | index = v; 133 | None 134 | } 135 | } 136 | }) 137 | } 138 | 139 | impl Bitset for &mut [T] { 140 | fn capacity(&self) -> usize { 141 | self.iter().map(|b| b.capacity()).sum() 142 | } 143 | 144 | fn get_index(&self, index: usize) -> bool { 145 | let Some((bitset_index, inner_index)) = get_inner_bitset_at(self, index) else { 146 | return false; 147 | }; 148 | self[bitset_index].get_index(inner_index) 149 | } 150 | 151 | fn set_index(&mut self, index: usize, value: bool) { 152 | let Some((bitset_index, inner_index)) = get_inner_bitset_at(self, index) else { 153 | panic!("Index {index} outside of range {}", self.capacity()); 154 | }; 155 | self[bitset_index].set_index(inner_index, value); 156 | } 157 | } 158 | 
159 | /// Type alias for a bitset with a capacity of 32 slots. 160 | pub type ChannelMap32 = u32; 161 | /// Type alias for a bitset with a capacity of 64 slots. 162 | pub type ChannelMap64 = u64; 163 | /// Type alias for a bitset with a capacity of 128 slots. 164 | pub type ChannelMap128 = u128; 165 | 166 | #[cfg(test)] 167 | mod test { 168 | use std::collections::HashSet; 169 | use std::hash::RandomState; 170 | 171 | use super::*; 172 | 173 | #[test] 174 | fn test_getset_index() { 175 | let mut bitset = 0u8; 176 | bitset.set_index(0, true); 177 | bitset.set_index(2, true); 178 | bitset.set_index(3, true); 179 | bitset.set_index(2, false); 180 | 181 | assert_eq!(0b1001, bitset); 182 | assert!(bitset.get_index(0)); 183 | assert!(bitset.get_index(3)); 184 | assert!(!bitset.get_index(2)); 185 | } 186 | 187 | #[test] 188 | fn test_from_indices() { 189 | let bitset = u8::from_indices([0, 2, 3]); 190 | assert_eq!(0b1101, bitset); 191 | } 192 | 193 | #[test] 194 | fn test_indices() { 195 | let bitset = 0b10010100u8; 196 | let result = HashSet::<_, RandomState>::from_iter(bitset.indices()); 197 | assert_eq!(HashSet::from_iter([2, 4, 7]), result); 198 | } 199 | 200 | #[test] 201 | fn test_slice_getset() { 202 | let mut storage = [0; 3]; 203 | let mut bitset: &mut [u32] = &mut storage; 204 | 205 | bitset.set_index(0, true); 206 | bitset.set_index(34, true); 207 | bitset.set_index(81, true); 208 | 209 | assert_eq!([0b1, 0b100, 1 << (81 - 64)], bitset); 210 | 211 | assert!(bitset.get_index(0)); 212 | assert!(bitset.get_index(34)); 213 | assert!(bitset.get_index(81)); 214 | } 215 | 216 | #[test] 217 | fn test_slice_indices() { 218 | let mut storage = [0b100101u8, (1 << 6) | (1 << 4), 1]; 219 | let bitrate: &mut [u8] = &mut storage; 220 | let result = HashSet::<_, RandomState>::from_iter(bitrate.indices()); 221 | assert_eq!(HashSet::from_iter([0, 2, 5, 12, 14, 16]), result); 222 | } 223 | } 224 | -------------------------------------------------------------------------------- 
/src/duplex.rs:
--------------------------------------------------------------------------------

//! Module for simultaneous input/output audio processing
//!
//! This module includes a proxy for gathering an input audio stream, and optionally process it to
//! resample it to the output sample rate.
use crate::audio_buffer::AudioRef;
use crate::channel_map::Bitset;
use crate::{
    AudioCallbackContext, AudioDevice, AudioInput, AudioInputCallback, AudioInputDevice,
    AudioOutput, AudioOutputCallback, AudioOutputDevice, AudioStreamHandle, SendEverywhereButOnWeb,
    StreamConfig,
};
use fixed_resample::{PushStatus, ReadStatus, ResamplingChannelConfig};
// `std::error::Error` (trait) and `thiserror::Error` (derive macro) live in different
// namespaces, so both imports can coexist.
use std::error::Error;
use std::num::NonZeroUsize;
use thiserror::Error;

/// Maximum number of channels supported by the duplex machinery; sizes the interleaving scratch
/// buffers used on the real-time path.
const MAX_CHANNELS: usize = 64;

/// Trait of types that can process both input and output audio streams at the same time.
pub trait AudioDuplexCallback: 'static + SendEverywhereButOnWeb {
    /// Processes audio data in a duplex stream.
    ///
    /// # Arguments
    /// * `context` - The context containing stream configuration and timing information
    /// * `input` - The input audio buffer containing captured audio data
    /// * `output` - The output audio buffer to be filled with processed audio data
    fn on_audio_data(
        &mut self,
        context: AudioCallbackContext,
        input: AudioInput<f32>,
        output: AudioOutput<f32>,
    );
}

/// Type which handles both halves of a duplex stream.
// NOTE(review): generic parameters reconstructed from usage — confirm against upstream source.
pub struct DuplexStream<Callback, Error> {
    _input_stream: Box<dyn AudioStreamHandle<InputProxy, Error = Error>>,
    _output_stream: Box<dyn AudioStreamHandle<DuplexCallback<Callback>, Error = Error>>,
}

/// Input proxy for transferring an input signal to a separate output callback to be processed as a duplex stream.
42 | pub struct InputProxy { 43 | producer: Option>, 44 | receive_output_samplerate: rtrb::Consumer, 45 | send_consumer: rtrb::Producer>, 46 | } 47 | 48 | impl InputProxy { 49 | /// Create a new input proxy for transferring an input stream, resample it, and make it available in an output 50 | /// stream. 51 | pub fn new() -> ( 52 | Self, 53 | rtrb::Producer, 54 | rtrb::Consumer>, 55 | ) { 56 | let (send_consumer, receive_consumer) = rtrb::RingBuffer::new(1); 57 | let (produce_output_samplerate, receive_output_samplerate) = rtrb::RingBuffer::new(1); 58 | ( 59 | Self { 60 | producer: None, 61 | receive_output_samplerate, 62 | send_consumer, 63 | }, 64 | produce_output_samplerate, 65 | receive_consumer, 66 | ) 67 | } 68 | } 69 | 70 | impl AudioInputCallback for InputProxy { 71 | /// Processes incoming audio data and stores it in the internal buffer. 72 | /// 73 | /// Handles sample rate conversion between input and output streams. 74 | /// 75 | /// # Arguments 76 | /// * `context` - The context containing stream configuration and timing information 77 | /// * `input` - The input audio buffer containing captured audio data 78 | fn on_input_data(&mut self, context: AudioCallbackContext, input: AudioInput) { 79 | log::trace!(num_samples = input.buffer.num_samples(), num_channels = input.buffer.num_channels(); 80 | "on_input_data"); 81 | if let Ok(output_samplerate) = self.receive_output_samplerate.pop() { 82 | let Some(num_channels) = NonZeroUsize::new(context.stream_config.channels.count()) 83 | else { 84 | log::error!("Input proxy: no input channels given"); 85 | return; 86 | }; 87 | let input_samplerate = context.stream_config.samplerate as _; 88 | log::debug!( 89 | "Creating resampling channel ({} Hz) -> ({} Hz) ({} channels)", 90 | input_samplerate, 91 | output_samplerate, 92 | num_channels.get() 93 | ); 94 | let (tx, rx) = fixed_resample::resampling_channel( 95 | num_channels, 96 | input_samplerate, 97 | output_samplerate, 98 | ResamplingChannelConfig { 99 | 
latency_seconds: 0.01, 100 | quality: fixed_resample::ResampleQuality::Low, 101 | ..Default::default() 102 | }, 103 | ); 104 | self.producer.replace(tx); 105 | match self.send_consumer.push(rx) { 106 | Ok(_) => { 107 | log::debug!( 108 | "Input proxy: resampling channel ({} Hz) sent", 109 | context.stream_config.samplerate 110 | ); 111 | } 112 | Err(err) => { 113 | log::error!("Input proxy: cannot send resampling channel: {}", err); 114 | } 115 | } 116 | } 117 | let Some(producer) = &mut self.producer else { 118 | log::debug!("No resampling producer available, dropping input data"); 119 | return; 120 | }; 121 | 122 | let mut scratch = [0f32; 32 * MAX_CHANNELS]; 123 | for slice in input.buffer.chunks(32) { 124 | let len = slice.num_samples() * slice.num_channels(); 125 | debug_assert!( 126 | slice.copy_into_interleaved(&mut scratch[..len]), 127 | "Cannot fail: len is computed from slice itself" 128 | ); 129 | match producer.push_interleaved(&scratch[..len]) { 130 | PushStatus::OverflowOccurred { .. } => { 131 | log::error!("Input proxy: overflow occurred"); 132 | } 133 | PushStatus::UnderflowCorrected { .. } => { 134 | log::error!("Input proxy: underflow corrected"); 135 | } 136 | _ => {} 137 | } 138 | } 139 | } 140 | } 141 | 142 | #[derive(Debug, Error)] 143 | #[error(transparent)] 144 | /// Represents errors that can occur during duplex stream operations. 145 | pub enum DuplexCallbackError { 146 | /// No input channels given 147 | #[error("No input channels given")] 148 | NoInputChannels, 149 | /// An error occurred in the input stream 150 | InputError(InputError), 151 | /// An error occurred in the output stream 152 | OutputError(OutputError), 153 | /// An error that doesn't fit into other categories 154 | Other(Box), 155 | } 156 | 157 | /// [`AudioOutputCallback`] implementation for which runs the provided [`AudioDuplexCallback`]. 
158 | pub struct DuplexCallback { 159 | input: Option>, 160 | receive_consumer: rtrb::Consumer>, 161 | send_samplerate: rtrb::Producer, 162 | callback: Callback, 163 | storage_raw: Box<[f32]>, 164 | current_samplerate: u32, 165 | num_input_channels: usize, 166 | resample_config: ResamplingChannelConfig, 167 | } 168 | 169 | impl DuplexCallback { 170 | /// Consumes the DuplexCallback and returns the underlying callback implementation. 171 | /// 172 | /// # Returns 173 | /// The wrapped callback instance or an error if extraction fails 174 | pub fn into_inner(self) -> Result> { 175 | Ok(self.callback) 176 | } 177 | } 178 | 179 | impl AudioOutputCallback for DuplexCallback { 180 | fn on_output_data(&mut self, context: AudioCallbackContext, output: AudioOutput) { 181 | // If changed, send new output samplerate to input proxy 182 | let samplerate = context.stream_config.samplerate as u32; 183 | if samplerate != self.current_samplerate && self.send_samplerate.push(samplerate).is_ok() { 184 | log::debug!("Output samplerate changed to {}", samplerate); 185 | self.current_samplerate = samplerate; 186 | } 187 | 188 | // Receive updated resample channel 189 | if let Ok(input) = self.receive_consumer.pop() { 190 | log::debug!( 191 | "Output resample channel received ({}/{} Hz)", 192 | input.out_sample_rate(), 193 | input.in_sample_rate() 194 | ); 195 | self.num_input_channels = input.num_channels().get(); 196 | self.input.replace(input); 197 | } 198 | 199 | // Receive input from proxy 200 | let frames = output.buffer.num_samples(); 201 | let storage = if let Some(input) = &mut self.input { 202 | let len = input.num_channels().get() * frames; 203 | let slice = &mut self.storage_raw[..len]; 204 | match input.read_interleaved(slice) { 205 | ReadStatus::UnderflowOccurred { .. } => { 206 | log::error!("Output resample channel underflow occurred"); 207 | } 208 | ReadStatus::OverflowCorrected { .. 
} => { 209 | log::error!("Output resample channel overflow corrected"); 210 | } 211 | _ => {} 212 | } 213 | AudioRef::from_interleaved(slice, input.num_channels().get()).unwrap() 214 | } else { 215 | AudioRef::from_interleaved(&[], self.num_input_channels).unwrap() 216 | }; 217 | 218 | let input = AudioInput { 219 | timestamp: context.timestamp, 220 | buffer: storage, 221 | }; 222 | // Run user callback 223 | self.callback.on_audio_data(context, input, output); 224 | } 225 | } 226 | 227 | /// A handle for managing a duplex audio stream that combines input and output capabilities. 228 | /// 229 | /// This struct provides a way to control and manage a duplex audio stream that processes both 230 | /// input and output audio data simultaneously. It wraps the individual input and output stream 231 | /// handles and provides unified control over the duplex operation. 232 | /// 233 | /// # Type Parameters 234 | /// 235 | /// * `InputHandle` - The type of the input stream handle, must implement `AudioStreamHandle` 236 | /// * `OutputHandle` - The type of the output stream handle, must implement `AudioStreamHandle>` 237 | /// 238 | /// # Example 239 | /// 240 | /// ```no_run 241 | /// use interflow::duplex::AudioDuplexCallback; 242 | /// use interflow::prelude::*; 243 | /// 244 | /// let input_device = default_input_device(); 245 | /// let output_device = default_output_device(); 246 | /// let input_config = input_device.default_input_config().unwrap(); 247 | /// let output_config = output_device.default_output_config().unwrap(); 248 | /// 249 | /// struct MyCallback; 250 | /// 251 | /// impl MyCallback { 252 | /// fn new() -> Self { Self } 253 | /// } 254 | /// 255 | /// impl AudioDuplexCallback for MyCallback { 256 | /// fn on_audio_data(&mut self, context: AudioCallbackContext, input: AudioInput, output: AudioOutput) { 257 | /// // Implementation left as an exercise to the reader 258 | /// } 259 | /// } 260 | /// 261 | /// // Create and use a duplex stream 262 | /// let 
stream_handle = create_duplex_stream( 263 | /// input_device, 264 | /// output_device, 265 | /// MyCallback::new(), 266 | /// DuplexStreamConfig::new(input_config, output_config), 267 | /// ).expect("Failed to create duplex stream"); 268 | /// 269 | /// // Later, stop the stream and retrieve the callback 270 | /// let callback = stream_handle.eject().expect("Failed to stop stream"); 271 | /// ``` 272 | #[derive(Debug)] 273 | pub struct DuplexStreamHandle { 274 | input_handle: InputHandle, 275 | output_handle: OutputHandle, 276 | } 277 | 278 | impl< 279 | Callback, 280 | InputHandle: AudioStreamHandle, 281 | OutputHandle: AudioStreamHandle>, 282 | > AudioStreamHandle for DuplexStreamHandle 283 | { 284 | type Error = DuplexCallbackError; 285 | 286 | /// Stops the duplex stream and retrieves the callback instance. 287 | /// 288 | /// # Returns 289 | /// 290 | /// The callback instance if successful, or an error if the stream cannot be stopped properly 291 | fn eject(self) -> Result { 292 | self.input_handle 293 | .eject() 294 | .map_err(DuplexCallbackError::InputError)?; 295 | let duplex_callback = self 296 | .output_handle 297 | .eject() 298 | .map_err(DuplexCallbackError::OutputError)?; 299 | duplex_callback 300 | .into_inner() 301 | .map_err(DuplexCallbackError::Other) 302 | } 303 | } 304 | 305 | /// Configuration type for manual duplex streams. 306 | #[derive(Debug, Copy, Clone)] 307 | pub struct DuplexStreamConfig { 308 | /// Input stream configuration 309 | pub input: StreamConfig, 310 | /// Output stream configuration 311 | pub output: StreamConfig, 312 | /// Use high quality resampling. Increases latency and CPU usage. 313 | pub high_quality_resampling: bool, 314 | /// Target latency. May be higher if the resampling takes too much latency. 315 | pub target_latency_secs: f32, 316 | } 317 | 318 | impl DuplexStreamConfig { 319 | /// Create a new duplex stream config with the provided input and output stream configuration, and default 320 | /// resampler values. 
321 | pub fn new(input: StreamConfig, output: StreamConfig) -> Self { 322 | Self { 323 | input, 324 | output, 325 | high_quality_resampling: false, 326 | target_latency_secs: 0.01, 327 | } 328 | } 329 | } 330 | 331 | /// Type alias of the result of creating a duplex stream. 332 | pub type DuplexStreamResult = Result< 333 | DuplexStreamHandle< 334 | ::StreamHandle, 335 | ::StreamHandle>, 336 | >, 337 | DuplexCallbackError<::Error, ::Error>, 338 | >; 339 | 340 | /// Creates a duplex audio stream that handles both input and output simultaneously. 341 | /// 342 | /// This function sets up a full-duplex audio stream by creating separate input and output streams 343 | /// and connecting them through a ring buffer. The input stream captures audio data and stores it 344 | /// in the buffer, while the output stream retrieves and processes this data before playback. 345 | /// 346 | /// # Arguments 347 | /// 348 | /// * `input_device` - The audio input device to capture audio from 349 | /// * `input_config` - Configuration parameters for the input stream 350 | /// * `output_device` - The audio output device to play audio through 351 | /// * `output_config` - Configuration parameters for the output stream 352 | /// * `callback` - The callback implementation that processes audio data 353 | /// 354 | /// # Returns 355 | /// 356 | /// A Result containing either: 357 | /// * A `DuplexStreamHandle` that can be used to manage the duplex stream 358 | /// * A `DuplexCallbackError` if stream creation fails 359 | /// 360 | /// # Example 361 | /// 362 | /// ```no_run 363 | /// use interflow::duplex::AudioDuplexCallback; 364 | /// use interflow::prelude::*; 365 | /// 366 | /// struct MyCallback; 367 | /// 368 | /// impl MyCallback { 369 | /// pub fn new() -> Self { 370 | /// Self 371 | /// } 372 | /// } 373 | /// 374 | /// impl AudioDuplexCallback for MyCallback { 375 | /// fn on_audio_data(&mut self, context: AudioCallbackContext, input: AudioInput, output: AudioOutput) { 376 | /// // 
Implementation left as exercise to the reader 377 | /// } 378 | /// } 379 | /// 380 | /// let input_device = default_input_device(); 381 | /// let output_device = default_output_device(); 382 | /// let input_config = input_device.default_input_config().unwrap(); 383 | /// let output_config = output_device.default_output_config().unwrap(); 384 | /// 385 | /// let callback = MyCallback::new(); 386 | /// 387 | /// let duplex_stream = create_duplex_stream( 388 | /// input_device, 389 | /// output_device, 390 | /// callback, 391 | /// DuplexStreamConfig::new(input_config, output_config), 392 | /// ).expect("Failed to create duplex stream"); 393 | /// 394 | /// ``` 395 | #[allow(clippy::type_complexity)] // Allowing because moving to a type alias would be just as complex 396 | pub fn create_duplex_stream< 397 | InputDevice: AudioInputDevice, 398 | OutputDevice: AudioOutputDevice, 399 | Callback: AudioDuplexCallback, 400 | >( 401 | input_device: InputDevice, 402 | output_device: OutputDevice, 403 | callback: Callback, 404 | config: DuplexStreamConfig, 405 | ) -> Result< 406 | DuplexStreamHandle< 407 | InputDevice::StreamHandle, 408 | OutputDevice::StreamHandle>, 409 | >, 410 | DuplexCallbackError, 411 | > { 412 | let (proxy, send_samplerate, receive_consumer) = InputProxy::new(); 413 | let input_handle = input_device 414 | .create_input_stream(config.input, proxy) 415 | .map_err(DuplexCallbackError::InputError)?; 416 | let output_handle = output_device 417 | .create_output_stream( 418 | config.output, 419 | DuplexCallback { 420 | input: None, 421 | send_samplerate, 422 | receive_consumer, 423 | callback, 424 | storage_raw: vec![0f32; 8192 * MAX_CHANNELS].into_boxed_slice(), 425 | current_samplerate: 0, 426 | num_input_channels: config.input.channels.count(), 427 | resample_config: ResamplingChannelConfig { 428 | capacity_seconds: (2.0 * config.target_latency_secs as f64).max(0.5), 429 | latency_seconds: config.target_latency_secs as f64, 430 | subtract_resampler_delay: 
true, 431 | quality: if config.high_quality_resampling { 432 | fixed_resample::ResampleQuality::High 433 | } else { 434 | fixed_resample::ResampleQuality::Low 435 | }, 436 | ..Default::default() 437 | }, 438 | }, 439 | ) 440 | .map_err(DuplexCallbackError::OutputError)?; 441 | Ok(DuplexStreamHandle { 442 | input_handle, 443 | output_handle, 444 | }) 445 | } 446 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | #![warn(missing_docs)] 3 | 4 | use bitflags::bitflags; 5 | use std::borrow::Cow; 6 | use std::fmt; 7 | use std::fmt::Formatter; 8 | 9 | use crate::audio_buffer::{AudioMut, AudioRef}; 10 | use crate::channel_map::ChannelMap32; 11 | use crate::timestamp::Timestamp; 12 | 13 | pub mod audio_buffer; 14 | pub mod backends; 15 | pub mod channel_map; 16 | pub mod duplex; 17 | pub mod prelude; 18 | pub mod timestamp; 19 | 20 | bitflags! { 21 | /// Represents the types/capabilities of an audio device. 22 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 23 | pub struct DeviceType: u32 { 24 | /// Device supports audio input. 25 | const INPUT = 1 << 0; 26 | 27 | /// Device supports audio output. 28 | const OUTPUT = 1 << 1; 29 | 30 | /// Physical audio device (hardware). 31 | const PHYSICAL = 1 << 2; 32 | 33 | /// Virtual/software application device. 34 | const APPLICATION = 1 << 3; 35 | 36 | /// This device is set as default 37 | const DEFAULT = 1 << 4; 38 | 39 | /// Device that supports both input and output. 40 | const DUPLEX = Self::INPUT.bits() | Self::OUTPUT.bits(); 41 | } 42 | } 43 | 44 | /// Audio drivers provide access to the inputs and outputs of devices. 45 | /// Several drivers might provide the same accesses, some sharing it with other applications, 46 | /// while others work in exclusive mode. 
pub trait AudioDriver {
    /// Type of errors that can happen when using this audio driver.
    type Error: std::error::Error;
    /// Type of audio devices this driver provides.
    type Device: AudioDevice;

    /// Driver display name.
    const DISPLAY_NAME: &'static str;

    /// Runtime version of the audio driver. If there is a difference between "client" and
    /// "server" versions, then this should reflect the server version.
    fn version(&self) -> Result<Cow<'static, str>, Self::Error>;

    /// Default device of the given type. This is most often tied to the audio settings at the
    /// operating system level.
    fn default_device(&self, device_type: DeviceType) -> Result<Option<Self::Device>, Self::Error>;

    /// List all devices available through this audio driver.
    fn list_devices(&self) -> Result<impl IntoIterator<Item = Self::Device>, Self::Error>;
}

impl DeviceType {
    /// Returns true if this device type has the input capability.
    pub fn is_input(&self) -> bool {
        self.contains(Self::INPUT)
    }

    /// Returns true if this device type has the output capability.
    pub fn is_output(&self) -> bool {
        self.contains(Self::OUTPUT)
    }

    /// Returns true if this device type is a physical device.
    pub fn is_physical(&self) -> bool {
        self.contains(Self::PHYSICAL)
    }

    /// Returns true if this device type is an application/virtual device.
    pub fn is_application(&self) -> bool {
        self.contains(Self::APPLICATION)
    }

    /// Returns true if this device is set as default
    pub fn is_default(&self) -> bool {
        self.contains(Self::DEFAULT)
    }

    /// Returns true if this device type supports both input and output.
    pub fn is_duplex(&self) -> bool {
        self.contains(Self::DUPLEX)
    }
}

/// Configuration for an audio stream.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct StreamConfig {
    /// Configured sample rate of the requested stream. The opened stream can have a different
    /// sample rate, so don't rely on this parameter being correct at runtime.
    pub samplerate: f64,
    /// Map of channels requested by the stream. Entries correspond in order to
    /// [AudioDevice::channel_map].
    ///
    /// Some drivers allow specifying which channels are going to be opened and available through
    /// the audio buffers. For other drivers, only the number of requested channels is used, and
    /// order does not matter.
    pub channels: ChannelMap32,
    /// Range of preferential buffer sizes. The library will make a best-effort attempt at
    /// honoring this setting, and in future versions may provide additional buffering to ensure
    /// it, but for now you should not make assumptions on buffer sizes based on this setting.
    pub buffer_size_range: (Option<usize>, Option<usize>),
    /// Whether the device should be exclusively held (meaning no other application can open the
    /// same device).
    pub exclusive: bool,
}

/// Audio channel description.
#[derive(Debug, Clone)]
pub struct Channel<'a> {
    /// Index of the channel in the device
    pub index: usize,
    /// Display name for the channel, if available, else a generic name like "Channel 1"
    pub name: Cow<'a, str>,
}

/// Trait for types describing audio devices. Audio devices have zero or more inputs and outputs,
/// and depending on the driver, can be duplex devices which can provide both of them at the same
/// time natively.
pub trait AudioDevice {
    /// Type of errors that can happen when using this device.
    type Error: std::error::Error;

    /// Device display name
    fn name(&self) -> Cow<'_, str>;

    /// Device type. Either input, output, or duplex.
    fn device_type(&self) -> DeviceType;

    /// Iterator of the available channels in this device. Channel indices are used when
    /// specifying which channels to open when creating an audio stream.
    fn channel_map(&self) -> impl IntoIterator<Item = Channel<'_>>;

    /// Not all configuration values make sense for a particular device, and this method tests a
    /// configuration to see if it can be used in an audio stream.
    fn is_config_supported(&self, config: &StreamConfig) -> bool;

    /// Enumerate all possible configurations this device supports. If that is not provided by
    /// the device, and not easily generated manually, this will return `None`.
    fn enumerate_configurations(&self) -> Option<impl IntoIterator<Item = StreamConfig>>;
}

/// Marker trait for values which are [Send] everywhere but on the web (as WASM does not yet have
/// web targets).
///
/// This should only be used to define the traits and should not be relied upon in external code.
///
/// This definition is selected on non-web platforms, and does require [`Send`].
#[cfg(not(wasm))]
pub trait SendEverywhereButOnWeb: 'static + Send {}
#[cfg(not(wasm))]
impl<T: 'static + Send> SendEverywhereButOnWeb for T {}

/// Marker trait for values which are [Send] everywhere but on the web (as WASM does not yet have
/// web targets).
///
/// This should only be used to define the traits and should not be relied upon in external code.
///
/// This definition is selected on web platforms, and does not require [`Send`].
#[cfg(wasm)]
pub trait SendEverywhereButOnWeb {}
#[cfg(wasm)]
impl<T> SendEverywhereButOnWeb for T {}

/// Trait for types which can provide input streams.
///
/// Input devices require a [`AudioInputCallback`] which receives the audio data from the input
/// device, and processes it.
pub trait AudioInputDevice: AudioDevice {
    /// Type of the resulting stream. This stream can be used to control the audio processing
    /// externally, or stop it completely and give back ownership of the callback with
    /// [`AudioStreamHandle::eject`].
    type StreamHandle<Callback: AudioInputCallback>: AudioStreamHandle<Callback>;

    /// Return the default configuration for this device, if there is one. The returned configuration *must* be
    /// valid according to [`Self::is_config_supported`].
    fn default_input_config(&self) -> Result<StreamConfig, Self::Error>;

    /// Creates an input stream with the provided stream configuration. For this call to be
    /// valid, [`AudioDevice::is_config_supported`] should have returned `true` on the provided
    /// configuration.
    ///
    /// An input callback is required to process the audio, whose ownership will be transferred
    /// to the audio stream.
    fn create_input_stream<Callback: SendEverywhereButOnWeb + AudioInputCallback>(
        &self,
        stream_config: StreamConfig,
        callback: Callback,
    ) -> Result<Self::StreamHandle<Callback>, Self::Error>;

    /// Create an input stream with the default configuration (as returned by [`Self::default_input_config`]).
    ///
    /// # Arguments
    ///
    /// - `callback`: Callback to process the audio input
    fn default_input_stream<Callback: SendEverywhereButOnWeb + AudioInputCallback>(
        &self,
        callback: Callback,
    ) -> Result<Self::StreamHandle<Callback>, Self::Error> {
        self.create_input_stream(self.default_input_config()?, callback)
    }
}

/// Trait for types which can provide output streams.
///
/// Output devices require a [`AudioOutputCallback`] which receives the audio data from the output
/// device, and processes it.
pub trait AudioOutputDevice: AudioDevice {
    /// Type of the resulting stream. This stream can be used to control the audio processing
    /// externally, or stop it completely and give back ownership of the callback with
    /// [`AudioStreamHandle::eject`].
226 | type StreamHandle: AudioStreamHandle; 227 | 228 | /// Return the default output configuration for this device, if it exists 229 | fn default_output_config(&self) -> Result; 230 | 231 | /// Creates an output stream with the provided stream configuration. For this call to be 232 | /// valid, [`AudioDevice::is_config_supported`] should have returned `true` on the provided 233 | /// configuration. 234 | /// 235 | /// An output callback is required to process the audio, whose ownership will be transferred 236 | /// to the audio stream. 237 | fn create_output_stream( 238 | &self, 239 | stream_config: StreamConfig, 240 | callback: Callback, 241 | ) -> Result, Self::Error>; 242 | 243 | /// Create an output stream using the default configuration as returned by [`Self::default_output_config`]. 244 | /// 245 | /// # Arguments 246 | /// 247 | /// - `callback`: Output callback to generate audio data with. 248 | fn default_output_stream( 249 | &self, 250 | callback: Callback, 251 | ) -> Result, Self::Error> { 252 | self.create_output_stream(self.default_output_config()?, callback) 253 | } 254 | } 255 | 256 | /// Trait for types which handles an audio stream (input or output). 257 | pub trait AudioStreamHandle { 258 | /// Type of errors which have caused the stream to fail. 259 | type Error: std::error::Error; 260 | 261 | /// Eject the stream, returning ownership of the callback. 262 | /// 263 | /// An error can occur when an irrecoverable error has occured and ownership has been lost 264 | /// already. 265 | fn eject(self) -> Result; 266 | } 267 | 268 | #[duplicate::duplicate_item( 269 | name bufty; 270 | [AudioInput] [AudioRef < 'a, T >]; 271 | [AudioOutput] [AudioMut < 'a, T >]; 272 | )] 273 | /// Plain-old-data object holding references to the audio buffer and the associated time-keeping 274 | /// [`Timestamp`]. 
This timestamp is associated with the stream, and in the cases where the 275 | /// driver provides timing information, it is used instead of relying on sample-counting. 276 | pub struct name<'a, T> { 277 | /// Associated time stamp for this callback. The time represents the duration for which the 278 | /// stream has been opened, and is either provided by the driver if available, or is kept up 279 | /// manually by the library. 280 | pub timestamp: Timestamp, 281 | /// Audio buffer data. 282 | pub buffer: bufty, 283 | } 284 | 285 | /// Plain-old-data object holding the passed-in stream configuration, as well as a general 286 | /// callback timestamp, which can be different from the input and output streams in case of 287 | /// cross-stream latencies; differences in timing can indicate desync. 288 | pub struct AudioCallbackContext { 289 | /// Passed-in stream configuration. Values have been updated where necessary to correspond to 290 | /// the actual stream properties. 291 | pub stream_config: StreamConfig, 292 | /// Callback-wide timestamp. 293 | pub timestamp: Timestamp, 294 | } 295 | 296 | /// Trait of types which process input audio data. This is the trait that users will want to 297 | /// implement when processing an input device. 298 | pub trait AudioInputCallback { 299 | /// Callback called when input data is available to be processed. 300 | fn on_input_data(&mut self, context: AudioCallbackContext, input: AudioInput); 301 | } 302 | 303 | /// Trait of types which process output audio data. This is the trait that users will want to 304 | /// implement when processing an output device. 305 | pub trait AudioOutputCallback { 306 | /// Callback called when output data is available to be processed. 
307 | fn on_output_data(&mut self, context: AudioCallbackContext, input: AudioOutput); 308 | } 309 | -------------------------------------------------------------------------------- /src/prelude.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | //! Prelude module for `interflow`. Use as a star-import. 3 | 4 | #[cfg(os_wasapi)] 5 | pub use crate::backends::wasapi::prelude::*; 6 | pub use crate::backends::*; 7 | pub use crate::duplex::{ 8 | create_duplex_stream, AudioDuplexCallback, DuplexStreamConfig, DuplexStreamHandle, 9 | }; 10 | pub use crate::*; 11 | -------------------------------------------------------------------------------- /src/timestamp.rs: -------------------------------------------------------------------------------- 1 | //! Module for handling timestamp and duration calculations in audio processing. 2 | //! 3 | //! This module provides the [`Timestamp`] type, which manages time-related operations 4 | //! for audio streams by tracking sample counts and their corresponding durations based 5 | //! on a specified sample rate. It supports basic arithmetic operations for sample 6 | //! counting and duration calculations, making it useful for audio stream synchronization 7 | //! and timing operations. 8 | //! 9 | //! # Examples 10 | //! 11 | //! ```rust 12 | //! use std::time::Duration; 13 | //! use interflow::timestamp::Timestamp; 14 | //! 15 | //! // Create a timestamp for 48 kHz audio 16 | //! let mut ts = Timestamp::new(48000.); 17 | //! 18 | //! // Add 48 samples (1ms at 48kHz) 19 | //! ts += 48; 20 | //! assert_eq!(ts.as_duration(), Duration::from_millis(1)); 21 | //! 22 | //! // Convert a duration to samples 23 | //! let ts2 = Timestamp::from_duration(48000., Duration::from_millis(100)); 24 | //! assert_eq!(ts2.counter, 4800); 25 | //! 
``` 26 | 27 | use std::ops; 28 | use std::ops::AddAssign; 29 | use std::time::Duration; 30 | 31 | /// Timestamp value, which computes duration information from a provided samplerate and a running 32 | /// sample counter. 33 | /// 34 | /// You can update the timestamp by add-assigning sample counts to it: 35 | /// 36 | /// ```rust 37 | /// use std::time::Duration; 38 | /// use interflow::timestamp::Timestamp; 39 | /// let mut ts = Timestamp::new(48000.); 40 | /// assert_eq!(ts.as_duration(), Duration::from_nanos(0)); 41 | /// ts += 48; 42 | /// assert_eq!(ts.as_duration(), Duration::from_millis(1)); 43 | /// ``` 44 | /// 45 | /// Adding also works, returning a new timestamp: 46 | /// 47 | /// ```rust 48 | /// use std::time::Duration; 49 | /// use interflow::timestamp::Timestamp; 50 | /// let mut ts = Timestamp::new(48000.); 51 | /// assert_eq!(ts.as_duration(), Duration::from_nanos(0)); 52 | /// let ts2 = ts + 48; 53 | /// assert_eq!(ts.as_duration(), Duration::from_millis(0)); 54 | /// assert_eq!(ts2.as_duration(), Duration::from_millis(1)); 55 | /// ``` 56 | /// 57 | /// Similarly, you can compute sample offsets by adding a [`Duration`] to it: 58 | /// 59 | /// ```rust 60 | /// use std::time::Duration; 61 | /// use interflow::timestamp::Timestamp; 62 | /// let ts = Timestamp::from_count(48000., 48); 63 | /// let ts_off = ts + Duration::from_millis(100); 64 | /// assert_eq!(ts_off.as_duration(), Duration::from_millis(101)); 65 | /// assert_eq!(ts_off.counter, 4848); 66 | /// ``` 67 | /// 68 | /// Or simply construct a [`Timestamp`] from a specified duration: 69 | /// 70 | /// ```rust 71 | /// use std::time::Duration; 72 | /// use interflow::timestamp::Timestamp; 73 | /// let ts = Timestamp::from_duration(44100., Duration::from_millis(1)); 74 | /// assert_eq!(ts.counter, 44); // Note that the conversion is lossy, as only whole samples are 75 | /// // stored in the timestamp. 
/// ```
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Timestamp {
    /// Number of samples counted in this timestamp.
    pub counter: u64,
    /// Samplerate of the audio stream associated with the counter.
    pub samplerate: f64,
}

/// Advance the counter by a [`Duration`], converted to a number of samples at
/// this timestamp's sample rate.
///
/// NOTE: the generic arguments on these impls (`<Duration>`, `<u64>`, `<T>`)
/// were stripped in the extracted copy, leaving two conflicting
/// `impl AddAssign for Timestamp` blocks; they are restored here so the type
/// compiles and matches its doctests.
impl AddAssign<Duration> for Timestamp {
    fn add_assign(&mut self, rhs: Duration) {
        let samples = rhs.as_secs_f64() * self.samplerate;
        // Truncation is intentional: only whole samples are counted.
        self.counter += samples as u64;
    }
}

/// Advance the counter by a raw number of samples.
impl AddAssign<u64> for Timestamp {
    fn add_assign(&mut self, rhs: u64) {
        self.counter += rhs;
    }
}

/// `Add` is provided for every right-hand side that `AddAssign` accepts
/// (sample counts and [`Duration`]s), returning a new timestamp and leaving
/// the original untouched.
impl<T> ops::Add<T> for Timestamp
where
    Self: AddAssign<T>,
{
    type Output = Self;

    fn add(mut self, rhs: T) -> Self {
        self.add_assign(rhs);
        self
    }
}

impl Timestamp {
    /// Create a zeroed timestamp with the provided sample rate.
    pub fn new(samplerate: f64) -> Self {
        Self {
            counter: 0,
            samplerate,
        }
    }

    /// Create a timestamp from the given sample rate and existing sample count.
    pub fn from_count(samplerate: f64, counter: u64) -> Self {
        Self {
            samplerate,
            counter,
        }
    }

    /// Compute the sample offset that most closely matches the provided duration for the given
    /// sample rate. The conversion is lossy: only whole samples are stored.
    pub fn from_duration(samplerate: f64, duration: Duration) -> Self {
        Self::from_seconds(samplerate, duration.as_secs_f64())
    }

    /// Compute the sample offset that most closely matches the provided duration in seconds for
    /// the given sample rate. Fractional samples are truncated toward zero.
    pub fn from_seconds(samplerate: f64, seconds: f64) -> Self {
        let samples = samplerate * seconds;
        Self {
            samplerate,
            counter: samples as _,
        }
    }

    /// Compute the duration represented by this [`Timestamp`].
    pub fn as_duration(&self) -> Duration {
        Duration::from_secs_f64(self.as_seconds())
    }

    /// Compute the number of seconds represented in this [`Timestamp`].
    pub fn as_seconds(&self) -> f64 {
        self.counter as f64 / self.samplerate
    }
}