├── tests ├── __init__.py ├── roi │ ├── __init__.py │ └── test_region_of_interest.py ├── processors │ ├── __init__.py │ └── test_processor.py ├── face_example1.jpg ├── face_example2.jpg ├── test_helpers.py ├── test_rppg.py ├── conftest.py ├── test_digital_filter.py └── quicktest.py ├── docs ├── style.css ├── ruff.toml ├── reference │ ├── roi │ │ ├── index.md │ │ ├── facemesh_detector.md │ │ └── selfie_detector.md │ ├── rppg.md │ ├── settings.md │ ├── ui │ │ └── index.md │ ├── helpers.md │ ├── containers.md │ ├── processors │ │ ├── chrom.md │ │ ├── index.md │ │ └── processor.md │ ├── hr_calculator.md │ └── index.md ├── images │ └── yarppg-screenshot.png ├── profile_processing.py ├── cli.md ├── index.md ├── video_processing.py └── deepdive.py ├── src └── yarppg │ ├── _resources │ └── .gitignore │ ├── ui │ ├── qt6 │ │ ├── __init__.py │ │ ├── utils.py │ │ ├── camera.py │ │ └── simple_window.py │ ├── __init__.py │ └── simplest.py │ ├── roi │ ├── detector.py │ ├── __init__.py │ ├── roi_tools.py │ ├── selfie_segmenter.py │ └── facemesh_segmenter.py │ ├── main.py │ ├── __init__.py │ ├── processors │ ├── __init__.py │ ├── processor.py │ └── chrom.py │ ├── hr_calculator.py │ ├── containers.py │ ├── digital_filter.py │ ├── helpers.py │ ├── settings.py │ └── rppg.py ├── ruff.toml ├── pyproject.toml ├── LICENSE ├── mkdocs.yml ├── .gitignore └── README.md /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/roi/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/processors/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/style.css: -------------------------------------------------------------------------------- 1 | p { 2 | text-align: justify; 3 | } -------------------------------------------------------------------------------- /src/yarppg/_resources/.gitignore: -------------------------------------------------------------------------------- 1 | /*.task 2 | /*.tflite 3 | -------------------------------------------------------------------------------- /docs/ruff.toml: -------------------------------------------------------------------------------- 1 | extend = "../ruff.toml" 2 | 3 | line-length = 80 4 | -------------------------------------------------------------------------------- /docs/reference/roi/index.md: -------------------------------------------------------------------------------- 1 | # ROI detectors 2 | 3 | ::: yarppg.roi 4 | -------------------------------------------------------------------------------- /docs/reference/rppg.md: -------------------------------------------------------------------------------- 1 | # rPPG orchestrator 2 | 3 | ::: yarppg.rppg 4 | -------------------------------------------------------------------------------- /docs/reference/settings.md: -------------------------------------------------------------------------------- 1 | # Settings 2 | 3 | ::: yarppg.settings 4 | -------------------------------------------------------------------------------- /docs/reference/ui/index.md: -------------------------------------------------------------------------------- 1 | # User interfaces 2 | 3 | ::: yarppg.ui 4 | 
-------------------------------------------------------------------------------- /docs/reference/helpers.md: -------------------------------------------------------------------------------- 1 | # Helper functions 2 | 3 | ::: yarppg.helpers 4 | -------------------------------------------------------------------------------- /docs/reference/containers.md: -------------------------------------------------------------------------------- 1 | # Data containers 2 | 3 | ::: yarppg.containers 4 | -------------------------------------------------------------------------------- /docs/reference/processors/chrom.md: -------------------------------------------------------------------------------- 1 | # ChromProcessor 2 | 3 | ::: yarppg.processors.chrom -------------------------------------------------------------------------------- /docs/reference/processors/index.md: -------------------------------------------------------------------------------- 1 | # rPPG processors 2 | 3 | ::: yarppg.processors 4 | -------------------------------------------------------------------------------- /docs/reference/hr_calculator.md: -------------------------------------------------------------------------------- 1 | # Heart rate estimation 2 | 3 | ::: yarppg.hr_calculator 4 | -------------------------------------------------------------------------------- /tests/face_example1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamProell/yarppg/HEAD/tests/face_example1.jpg -------------------------------------------------------------------------------- /tests/face_example2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamProell/yarppg/HEAD/tests/face_example2.jpg -------------------------------------------------------------------------------- /docs/reference/roi/facemesh_detector.md: -------------------------------------------------------------------------------- 1 | # MediaPipe FaceMesh 2 | 3 | ::: yarppg.roi.facemesh_segmenter 4 | -------------------------------------------------------------------------------- /docs/reference/roi/selfie_detector.md: -------------------------------------------------------------------------------- 1 | # MediaPipe Selfie Segmenter 2 | 3 | ::: yarppg.roi.selfie_segmenter 4 | -------------------------------------------------------------------------------- /docs/images/yarppg-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SamProell/yarppg/HEAD/docs/images/yarppg-screenshot.png -------------------------------------------------------------------------------- /docs/reference/processors/processor.md: -------------------------------------------------------------------------------- 1 | # Basic processor 2 | 3 | ::: yarppg.processors.processor 4 | options: 5 | members_order: "source" 6 | -------------------------------------------------------------------------------- /src/yarppg/ui/qt6/__init__.py: -------------------------------------------------------------------------------- 1 | """Simple Qt6-based implementation of the yarPPG GUI.""" 2 | from .simple_window import SimpleQt6WindowSettings, launch_window 3 | -------------------------------------------------------------------------------- /docs/reference/index.md: -------------------------------------------------------------------------------- 1 | # Reference 2 | The following pages provide a more detailed look into all 
elements of the yarPPG 3 | code. 4 | 5 | - The [rPPG orchestrator](rppg.md) combines all steps required to perform rPPG 6 | - [ROI detection](roi/index.md) gives an overview of available ROI detectors 7 | - [rPPG processors](processors/index.md) lists available rPPG processors 8 | - [Heart rate estimation](hr_calculator.md) describes heart rate estimators 9 | - [Data containers](containers.md) are used to bundle intermediate results meaningfully 10 | 11 | -------------------------------------------------------------------------------- /src/yarppg/roi/detector.py: -------------------------------------------------------------------------------- 1 | """Provides the base class of the ROI detector.""" 2 | 3 | import numpy as np 4 | 5 | from ..containers import RegionOfInterest 6 | 7 | 8 | class RoiDetector: 9 | """Base class for ROI detectors.""" 10 | 11 | def detect(self, frame: np.ndarray) -> RegionOfInterest: 12 | """Find region of interest in the given frame.""" 13 | raise NotImplementedError("Detect method needs to be overwritten.") 14 | 15 | def __call__(self, frame: np.ndarray) -> RegionOfInterest: 16 | """Apply detector on the given frame.""" 17 | return self.detect(frame) 18 | -------------------------------------------------------------------------------- /docs/profile_processing.py: -------------------------------------------------------------------------------- 1 | """Profile the offline video processing.""" 2 | # %% 3 | #! %load_ext cProfile 4 | import yarppg 5 | 6 | # %% 7 | filename = "tests/testvideo_30fps.mp4" 8 | 9 | fps = yarppg.helpers.get_video_fps(filename) 10 | filter_cfg = yarppg.digital_filter.FilterConfig(fps, 0.5, 1.5, btype="bandpass") 11 | livefilter = yarppg.digital_filter.make_digital_filter(filter_cfg) 12 | processor = yarppg.FilteredProcessor(yarppg.Processor(), livefilter=livefilter) 13 | rppg = yarppg.Rppg( 14 | processor=processor, 15 | hr_calc=yarppg.PeakBasedHrCalculator(fps, window_seconds=4), 16 | ) 17 | # %% 18 | #!%prun -D process_video.prof rppg.process_video(filename) 19 | -------------------------------------------------------------------------------- /tests/roi/test_region_of_interest.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import yarppg 4 | 5 | 6 | def test_pixelate(sim_roi: yarppg.RegionOfInterest): 7 | frame = sim_roi.baseimg.copy() 8 | yarppg.pixelate(frame, (2, 2, 10, 10), 5) 9 | 10 | assert np.array_equal(frame[:2], sim_roi.baseimg[:2]) 11 | assert np.array_equal(frame[:, :2], sim_roi.baseimg[:, :2]) 12 | assert np.all(frame[2:4, 2:4, 0] == int(sim_roi.baseimg[2:7, 2:7, 0].mean())) 13 | 14 | 15 | def test_contour_to_mask(): 16 | size = (10, 10) 17 | points = [(2, 2), (2, 5), (5, 5)] 18 | 19 | mask = yarppg.roi.contour_to_mask(size, points) 20 | 21 | assert mask.sum() == 10 22 | assert mask[mask > 0].mean() == 1 23 | -------------------------------------------------------------------------------- /tests/test_helpers.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | 3 | import yarppg 4 | 5 | 6 | def test_get_video_fps(testfiles_root: pathlib.Path): 7 | fps60 = yarppg.helpers.get_video_fps(testfiles_root / "testvideo_60fps.mp4") 8 | fps30 = yarppg.helpers.get_video_fps(testfiles_root / "testvideo_30fps.mp4") 9 | 10 | assert abs(fps60 - 60) < 0.1 11 | assert abs(fps30 - 30) < 0.1 12 | 13 | 14 | def test_frames_from_video(testfiles_root: pathlib.Path): 15 | filename = testfiles_root / "testvideo_30fps.mp4" 16 | 17 | 
frame = next(yarppg.helpers.frames_from_video(filename))
18 |     count = sum(1 for _ in yarppg.helpers.frames_from_video(filename))
19 | 
20 |     assert count == 294
21 |     assert frame.shape == (1080, 1920, 3)
--------------------------------------------------------------------------------
/src/yarppg/main.py:
--------------------------------------------------------------------------------
 1 | """Main entrypoint for the yarPPG GUI."""
 2 | 
 3 | import hydra
 4 | import omegaconf
 5 | 
 6 | import yarppg
 7 | import yarppg.ui
 8 | 
 9 | 
10 | @hydra.main(version_base=None, config_name="config")
11 | def main(cfg: omegaconf.DictConfig):
12 |     """Initialize an rPPG orchestrator with CLI arguments and launch the UI."""
13 |     config: yarppg.Settings = omegaconf.OmegaConf.to_object(cfg)  # type: ignore
14 |     rppg = yarppg.Rppg.from_settings(config)
15 | 
16 |     yarppg.ui.launch_ui(rppg, config.ui)
17 | 
18 | 
19 | def run_yarppg():
20 |     """Register structured configs and run the main function."""
21 |     yarppg.settings.register_schemas()
22 |     main()  # type: ignore
23 | 
24 | 
25 | if __name__ == "__main__":
26 |     run_yarppg()
--------------------------------------------------------------------------------
/tests/test_rppg.py:
--------------------------------------------------------------------------------
 1 | import pathlib
 2 | 
 3 | import yarppg
 4 | 
 5 | 
 6 | def test_process_video(testfiles_root: pathlib.Path):
 7 |     filename = testfiles_root / "testvideo_30fps.mp4"
 8 |     fps = yarppg.get_video_fps(filename)
 9 |     filter_cfg = yarppg.digital_filter.FilterConfig(fps, 0.5, 1.5, btype="bandpass")
10 |     livefilter = yarppg.digital_filter.make_digital_filter(filter_cfg)
11 |     processor = yarppg.FilteredProcessor(yarppg.Processor(), livefilter=livefilter)
12 |     hrcalc = yarppg.PeakBasedHrCalculator(fs=fps, window_seconds=5)
13 |     rppg = yarppg.Rppg(processor=processor, hr_calc=hrcalc)
14 | 
15 |     results = rppg.process_video(filename)
16 | 
17 |     assert len(results) == 294
18 |     assert abs(yarppg.bpm_from_frames_per_beat(results[-1].hr, fps) - 60) < 2.0
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
 1 | import pathlib
 2 | 
 3 | import numpy as np
 4 | import pytest
 5 | 
 6 | import yarppg
 7 | 
 8 | 
 9 | @pytest.fixture
10 | def testfiles_root() -> pathlib.Path:
11 |     """Return the directory containing test files."""
12 |     return pathlib.Path(__file__).parent
13 | 
14 | 
15 | @pytest.fixture
16 | def sim_roi() -> yarppg.RegionOfInterest:
17 |     frame = np.arange(16)[:, np.newaxis] * np.arange(16)[np.newaxis, :]
18 |     frame = np.stack([frame, frame, frame], axis=-1)
19 |     frame[..., 1] = 2
20 |     frame[..., 2] = 3
21 | 
22 |     bg_mask = np.zeros_like(frame[..., 0], dtype="uint8")
23 |     bg_mask[:2] = 1
24 |     frame[bg_mask > 0] = [4, 5, 6]
25 | 
26 |     mask = np.zeros_like(bg_mask, dtype="uint8")
27 |     mask[4:-4, 3:-3] = 1
28 | 
29 |     return yarppg.RegionOfInterest(mask, frame.astype("uint8"), bg_mask=bg_mask)
--------------------------------------------------------------------------------
/tests/processors/test_processor.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | 
 3 | import yarppg
 4 | import yarppg.roi.roi_tools
 5 | from yarppg.processors import processor
 6 | 
 7 | 
 8 | def test_masked_average(sim_roi: yarppg.RegionOfInterest):
 9 |     assert sim_roi.bg_mask is not None
10 |     bg_avg = yarppg.roi.roi_tools.masked_average(sim_roi.baseimg, sim_roi.bg_mask)
11 |     roi_avg = 
yarppg.roi.roi_tools.masked_average(sim_roi.baseimg, sim_roi.mask)
12 | 
13 |     assert np.array_equal(bg_avg, (4, 5, 6))
14 |     assert np.array_equal(roi_avg, (56.25, 2, 3))
15 | 
16 | 
17 | def test_process(sim_roi: yarppg.RegionOfInterest):
18 |     proc = processor.Processor()
19 | 
20 |     result = proc.process(sim_roi)
21 | 
22 |     assert result.value == 2
23 |     assert np.array_equal(result.roi_mean, (56.25, 2, 3))
24 |     assert np.array_equal(result.bg_mean, (4, 5, 6))
--------------------------------------------------------------------------------
/tests/test_digital_filter.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | import scipy.signal
 3 | 
 4 | from yarppg import digital_filter
 5 | 
 6 | 
 7 | def test_filtercoeffs_unchanged():
 8 |     cfg = digital_filter.FilterConfig(10.0, 3.0)
 9 | 
10 |     ba = digital_filter.filtercoeffs_from_config(cfg)
11 |     scipy_ba = scipy.signal.iirfilter(2, cfg.f1, fs=cfg.fs, btype="low")
12 | 
13 |     assert np.array_equal(ba, scipy_ba)
14 | 
15 | 
16 | def test_process():
17 |     cfg = digital_filter.FilterConfig(10.0, f1=3.0, btype="low")
18 |     b, a = digital_filter.filtercoeffs_from_config(cfg)
19 |     lfilter = digital_filter.make_digital_filter(cfg)
20 | 
21 |     xs = np.arange(0, 10, 0.1)
22 |     ys = np.sin(2 * np.pi * xs) + 0.2 * np.random.normal(size=len(xs))
23 | 
24 |     yfilt = np.array([lfilter.process(y) for y in ys])
25 |     yfilt_scipy = scipy.signal.lfilter(b, a, ys)
26 | 
27 |     assert np.mean(np.abs(yfilt - yfilt_scipy)) < 1e-7
--------------------------------------------------------------------------------
/ruff.toml:
--------------------------------------------------------------------------------
 1 | line-length = 88
 2 | src = ["src"]
 3 | 
 4 | [lint]
 5 | select = ["E", "F", "PL", "N", "D", "ARG", "I"]
 6 | ignore = [
 7 |     "D105",  # undocumented-magic-method,
 8 |     "D107",  # undocumented-public-init
 9 |     "PLR2004",  # magic-value-comparison,
10 | ]
11 | ignore-init-module-imports = true
12 | exclude = [
13 |     "__pypackages__",
14 |     "_build",
15 |     ".bzr",
16 |     ".direnv",
17 |     ".eggs",
18 |     ".git-rewrite",
19 |     ".git",
20 |     ".hg",
21 |     ".mypy_cache",
22 |     ".nox",
23 |     ".pants.d",
24 |     ".pytest_cache",
25 |     ".pytype",
26 |     ".ruff_cache",
27 |     ".svn",
28 |     ".tox",
29 |     ".venv",
30 |     ".vscode",
31 |     "buck-out",
32 |     "build",
33 |     "dist",
34 |     "node_modules",
35 |     "venv",
36 | ]
37 | 
38 | [lint.per-file-ignores]
39 | "__init__.py" = ["F401"]  # unused-import
40 | "tests/*.py" = ["D"]  # no docs needed in tests.
41 | "docs/*.py" = ["D"] 42 | 43 | [lint.pydocstyle] 44 | convention = "google" 45 | 46 | [lint.pylint] 47 | max-args = 10 48 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | build-backend = "setuptools.build_meta" 3 | requires = ["setuptools"] 4 | 5 | [project] 6 | authors = [{ name = "Samuel Pröll", email = "info@samproell.at" }] 7 | description = "Yet another implementation of remote Photoplethysmography in Python" 8 | dynamic = ["version"] 9 | dependencies = [ 10 | "hydra-core", 11 | "mediapipe", 12 | "numpy", 13 | "opencv-contrib-python", 14 | "pandas", 15 | "scipy", 16 | ] 17 | license = { file = "LICENSE" } 18 | name = "yarppg" 19 | readme = "README.md" 20 | 21 | [project.optional-dependencies] 22 | qt6 = ["PyQt6", "pyqtgraph"] 23 | dev = [ 24 | "mkdocs-autorefs", 25 | "mkdocs-jupyter", 26 | "mkdocs-material", 27 | "mkdocs-section-index", 28 | "mkdocs", 29 | "mkdocstrings[python]", 30 | "pytest", 31 | ] 32 | 33 | [project.urls] 34 | repository = "https://github.com/SamProell/yarppg" 35 | 36 | [project.scripts] 37 | run-yarppg = "yarppg.main:run_yarppg" 38 | 39 | [tool.setuptools.packages.find] 40 | where = ["src"] 41 | 42 | [tool.setuptools.dynamic] 43 | version = { attr = "yarppg.__version__" } 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Samuel Pröll 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/yarppg/roi/__init__.py: -------------------------------------------------------------------------------- 1 | """Utilities for ROI (region of interest) detection and manipulation. 2 | 3 | yarPPG comes with a number of ROI detectors, which find the face in the 4 | input frame and provide a mask with the relevant region(s). The following 5 | detectors are currently implemented: 6 | 7 | - [`FaceMeshDetector`][yarppg.FaceMeshDetector] (default) - uses MediaPipe's 8 | FaceMesh landmarker. 9 | - [`SelfieDetector`][yarppg.SelfieDetector] - uses MediaPipe's SelfieSegmenter 10 | solution. Selfie segmentation is slower than FaceMesh and may not work in a 11 | real-time application. 
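
For illustration, a minimal sketch of running the default detector on a single
frame (the OpenCV webcam capture is only a stand-in for any RGB image source):

```python
import cv2

import yarppg

detector = yarppg.FaceMeshDetector()
ret, frame = cv2.VideoCapture(0).read()  # any H x W x 3 image array works here
if ret:
    roi = detector(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))  # same as .detect(...)
    print(roi.mask.shape)
```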
12 | 
13 | Detectors return a [`RegionOfInterest`][yarppg.RegionOfInterest] container
14 | that stores the original image, the ROI mask and an optional background mask.
15 | """
16 | 
17 | from typing import Callable
18 | 
19 | from .detector import RoiDetector
20 | from .facemesh_segmenter import FaceMeshDetector
21 | from .roi_tools import contour_to_mask, overlay_mask, pixelate, pixelate_mask
22 | from .selfie_segmenter import SelfieDetector
23 | 
24 | detectors: dict[str, Callable[..., RoiDetector]] = {
25 |     "facemesh": FaceMeshDetector,
26 |     "selfie": SelfieDetector,
27 | }
--------------------------------------------------------------------------------
/src/yarppg/__init__.py:
--------------------------------------------------------------------------------
 1 | """Yet another rPPG implementation."""
 2 | 
 3 | __version__ = "1.0"
 4 | 
 5 | __all__ = [
 6 |     "bpm_from_frames_per_beat",
 7 |     "ChromProcessor",
 8 |     "Color",
 9 |     "DigitalFilter",
10 |     "FaceMeshDetector",
11 |     "FilteredProcessor",
12 |     "FpsTracker",
13 |     "frames_from_video",
14 |     "get_config",
15 |     "get_video_fps",
16 |     "HrCalculator",
17 |     "PeakBasedHrCalculator",
18 |     "pixelate_mask",
19 |     "pixelate",
20 |     "Processor",
21 |     "RegionOfInterest",
22 |     "RoiDetector",
23 |     "Rppg",
24 |     "RppgResult",
25 |     "SelfieDetector",
26 |     "Settings",
27 |     "UiSettings",
28 | ]
29 | 
30 | from .containers import Color, RegionOfInterest, RppgResult
31 | from .digital_filter import DigitalFilter
32 | from .helpers import (
33 |     FpsTracker,
34 |     bpm_from_frames_per_beat,
35 |     frames_from_video,
36 |     get_video_fps,
37 | )
38 | from .hr_calculator import HrCalculator, PeakBasedHrCalculator
39 | from .processors import ChromProcessor, FilteredProcessor, Processor
40 | from .roi import FaceMeshDetector, RoiDetector, SelfieDetector, pixelate, pixelate_mask
41 | from .rppg import Rppg
42 | from .settings import Settings, UiSettings, get_config
--------------------------------------------------------------------------------
/docs/cli.md:
--------------------------------------------------------------------------------
 1 | # Command line interface
 2 | yarPPG comes with the `run-yarppg` command to launch a graphical user
 3 | interface. The command line interface is built as a
 4 | *structured configuration* using [Hydra](https://hydra.cc).
 5 | 
 6 | Hydra offers robust configuration management with type checking for
 7 | complex, modular settings hierarchies. Additionally, we get a
 8 | powerful override syntax, allowing users to adjust the settings
 9 | from the command line.
10 | 
11 | !!! note
12 | 
13 |     The command line interface is still a work in progress. For now, you
14 |     can adjust only a handful of options.
15 | 
16 | You can call `run-yarppg --help` to get more information on available
17 | options and how to override them:
18 | 
19 | ```
20 | run-yarppg is powered by Hydra.
21 | 
22 | == Configuration groups ==
23 | Compose your configuration from those groups (group=option)
24 | 
25 | ui: qt6_simple, simplest
26 | 
27 | 
28 | == Config ==
29 | Override anything in the config (foo.bar=value)
30 | 
31 | ui:
32 |   roi_alpha: 0.0
33 | video: 0
34 | savepath: null
35 | detector: facemesh
36 | filter:
37 |   fs: 30.0
38 |   f1: 0.5
39 |   f2: 2.0
40 |   btype: bandpass
41 |   ftype: butter
42 |   order: 2
43 | algorithm: green
44 | 
45 | 
46 | Powered by Hydra (https://hydra.cc)
47 | Use --hydra-help to view Hydra specific help
48 | ```
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
 1 | site_name: yarPPG
 2 | theme:
 3 |   name: material
 4 |   palette:
 5 |     scheme: default
 6 |   features:
 7 |     - navigation.expand
 8 | extra_css:
 9 |   - style.css
10 | nav:
11 |   - Getting started: index.md
12 |   - Deep dive: deepdive.py
13 |   - Video processing: video_processing.py
14 |   - cli.md
15 |   - Reference:
16 |       - reference/index.md
17 |       - reference/rppg.md
18 |       - ROI detection:
19 |           - reference/roi/index.md
20 |           - reference/roi/facemesh_detector.md
21 |           - reference/roi/selfie_detector.md
22 |       - Signal extraction:
23 |           - reference/processors/index.md
24 |           - reference/processors/processor.md
25 |           - reference/processors/chrom.md
26 |       - reference/hr_calculator.md
27 |       - User interfaces:
28 |           - reference/ui/index.md
29 |       - reference/containers.md
30 |       - reference/settings.md
31 |       - reference/helpers.md
32 | 
33 | plugins:
34 |   - search
35 |   - section-index
36 |   - autorefs
37 |   - mkdocstrings:
38 |       default_handler: python
39 |   - mkdocs-jupyter:
40 |       ignore_h1_titles: True
41 |       execute: true
42 | 
43 | markdown_extensions:
44 |   - footnotes
45 |   - admonition
46 |   - pymdownx.highlight:
47 |       anchor_linenums: true
48 |       line_spans: __span
49 |       pygments_lang_class: true
50 |   - pymdownx.inlinehilite
51 |   - pymdownx.snippets
52 |   - pymdownx.superfences
--------------------------------------------------------------------------------
/src/yarppg/processors/__init__.py:
--------------------------------------------------------------------------------
 1 | """Implementations of various rPPG signal extractors found in literature.
 2 | 
 3 | All processors are built on the [`Processor`][yarppg.Processor] base class, which
 4 | most importantly features a [`process`][yarppg.Processor.process] method.
 5 | The `process` function takes a [`RegionOfInterest` container][yarppg.RegionOfInterest]
 6 | and extracts the rPPG signal value.
 7 | 
 8 | Note that for most processors, this is a stateful function. Many algorithms use an
 9 | internal buffer of previous values to provide a more robust calculation.
10 | To clear the internal buffer, we can call [`reset`][yarppg.Processor.reset].
11 | 
12 | Processors can be wrapped in a [`FilteredProcessor`][yarppg.FilteredProcessor],
13 | allowing for ad-hoc signal smoothing with each signal update.
14 | 
15 | Besides the base processor, the following additional algorithms from literature
16 | are implemented:
17 | 
18 | ## [ChromProcessor][yarppg.processors.ChromProcessor] (experimental)
19 | Implements the chrominance-based algorithm by
20 | [de Haan & Jeanne (2013)](https://pubmed.ncbi.nlm.nih.gov/23744659/).
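
As a rough usage sketch, processors can also be driven by hand with a dummy
region of interest (the gray frame and square mask below are made up for this
example):

```python
import numpy as np

import yarppg

frame = np.full((120, 160, 3), 128, dtype="uint8")  # stand-in for a camera frame
mask = np.zeros((120, 160), dtype="uint8")
mask[40:80, 60:100] = 1  # pretend this region is the face

processor = yarppg.ChromProcessor(winsize=45, method="xovery")
result = processor.process(yarppg.RegionOfInterest(mask, frame))
processor.reset()  # clear the internal buffers, e.g., between recordings
```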
21 | 
22 | ## More to come (your contributions are welcome)
23 | """
24 | 
25 | from typing import Callable
26 | 
27 | from .chrom import ChromProcessor
28 | from .processor import FilteredProcessor, Processor
29 | 
30 | algorithms: dict[str, Callable[..., Processor]] = {
31 |     "green": Processor,
32 |     "chrom": ChromProcessor,
33 | }
--------------------------------------------------------------------------------
/src/yarppg/ui/qt6/utils.py:
--------------------------------------------------------------------------------
 1 | """Various utility functions related to the user interface."""
 2 | 
 3 | import numpy as np
 4 | import pyqtgraph
 5 | from numpy.typing import ArrayLike
 6 | 
 7 | 
 8 | def plain_image_item(data):
 9 |     """Create a `pyqtgraph.ImageView` showing only the actual image."""
10 |     img_item = pyqtgraph.image(data)
11 |     img_item.ui.histogram.hide()
12 |     img_item.ui.roiBtn.hide()
13 |     img_item.ui.menuBtn.hide()
14 |     return img_item
15 | 
16 | 
17 | def add_multiaxis_plot(
18 |     p1: pyqtgraph.PlotItem, **kwargs
19 | ) -> tuple[pyqtgraph.PlotCurveItem, pyqtgraph.ViewBox]:
20 |     """Add a new line in multiaxis view on top of the given base plot."""
21 |     p2 = pyqtgraph.ViewBox()
22 |     p1.scene().addItem(p2)  # type: ignore
23 |     p1.hideAxis("right")
24 |     p1.getAxis("right").linkToView(p2)
25 |     p2.setXLink(p1)
26 | 
27 |     line = pyqtgraph.PlotCurveItem(**kwargs)
28 |     p2.addItem(line)
29 | 
30 |     def update_view():
31 |         p2.setGeometry(p1.vb.sceneBoundingRect())  # type: ignore
32 |         p2.linkedViewChanged(p1.vb, p2.XAxis)
33 | 
34 |     update_view()
35 |     p1.vb.sigResized.connect(update_view)  # type: ignore
36 | 
37 |     return line, p2
38 | 
39 | 
40 | def get_autorange(data: ArrayLike, factor: float = 0.05):
41 |     """Use data to determine the range for plot boundaries."""
42 |     if np.all(np.isnan(data)):
43 |         return 0, 1
44 |     x1, x2 = np.nanmin(data), np.nanmax(data)
45 |     pad = (x2 - x1) * factor
46 |     return x1 - pad, x2 + pad
--------------------------------------------------------------------------------
/src/yarppg/ui/__init__.py:
--------------------------------------------------------------------------------
 1 | """Provides user interfaces for yarPPG.
 2 | 
 3 | yarPPG comes with several user interfaces based on additional optional
 4 | dependencies. Make sure to install the corresponding extras to use
 5 | special UIs, instead of the OpenCV based `simplest` UI.
 6 | 
 7 | ## Available UIs
 8 | ### Simplest
 9 | This is a simple infinite loop, grabbing new frames from the camera and
10 | visualizing the results in an OpenCV window.
11 | 
12 | No additional dependencies are required for the default interface.
13 | 
14 | 
15 | ### Simple Qt6 window
16 | A small GUI window highlighting the detected ROI and a trace of the extracted
17 | rPPG signal. Make sure to install extras with:
18 | ```bash
19 | pip install ".[qt6]"
20 | ```
21 | 
22 | ### More to come
23 | Feel free to contribute other user interfaces using any framework.
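
## Launching a UI programmatically
A minimal sketch mirroring what `main.py` does, assuming that
`yarppg.get_config()` returns a default `Settings` object with a `ui` attribute:

```python
import yarppg
import yarppg.ui

config = yarppg.get_config()  # assumption: yields default settings incl. `ui`
rppg = yarppg.Rppg.from_settings(config)
yarppg.ui.launch_ui(rppg, config.ui)
```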
24 | """
25 | 
26 | import yarppg
27 | 
28 | 
29 | def launch_ui(rppg: yarppg.Rppg, ui_settings: yarppg.UiSettings) -> int:
30 |     """Launch a user interface for the given configuration."""
31 |     if type(ui_settings).__name__ == "SimpleQt6WindowSettings":
32 |         from yarppg.ui.qt6.simple_window import SimpleQt6WindowSettings, launch_window
33 | 
34 |         assert isinstance(ui_settings, SimpleQt6WindowSettings)
35 |         return launch_window(rppg, ui_settings)
36 | 
37 |     if type(ui_settings).__name__ == "SimplestOpenCvWindowSettings":
38 |         from yarppg.ui.simplest import SimplestOpenCvWindowSettings, launch_loop
39 | 
40 |         assert isinstance(ui_settings, SimplestOpenCvWindowSettings)
41 |         return launch_loop(rppg, ui_settings)
42 | 
43 |     raise NotImplementedError(f"Cannot understand the given {ui_settings!r}")
--------------------------------------------------------------------------------
/src/yarppg/ui/simplest.py:
--------------------------------------------------------------------------------
 1 | """Provides a simplistic OpenCV-based user interface, printing values to the console."""
 2 | import dataclasses
 3 | 
 4 | import cv2
 5 | 
 6 | import yarppg
 7 | 
 8 | FONT_COLOR = (207, 117, 6)
 9 | 
10 | 
11 | def _is_window_closed(name: str) -> bool:
12 |     return cv2.getWindowProperty(name, cv2.WND_PROP_VISIBLE) < 1
13 | 
14 | 
15 | @dataclasses.dataclass
16 | class SimplestOpenCvWindowSettings(yarppg.settings.UiSettings):
17 |     """Configuration for the simplest OpenCV user interface."""
18 | 
19 |     roi_alpha: float = 0.0
20 |     video: int | str = 0
21 | 
22 | 
23 | def launch_loop(rppg: yarppg.Rppg, config: SimplestOpenCvWindowSettings) -> int:
24 |     """Launch a simple OpenCV loop visualizing rPPG results in real time."""
25 |     cam = cv2.VideoCapture(config.video)
26 |     if not cam.isOpened():
27 |         print(f"Could not open {config.video=!r}")
28 |         return -1
29 | 
30 |     tracker = yarppg.FpsTracker()
31 |     while True:
32 |         ret, frame = cam.read()
33 |         if not ret:
34 |             break
35 |         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
36 |         result = rppg.process_frame(frame)
37 |         img = yarppg.roi.overlay_mask(
38 |             frame, result.roi.mask != 0, alpha=config.roi_alpha
39 |         )
40 |         img = cv2.flip(img, 1)
41 |         tracker.tick()
42 |         result.hr = 60 * tracker.fps / result.hr  # frames per beat -> bpm
43 |         text = f"{result.hr:.1f} (bpm)"
44 |         pos = (10, img.shape[0] - 10)
45 |         cv2.putText(img, text, pos, cv2.FONT_HERSHEY_COMPLEX, 0.8, color=FONT_COLOR)
46 |         cv2.imshow("yarPPG", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
47 |         print(result.value, result.hr)
48 |         if cv2.waitKey(1) == ord("q") or _is_window_closed("yarPPG"):
49 |             break
50 |     return 0
--------------------------------------------------------------------------------
/src/yarppg/hr_calculator.py:
--------------------------------------------------------------------------------
 1 | """Heart rate calculation utilities."""
 2 | 
 3 | from collections import deque
 4 | 
 5 | import numpy as np
 6 | import scipy.signal
 7 | 
 8 | 
 9 | class HrCalculator:
10 |     """Base class for heart rate calculation."""
11 | 
12 |     def update(self, value: float) -> float:  # noqa: ARG002
13 |         """Process the new data and update HR estimate."""
14 |         return np.nan
15 | 
16 |     def reset(self) -> None:
17 |         """Clear the internal state."""
18 |         pass
19 | 
20 | 
21 | class PeakBasedHrCalculator(HrCalculator):
22 |     """Peak-based heart rate calculation."""
23 | 
24 |     def __init__(
25 |         self,
26 |         fs: float,
27 |         window_seconds: float = 10,
28 |         distance: float = 0.5,
29 |         update_interval: int = 10,
30 |     ):
31 |         self.winsize = int(fs * window_seconds)
32 |         self.values = 
deque(maxlen=self.winsize) 33 | self.mindist = int(fs * distance) 34 | 35 | self.update_interval = update_interval 36 | self.frames_seen = 0 37 | self.last_hr = np.nan 38 | 39 | def update(self, value: float) -> float: 40 | """Process the new data and update HR estimate in frames per beat.""" 41 | self.frames_seen += 1 42 | self.values.append(value) 43 | if ( 44 | len(self.values) < self.winsize 45 | or self.frames_seen % self.update_interval != 0 46 | ): 47 | return self.last_hr 48 | peaks, _ = scipy.signal.find_peaks(self.values, distance=self.mindist) 49 | self.last_hr = np.diff(peaks).mean() 50 | return self.last_hr 51 | 52 | def reset(self) -> None: 53 | """Clear the internal buffer and intermediate values.""" 54 | self.frames_seen = 0 55 | self.values.clear() 56 | self.last_hr = np.nan 57 | -------------------------------------------------------------------------------- /src/yarppg/processors/processor.py: -------------------------------------------------------------------------------- 1 | """Provides base classes for rPPG signal computation.""" 2 | 3 | import numpy as np 4 | 5 | from ..containers import Color, RegionOfInterest, RppgResult 6 | from ..digital_filter import DigitalFilter 7 | from ..roi.roi_tools import masked_average 8 | 9 | 10 | class Processor: 11 | """Base rPPG processor, extracting the average green channel from the ROI.""" 12 | 13 | def process(self, roi: RegionOfInterest) -> RppgResult: 14 | """Calculate average green channel in the roi area.""" 15 | avg = masked_average(roi.baseimg, roi.mask) 16 | bg_mean = Color.null() 17 | if roi.bg_mask is not None: 18 | bg_mean = masked_average(roi.baseimg, roi.bg_mask) 19 | 20 | return RppgResult(avg.g, roi, roi_mean=avg, bg_mean=bg_mean) 21 | 22 | def reset(self) -> None: 23 | """Reset internal state and intermediate values.""" 24 | pass # no persistent values in base class 25 | 26 | 27 | class FilteredProcessor(Processor): 28 | """Processor with temporal filtering of the extracted signal.""" 29 | 30 | def __init__(self, processor: Processor, livefilter: DigitalFilter | None = None): 31 | self.processor = processor 32 | self.livefilter = livefilter 33 | 34 | def process(self, roi: RegionOfInterest) -> RppgResult: 35 | """Calculate processor output and apply digital filter.""" 36 | result = self.processor.process(roi) 37 | if self.livefilter is not None and np.isfinite(result.value): 38 | # only calculate filter update if not NaN 39 | result.value = self.livefilter.process(result.value) 40 | return result 41 | 42 | def reset(self) -> None: 43 | """Reset internal state and intermediate values.""" 44 | self.processor.reset() 45 | if self.livefilter is not None: 46 | self.livefilter.reset() 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | yarppg_old/ 2 | 3 | *.mp4 4 | *.task 5 | *.tflite 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # C extensions 13 | *.so 14 | 15 | # Distribution / packaging 16 | .Python 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
37 | *.manifest
38 | *.spec
39 | 
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 | 
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *.cover
53 | .hypothesis/
54 | .pytest_cache/
55 | 
56 | # Translations
57 | *.mo
58 | *.pot
59 | 
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | 
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 | 
69 | # Scrapy stuff:
70 | .scrapy
71 | 
72 | # Sphinx documentation
73 | docs/_build/
74 | 
75 | # PyBuilder
76 | target/
77 | 
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 | 
81 | # pyenv
82 | .python-version
83 | 
84 | # celery beat schedule file
85 | celerybeat-schedule
86 | 
87 | # SageMath parsed files
88 | *.sage.py
89 | 
90 | # Environments
91 | .env
92 | .venv*
93 | env/
94 | venv/
95 | ENV/
96 | env.bak/
97 | venv.bak/
98 | 
99 | # Spyder project settings
100 | .spyderproject
101 | .spyproject
102 | 
103 | # Rope project settings
104 | .ropeproject
105 | 
106 | # mkdocs documentation
107 | /site
108 | 
109 | # mypy
110 | .mypy_cache/
111 | 
112 | # devel
113 | .idea/
114 | .vscode/
--------------------------------------------------------------------------------
/src/yarppg/containers.py:
--------------------------------------------------------------------------------
 1 | """Defines some containers passed between objects of the yarPPG application."""
 2 | 
 3 | from dataclasses import dataclass
 4 | 
 5 | import numpy as np
 6 | import pandas as pd
 7 | 
 8 | 
 9 | @dataclass
10 | class RegionOfInterest:
11 |     """Container for defining the region of interest (and background) in an image."""
12 | 
13 |     mask: np.ndarray
14 |     baseimg: np.ndarray
15 |     bg_mask: np.ndarray | None = None
16 |     face_rect: tuple[int, int, int, int] | None = None
17 |     """Bounding box of the detected face (x, y, w, h)."""
18 | 
19 | 
20 | @dataclass
21 | class Color:
22 |     """Defines a color in RGB(A) format."""
23 | 
24 |     r: float
25 |     g: float
26 |     b: float
27 | 
28 |     @classmethod
29 |     def null(cls):
30 |         """Create empty color with NaN values."""
31 |         return cls(np.nan, np.nan, np.nan)
32 | 
33 |     def __array__(self):
34 |         return np.array([self.r, self.g, self.b])
35 | 
36 |     @classmethod
37 |     def from_array(cls, arr: np.ndarray):
38 |         """Convert numpy array to `Color` object."""
39 |         if len(arr) in {3, 4} and arr.ndim == 1:
40 |             return cls(*arr)
41 |         raise ValueError(f"Cannot interpret {arr=!r}")
42 | 
43 | 
44 | @dataclass
45 | class RppgResult:
46 |     """Container for rPPG computation results.
47 | 
48 |     Calling `np.array` on this container will return an 8-element vector containing
49 |     the rPPG signal value, RGB values of the ROI, RGB values of the background (or
50 |     NaNs) and the HR. `to_series` produces a clearer representation of the values
51 |     with named indices.
52 | 
53 |     Note that both `__array__` and `to_series` ignore the `roi` attribute.
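
    A quick sketch of both conversions (the values here are arbitrary):

    ```python
    import numpy as np

    from yarppg.containers import Color, RegionOfInterest, RppgResult

    roi = RegionOfInterest(np.ones((4, 4), "uint8"), np.zeros((4, 4, 3), "uint8"))
    result = RppgResult(0.5, roi, roi_mean=Color(90, 120, 80), bg_mean=Color.null())
    print(np.array(result))    # [0.5 90. 120. 80. nan nan nan nan]
    print(result.to_series())  # same values with a named index
    ```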
54 | """ 55 | 56 | value: float 57 | """Output value of the rPPG signal extractor.""" 58 | roi: RegionOfInterest 59 | """Region of interest identified in the current frame.""" 60 | roi_mean: Color 61 | """Mean color of the ROI.""" 62 | bg_mean: Color 63 | """Mean color of the background.""" 64 | hr: float = np.nan 65 | """Heart rate estimate in frames per beat.""" 66 | 67 | def __array__(self): 68 | return np.r_[self.value, self.roi_mean, self.bg_mean, self.hr] 69 | 70 | def to_series(self): 71 | """Extract the rPPG signal values into a Pandas series.""" 72 | return pd.Series( 73 | np.array(self), 74 | index=["value", "roi_r", "roi_g", "roi_b", "bg_r", "bg_g", "bg_b", "hr"], 75 | ) 76 | -------------------------------------------------------------------------------- /tests/quicktest.py: -------------------------------------------------------------------------------- 1 | # ruff: noqa 2 | # %% 3 | #! %load_ext autoreload 4 | #! %autoreload 2 5 | # %% 6 | import pathlib 7 | 8 | #! %cd {pathlib.Path(__file__).parent.parent} 9 | # %% 10 | import cv2 11 | import matplotlib.pyplot as plt 12 | import numpy as np 13 | import yarppg 14 | 15 | # %% 16 | frame = cv2.imread("tests/face_example1.jpg") 17 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 18 | detector = yarppg.FaceMeshDetector() 19 | roi = detector.detect(frame) 20 | # %% 21 | plt.imshow(yarppg.roi.overlay_mask(roi.baseimg, roi.mask == 1, alpha=0.3)) 22 | # %% 23 | detector = yarppg.SelfieDetector() 24 | roi = detector.detect(frame) 25 | plt.imshow(yarppg.roi.overlay_mask(roi.baseimg, roi.mask == 1, alpha=0.3)) 26 | # %% 27 | from yarppg_old.rppg.roi.facemesh_detector import ( 28 | FaceMeshDetector as OldFaceMeshDetector, 29 | ) 30 | from yarppg_old.rppg.processors.chrom import ChromProcessor as OldChromProcessor 31 | from yarppg_old.rppg.roi import RegionOfInterest as OldRegionOfInterest 32 | 33 | old_detector = OldFaceMeshDetector() 34 | old_chrom = OldChromProcessor() 35 | 36 | # processor = yarppg.Processor() 37 | processor = yarppg.ChromProcessor() 38 | detector = yarppg.FaceMeshDetector() 39 | results: list[yarppg.RppgResult] = [] 40 | old_results: list[float] = [] 41 | 42 | for frame in yarppg.frames_from_video("video.mp4"): 43 | roi = detector.detect(frame) 44 | results.append(processor.process(roi)) 45 | # print(results[-1].value) 46 | # old_roi = old_detector.detect(frame) 47 | old_results.append(old_chrom.calculate(OldRegionOfInterest(frame, roi.mask))) 48 | plt.plot([r.value for r in results]) 49 | plt.plot(old_results) 50 | # %% 51 | 52 | import scipy.signal 53 | 54 | from src.yarppg.rppg import Rppg 55 | from src.yarppg.digital_filter import DigitalFilter 56 | from src.yarppg.hr_calculator import PeakBasedHrCalculator 57 | from src.yarppg.processors import FilteredProcessor 58 | 59 | fs = yarppg.get_video_fps("video2.mp4") 60 | 61 | b, a = scipy.signal.iirfilter(2, [0.5, 2], fs=fs, btype="bandpass") 62 | livefilter = DigitalFilter(b, a, xi=-1) 63 | 64 | hrcalc = PeakBasedHrCalculator(fs, window_seconds=5) 65 | 66 | rppg = Rppg(detector, FilteredProcessor(yarppg.Processor(), livefilter), hrcalc) 67 | 68 | results = rppg.process_video("video2.mp4") 69 | yfilt = np.array([r.value for r in results]) 70 | plt.plot(yfilt[90:]) 71 | # %% 72 | from yarppg import settings 73 | import omegaconf 74 | 75 | cfg = omegaconf.OmegaConf.structured(settings.Settings) 76 | settings.flatten_dict(omegaconf.OmegaConf.to_container(cfg)) # type: ignore 77 | -------------------------------------------------------------------------------- 
/docs/index.md:
--------------------------------------------------------------------------------
 1 | # Welcome to the yarPPG documentation
 2 | *yarPPG* is **y**et **a**nother implementation of **r**emote
 3 | **P**hoto**P**lethysmo**G**raphy.
 4 | Remote photoplethysmography (rPPG) refers to the camera-based measurement
 5 | of a blood volume pulse signal. It works by detecting small changes in skin
 6 | color, originating from the pulsation of blood[^1].
 7 | 
 8 | !!! warning
 9 | 
10 |     This is just a hobby project. Intended for demo purposes only, the
11 |     provided program/code is not suitable to be used in a clinical setup
12 |     or for any decision making in general.
13 | 
14 | ## Installation and usage
15 | In order to run the yarPPG application, clone
16 | [the repository](https://github.com/SamProell/yarppg) and navigate
17 | to the downloaded folder. You can then install the folder into your Python
18 | environment. This will install the `run-yarppg` command.
19 | 
20 | ```bash
21 | git clone https://github.com/SamProell/yarppg.git
22 | cd yarppg
23 | pip install "."
24 | run-yarppg
25 | ```
26 | 
27 | ![yarPPG Qt6-based simple user interface](images/yarppg-screenshot.png)
28 | 
29 | ## Core functionality
30 | Different from earlier versions of yarPPG, the core functionality for remote PPG
31 | signal extraction has been completely decoupled from the user interface.
32 | The `Rppg` class combines all required steps (ROI identification, signal extraction,
33 | heart rate estimation) into one (stateful) function.
34 | 
35 | ```python
36 | import yarppg
37 | 
38 | rppg = yarppg.Rppg()
39 | 
40 | while running:
41 |     # frame = ...  # get an image array of shape h x w x 3
42 |     result = rppg.process_frame(frame)
43 |     print(f"Current rPPG signal value: {result.value} (HR: {result.hr})")
44 | ```
45 | 
46 | See [this guide](./deepdive.py) if you need more fine-grained control over the individual
47 | calculation steps.
48 | The `Rppg` class also comes with a method to process an entire video file
49 | at once. See more details [here](./video_processing.py).
50 | 
51 | ## User interfaces
52 | The default user interface launched by the `run-yarppg` command is a simplistic
53 | window based on OpenCV.
54 | More elaborate user interfaces are available, but require additional dependencies.
55 | 
56 | ### Simple Qt6 window
57 | ```bash
58 | pip install ".[qt6]"
59 | run-yarppg ui=qt6_simple
60 | ```
61 | 
62 | ### More to come
63 | You are welcome to contribute.
64 | 
65 | [^1]: W Verkruysse, L O Svaasand and J S Nelson. Remote plethysmographic
66 |     imaging using ambient light. *Optics Express*. 2008;16(26):21434–21445.
67 | doi:[10.1364/oe.16.021434](https://doi.org/10.1364/oe.16.021434) 68 | -------------------------------------------------------------------------------- /src/yarppg/roi/roi_tools.py: -------------------------------------------------------------------------------- 1 | """Provides the base container for regions of interests.""" 2 | 3 | import cv2 4 | import numpy as np 5 | from numpy.typing import ArrayLike 6 | 7 | from yarppg.containers import Color 8 | 9 | 10 | def pixelate(img: np.ndarray, xywh: tuple[int, int, int, int], size: int): 11 | """Blur a rectangular region with oversized pixels.""" 12 | x, y, w, h = xywh 13 | slicex = slice(x, x + w) 14 | slicey = slice(y, y + h) 15 | 16 | tmp = cv2.resize( 17 | img[slicey, slicex], 18 | (w // size, h // size), 19 | interpolation=cv2.INTER_LINEAR, 20 | ) 21 | img[slicey, slicex] = cv2.resize(tmp, (w, h), interpolation=cv2.INTER_NEAREST) 22 | 23 | 24 | def pixelate_mask(img: np.ndarray, mask: np.ndarray, size: int = 10): 25 | """Blur the bounding box of a mask with oversized pixels.""" 26 | bbox: tuple[int, int, int, int] = cv2.boundingRect(mask) # type: ignore 27 | pixelate(img, bbox, size=size) 28 | 29 | 30 | def contour_to_mask(size: tuple[int, int], points: ArrayLike) -> np.ndarray: 31 | """Create a binary mask filled inside the polygon defined by the given points. 32 | 33 | Args: 34 | size: height and width of the target image. 35 | points: list of polygon coordinates. 36 | 37 | Returns: 38 | A binary mask of the desired size, filled with ones. 39 | """ 40 | mask = np.zeros(size, dtype="uint8") 41 | contours = np.reshape(np.asarray(points), (1, -1, 1, 2)) 42 | return cv2.drawContours(mask, contours, 0, color=1, thickness=cv2.FILLED) # type: ignore 43 | 44 | 45 | def overlay_mask( 46 | img: np.ndarray, 47 | mask: np.ndarray, 48 | color: tuple[int, int, int] = (255, 0, 0), 49 | alpha: float = 0.5, 50 | ) -> np.ndarray: 51 | """Overlay masked region in an image with a transparent color. 52 | 53 | Args: 54 | img: OpenCV-compatible image. 55 | mask: boolean mask defining the pixels to be overlaid. 56 | color: base color of the overlay. Defaults to red. 57 | alpha: intensity of the overlay. 0 (empty) to 1 (solid). Defaults to 0.5. 58 | 59 | Returns: 60 | OpenCV image. 61 | """ 62 | overlay = img.copy() 63 | if mask.sum() == 0: 64 | return img 65 | overlay[mask] = color 66 | return cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0) 67 | 68 | 69 | def masked_average(frame: np.ndarray, mask: np.ndarray) -> Color: 70 | """Calculate average color of the masked region.""" 71 | if mask.sum() == 0: 72 | return Color.null() 73 | r, g, b, _ = cv2.mean(frame, mask) 74 | return Color(r, g, b) 75 | -------------------------------------------------------------------------------- /src/yarppg/processors/chrom.py: -------------------------------------------------------------------------------- 1 | """Chrominance-based rPPG method introduced by de Haan & Jeanne (2013). 2 | 3 | > de Haan, G., & Jeanne, V. (2013). Robust Pulse Rate From 4 | Chrominance-Based rPPG. IEEE Transactions on Biomedical Engineering, 5 | 60(10), 2878-2886. 6 | """ 7 | 8 | from typing import Literal 9 | 10 | import numpy as np 11 | 12 | from ..containers import Color, RegionOfInterest, RppgResult 13 | from .processor import Processor 14 | 15 | 16 | class ChromProcessor(Processor): 17 | """Chrominance-based rPPG algorithm by de Haan & Jeanne (2013). 18 | 19 | Args: 20 | winsize: window size for moving average calculations. Defaults to 45. 21 | method: method to use. 
Can be 'xovery' or 'fixed'. Defaults to "xovery".
22 |     """
23 | 
24 |     def __init__(
25 |         self, winsize: int = 45, method: Literal["fixed", "xovery"] = "xovery"
26 |     ):
27 |         Processor.__init__(self)
28 | 
29 |         self.winsize = winsize
30 |         self.method = method
31 | 
32 |         self._rgbs: list[Color] = []
33 |         self._xs: list[float] = []
34 |         self._ys: list[float] = []
35 | 
36 |     def process(self, roi: RegionOfInterest) -> RppgResult:
37 |         """Calculate pulse signal update according to Chrom algorithm."""
38 |         result = super().process(roi)
39 |         self._rgbs.append(result.roi_mean)
40 | 
41 |         if self.method == "fixed":
42 |             result.value = self._calculate_fixed_update()
43 | 
44 |         elif self.method == "xovery":
45 |             result.value = self._calculate_xovery_update()
46 | 
47 |         return result
48 | 
49 |     def _calculate_fixed_update(self) -> float:
50 |         rgbmean = Color.from_array(np.mean(self._rgbs[-self.winsize :], axis=0))
51 | 
52 |         rn = self._rgbs[-1].r / (rgbmean.r or 1.0)
53 |         gn = self._rgbs[-1].g / (rgbmean.g or 1.0)
54 |         bn = self._rgbs[-1].b / (rgbmean.b or 1.0)
55 | 
56 |         self._xs.append(3 * rn - 2 * gn)
57 |         self._ys.append(1.5 * rn + gn - 1.5 * bn)
58 | 
59 |         return self._xs[-1] / (self._ys[-1] or 1.0) - 1
60 | 
61 |     def _calculate_xovery_update(self) -> float:
62 |         rgb = self._rgbs[-1]
63 | 
64 |         self._xs.append(rgb.r - rgb.g)
65 |         self._ys.append(0.5 * rgb.r + 0.5 * rgb.g - rgb.b)
66 | 
67 |         xmean = np.mean(self._xs[-self.winsize :])
68 |         ymean = np.mean(self._ys[-self.winsize :])
69 | 
70 |         return float(xmean / (ymean or 1) - 1)
71 | 
72 |     def reset(self):
73 |         """Reset internal state and intermediate values."""
74 |         self._rgbs.clear()
75 |         self._xs.clear()
76 |         self._ys.clear()
--------------------------------------------------------------------------------
/src/yarppg/digital_filter.py:
--------------------------------------------------------------------------------
 1 | """Provides tools for applying digital filters in a real-time application."""
 2 | 
 3 | from dataclasses import dataclass
 4 | from typing import Sequence
 5 | 
 6 | import numpy as np
 7 | import scipy.signal
 8 | 
 9 | 
10 | @dataclass
11 | class FilterConfig:
12 |     """Container for configuration of a digital filter.
13 | 
14 |     This configuration allows creation of filters through `scipy.signal.iirfilter`.
15 | 
16 |     Attributes:
17 |         fs: expected sampling rate of the signal.
18 |         f1: first cut-off frequency.
19 |         f2: second cut-off frequency. Required for bandpass filters. Defaults to None.
20 |         btype: type of the filter. low-, high-, or bandpass.
21 |         ftype: type of the filter design. Butterworth is good for most cases.
22 |         order: order of the filter.
23 |     """
24 | 
25 |     fs: float
26 |     f1: float
27 |     f2: float | None = None
28 |     btype: str = "low"
29 |     ftype: str = "butter"
30 |     order: int = 2
31 | 
32 | 
33 | class DigitalFilter:
34 |     """Live digital filter processing one sample at a time.
35 | 
36 |     Args:
37 |         b: numerator coefficients obtained from scipy.
38 |         a: denominator coefficients obtained from scipy.
39 |         xi: first signal value used to initialize the filter state.
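
    A minimal sketch, building the live filter from a `FilterConfig` (defined
    above) and feeding it one synthetic sample at a time:

    ```python
    import numpy as np

    from yarppg.digital_filter import FilterConfig, make_digital_filter

    cfg = FilterConfig(fs=30.0, f1=0.5, f2=2.0, btype="bandpass")
    livefilter = make_digital_filter(cfg)
    ys = [livefilter.process(x) for x in np.sin(np.linspace(0, 10, 300))]
    ```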
40 | """ 41 | 42 | def __init__(self, b: np.ndarray, a: np.ndarray, xi: float = 0): 43 | self.b = b 44 | self.a = a 45 | self.reset(xi) 46 | 47 | def process(self, x: float) -> float: 48 | """Process incoming data and update filter state.""" 49 | y, self.zi = scipy.signal.lfilter(self.b, self.a, [x], zi=self.zi) 50 | return y[0] 51 | 52 | def process_signal(self, x: Sequence[float]) -> np.ndarray: 53 | """Process an entire signal at once (SciPy's lfilter with current state).""" 54 | y, self.zi = scipy.signal.lfilter(self.b, self.a, x, zi=self.zi) 55 | return y 56 | 57 | def reset(self, xi: float = 0): 58 | """Reset filter state to initial value.""" 59 | self.zi = scipy.signal.lfiltic(self.b, self.a, [xi], xi) 60 | 61 | 62 | def filtercoeffs_from_config(cfg: FilterConfig): 63 | """Get coefficients (b, a) for filter with given settings.""" 64 | cutoff = [cfg.f1] 65 | if cfg.f2: 66 | cutoff.append(cfg.f2) 67 | b, a = scipy.signal.iirfilter( 68 | cfg.order, cutoff, btype=cfg.btype, ftype=cfg.ftype, fs=cfg.fs 69 | ) 70 | return b, a 71 | 72 | 73 | def make_digital_filter(cfg: FilterConfig) -> DigitalFilter: 74 | """Create live digital filter with given settings.""" 75 | b, a = filtercoeffs_from_config(cfg) 76 | return DigitalFilter(b, a) 77 | -------------------------------------------------------------------------------- /src/yarppg/roi/selfie_segmenter.py: -------------------------------------------------------------------------------- 1 | """Detect the face skin region with MediaPipe's selfie segmentation. 2 | 3 | This method is very slow (150-200ms per frame) and will not properly work in 4 | a real-time setting. `FaceMeshDetector` should be used instead. 5 | 6 | More information on the selfie segmenter can be found here: 7 | 8 | """ 9 | 10 | import time 11 | 12 | import mediapipe as mp 13 | import numpy as np 14 | 15 | from ..containers import RegionOfInterest 16 | from ..helpers import get_cached_resource_path 17 | from .detector import RoiDetector 18 | 19 | MEDIAPIPE_MODELS_BASE = "https://storage.googleapis.com/mediapipe-models/" 20 | SELFIE_TASK = "image_segmenter/selfie_multiclass_256x256/float32/latest/selfie_multiclass_256x256.tflite" # noqa: E501 21 | 22 | 23 | def get_selfie_segmenter_modelfile(): 24 | """Get the filename of the SelfieSegmenter - download file if necessary.""" 25 | task_filename = "selfie_multiclass.tflite" 26 | return get_cached_resource_path(task_filename, MEDIAPIPE_MODELS_BASE + SELFIE_TASK) 27 | 28 | 29 | class SelfieDetector(RoiDetector): 30 | """Face detector based on MediaPipe's selfie segmentation task.""" 31 | 32 | def __init__(self, confidence=0.5, **kwargs): 33 | super().__init__(**kwargs) 34 | self.confidence = confidence 35 | 36 | modelpath = get_selfie_segmenter_modelfile() 37 | if modelpath is None: 38 | raise FileNotFoundError("Could not find or download segmenter model file.") 39 | 40 | base_options = mp.tasks.BaseOptions(model_asset_path=modelpath) 41 | segmenter_options = mp.tasks.vision.ImageSegmenterOptions( 42 | base_options=base_options, running_mode=mp.tasks.vision.RunningMode.VIDEO 43 | ) 44 | self.segmenter = mp.tasks.vision.ImageSegmenter.create_from_options( 45 | segmenter_options 46 | ) 47 | 48 | def __del__(self): 49 | self.segmenter.close() 50 | 51 | def detect(self, frame: np.ndarray) -> RegionOfInterest: 52 | """Identify face skin region and background in the given image.""" 53 | rawimg = frame.copy() 54 | mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame) 55 | 56 | results = self.segmenter.segment_for_video( 57 | 
mp_image, int(time.perf_counter() * 1000) 58 | ) 59 | 60 | face_mask = results.confidence_masks[3].numpy_view() > self.confidence 61 | bg_mask = results.confidence_masks[0].numpy_view() > self.confidence 62 | return RegionOfInterest( 63 | face_mask.astype(np.uint8), baseimg=rawimg, bg_mask=bg_mask.astype(np.uint8) 64 | ) 65 | -------------------------------------------------------------------------------- /src/yarppg/ui/qt6/camera.py: -------------------------------------------------------------------------------- 1 | """Provides compatible wrappers around the CV2 camera input.""" 2 | import time 3 | 4 | import cv2 5 | import numpy as np 6 | from PyQt6.QtCore import QObject, QThread, pyqtSignal 7 | 8 | 9 | class Camera(QThread): 10 | """Wraps cv2.VideoCapture and emits Qt signals with frames in RGB format. 11 | 12 | The `run` function launches a loop that waits for new frames in 13 | the VideoCapture and emits them with a `new_frame` signal. 14 | Calling `stop` stops the loop and releases the camera. 15 | 16 | It is very difficult to set camera properties through OpenCV. Setting 17 | the `exposure` property may or may not work on your end. Range of required 18 | values for exposure are also badly documented and not consistent. 19 | See for example here: 20 | 21 | Also, settings might stay active even after closing the application. 22 | 23 | Args: 24 | video: ID of camera or video filename 25 | parent: parent object in Qt context 26 | delay_frames: delay next read until specified time passed. Defaults to NaN. 27 | exposure: set fixed exposure instead of auto-exposure. Defaults to None. 28 | """ 29 | 30 | frame_received = pyqtSignal(np.ndarray) 31 | 32 | def __init__( 33 | self, 34 | video: int | str = 0, 35 | parent: QObject | None = None, 36 | delay_frames: float = np.nan, 37 | exposure: float | None = None, 38 | ): 39 | QThread.__init__(self, parent=parent) 40 | self._cap = cv2.VideoCapture(video) 41 | if exposure is not None: 42 | self._cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1) # manual mode 43 | self._cap.set(cv2.CAP_PROP_EXPOSURE, exposure) 44 | self._running = False 45 | self.delay_frames = delay_frames 46 | 47 | def run(self): 48 | """Start camera and emit successive frames.""" 49 | self._running = True 50 | while self._running: 51 | ret, frame = self._cap.read() 52 | last_time = time.perf_counter() 53 | 54 | if not ret: 55 | self._running = False 56 | raise RuntimeError("No frame received") 57 | else: 58 | self.frame_received.emit(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) 59 | 60 | while (time.perf_counter() - last_time) < self.delay_frames: 61 | # np.nan will always evaluate to False and thus skip this. 62 | time.sleep(0.001) 63 | 64 | def stop(self): 65 | """Stop camera loop and release resources.""" 66 | self._running = False 67 | time.sleep(0.1) 68 | self._cap.release() 69 | -------------------------------------------------------------------------------- /docs/video_processing.py: -------------------------------------------------------------------------------- 1 | # %% [markdown] 2 | # yarPPG can now also be applied fully offline, without a user interface. 3 | # To streamline such use cases, the `Rppg` orchestrator provides a helper 4 | # function to process a video file in one line: 5 | # [`process_video`](/reference/rppg#yarppg.rppg.Rppg.process_video). 
6 | # %%
7 | import matplotlib.pyplot as plt
8 | import numpy as np
9 | 
10 | import yarppg
11 | 
12 | # %% [markdown]
13 | # ## Setup
14 | # For this demo, we set up a bandpass-filtered version of the default processor,
15 | # which extracts the average green channel of the region of interest.
16 | # We need to know the number of frames per second (FPS) of the video file to
17 | # properly set up the filter.
18 | # [`yarppg.get_video_fps`](/reference/helpers#yarppg.helpers.get_video_fps)
19 | # uses OpenCV to extract the FPS information from the video file.
20 | # %%
21 | filename = "tests/testvideo_30fps.mp4"
22 | 
23 | fps = yarppg.get_video_fps(filename)
24 | filter_cfg = yarppg.digital_filter.FilterConfig(fps, 0.5, 1.5, btype="bandpass")
25 | livefilter = yarppg.digital_filter.make_digital_filter(filter_cfg)
26 | processor = yarppg.FilteredProcessor(yarppg.Processor(), livefilter=livefilter)
27 | # %% [markdown]
28 | # Since the example video is quite short, we modify the behavior of the
29 | # heart rate calculator to produce updates from a window length of only
30 | # four seconds.
31 | # In practice, this will result in less accurate estimates, as outliers
32 | # contribute more to the final result.
33 | # Additionally, since the processors need a few seconds to adjust to the
34 | # specific video content (lighting, colors, etc.), the provided estimates
35 | # for this 10s video are of low quality.
36 | # %%
37 | rppg = yarppg.Rppg(
38 |     processor=processor,
39 |     hr_calc=yarppg.PeakBasedHrCalculator(fps, window_seconds=4),
40 | )
41 | # %% [markdown]
42 | # ## Processing the video
43 | # Once set up, we can process the video in just one line. By default,
44 | # `process_video` returns a list of `RppgResult` containers. Beware
45 | # that these include the raw image data from all frames and ROI masks
46 | # inside the [`RegionOfInterest`
47 | # container](/reference/containers#yarppg.containers.RegionOfInterest).
48 | # %%
49 | results = rppg.process_video(filename)
50 | # %% [markdown]
51 | # ## Handling results
52 | # `RppgResult` allows easy conversion to an array. Even the list of results can
53 | # be converted neatly. This produces an Nx8 array with the following values for
54 | # each frame:
55 | # ```
56 | # value, roi_r, roi_g, roi_b, bg_r, bg_g, bg_b, hr
57 | # ```
58 | #
59 | # %%
60 | values = np.array(results)
61 | hrs = yarppg.bpm_from_frames_per_beat(values[:, -1], fps)
62 | plt.plot(values[int(2.5 * fps) :, 0])
63 | plt.twinx().plot(hrs[int(2.5 * fps) :], "C2")
--------------------------------------------------------------------------------
/src/yarppg/helpers.py:
--------------------------------------------------------------------------------
1 | """Utility functions and helpers."""
2 | 
3 | import collections
4 | import pathlib
5 | import time
6 | import urllib.request
7 | from typing import Iterator
8 | 
9 | import cv2
10 | import numpy as np
11 | from numpy.typing import ArrayLike
12 | 
13 | RESOURCE_DIR = pathlib.Path(__file__).parent / "_resources"
14 | 
15 | 
16 | def get_cached_resource_path(filename: str, url: str, reload: bool = False):
17 |     """Download a file from the web and store it locally."""
18 |     RESOURCE_DIR.mkdir(exist_ok=True)
19 |     local_file = RESOURCE_DIR / filename
20 |     if not local_file.exists() or reload:
21 |         urllib.request.urlretrieve(url, filename=str(local_file))
22 |     if not local_file.exists():
23 |         raise FileNotFoundError(
24 |             f"Something went wrong when getting {filename=!r} from {url=!r}."
25 |         )
26 |     return local_file
27 | 
28 | 
29 | def frames_from_video(filename: str | pathlib.Path) -> Iterator[np.ndarray]:
30 |     """Read and yield frames from a video file."""
31 |     cap = cv2.VideoCapture(str(filename))
32 |     while True:
33 |         ret, frame = cap.read()
34 |         if not ret:
35 |             break
36 |         yield frame
37 |     cap.release()
38 | 
39 | def get_video_fps(filename: str | pathlib.Path) -> float:
40 |     """Find the frame rate of the given video file."""
41 |     if not pathlib.Path(filename).exists():
42 |         raise FileNotFoundError(f"{filename=!r} not found.")
43 |     cap = cv2.VideoCapture(str(filename))
44 |     fps = cap.get(cv2.CAP_PROP_FPS)
45 |     cap.release()
46 |     return fps
47 | 
48 | 
49 | def bpm_from_frames_per_beat(hr: ArrayLike, fps: float) -> np.ndarray:
50 |     """Convert frames per beat to beats per minute (60 * fps / hr)."""
51 |     return 60 * fps / np.asarray(hr)
52 | 
53 | 
54 | class FpsTracker:
55 |     """Utility class to track frames per second.
56 | 
57 |     Use `tracker.tick()` once per update (e.g., per frame). The tracker
58 |     stores the time differences (dt) between successive `tick` calls.
59 |     You can then get the current estimate of FPS through the `tracker.fps`
60 |     property.
61 | 
62 |     Args:
63 |         maxlen: number of time differences to use for FPS calculation. Defaults to 30.
64 |     """
65 | 
66 |     def __init__(self, maxlen=30):
67 |         self.last_update = time.perf_counter()
68 |         self.dts = collections.deque(maxlen=maxlen)
69 | 
70 |     def tick(self):
71 |         """Update tracker (call this once per loop iteration)."""
72 |         now = time.perf_counter()
73 |         self.dts.append(now - self.last_update)
74 |         self.last_update = now
75 | 
76 |     @property
77 |     def fps(self) -> float:
78 |         """Frames per second calculated from average time difference between updates."""
79 |         if len(self.dts) > 0:
80 |             return 1 / (sum(self.dts) / len(self.dts))
81 |         return 1
--------------------------------------------------------------------------------
/src/yarppg/settings.py:
--------------------------------------------------------------------------------
1 | """Provides configuration containers for the yarPPG application."""
2 | 
3 | 
4 | import dataclasses
5 | from typing import Any
6 | 
7 | import hydra.conf
8 | import hydra.core.config_store
9 | import hydra.utils
10 | 
11 | from .digital_filter import FilterConfig
12 | 
13 | 
14 | @dataclasses.dataclass
15 | class UiSettings:
16 |     """Settings for the user interface."""
17 | 
18 | 
19 | @dataclasses.dataclass(kw_only=True)
20 | class HydraSettings:
21 |     """Base class for Hydra-based configurations.
22 | 
23 |     Mainly manages the hydra-specific settings, deactivating its directory and output
24 |     management, so that running with hydra.main does not behave differently from a
25 |     normal CLI.
26 |     """
27 | 
28 |     hydra: "hydra.conf.HydraConf" = dataclasses.field(
29 |         default_factory=lambda: hydra.conf.HydraConf(
30 |             output_subdir=None,
31 |             run=hydra.conf.RunDir("."),
32 |             help=hydra.conf.HelpConf(app_name="run-yarppg"),
33 |             overrides=hydra.conf.OverridesConf(
34 |                 # hydra=["job_logging=null", "hydra_logging=null"]
35 |             ),
36 |         )
37 |     )
38 | 
39 | 
40 | @dataclasses.dataclass
41 | class Settings(HydraSettings):
42 |     """Comprises all configuration options available in the yarppg application."""
43 | 
44 |     ui: Any
45 |     savepath: str | None = None
46 |     detector: str = "facemesh"
47 |     filter: FilterConfig | None = dataclasses.field(
48 |         default_factory=lambda: FilterConfig(30, 0.5, 2, btype="bandpass")
49 |     )
50 |     algorithm: str = "green"
51 |     defaults: Any = dataclasses.field(
52 |         default_factory=lambda: [
53 |             {"ui": "simplest"},
54 |             "_self_",
55 |         ]
56 |     )
57 | 
58 | 
59 | def available_ui_configs():
60 |     """Check availability of UIs and return each corresponding settings container."""
61 |     import yarppg.ui.simplest
62 | 
63 |     uis: dict[str, Any] = {"simplest": yarppg.ui.simplest.SimplestOpenCvWindowSettings}
64 |     try:
65 |         import yarppg.ui.qt6
66 | 
67 |         uis["qt6_simple"] = yarppg.ui.qt6.SimpleQt6WindowSettings
68 |     except ImportError:
69 |         pass
70 | 
71 |     return uis
72 | 
73 | 
74 | def register_schemas():
75 |     """Register base schema and settings for available UI implementations."""
76 |     cs = hydra.core.config_store.ConfigStore.instance()
77 |     cs.store(name="config", node=Settings)
78 |     for name, cfg_class in available_ui_configs().items():
79 |         cs.store(name=name, node=cfg_class, group="ui")
80 | 
81 | 
82 | def get_config(argv: list[str] | None) -> Settings:
83 |     """Get the default configuration with optional overrides."""
84 |     register_schemas()
85 |     with hydra.initialize():
86 |         cfg = hydra.compose(config_name="config", overrides=argv)
87 |     return hydra.utils.instantiate(cfg)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # yarPPG
2 | *yarPPG* is **y**et **a**nother implementation of **r**emote
3 | **P**hoto**P**lethysmo**G**raphy.
4 | Remote photoplethysmography (rPPG) refers to the camera-based measurement
5 | of a blood volume pulse signal. It works by detecting small changes in skin
6 | color, originating from the pulsation of blood[^1].
7 | 
8 | > [!CAUTION]
9 | > This is just a hobby project. Intended for demo purposes only, the
10 | > provided program/code is not suitable to be used in a clinical setup
11 | > or for any decision making in general.
12 | 
13 | > [!IMPORTANT]
14 | > **October 2024 Update** - yarPPG has seen a major overhaul. The rPPG
15 | > processing logic has been completely decoupled from the user interface,
16 | > which now also allows offline processing. Not all features have yet
17 | > been ported to the new code base.
18 | >
19 | > Please submit an issue if you miss something from the old version!
20 | 
21 | ## Documentation
22 | The documentation pages are here: <https://samproell.github.io/yarppg/>
23 | 
24 | ## Installation and usage
25 | In order to run the yarPPG application, clone this repository and navigate
26 | to the downloaded folder. You can then install the package into your Python
27 | environment. This will install the `run-yarppg` command.
28 | 
29 | ```bash
30 | git clone https://github.com/SamProell/yarppg.git
31 | cd yarppg
32 | pip install "."
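# optional: an editable install (standard pip flag) is convenient during
# development, as code changes take effect without reinstalling:
# pip install -e "."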
33 | run-yarppg
34 | ```
35 | 
36 | ![yarPPG's Qt6-based user interface](docs/images/yarppg-screenshot.png)
37 | 
38 | ## Core functionality
39 | Different from earlier versions of yarPPG, the core functionality for remote PPG
40 | signal extraction has been completely decoupled from the user interface.
41 | The `Rppg` class combines all required steps (ROI identification, signal extraction,
42 | heart rate estimation) into one (stateful) `process_frame` call.
43 | 
44 | ```python
45 | import yarppg
46 | 
47 | rppg = yarppg.Rppg()
48 | 
49 | while running:
50 |     # frame = ...  # get an image array of shape h x w x 3
51 |     result = rppg.process_frame(frame)
52 |     print(f"Current rPPG signal value: {result.value} (HR: {result.hr})")
53 | ```
54 | 
55 | See [this guide](https://samproell.github.io/yarppg/deepdive/) if you need more
56 | fine-grained control over the individual calculation steps.
57 | 
58 | The `Rppg` class also comes with a method to process an entire video file
59 | at once. See more details [here](https://samproell.github.io/yarppg/video_processing/).
60 | 
61 | ## User interfaces
62 | The default user interface launched by the `run-yarppg` command is a simplistic
63 | window based on OpenCV.
64 | More elaborate user interfaces are available but require additional dependencies.
65 | 
66 | ### Simple Qt6 window
67 | ```bash
68 | pip install ".[qt6]"
69 | run-yarppg ui=qt6_simple
70 | ```
71 | 
72 | ### More to come; you are welcome to contribute
73 | 
74 | [^1]: W Verkruysse, L O Svaasand and J S Nelson. Remote plethysmographic
75 | imaging using ambient light. *Optics Express*. 2008;16(26):21434–21445.
76 | doi:[10.1364/oe.16.021434](https://doi.org/10.1364/oe.16.021434)
--------------------------------------------------------------------------------
/src/yarppg/rppg.py:
--------------------------------------------------------------------------------
1 | """Provides the Rppg orchestrator class.
2 | 
3 | The orchestrator ties together the typical steps required in an rPPG pipeline:
4 | 
5 | 1. region of interest (ROI) identification ([yarppg.roi][])
6 | 2. rPPG signal extraction ([yarppg.processors][])
7 | 3. heart rate estimation ([yarppg.hr_calculator][])
8 | 
9 | `Rppg`'s [`process_frame`][yarppg.Rppg.process_frame] method performs the three
10 | steps above in order and produces an [yarppg.containers.RppgResult][] that
11 | holds the extracted rPPG signal value as well as the frame, ROI and some
12 | additional information.
13 | 
14 | ```python
15 | import yarppg
16 | 
17 | default_settings = yarppg.Settings()
18 | rppg = yarppg.Rppg.from_settings(default_settings)
19 | 
20 | result = rppg.process_frame(frame)  # input a (h x w x 3)-image array.
21 | print(result.hr)
22 | ```
23 | 
24 | """
25 | 
26 | import pathlib
27 | from typing import Literal, overload
28 | 
29 | import numpy as np
30 | import pandas as pd
31 | import scipy.signal
32 | 
33 | from . import digital_filter, helpers, hr_calculator, processors, roi
34 | from .containers import RppgResult
35 | from .settings import Settings
36 | 
37 | 
38 | class Rppg:
39 |     """Orchestrator for the complete rPPG pipeline.
40 | 
41 |     If unspecified, the following default configuration is used:
42 | 
43 |     - [`FaceMeshDetector`][yarppg.FaceMeshDetector] is used for ROI identification.
44 |     - The base [`Processor`][yarppg.Processor] extracts the average green value.
45 |     - A [`PeakBasedHrCalculator`][yarppg.PeakBasedHrCalculator] estimates HR.
46 | 
47 |     Args:
48 |         roi_detector: detector for identifying the region of interest (and background).
49 |         processor: rPPG signal extraction algorithm.
50 |         hr_calc: heart rate calculation algorithm.
51 |         fps: expected frames per second of the camera/video.
52 |     """
53 | 
54 |     def __init__(
55 |         self,
56 |         roi_detector: roi.RoiDetector | None = None,
57 |         processor: processors.Processor | None = None,
58 |         hr_calc: hr_calculator.HrCalculator | None = None,
59 |         fps: float = 30,
60 |     ):
61 |         self.roi_detector = roi_detector or roi.FaceMeshDetector()
62 |         self.processor = processor or processors.Processor()
63 |         self.hr_calculator = hr_calc or hr_calculator.PeakBasedHrCalculator(fps)
64 | 
65 |     def process_frame(self, frame: np.ndarray) -> RppgResult:
66 |         """Process a single frame from video or live stream."""
67 |         roi = self.roi_detector.detect(frame)
68 |         result = self.processor.process(roi)
69 |         result.hr = self.hr_calculator.update(result.value)
70 | 
71 |         return result
72 | 
73 |     @overload
74 |     def process_video(
75 |         self, filename: str | pathlib.Path, as_dataframe: Literal[True]
76 |     ) -> pd.DataFrame: ...
77 |     @overload
78 |     def process_video(
79 |         self, filename: str | pathlib.Path, as_dataframe: Literal[False] = ...
80 |     ) -> list[RppgResult]:
81 |         ...
82 | 
83 |     def process_video(self, filename: str | pathlib.Path, as_dataframe=False):
84 |         """Convenience function to process an entire video file at once."""
85 |         results = []
86 |         for frame in helpers.frames_from_video(filename):
87 |             result = self.process_frame(frame)
88 |             if as_dataframe:
89 |                 results.append(result.to_series())
90 |             else:
91 |                 results.append(result)
92 |         if as_dataframe:
93 |             return pd.concat(results, axis=1).T
94 |         return results
95 | 
96 |     def reset(self) -> None:
97 |         """Reset internal elements."""
98 |         self.processor.reset()
99 | 
100 |     @classmethod
101 |     def from_settings(cls, settings: Settings) -> "Rppg":
102 |         """Instantiate rPPG orchestrator with the given settings."""
103 |         detector = roi.detectors[settings.detector]()
104 |         processor = processors.algorithms[settings.algorithm]()
105 |         if settings.filter:
106 |             if settings.filter == "bandpass":
107 |                 b, a = scipy.signal.iirfilter(2, [0.7, 1.8], fs=30, btype="band")
108 |                 livefilter = digital_filter.DigitalFilter(b, a)
109 |             else:
110 |                 livefilter = digital_filter.make_digital_filter(settings.filter)
111 |             processor = processors.FilteredProcessor(processor, livefilter)
112 |         return cls(detector, processor)
--------------------------------------------------------------------------------
/docs/deepdive.py:
--------------------------------------------------------------------------------
1 | # %% [markdown]
2 | # # Diving deeper into the rPPG components
3 | # This guide walks you through the inner workings of the
4 | # [`Rppg.process_frame`](/reference/rppg#yarppg.rppg.Rppg.process_frame)
5 | # method.
6 | # %%
7 | import matplotlib.patches
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | 
11 | import yarppg
12 | 
13 | filename = "tests/testvideo_30fps.mp4"
14 | fps = yarppg.helpers.get_video_fps(filename)
15 | # %% [markdown]
16 | # ## Overview
17 | # Remote photoplethysmography (rPPG) typically involves three steps:
18 | #
19 | # - region of interest (ROI) identification
20 | # - signal extraction
21 | # - heart rate estimation
22 | #
23 | # The `yarppg.Rppg` class combines all these steps in a convenient manner.
24 | # By default, `yarppg.Rppg()` will give you a simple processor that
25 | # finds the lower part of a face and extracts the average green channel
26 | # within the region.
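# This default construction is roughly equivalent to spelling out each
# component explicitly (a sketch based on the documented defaults; `fs=30`
# assumes a 30 fps input):
#
# ```python
# rppg = yarppg.Rppg(
#     roi_detector=yarppg.FaceMeshDetector(),
#     processor=yarppg.Processor(),
#     hr_calc=yarppg.PeakBasedHrCalculator(fs=30),
# )
# ```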
27 | # %%
28 | rppg = yarppg.Rppg()
29 | # %% [markdown]
30 | # If you are only interested in the results, you can do something like
31 | # this:
32 | # %%
33 | results: list[yarppg.RppgResult] = []
34 | for frame in yarppg.frames_from_video(filename):
35 |     results.append(rppg.process_frame(frame))
36 | plt.plot(np.array(results)[:, 0])  # plot rPPG signal
37 | # %% [markdown]
38 | # Under the hood, `rppg.process_frame` calls
39 | #
40 | # 1. an ROI detector's `detect` method
41 | # 2. a signal extractor's (`yarppg.Processor`) `process` method
42 | # 3. an `HrCalculator`'s `update` method.
43 | #
44 | # Let's define each component separately.
45 | #
46 | #
47 | # ## Region of interest detection
48 | # yarPPG comes with several different implementations of ROI detectors.
49 | # By default, we use an AI-based face landmarker provided through
50 | # Google's
51 | # [MediaPipe](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker/python).
52 | # The `FaceMeshDetector` applies the face landmarker model and extracts the
53 | # region of the lower face, as is done by Li et al. (2014).
54 | #
55 | # > X. Li, J. Chen, G. Zhao, and M. Pietikainen, “Remote Heart Rate Measurement
56 | # From Face Videos Under Realistic Situations”, Proceedings of the IEEE
57 | # Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4264-4271,
58 | # 2014 [doi:10.1109/CVPR.2014.543](https://doi.org/10.1109/CVPR.2014.543)
59 | #
60 | # We can visualize the ROI mask, which is > 0 for each pixel of the ROI.
61 | # Some detectors, including the `FaceMeshDetector`, also return the bounding box
62 | # of the detected face. We mark the bounding box as a red rectangle below.
63 | # %%
64 | roi_detector = yarppg.FaceMeshDetector()
65 | 
66 | frame = next(yarppg.frames_from_video(filename))
67 | roi = roi_detector.detect(frame)
68 | plt.imshow(roi.mask > 0, cmap="Greys_r", aspect="auto")
69 | 
70 | assert (
71 |     roi.face_rect is not None
72 | )  # FaceMeshDetector also provides a bounding box.
73 | x, y, w, h = roi.face_rect
74 | rect = matplotlib.patches.Rectangle(
75 |     (x, y), w, h, edgecolor="r", facecolor="none"
76 | )
77 | plt.gca().add_patch(rect)
78 | plt.axis("off")
79 | # %% [markdown]
80 | # ## Signal extraction
81 | # The default signal extractor (`yarppg.Processor`) simply calculates the
82 | # average green channel within the region of interest.
83 | # This can already be enough to estimate heart rate accurately, provided
84 | # there is no movement and lighting stays constant.
85 | #
86 | # The processor returns an `RppgResult` container, which includes some
87 | # additional information besides the extracted value.
88 | # For example, all processors return the mean color (R, G and B) of the ROI,
89 | # regardless of the specific algorithm.
90 | # %%
91 | processor = yarppg.Processor()
92 | result = processor.process(roi)
93 | print(result.value == result.roi_mean.g)
94 | # %% [markdown]
95 | # ## Heart rate estimation
96 | # In order to perform heart rate estimation, we need to look at the rPPG signal
97 | # over time. The `yarppg.HrCalculator` keeps an internal buffer of recent
98 | # signal values and periodically updates the heart rate estimate.
99 | # The `PeakBasedHrCalculator` identifies peaks in the rPPG signal and
100 | # calculates heart rate from the average distance of peaks within the buffer
101 | # window.
102 | #
103 | # In your signal processing loop, you can call the `update` method in every
104 | # iteration. The calculator will decide based on the `update_interval`
105 | # attribute whether to perform the calculation or to simply store the value
106 | # in the buffer.
107 | # Below, we set up the calculator to produce a new HR estimate on every 15th
108 | # frame.
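# Note that the estimate is expressed as the average peak distance in frames
# (frames per beat), not in bpm. As a quick sanity check of the arithmetic:
# at 30 fps, an average peak distance of 30 frames corresponds to
# 60 * 30 / 30 = 60 bpm, which is exactly what
# [`bpm_from_frames_per_beat`](/reference/helpers#yarppg.helpers.bpm_from_frames_per_beat)
# computes.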
109 | # %%
110 | hrcalc = yarppg.PeakBasedHrCalculator(
111 |     fs=30, window_seconds=5, distance=0.6, update_interval=15
112 | )
113 | # %% [markdown]
114 | # We can see the internal buffer grow when repeatedly calling `update`.
115 | # Note that HR will be NaN as long as the buffer is smaller than the expected
116 | # window size.
117 | # To clear the buffer, we call `hrcalc.reset()`.
118 | # %%
119 | for res in results[:5]:
120 |     hr = hrcalc.update(res.value)
121 |     print("Buffer length:", len(hrcalc.values), "HR:", hr)
122 | hrcalc.reset()
123 | print("Buffer length:", len(hrcalc.values), "- state cleared.")
124 | # %% [markdown]
125 | # ## Putting everything together
126 | # We can combine all of the above tools to build a fully customizable
127 | # and extendable rPPG processing loop (see the
128 | # [`yarppg.ui.simplest`][yarppg.ui.simplest] loop for an equivalent
129 | # implementation with a simplistic UI).
130 | # %%
131 | # Clear the previous state.
132 | processor.reset()
133 | hrcalc.reset()
134 | 
135 | results: list[yarppg.RppgResult] = []
136 | for i, frame in enumerate(yarppg.frames_from_video(filename)):
137 |     roi = roi_detector.detect(frame)
138 |     result = processor.process(roi)
139 |     result.hr = hrcalc.update(result.value)
140 | 
141 |     results.append(result)
142 |     if i % 30 == 0:
143 |         print(
144 |             f"{i=} {(roi.mask > 0).mean()=:.1%} {result.value=:.2f}"
145 |             f" {result.hr=:.2f}"
146 |         )
147 | 
148 | plt.plot(np.array(results)[:, 0])
--------------------------------------------------------------------------------
/src/yarppg/ui/qt6/simple_window.py:
--------------------------------------------------------------------------------
1 | """Provides a PyQt window for displaying rPPG processing in real-time."""
2 | 
3 | import dataclasses
4 | from collections import deque
5 | 
6 | import numpy as np
7 | import pyqtgraph
8 | import scipy.signal
9 | from PyQt6 import QtCore, QtWidgets
10 | 
11 | import yarppg
12 | from yarppg.ui.qt6 import camera, utils
13 | 
14 | 
15 | @dataclasses.dataclass
16 | class SimpleQt6WindowSettings(yarppg.UiSettings):
17 |     """Settings for the simple Qt6 window."""
18 | 
19 |     blursize: int | None = None
20 |     roi_alpha: float = 0.0
21 |     video: int | str = 0
22 |     frame_delay: float = float("nan")
23 | 
24 | 
25 | class SimpleQt6Window(QtWidgets.QMainWindow):
26 |     """A simple window displaying the webcam feed and processed signals."""
27 | 
28 |     new_image = QtCore.pyqtSignal(np.ndarray)
29 | 
30 |     def __init__(
31 |         self,
32 |         parent: QtWidgets.QWidget | None = None,
33 |         blursize: int | None = None,
34 |         roi_alpha: float = 0,
35 |     ):
36 |         super().__init__(parent=parent)
37 | 
38 |         pyqtgraph.setConfigOptions(
39 |             imageAxisOrder="row-major", antialias=True, foreground="k", background="w"
40 |         )
41 | 
42 |         self.blursize = blursize
43 |         self.roi_alpha = roi_alpha
44 | 
45 |         self.history = deque(maxlen=150)
46 |         self.setWindowTitle("yet another rPPG")
47 |         self._init_ui()
48 |         self.tracker = yarppg.FpsTracker()
49 |         self.new_image.connect(self.update_image)
50 | 
51 |     def _init_ui(self) -> None:
52 |         child = QtWidgets.QWidget()
53 |         layout = QtWidgets.QGridLayout()
54 |         child.setLayout(layout)
55 |         self.setCentralWidget(child)
56 | 
57 |         graph = pyqtgraph.GraphicsLayoutWidget()
58 |         layout.addWidget(graph,
0, 0) 59 | self.img_item = pyqtgraph.ImageItem(axisOrder="row-major") 60 | vb = graph.addViewBox(col=0, row=0, invertX=True, invertY=True, lockAspect=True) # type: ignore 61 | vb.addItem(self.img_item) 62 | 63 | grid = self._make_plots() 64 | layout.addWidget(grid, 0, 1) 65 | 66 | self.fps_label = QtWidgets.QLabel("FPS:") 67 | layout.addWidget( 68 | self.fps_label, 1, 0, alignment=QtCore.Qt.AlignmentFlag.AlignBottom 69 | ) 70 | self.hr_label = QtWidgets.QLabel("HR:") 71 | font = self.hr_label.font() 72 | font.setPointSize(24) 73 | self.hr_label.setFont(font) 74 | layout.addWidget( 75 | self.hr_label, 1, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter 76 | ) 77 | 78 | def _make_plots(self) -> pyqtgraph.GraphicsLayoutWidget: 79 | # We create a 2-row layout with linked x-axes. 80 | # The first plot shows the signal obtained through the processor. 81 | # The second plot shows average R, G and B channels in the ROI. 82 | grid = pyqtgraph.GraphicsLayoutWidget() 83 | main_plot: pyqtgraph.PlotItem = grid.addPlot(row=0, col=0) # type: ignore 84 | self.rgb_plot: pyqtgraph.PlotItem = grid.addPlot(row=1, col=0) # type: ignore 85 | self.rgb_plot.setXLink(main_plot.vb) # type: ignore[attr-defined] 86 | main_plot.hideAxis("bottom") 87 | main_plot.hideAxis("left") 88 | self.rgb_plot.hideAxis("left") 89 | self.plots = [main_plot] 90 | 91 | self.lines = [main_plot.plot(pen=pyqtgraph.mkPen("k", width=3))] 92 | for c in "rgb": 93 | pen = pyqtgraph.mkPen(c, width=1.5) 94 | line, plot = utils.add_multiaxis_plot(self.rgb_plot, pen=pen) 95 | self.plots.append(plot) 96 | self.lines.append(line) 97 | 98 | for plot in self.plots: 99 | plot.disableAutoRange() # type: ignore 100 | 101 | return grid 102 | 103 | def update_image(self, frame: np.ndarray) -> None: 104 | """Update image plot item with new frame.""" 105 | self.img_item.setImage(frame) 106 | 107 | def _handle_roi( 108 | self, frame: np.ndarray, roi: yarppg.RegionOfInterest 109 | ) -> np.ndarray: 110 | if self.blursize is not None and roi.face_rect is not None: 111 | yarppg.pixelate(frame, roi.face_rect, size=self.blursize) 112 | 113 | frame = yarppg.roi.overlay_mask( 114 | frame, roi.mask == 1, color=(98, 3, 252), alpha=self.roi_alpha 115 | ) 116 | 117 | return frame 118 | 119 | def _handle_signals(self, result: yarppg.RppgResult) -> None: 120 | rgb = result.roi_mean 121 | self.history.append((result.value, rgb.r, rgb.g, rgb.b)) 122 | data = np.asarray(self.history) 123 | 124 | self.plots[0].setXRange(0, len(data)) # type: ignore 125 | for i in range(4): 126 | self.lines[i].setData(np.arange(len(data)), data[:, i]) 127 | self.plots[i].setYRange(*utils.get_autorange(data[:, i])) # type: ignore 128 | 129 | def _handle_hrvalue(self, value: float) -> None: 130 | """Update user interface with the new HR value.""" 131 | if np.isfinite(value): 132 | hr_bpm = self.tracker.fps * 60 / value 133 | self.hr_label.setText(f"HR: {hr_bpm:.1f}") 134 | 135 | def _update_fps(self): 136 | self.tracker.tick() 137 | self.fps_label.setText(f"FPS: {self.tracker.fps:.1f}") 138 | 139 | def on_result(self, result: yarppg.RppgResult, frame: np.ndarray) -> None: 140 | """Update user interface with the new rPPG results.""" 141 | self._update_fps() 142 | self.new_image.emit(self._handle_roi(frame, result.roi)) 143 | self._handle_signals(result) 144 | self._handle_hrvalue(result.hr) 145 | 146 | def keyPressEvent(self, e): # noqa: N802 147 | """Handle key presses. 
Closes the window on Q.""" 148 | if e.key() == ord("Q"): 149 | self.close() 150 | 151 | 152 | def launch_window(rppg: yarppg.Rppg, config: SimpleQt6WindowSettings) -> int: 153 | """Launch a simple Qt6-based GUI visualizing rPPG results in real-time.""" 154 | app = QtWidgets.QApplication([]) 155 | win = SimpleQt6Window(blursize=config.blursize, roi_alpha=config.roi_alpha) 156 | 157 | cam = camera.Camera(config.video, delay_frames=config.frame_delay) 158 | cam.frame_received.connect( 159 | lambda frame: win.on_result(rppg.process_frame(frame), frame) 160 | ) 161 | cam.start() 162 | 163 | win.show() 164 | ret = app.exec() 165 | cam.stop() 166 | return ret 167 | 168 | 169 | if __name__ == "__main__": 170 | b, a = scipy.signal.iirfilter(2, [0.7, 1.8], fs=30, btype="band") 171 | livefilter = yarppg.DigitalFilter(b, a) 172 | processor = yarppg.FilteredProcessor(yarppg.Processor(), livefilter) 173 | 174 | rppg = yarppg.Rppg(processor=processor) 175 | launch_window(rppg, yarppg.settings.get_config(["ui=qt6_simple"]).ui) 176 | -------------------------------------------------------------------------------- /src/yarppg/roi/facemesh_segmenter.py: -------------------------------------------------------------------------------- 1 | """Detect the lower face with MediaPipe's FaceMesh detector. 2 | 3 | This detector is based on the [face landmarker task from 4 | MediaPipe](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker/python). 5 | The face landmarker provides locations of more than 450 facial landmarks. 6 | From these, we can define a region for the lower face, as is done for example 7 | by Li et al. (2014)[^1]. 8 | 9 | [^1]: X. Li, J. Chen, G. Zhao, and M. Pietikainen, “Remote Heart Rate 10 | Measurement From Face Videos Under Realistic Situations”, Proceedings of 11 | the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 12 | pp. 
4264-4271, 2014 13 | [doi:10.1109/CVPR.2014.543](https://doi.org/10.1109/CVPR.2014.543) 14 | """ 15 | 16 | import time 17 | import warnings 18 | 19 | import mediapipe as mp 20 | import numpy as np 21 | from mediapipe.framework.formats import landmark_pb2 22 | from mediapipe.tasks.python.components.containers import ( 23 | landmark as landmark_module, # type: ignore 24 | ) 25 | 26 | from ..containers import RegionOfInterest 27 | from ..helpers import get_cached_resource_path 28 | from .detector import RoiDetector 29 | from .roi_tools import contour_to_mask 30 | 31 | MEDIAPIPE_MODELS_BASE = "https://storage.googleapis.com/mediapipe-models/" 32 | LANDMARKER_TASK = "face_landmarker/face_landmarker/float16/latest/face_landmarker.task" 33 | 34 | TESSELATION_SPEC = mp.solutions.drawing_styles.get_default_face_mesh_tesselation_style() # type: ignore 35 | CONTOUR_SPEC = mp.solutions.drawing_styles.get_default_face_mesh_contours_style() # type: ignore 36 | IRISES_SPEC = mp.solutions.drawing_styles.get_default_face_mesh_iris_connections_style() # type: ignore 37 | 38 | 39 | def get_face_landmarker_modelfile(): 40 | """Get the filename of the FaceLandmarker - download file if necessary.""" 41 | task_filename = "face_landmarker.task" 42 | return get_cached_resource_path( 43 | task_filename, MEDIAPIPE_MODELS_BASE + LANDMARKER_TASK 44 | ) 45 | 46 | 47 | def get_landmark_coords( 48 | landmarks: list[landmark_module.NormalizedLandmark], width: int, height: int 49 | ) -> np.ndarray: 50 | """Extract normalized landmark coordinates to array of pixel coordinates.""" 51 | xyz = [(lm.x, lm.y, lm.z) for lm in landmarks] 52 | return np.multiply(xyz, [width, height, width]).astype(int) 53 | 54 | 55 | def get_boundingbox_from_coords(coords: np.ndarray) -> np.ndarray: 56 | """Calculate the bounding rectangle containing all landmarks.""" 57 | xy = np.min(coords, axis=0) 58 | wh = np.subtract(np.max(coords, axis=0), xy) 59 | 60 | return np.r_[xy, wh] 61 | 62 | 63 | class FaceMeshDetector(RoiDetector): 64 | """Face detector using MediaPipe's face landmarker.""" 65 | 66 | _lower_face = [200, 431, 411, 340, 349, 120, 111, 187, 211] 67 | 68 | def __init__(self, draw_landmarks=False, **kwargs): 69 | super().__init__(**kwargs) 70 | modelpath = get_face_landmarker_modelfile() 71 | if modelpath is None: 72 | raise FileNotFoundError("Could not find or download landmarker model file.") 73 | base_options = mp.tasks.BaseOptions(model_asset_path=modelpath) 74 | landmarker_options = mp.tasks.vision.FaceLandmarkerOptions( 75 | base_options=base_options, 76 | running_mode=mp.tasks.vision.RunningMode.VIDEO, 77 | ) 78 | self.landmarker = mp.tasks.vision.FaceLandmarker.create_from_options( 79 | landmarker_options 80 | ) 81 | self.draw_landmarks = draw_landmarks 82 | 83 | def __del__(self): 84 | self.landmarker.close() 85 | 86 | def _process_landmarks(self, frame, results) -> tuple[np.ndarray, np.ndarray]: 87 | height, width = frame.shape[:2] 88 | coords = get_landmark_coords(results.face_landmarks[0], width, height)[:, :2] 89 | face_rect = get_boundingbox_from_coords(coords) 90 | 91 | mask = contour_to_mask((height, width), coords[self._lower_face]) 92 | return mask, face_rect 93 | 94 | def detect(self, frame: np.ndarray) -> RegionOfInterest: 95 | """Find face landmarks and create ROI around the lower face region.""" 96 | rawimg = frame.copy() 97 | mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame) 98 | with warnings.catch_warnings(): 99 | warnings.simplefilter("ignore") 100 | results = self.landmarker.detect_for_video( 
101 |             mp_image, int(time.perf_counter() * 1000)
102 |         )
103 | 
104 |         if len(results.face_landmarks) < 1:
105 |             return RegionOfInterest(np.zeros(frame.shape[:2], dtype=np.uint8), baseimg=frame)
106 | 
107 |         if self.draw_landmarks:
108 |             self.draw_facemesh(frame, results.face_landmarks[0], tesselate=True)
109 | 
110 |         mask, face_rect = self._process_landmarks(frame, results)
111 |         return RegionOfInterest(mask, baseimg=rawimg, face_rect=tuple(face_rect))
112 | 
113 |     def draw_facemesh(
114 |         self,
115 |         img,
116 |         face_landmarks,
117 |         tesselate=False,
118 |         contour=False,
119 |         irises=False,
120 |     ):
121 |         """Draw the detected face landmarks on the image."""
122 |         face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()  # type: ignore
123 |         face_landmarks_proto.landmark.extend(
124 |             [
125 |                 landmark_pb2.NormalizedLandmark(  # type: ignore
126 |                     x=landmark.x, y=landmark.y, z=landmark.z
127 |                 )
128 |                 for landmark in face_landmarks
129 |             ]
130 |         )
131 |         if tesselate:
132 |             mp.solutions.drawing_utils.draw_landmarks(  # type: ignore
133 |                 image=img,
134 |                 landmark_list=face_landmarks_proto,
135 |                 connections=mp.solutions.face_mesh.FACEMESH_TESSELATION,  # type: ignore
136 |                 landmark_drawing_spec=None,
137 |                 connection_drawing_spec=TESSELATION_SPEC,
138 |             )
139 |         if contour:
140 |             mp.solutions.drawing_utils.draw_landmarks(  # type: ignore
141 |                 image=img,
142 |                 landmark_list=face_landmarks_proto,
143 |                 connections=mp.solutions.face_mesh.FACEMESH_CONTOURS,  # type: ignore
144 |                 landmark_drawing_spec=None,
145 |                 connection_drawing_spec=CONTOUR_SPEC,
146 |             )
147 |         if irises:
148 |             mp.solutions.drawing_utils.draw_landmarks(  # type: ignore
149 |                 image=img,
150 |                 landmark_list=face_landmarks_proto,
151 |                 connections=mp.solutions.face_mesh.FACEMESH_IRISES,  # type: ignore
152 |                 landmark_drawing_spec=None,
153 |                 connection_drawing_spec=IRISES_SPEC,
154 |             )
155 | 
--------------------------------------------------------------------------------