├── .gitattributes ├── .github └── workflows │ └── main.yml ├── .gitignore ├── .pylintrc ├── CHANGELOG.md ├── LICENSE.txt ├── Pipfile ├── Pipfile.lock ├── README.md ├── pyproject.toml ├── setup.cfg └── src ├── main ├── python │ └── camdkit │ │ ├── __init__.py │ │ ├── arri │ │ ├── __init__.py │ │ ├── cli.py │ │ └── reader.py │ │ ├── bmd │ │ ├── __init__.py │ │ ├── cli.py │ │ └── reader.py │ │ ├── camera_types.py │ │ ├── canon │ │ ├── __init__.py │ │ ├── cli.py │ │ └── reader.py │ │ ├── clip.py │ │ ├── compatibility.py │ │ ├── examples.py │ │ ├── framework.py │ │ ├── lens_types.py │ │ ├── model.py │ │ ├── mosys │ │ ├── __init__.py │ │ ├── cli.py │ │ ├── f4.py │ │ └── reader.py │ │ ├── numeric_types.py │ │ ├── red │ │ ├── __init__.py │ │ ├── cli.py │ │ ├── cooke.py │ │ └── reader.py │ │ ├── string_types.py │ │ ├── timing_types.py │ │ ├── tracker_types.py │ │ ├── transform_types.py │ │ ├── units.py │ │ ├── utils.py │ │ ├── venice │ │ ├── __init__.py │ │ ├── cli.py │ │ └── reader.py │ │ └── versioning_types.py └── resources │ ├── css │ ├── generic-light.min.css │ └── style.css │ ├── img │ ├── Axis_Rotation.svg │ ├── Coordinate_System.svg │ ├── Example_System.svg │ ├── Producer_Consumers.svg │ ├── RISLogoFinalwhiteColor.png │ ├── RTP_Transport.svg │ ├── download.svg │ └── logo_white.svg │ ├── lyx │ ├── OpenCV_to_OpenTrackIO.lyx │ └── OpenCV_to_OpenTrackIO.tex │ └── res │ ├── OpenCV_to_OpenTrackIO.pdf │ ├── OpenLensIO_v1-0-0.pdf │ ├── android-chrome-192x192.png │ ├── android-chrome-512x512.png │ ├── apple-touch-icon.png │ ├── browserconfig.xml │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ ├── favicon.ico │ ├── mstile-150x150.png │ ├── safari-pinned-tab.svg │ └── site.webmanifest ├── test ├── cpp │ └── opentrackio-parser │ │ ├── .gitignore │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ ├── conanfile.py │ │ ├── get-opentrackio-json.bat │ │ ├── get-opentrackio-json.sh │ │ └── src │ │ ├── argparse │ │ └── argparse.hpp │ │ ├── main.cpp │ │ └── opentrackio-lib │ │ ├── 
OpenTrackIOParser.cpp │ │ └── OpenTrackIOParser.h ├── python │ ├── __init__.py │ ├── parser │ │ ├── README.md │ │ ├── opentrackio_lib.py │ │ ├── opentrackio_parser.py │ │ ├── opentrackio_receiver.py │ │ └── opentrackio_sender.py │ ├── test_arri_reader.py │ ├── test_bmd_reader.py │ ├── test_camera_types.py │ ├── test_canon_reader.py │ ├── test_clip.py │ ├── test_compatibility.py │ ├── test_cooke_data.py │ ├── test_example_regression.py │ ├── test_lens_types.py │ ├── test_model.py │ ├── test_mosys_reader.py │ ├── test_numeric_types.py │ ├── test_red_reader.py │ ├── test_schema_regression.py │ ├── test_string_types.py │ ├── test_timing_types.py │ ├── test_tracker_types.py │ ├── test_transform_types.py │ ├── test_utils.py │ ├── test_venice_reader.py │ └── test_versioning_types.py └── resources │ ├── arri │ ├── B001C001_180327_R1ZA.mov.csv │ └── README.txt │ ├── bmd │ ├── README.txt │ └── metadata.txt │ ├── canon │ ├── 20221007_TNumber_CanonCameraMetadata_Frames.csv │ ├── 20221007_TNumber_CanonCameraMetadata_Static.csv │ └── README.txt │ ├── classic │ ├── examples │ │ ├── complete_dynamic_example.json │ │ ├── complete_static_example.json │ │ ├── recommended_dynamic_example.json │ │ └── recommended_static_example.json │ ├── schema.json │ └── subschemas │ │ ├── lens.json │ │ ├── static_camera.json │ │ ├── static_duration.json │ │ ├── static_lens.json │ │ ├── static_tracker.json │ │ ├── timing.json │ │ ├── tracker.json │ │ └── transforms.json │ ├── mosys │ └── A003_C001_01 15-03-47-01.f4 │ ├── red │ ├── A001_C066_0303LZ_001.frames.csv │ ├── A001_C066_0303LZ_001.static.csv │ └── README.txt │ └── venice │ ├── D001C005_210716AG.csv │ ├── D001C005_210716AGM01.xml │ └── README.txt └── tools └── python ├── generate_complete_dynamic_example.py ├── generate_complete_static_example.py ├── generate_component_schemas.py ├── generate_opentrackio_schema.py ├── generate_recommended_dynamic_example.py ├── generate_recommended_static_example.py ├── make_documentation.py ├── 
make_opentrackio_documentation.py └── templates └── OpenTrackIO.html /.gitattributes: -------------------------------------------------------------------------------- 1 | # Basic .gitattributes for a python repo. 2 | # Adapted from https://github.com/alexkaratarakis/gitattributes/blob/master/Python.gitattributes 3 | * text=auto 4 | # Markdown 5 | *.md text diff=markdown 6 | # shell files 7 | *.sh text eol=lf 8 | # Source files 9 | *.pxd text eol=lf diff=python 10 | *.py text eol=lf diff=python 11 | *.py3 text eol=lf diff=python 12 | *.pyw text eol=lf diff=python 13 | *.pyx text eol=lf diff=python 14 | *.pyz text eol=lf diff=python 15 | *.pyi text eol=lf diff=python 16 | # Binary files 17 | *.db binary 18 | *.p binary 19 | *.pkl binary 20 | *.pickle binary 21 | *.pyc binary export-ignore 22 | *.pyo binary export-ignore 23 | *.pyd binary 24 | # Jupyter notebook 25 | *.ipynb text 26 | # RED camera files 27 | *.R3D filter=lfs diff=lfs merge=lfs -text 28 | *.r3d filter=lfs diff=lfs merge=lfs -text 29 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | pull_request: 6 | 7 | env: 8 | AWS_REGION: us-east-1 9 | AWS_RIS_PUB_BUCKET: smpte-ris-pub 10 | AWS_RIS_PUB_ROLE: arn:aws:iam::189079736792:role/role-gh-ris-pub 11 | AWS_OPENTRACKIO_BUCKET: opentrackio-www 12 | AWS_OPENTRACKIO_ROLE: arn:aws:iam::189079736792:role/role-opentrackio-www 13 | RIS_PUB_PREFIX: https://ris-pub.smpte.org 14 | PYTHONPATH: src/main/python 15 | 16 | jobs: 17 | build: 18 | if: > 19 | github.repository_owner == 'SMPTE' && ( 20 | (github.event_name == 'push' && github.ref == 'refs/heads/main') 21 | || github.event_name == 'pull_request' 22 | || github.event_name == 'release' 23 | ) 24 | runs-on: ubuntu-latest 25 | 26 | permissions: 27 | id-token: write 28 | contents: write 29 | pull-requests: write 30 | 31 | steps: 32 | - 
uses: actions/checkout@v4 33 | 34 | - uses: actions/setup-python@v5 35 | with: 36 | python-version: '3.11' 37 | 38 | - name: Install Python dependencies 39 | run: | 40 | python -m pip install --upgrade pipenv 41 | pipenv install --deploy --dev 42 | 43 | - name: Setup build directory 44 | run: mkdir -p build 45 | 46 | - name: Unit tests 47 | run: | 48 | pipenv run python -m unittest discover -v -s src/test/python 49 | 50 | - name: Generate opentrackio documentation 51 | run: | 52 | pipenv run python src/tools/python/make_opentrackio_documentation.py 53 | 54 | - name: Generate parameter documentation 55 | run: | 56 | pipenv run python src/tools/python/make_documentation.py > build/index.md 57 | 58 | - name: Generate HTML 59 | uses: BaileyJM02/markdown-to-pdf@v1.2.0 60 | with: 61 | input_path: build/index.md 62 | output_dir: build/ 63 | 64 | - name: Deploy to GitHub pages 65 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 66 | uses: peaceiris/actions-gh-pages@v3 67 | with: 68 | github_token: ${{ secrets.GITHUB_TOKEN }} 69 | publish_dir: build/ 70 | 71 | - name: Configure AWS Credentials 72 | uses: aws-actions/configure-aws-credentials@v4 73 | with: 74 | role-to-assume: ${{ env.AWS_RIS_PUB_ROLE }} 75 | aws-region: ${{ env.AWS_REGION }} 76 | 77 | - name: Create review copy at https://ris-pub.smpte.org/ 78 | if: github.event_name != 'push' || github.ref != 'refs/heads/main' 79 | id: deployRISpub 80 | shell: bash 81 | run: | 82 | SNAP_PATH=${GITHUB_REPOSITORY#*/}/${GITHUB_SHA} 83 | aws s3 sync ./build/ s3://${{ env.AWS_RIS_PUB_BUCKET }}/${SNAP_PATH} 84 | echo "SNAP_PATH=$SNAP_PATH" >> $GITHUB_OUTPUT 85 | 86 | - name: Publish documentation at https://ris-pub.smpte.org/ 87 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 88 | shell: bash 89 | run: | 90 | aws s3 sync ./build/opentrackio s3://${{ env.AWS_RIS_PUB_BUCKET }} 91 | 92 | - name: Configure AWS Credentials 93 | uses: aws-actions/configure-aws-credentials@v4 94 | with: 95 | 
role-to-assume: ${{ env.AWS_OPENTRACKIO_ROLE }} 96 | aws-region: ${{ env.AWS_REGION }} 97 | 98 | - name: Publish documentation at https://www.opentrackio.org/ 99 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 100 | shell: bash 101 | run: | 102 | aws s3 sync ./build/opentrackio s3://${{ env.AWS_OPENTRACKIO_BUCKET }} 103 | 104 | - uses: marocchino/sticky-pull-request-comment@v2 105 | if: github.event_name == 'pull_request' 106 | with: 107 | message: | 108 | [OpenTrackIO documentation](${{ env.RIS_PUB_PREFIX }}/${{ steps.deployRISpub.outputs.SNAP_PATH }}/opentrackio/) 109 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | # Visual Studio Code 141 | .vscode 142 | 143 | # Idea 144 | .idea/ 145 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) Society of Motion Picture and Television Engineers 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this 7 | list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | 3. Neither the name of the copyright holder nor the names of its contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 
16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | pylint = "==2.6.0" 8 | coverage = "*" 9 | jsonschema = "*" 10 | jinja2 = "*" 11 | 12 | [requires] 13 | python_version = "3.11" 14 | pydantic = "*" 15 | jsonref = "*" 16 | cbor2 = "*" 17 | ntplib = "*" 18 | 19 | [packages] 20 | pydantic = "*" 21 | jsonref = "*" 22 | cbor2 = "*" 23 | ntplib = "*" 24 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = camdkit 3 | version = 1.0.0dev.1 4 | description = OSVP Camera Metadata Toolkit 
-------------------------------------------------------------------------------- /src/main/python/camdkit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/python/camdkit/__init__.py -------------------------------------------------------------------------------- /src/main/python/camdkit/arri/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/python/camdkit/arri/__init__.py -------------------------------------------------------------------------------- /src/main/python/camdkit/arri/cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''ARRI CLI tool''' 8 | 9 | import json 10 | import argparse 11 | import camdkit.arri.reader 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser(description="Converts ARRI camera metadata to JSON according to the OSVP Camera Metadata Model.") 15 | parser.add_argument('csv_path', type=str, help='Path the CSV file extracted using ARRI Meta Extract (AME)') 16 | 17 | args = parser.parse_args() 18 | 19 | model = camdkit.arri.reader.to_clip(args.csv_path) 20 | 21 | print(json.dumps(model.to_json(), indent=2)) 22 | 23 | if __name__ == "__main__": 24 | main() 25 | -------------------------------------------------------------------------------- /src/main/python/camdkit/arri/reader.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS 
OSVP Metadata Project 6 | 7 | '''ARRI camera metadata reader''' 8 | 9 | import csv 10 | import math 11 | import typing 12 | from fractions import Fraction 13 | 14 | import camdkit.model 15 | import camdkit.utils as utils 16 | 17 | # https://www.arri.com/resource/blob/31908/14147b455c90a9a35018c0d091350ff3/2021-10-arri-formatsandresolutionsoverview-3-4-data.pdf 18 | _CAMERA_FAMILY_PIXEL_PITCH_MAP = { 19 | ("ALEXALF", 1920) : Fraction(316800, 1920), 20 | ("ALEXALF", 2048) : Fraction(316800, 2048), 21 | ("ALEXALF", 3840) : Fraction(316800, 3840), 22 | ("ALEXALF", 4448) : Fraction(367000, 4448), 23 | } 24 | 25 | def t_number_from_linear_iris_value(lin_value: int) -> typing.Optional[Fraction]: 26 | """Calculate t-number (regular iris values) from linear iris values 27 | """ 28 | return math.pow(2, (lin_value - 1000)/1000/2) 29 | 30 | def to_clip(csv_path: str) -> camdkit.model.Clip: 31 | """Read ARRI camera metadata into a `Clip`. `csv_path` is the path to a CSV 32 | file extracted using ARRI Meta Extract (AME).""" 33 | 34 | with open(csv_path, encoding="utf-8") as csvfile: 35 | csv_data = list(csv.DictReader(csvfile, dialect="excel-tab")) 36 | 37 | n_frames = len(csv_data) 38 | 39 | if n_frames <= 0: 40 | raise ValueError("No data") 41 | 42 | clip = camdkit.model.Clip() 43 | 44 | assert csv_data[0]["Lens Distance Unit"] == "Meter" 45 | 46 | clip.iso = int(csv_data[0]["Exposure Index ASA"]) 47 | 48 | clip.duration = len(csv_data)/Fraction(csv_data[0]["Project FPS"]) 49 | 50 | clip.camera_make = "ARRI" 51 | 52 | clip.camera_model = csv_data[0]["Camera Model"] 53 | 54 | clip.camera_serial_number = csv_data[0]["Camera Serial Number"] 55 | 56 | lens_model = csv_data[0]["Lens Model"] 57 | 58 | if lens_model.startswith("ARRI "): 59 | clip.lens_make = "ARRI" 60 | clip.lens_model = lens_model[5:] 61 | else: 62 | clip.lens_model = lens_model 63 | 64 | clip.lens_serial_number = csv_data[0]["Lens Serial Number"] 65 | 66 | clip.capture_frame_rate = 
utils.guess_fps(Fraction(csv_data[0]["Project FPS"])) 67 | 68 | clip.shutter_angle = float(csv_data[0]["Shutter Angle"]) 69 | 70 | clip.anamorphic_squeeze = Fraction(csv_data[0]["Lens Squeeze"]) 71 | 72 | pix_dims = camdkit.model.Dimensions( 73 | width=int(csv_data[0]["Image Width"]), 74 | height=int(csv_data[0]["Image Height"]) 75 | ) 76 | pixel_pitch = _CAMERA_FAMILY_PIXEL_PITCH_MAP[(csv_data[0]["Camera Family"], pix_dims.width)] 77 | clip.active_sensor_physical_dimensions = camdkit.model.Dimensions( 78 | width=pix_dims.width * pixel_pitch / 1000.0, 79 | height=pix_dims.height * pixel_pitch / 1000.0 80 | ) 81 | 82 | focal_lengths = set([m["Lens Focal Length"] for m in csv_data]) 83 | if len(focal_lengths) == 1: 84 | focal_length = float(focal_lengths.pop()) 85 | clip.lens_nominal_focal_length = focal_length 86 | 87 | clip.lens_focus_distance = tuple(float(m["Lens Focus Distance"]) for m in csv_data) 88 | 89 | clip.lens_t_number = tuple(t_number_from_linear_iris_value(int(m["Lens Linear Iris"])) for m in csv_data) 90 | 91 | # TODO: Entrance Pupil Position 92 | # TODO: Sensor physical dimensions 93 | 94 | return clip 95 | -------------------------------------------------------------------------------- /src/main/python/camdkit/bmd/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/python/camdkit/bmd/__init__.py -------------------------------------------------------------------------------- /src/main/python/camdkit/bmd/cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''BMD CLI tool''' 8 | 9 | import json 10 | import argparse 11 | import camdkit.bmd.reader 12 | 13 | def main(): 14 | parser = 
argparse.ArgumentParser(description="Convert the output of the ExtractMetadata sample \ 15 | tool from the Blackmagic RAW SDK to JSON according to the OSVP Camera Metadata Model.") 16 | parser.add_argument( 17 | 'metadata_path', 18 | type=str, 19 | help="Path to the metadata file" 20 | ) 21 | args = parser.parse_args() 22 | 23 | with open(args.metadata_path, "r", encoding="utf-8") as fp: 24 | clip = camdkit.bmd.reader.to_clip(fp) 25 | 26 | print(json.dumps(clip.to_json(), indent=2)) 27 | 28 | if __name__ == "__main__": 29 | main() 30 | -------------------------------------------------------------------------------- /src/main/python/camdkit/bmd/reader.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''Blackmagic camera reader''' 8 | 9 | import typing 10 | import re 11 | from fractions import Fraction 12 | 13 | import camdkit.model 14 | 15 | _CLIP_HEADING_RE = re.compile(r"^Clip Metadata$") 16 | _FRAME_HEADING_RE = re.compile(r"^Frame (\d+) Metadata$") 17 | _METADATA_LINE_RE = re.compile(r"^([^:]+): (.+)$") 18 | 19 | 20 | def to_clip(metadata_file: typing.IO) -> camdkit.model.Clip: 21 | """Read Blackmagic camera metadata into a `Clip`. 
22 | `metadata_raw_sdk`: Output of the ExtractMetadata sample tool from the Blackmagic RAW SDK 23 | """ 24 | 25 | clip_data = {} 26 | frame_data = [] 27 | cur_metadata = {} 28 | 29 | for line in metadata_file: 30 | 31 | m = _METADATA_LINE_RE.match(line) 32 | 33 | if m: 34 | cur_metadata[m.group(1)] = m.group(2) 35 | 36 | elif _CLIP_HEADING_RE.match(line): 37 | cur_metadata = clip_data 38 | 39 | elif _FRAME_HEADING_RE.match(line): 40 | frame_data.append({}) 41 | cur_metadata = frame_data[-1] 42 | 43 | 44 | if len(frame_data) == 0: 45 | raise "Camera data does not contain frame information" 46 | 47 | # read clip metadata 48 | clip = camdkit.model.Clip() 49 | 50 | # read frame metadata 51 | first_frame_data = frame_data[0] 52 | 53 | # clip metadata 54 | 55 | # active_sensor_physical_dimensions 56 | if clip_data.get("camera_type") == "Blackmagic URSA Mini Pro 12K": 57 | crop_size = clip_data.get("crop_size") 58 | if crop_size is not None: 59 | width, height, _ = clip_data.get("crop_size").split(",") 60 | clip.active_sensor_physical_dimensions = camdkit.model.Dimensions( 61 | width=round(int(width) * 270030 / 12288), 62 | height=round(int(height) * 14250 / 6480) 63 | ) 64 | 65 | # frame rate 66 | sensor_rate = first_frame_data.get("sensor_rate") 67 | if sensor_rate is not None: 68 | num, denom, _ = sensor_rate.split(",") 69 | clip.capture_frame_rate = Fraction(int(num), int(denom)) 70 | 71 | # duration 72 | if clip.capture_frame_rate is not None: 73 | clip.duration = clip.capture_frame_rate * len(frame_data) 74 | 75 | # anamorphic_squeeze 76 | anamorphic_enable = int(clip_data.get("anamorphic_enable", 0)) 77 | anamorphic = clip_data.get("anamorphic") 78 | if anamorphic_enable != 0 and anamorphic is not None: 79 | clip.anamorphic_squeeze = Fraction(anamorphic[:-1]) 80 | 81 | # ISO 82 | iso = first_frame_data.get("iso", None) 83 | if iso is not None: 84 | clip.iso = int(iso) 85 | 86 | clip.camera_make = clip_data.get("manufacturer") 87 | 88 | clip.camera_model = 
clip_data.get("camera_type") 89 | 90 | clip.camera_serial_number = clip_data.get("camera_id") 91 | 92 | clip.camera_firmware = clip_data.get("firmware_version") 93 | 94 | clip.lens_model = clip_data.get("lens_type") 95 | 96 | # clip.lens_serial_number is not supported 97 | # clip.active_sensor_physical_dimensions is not supported 98 | 99 | # white_balance 100 | # white_balance_kelvin = first_frame_data.get("white_balance_kelvin") 101 | # if white_balance_kelvin is not None: 102 | # clip.white_balance = int(white_balance_kelvin) 103 | 104 | # shutter angle 105 | shutter_value = first_frame_data.get("shutter_value") 106 | if shutter_value is not None: 107 | clip.shutter_angle = float(shutter_value[:-1]) 108 | 109 | # sampled metadata 110 | 111 | # focal_length 112 | focal_lengths = set([m["focal_length"][:-2] for m in frame_data]) 113 | if len(focal_lengths) == 1: 114 | focal_length = float(focal_lengths.pop()) 115 | clip.lens_nominal_focal_length = focal_length 116 | 117 | # focus_position 118 | clip.lens_focus_distance = tuple(float(m["distance"][:-2]) for m in frame_data) 119 | 120 | # entrance_pupil_offset not supported 121 | 122 | # t_number 123 | clip.lens_t_number = tuple(float(m["aperture"][1:]) for m in frame_data) 124 | 125 | return clip 126 | -------------------------------------------------------------------------------- /src/main/python/camdkit/camera_types.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | """Types for camera modeling""" 8 | 9 | from typing import Annotated 10 | 11 | from pydantic import Field, field_validator 12 | 13 | from camdkit.compatibility import (CompatibleBaseModel, 14 | NONBLANK_UTF8_MAX_1023_CHARS, 15 | UUID_URN, 16 | STRICTLY_POSITIVE_RATIONAL, 17 | STRICTLY_POSITIVE_INTEGER,) 18 | from camdkit.numeric_types import 
(MAX_INT_32, 19 | StrictlyPositiveInt, 20 | StrictlyPositiveRational, 21 | rationalize_strictly_and_positively) 22 | from camdkit.units import MILLIMETER, PIXEL, HERTZ, DEGREE 23 | from camdkit.string_types import NonBlankUTF8String, UUIDURN 24 | 25 | # Tempting as it might seem to make PhysicalDimensions and SenselDimensions subclasses 26 | # of a single generic Dimension[T] class, that doesn't work play well with the Field 27 | # annotations, unfortunately. Maybe someone smart will figure out how to make this idea 28 | # work, but for now it's a wish, not something for a to-do list. 29 | 30 | 31 | class PhysicalDimensions(CompatibleBaseModel): 32 | """Height and width of the active area of the camera sensor in millimeters 33 | """ 34 | height: Annotated[float, Field(ge=0.0, strict=True)] 35 | width: Annotated[float, Field(ge=0.0, strict=True)] 36 | 37 | class Config: 38 | json_schema_extra = {"units": MILLIMETER} 39 | 40 | def __init__(self, width: float, height: float) -> None: 41 | super(PhysicalDimensions, self).__init__(width=width, height=height) 42 | 43 | 44 | class SenselDimensions(CompatibleBaseModel): 45 | """Photosite resolution of the active area of the camera sensor in pixels""" 46 | height: Annotated[int, Field(ge=0, le=MAX_INT_32)] 47 | width: Annotated[int, Field(ge=0, le=MAX_INT_32)] 48 | 49 | class Config: 50 | json_schema_extra = {"units": PIXEL} 51 | 52 | def __init__(self, width: int, height: int) -> None: 53 | super(SenselDimensions, self).__init__(width=width, height=height) 54 | 55 | 56 | ShutterAngle = Annotated[float, Field(ge=0.0, le=360.0, strict=True)] 57 | 58 | 59 | class StaticCamera(CompatibleBaseModel): 60 | capture_frame_rate: Annotated[StrictlyPositiveRational | None, 61 | Field(alias="captureFrameRate", 62 | json_schema_extra={"units": HERTZ, 63 | "clip_property": "capture_frame_rate", 64 | "constraints": STRICTLY_POSITIVE_RATIONAL})] = None 65 | """Capture frame rate of the camera""" 66 | 67 | active_sensor_physical_dimensions: 
Annotated[PhysicalDimensions | None, 68 | Field(alias="activeSensorPhysicalDimensions", 69 | json_schema_extra={"clip_property": 'active_sensor_physical_dimensions', 70 | "constraints": "The height and width shall be each be real non-negative numbers."})] = None 71 | 72 | active_sensor_resolution: Annotated[SenselDimensions | None, 73 | Field(alias="activeSensorResolution", 74 | json_schema_extra={"clip_property": 'active_sensor_resolution', 75 | "constraints": """The height and width shall be each be an integer in the range 76 | [0..2,147,483,647]. 77 | """})] = None 78 | 79 | make: Annotated[NonBlankUTF8String | None, 80 | Field(json_schema_extra={"clip_property": "camera_make", 81 | "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None 82 | """ 83 | Non-blank string naming camera manufacturer 84 | """ 85 | 86 | model: Annotated[NonBlankUTF8String | None, 87 | Field(json_schema_extra={"clip_property": "camera_model", 88 | "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None 89 | """Non-blank string identifying camera model""" 90 | 91 | serial_number: Annotated[NonBlankUTF8String | None, 92 | Field(alias="serialNumber", 93 | json_schema_extra={"clip_property": "camera_serial_number", 94 | "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None 95 | """Non-blank string uniquely identifying the camera""" 96 | 97 | firmware_version: Annotated[NonBlankUTF8String | None, 98 | Field(alias="firmwareVersion", 99 | json_schema_extra={"clip_property": "camera_firmware", 100 | "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None 101 | """Non-blank string identifying camera firmware version""" 102 | 103 | label: Annotated[NonBlankUTF8String | None, 104 | Field(json_schema_extra={"clip_property": "camera_label", 105 | "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None 106 | """Non-blank string containing user-determined camera identifier""" 107 | 108 | anamorphic_squeeze: Annotated[StrictlyPositiveRational | None, 109 | Field(alias="anamorphicSqueeze", 110 | 
json_schema_extra={"clip_property": "anamorphic_squeeze", 111 | "constraints": STRICTLY_POSITIVE_RATIONAL})] = None 112 | """Nominal ratio of height to width of the image of an axis-aligned 113 | square captured by the camera sensor. It can be used to de-squeeze 114 | images but is not however an exact number over the entire captured 115 | area due to a lens' intrinsic analog nature. 116 | """ 117 | 118 | iso: Annotated[StrictlyPositiveInt | None, 119 | Field(alias="isoSpeed", 120 | json_schema_extra={"clip_property": "iso", 121 | "constraints": STRICTLY_POSITIVE_INTEGER})] = None 122 | """Arithmetic ISO scale as defined in ISO 12232""" 123 | 124 | fdl_link: Annotated[UUIDURN | None, 125 | Field(alias="fdlLink", 126 | json_schema_extra={"clip_property": "fdl_link", 127 | "constraints": UUID_URN})] = None 128 | """URN identifying the ASC Framing Decision List used by the camera.""" 129 | 130 | shutter_angle: Annotated[ShutterAngle | None, 131 | Field(alias="shutterAngle", 132 | json_schema_extra={"units": DEGREE, 133 | "clip_property": "shutter_angle", 134 | "constraints": "The parameter shall be a real number in the range (0..360]."})] = None 135 | """Shutter speed as a fraction of the capture frame rate. The shutter 136 | speed (in units of 1/s) is equal to the value of the parameter divided 137 | by 360 times the capture frame rate. 
138 | """ 139 | 140 | # noinspection PyNestedDecorators 141 | @field_validator("capture_frame_rate", "anamorphic_squeeze", mode="before") 142 | @classmethod 143 | def coerce_camera_type_to_strictly_positive_rational(cls, v): 144 | return rationalize_strictly_and_positively(v) 145 | -------------------------------------------------------------------------------- /src/main/python/camdkit/canon/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/python/camdkit/canon/__init__.py -------------------------------------------------------------------------------- /src/main/python/camdkit/canon/cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''Canon CLI tool''' 8 | 9 | import json 10 | import argparse 11 | import camdkit.canon.reader 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser(description="Convert Canon camera metadata to JSON according to the OSVP Camera Metadata Model.") 15 | parser.add_argument( 16 | 'static_csv_path', 17 | type=str, 18 | help="Path to CSV file containing static Canon camera metadata" 19 | ) 20 | parser.add_argument( 21 | 'frame_csv_path', 22 | type=str, 23 | help="Path to CSV file containing per-frame Canon camera metadata" 24 | ) 25 | 26 | args = parser.parse_args() 27 | 28 | with open(args.static_csv_path, "r", encoding="utf-8") as static_csv, \ 29 | open(args.frame_csv_path, "r", encoding="utf-8") as frame_csv: 30 | clip = camdkit.canon.reader.to_clip(static_csv, frame_csv) 31 | 32 | print(json.dumps(clip.to_json(), indent=2)) 33 | 34 | if __name__ == "__main__": 35 | main() 36 | -------------------------------------------------------------------------------- 
/src/main/python/camdkit/canon/reader.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''Canon camera reader''' 8 | 9 | import csv 10 | import typing 11 | import struct 12 | from fractions import Fraction 13 | 14 | import camdkit.model 15 | 16 | def _read_float32_as_hex(float32_hex: str) -> float: 17 | return struct.unpack('>f', bytes.fromhex(float32_hex))[0] 18 | 19 | def to_clip(static_csv: typing.IO, frames_csv: typing.IO) -> camdkit.model.Clip: 20 | """Read Canon camera metadata into a `Clip`. 21 | `static_csv`: Static camera metadata. 22 | `frames_csv`: Per-frame camera metadata. 23 | """ 24 | 25 | # read clip metadata 26 | clip_metadata = next(csv.DictReader(static_csv)) 27 | clip = camdkit.model.Clip() 28 | 29 | # read frame metadata 30 | frame_data = list(csv.DictReader(frames_csv)) 31 | first_frame_data = frame_data[0] 32 | 33 | # clip metadata 34 | 35 | # duration 36 | clip.duration = Fraction(int(clip_metadata["Duration"]), int(clip_metadata["Timescale"])) 37 | 38 | # anamorphic_squeeze 39 | lens_squeeze_factor = int(clip_metadata["LensSqueezeFactor"]) 40 | if lens_squeeze_factor == 0: 41 | clip.anamorphic_squeeze = 1 42 | elif lens_squeeze_factor == 1: 43 | clip.anamorphic_squeeze = 1.33 44 | elif lens_squeeze_factor == 2: 45 | clip.anamorphic_squeeze = 2 46 | elif lens_squeeze_factor == 3: 47 | clip.anamorphic_squeeze = 1.8 48 | 49 | # ISO 50 | if int(first_frame_data['PhotographicSensitivityMode']) == 1: 51 | clip.iso = Fraction(first_frame_data['PhotographicSensitivity']).numerator - 0x80000000 52 | 53 | # clip.active_sensor_physical_dimensions is not supported 54 | # clip.capture_frame_rate is no supported 55 | 56 | clip.camera_make = "Canon" 57 | 58 | # shutter angle 59 | clip.shutter_angle = 
float(Fraction(first_frame_data['ExposureTime'])) 60 | 61 | # sampled metadata 62 | 63 | # focal_length 64 | focal_lengths = set(Fraction(m["FocalLength"]) for m in frame_data) 65 | if len(focal_lengths) == 1: 66 | focal_length = float(focal_lengths.pop()) 67 | clip.lens_nominal_focal_length = focal_length 68 | 69 | # focus_position 70 | clip.lens_focus_distance = tuple(_read_float32_as_hex(m["FocusPosition"]) for m in frame_data) 71 | 72 | # entrance_pupil_offset not supported 73 | 74 | # t_number 75 | if int(first_frame_data['ApertureMode']) == 2: 76 | clip.lens_t_number = tuple(Fraction(m["ApertureNumber"]) for m in frame_data) 77 | elif int(first_frame_data['ApertureMode']) == 1: 78 | clip.lens_f_number = tuple(Fraction(m["ApertureNumber"]) for m in frame_data) 79 | 80 | return clip 81 | -------------------------------------------------------------------------------- /src/main/python/camdkit/framework.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from fractions import Fraction 4 | 5 | from camdkit.camera_types import PhysicalDimensions as ActiveSensorPhysicalDimensions 6 | from camdkit.camera_types import SenselDimensions as Dimensions 7 | from camdkit.lens_types import (FizEncoders, RawFizEncoders, 8 | ExposureFalloff, 9 | Distortion, DistortionOffset, 10 | ProjectionOffset) 11 | from camdkit.numeric_types import StrictlyPositiveRational 12 | from camdkit.timing_types import SynchronizationSource as SynchronizationSourceEnum 13 | from camdkit.timing_types import (PTPProfile, Timestamp, 14 | TimingMode, Timecode, 15 | SynchronizationOffsets, SynchronizationPTPPriorities, SynchronizationPTP, 16 | Synchronization) 17 | from camdkit.tracker_types import GlobalPosition 18 | from camdkit.transform_types import Vector3, Rotator3, Transform 19 | from camdkit.versioning_types import (OPENTRACKIO_PROTOCOL_NAME, 20 | OPENTRACKIO_PROTOCOL_VERSION, 21 | VersionedProtocol) 
-------------------------------------------------------------------------------- /src/main/python/camdkit/model.py: -------------------------------------------------------------------------------- 1 | from camdkit.clip import Clip 2 | 3 | from camdkit.camera_types import PhysicalDimensions as Dimensions 4 | from camdkit.lens_types import DistortionOffset as LensDistortionOffset 5 | from camdkit.lens_types import ExposureFalloff as LensExposureFalloff 6 | from camdkit.lens_types import FizEncoders as LensEncoders 7 | from camdkit.lens_types import ProjectionOffset as LensProjectionOffset 8 | from camdkit.lens_types import RawFizEncoders as LensRawEncoders 9 | from camdkit.timing_types import Synchronization 10 | from camdkit.timing_types import Timecode as TimingTimecode 11 | from camdkit.timing_types import Timestamp as TimingTimestamp 12 | from camdkit.timing_types import TimingMode as TimingModeEnum 13 | from camdkit.transform_types import Transform as Transforms 14 | from camdkit.versioning_types import OPENTRACKIO_PROTOCOL_NAME, OPENTRACKIO_PROTOCOL_VERSION -------------------------------------------------------------------------------- /src/main/python/camdkit/mosys/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/python/camdkit/mosys/__init__.py -------------------------------------------------------------------------------- /src/main/python/camdkit/mosys/cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''Mo-Sys CLI tool''' 8 | 9 | import json 10 | import argparse 11 | import camdkit.framework 12 | import camdkit.mosys.reader 13 | 14 | def main(): 15 | parser = 
def to_frame(data: bytes) -> tuple[bool, "Clip | None", int]:
    """Parse a single Mo-Sys F4 packet from the head of `data`.

    Returns a `(success, frame, packet_size)` triple. On failure the triple is
    `(False, None, 0)`.

    Bug fix: previously the function fell off the end and returned None when
    `initialise` failed, so the unconditional tuple-unpacking in `to_clip`
    raised TypeError at end-of-data (or on any trailing partial packet)
    instead of terminating the read loop cleanly.
    """
    parser = F4PacketParser()
    success = parser.initialise(data)
    if not success:
        # Explicit failure triple so callers can unpack unconditionally.
        return False, None, 0
    frame = parser.get_tracking_frame()
    # NOTE(review): reaches into the parser's private `_packet` for the size;
    # confirm whether a public accessor exists.
    return True, frame, parser._packet.size

def to_clip(filename: str, frames: int = -1) -> Clip:
    """Read Mo-Sys F4 data into a Clip.
    `filename`: Filename of the f4 file.
    `frames`: maximum packet count to read; -1 (default) reads to end-of-file.
    """
    clip = Clip()
    with open(filename, "rb") as f4_file:
        data = f4_file.read()
        offset = 0
        success = True
        count = 0
        # NOTE(review): `count <= frames` reads frames+1 packets for a
        # non-negative `frames`; harmless for `to_frames` (which needs at
        # least `frame_count` entries) but looks like an off-by-one — confirm
        # before tightening to `<`.
        while success and (frames == -1 or (count <= frames)):
            success, frame, packet_size = to_frame(data[offset:])
            if success:
                if offset == 0:
                    # the first packet establishes the clip object
                    clip = frame
                else:
                    clip.append(frame)
                offset += packet_size
                count += 1
    return clip

def to_frames(filename: str, frame_count: int) -> list[dict]:
    """Read Mo-Sys F4 data into a list of per-frame JSON-ready dicts.
    `filename`: Filename of the f4 file.
    `frame_count`: number of frames to extract.
    """
    clip = to_clip(filename, frame_count)
    return [clip.to_json(i) for i in range(frame_count)]
MAX_UINT_8: Final[int] = 2**8-1
MIN_UINT_32: Final[int] = 0
MAX_UINT_32: Final[int] = 2**32-1
MIN_INT_32: Final[int] = -2**31
MAX_INT_32: Final[int] = 2**31-1
MAX_UINT_48: Final[int] = 2**48-1

# Annotated aliases: Pydantic-validated numbers with range constraints.
SingleDigitInt = Annotated[int, Field(..., ge=0, le=9, strict=True)]

# NOTE(review): despite the name, the upper bound is MAX_INT_8 (127), not
# MAX_UINT_8 (255) — i.e. this is "non-negative int8"; confirm intent.
NonNegative8BitInt = Annotated[int, Field(..., ge=0, le=MAX_INT_8, strict=True)]

# NOTE(review): ge=0 admits zero despite the "StrictlyPositive" name —
# confirm whether ge=1 was intended.
StrictlyPositive8BitInt = Annotated[int, Field(..., ge=0, le=MAX_UINT_8, strict=True)]

NonNegativeInt = Annotated[int, Field(..., ge=0, le=MAX_UINT_32, strict=True)]

NonNegative48BitInt = Annotated[int, Field(..., ge=0, le=MAX_UINT_48, strict=True)]

StrictlyPositiveInt = Annotated[int, Field(..., ge=1, le=MAX_UINT_32, strict=True)]

NonNegativeFloat = Annotated[float, Field(..., ge=0, strict=True)]

StrictlyPositiveFloat = Annotated[float, Field(..., gt=0.0, strict=True)]

NormalizedFloat = Annotated[float, Field(..., ge=0.0, le=1.0, strict=True)]

UnityOrGreaterFloat = Annotated[float, Field(..., ge=1.0, strict=True)]

# init methods because by default Pydantic BaseModel doesn't let you use positional arguments,
# and camdkit 0.9 uses that style of object instantiation

# Signed 32-bit rational number (num/denom) with a strictly positive denominator.
class Rational(CompatibleBaseModel):
    num: int = Field(ge=MIN_INT_32, le=MAX_INT_32, strict=True)
    denom: int = Field(ge=1, le=MAX_UINT_32, strict=True)

    # Positional construction for camdkit 0.9 compatibility.
    def __init__(self, num: int, denom: int) -> None:
        super(Rational, self).__init__(num=num, denom=denom)

    # Not the full set of operations; just enough to pass classic unit tests
    @staticmethod
    def _canonicalize(other: Any):
        # Accepts Rational, StrictlyPositiveRational, or anything Fraction() accepts.
        if isinstance(other, Rational):
            return other
        elif isinstance(other, StrictlyPositiveRational):
            return Rational(num=other.num, denom=other.denom)
        frac = Fraction(other)  # may well throw TypeError
        return Rational(frac.numerator, frac.denominator)

    def __eq__(self, other: Any) -> bool:
        # NOTE(review): the truthiness guard means comparison against any
        # falsy value (None, 0, Fraction(0)) returns False even when the
        # values are numerically equal — confirm this is intended.
        if other:
            wrapped = Rational._canonicalize(other)
            return self.num == wrapped.num and self.denom == wrapped.denom
        return False

    # Field-wise product; the result is not reduced to lowest terms.
    def __mul__(self, other: Any):
        wrapped = Rational._canonicalize(other)
        return Rational(self.num * wrapped.num, self.denom * wrapped.denom)

    def __rtruediv__(self, other: Any):
        # NOTE(review): __rtruediv__(self, other) should compute other / self,
        # but this expression evaluates to (self.num * other.denom) /
        # (self.denom * other.num) = self / other; e.g. `1 / r` returns r
        # itself. Confirm against the classic unit tests before changing.
        wrapped = Rational._canonicalize(other)
        return Rational(self.num * wrapped.denom, self.denom * wrapped.num)


# As Rational, but the numerator is also constrained to be >= 1.
class StrictlyPositiveRational(CompatibleBaseModel):
    num: int = Field(ge=1, le=MAX_INT_32, strict=True)
    denom: int = Field(ge=1, le=MAX_UINT_32, strict=True)

    # Positional construction for camdkit 0.9 compatibility.
    def __init__(self, num: int, denom: int, ) -> None:
        super(StrictlyPositiveRational, self).__init__(num=num, denom=denom)

    # Not the full set of operations; just enough to pass classic unit tests
    @staticmethod
    def _canonicalize(other: Any):
        # Accepts StrictlyPositiveRational, Rational, or anything Fraction() accepts.
        if isinstance(other, StrictlyPositiveRational):
            return other
        elif isinstance(other, Rational):
            return StrictlyPositiveRational(num=other.num, denom=other.denom)
        frac = Fraction(other)  # may well throw TypeError
        return StrictlyPositiveRational(frac.numerator, frac.denominator)

    def __eq__(self, other: Any) -> bool:
        # NOTE(review): same falsy-comparand quirk as Rational.__eq__ above.
        if other:
            wrapped = StrictlyPositiveRational._canonicalize(other)
            return self.num == wrapped.num and self.denom == wrapped.denom
        return False

    # Field-wise product; the result is not reduced to lowest terms.
    def __mul__(self, other: Any):
        wrapped = StrictlyPositiveRational._canonicalize(other)
        return StrictlyPositiveRational(self.num * wrapped.num, self.denom * wrapped.denom)

    def __rtruediv__(self, other: Any):
        # NOTE(review): same apparent self/other inversion as
        # Rational.__rtruediv__; confirm against the classic unit tests.
        wrapped = StrictlyPositiveRational._canonicalize(other)
        return StrictlyPositiveRational(self.num * wrapped.denom, self.denom * wrapped.num)

# Coerce ints, rationals, or {"num": ..., "denom": ...} dicts into a
# StrictlyPositiveRational. Falsy inputs (None, 0) pass through unchanged —
# callers appear to rely on the None passthrough.
def rationalize_strictly_and_positively(x: Any) -> StrictlyPositiveRational:
    if x:
        if not isinstance(x, StrictlyPositiveRational):
            # NOTE(review): only the upper bound is checked here; negative
            # ints fall through to model validation (which raises), and bool
            # — being an int subclass — is accepted by this branch.
            if isinstance(x, int) and x <= MAX_INT_32:
                return StrictlyPositiveRational(x, 1)
            elif isinstance(x, numbers.Rational):
                return StrictlyPositiveRational(int(x.numerator), int(x.denominator))
            elif isinstance(x, dict) and len(x) == 2 and "num" in x and "denom" in x:
                return StrictlyPositiveRational(int(x["num"]), int(x["denom"]))
            raise ValueError(f"could not convert input of type {type(x)} to a StrictlyPositiveRational")
    return x
@dataclasses.dataclass
class CookeLensData:
    # Entrance pupil position and aperture value, both as raw integer codes
    # straight from the packed Cooke /i record.
    entrance_pupil_position: int
    aperture_value: int

def lens_data_from_binary_string(cooked_packed_bin_data: bytes) -> CookeLensData:
    """Unpack entrance-pupil position and aperture value from packed Cooke /i binary data."""
    # Entrance pupil position: a sign flag (bit 5 of byte 25) plus a 10-bit
    # magnitude split across the low nibble of byte 25 and low 6 bits of byte 26.
    negative = bool(cooked_packed_bin_data[25] & 0b00100000)
    magnitude = (((cooked_packed_bin_data[25] & 0b00001111) << 6)
                 + (cooked_packed_bin_data[26] & 0b00111111))
    # Aperture value: a 12-bit quantity split across the low 6 bits of bytes 5 and 6.
    aperture_value = (((cooked_packed_bin_data[5] & 0b00111111) << 6)
                      + (cooked_packed_bin_data[6] & 0b00111111))
    return CookeLensData(entrance_pupil_position=-magnitude if negative else magnitude,
                         aperture_value=aperture_value)

@dataclasses.dataclass
class CookeFixedData:
    # Firmware version as it appears in the fixed-data string.
    firmware_version_number: str

def fixed_data_from_string(cooked_fixed_data: str) -> CookeFixedData:
    """Extract the firmware version (characters 61..64) from a Cooke /i fixed-data string."""
    return CookeFixedData(firmware_version_number=cooked_fixed_data[61:65])
# Sensel pitch per sensor model, used to derive physical sensor dimensions.
# NOTE(review): the name says LENS_NAME but the lookup key below is the
# "Sensor Name" metadata column; values are presumably pitch in microns
# (pixel counts are multiplied by pitch/1000 to get the stored physical
# dimensions) — confirm units against RED documentation.
_LENS_NAME_PIXEL_PITCH_MAP = {
    "RAPTOR 8K VV": 5,
    "MONSTRO 8K VV": 5,
    "KOMODO 6K S35": 4.4,
    "HELIUM 8K S35": 3.65,
    "GEMINI 5K S35": 6,
    "DRAGON": 5
}

def to_clip(meta_3_file: typing.IO, meta_5_file: typing.IO) -> camdkit.model.Clip:
    """Read RED camera metadata into a `Clip`.
    `meta_3_file`: Static camera metadata. CSV file generated using REDline (`REDline --silent --i {camera_file_path} --printMeta 3`)
    `meta_5_file`: Per-frame camera metadata. CSV file generated using REDline (`REDline --silent --i {camera_file_path} --printMeta 5`)
    """

    # read clip metadata: the type-3 CSV carries a single row of static values
    clip_metadata = next(csv.DictReader(meta_3_file))
    clip = camdkit.model.Clip()

    clip.iso = int(clip_metadata['ISO'])

    clip.camera_make = "RED"

    clip.camera_model = clip_metadata["Camera Model"].strip()

    clip.camera_serial_number = clip_metadata["Camera PIN"].strip()

    clip.camera_firmware = clip_metadata["Firmware Version"].strip()

    clip.lens_make = clip_metadata["Lens Brand"].strip()

    clip.lens_model = clip_metadata["Lens Name"].strip()

    clip.lens_serial_number = clip_metadata["Lens Serial Number"].strip()

    # lens firmware version is embedded in the Cooke /i fixed-data string
    clip.lens_firmware = cooke.fixed_data_from_string(clip_metadata["Lens Cooke /i Static"]).firmware_version_number

    pix_dims = camdkit.model.Dimensions(
        width=int(clip_metadata["Frame Width"]),
        height=int(clip_metadata["Frame Height"])
    )
    # physical size = pixel count * pitch / 1000 (presumably micron -> mm; confirm)
    pixel_pitch = _LENS_NAME_PIXEL_PITCH_MAP[clip_metadata["Sensor Name"]]
    clip.active_sensor_physical_dimensions = camdkit.model.Dimensions(
        width=pix_dims.width * pixel_pitch / 1000.0,
        height=pix_dims.height * pixel_pitch / 1000.0
    )

    # read frame metadata
    csv_data = list(csv.DictReader(meta_5_file))

    n_frames = int(clip_metadata["Total Frames"])

    # sanity check: per-frame CSV must agree with the static header's frame count
    if len(csv_data) != n_frames:
        raise ValueError(f"Inconsistent frame count between header {n_frames} and frame {len(csv_data)} files")

    # the header's FPS value is approximate; snap it to a well-known exact rate
    clip.capture_frame_rate = utils.guess_fps(Fraction(clip_metadata["FPS"]))

    clip.duration = len(csv_data)/clip.capture_frame_rate

    clip.anamorphic_squeeze = Fraction(clip_metadata["Pixel Aspect Ratio"])

    clip.shutter_angle = float(clip_metadata["Shutter (deg)"])

    # nominal focal length is only recorded when constant over the whole clip
    focal_lengths = set([float(m["Focal Length"]) for m in csv_data])
    if len(focal_lengths) == 1:
        focal_length = float(focal_lengths.pop())
        clip.lens_nominal_focal_length = focal_length

    clip.lens_focus_distance = tuple(int(m["Focus Distance"]) for m in csv_data)

    # per-frame Cooke /i packed binary data arrives as "/"-separated hex byte values
    cooke_metadata = tuple(cooke.lens_data_from_binary_string(bytes(int(i, 16) for i in m["Cooke Metadata"].split("/"))) for m in csv_data)

    # raw entrance pupil position is scaled down by 1000 (presumably mm -> m; confirm)
    clip.lens_entrance_pupil_offset = tuple(float(m.entrance_pupil_position) / 1000.0 for m in cooke_metadata)

    # raw aperture value appears to be stored in hundredths of a T-number — confirm
    clip.lens_t_number = tuple(m.aperture_value / 100.0 for m in cooke_metadata)

    return clip
# Static (per-clip) tracking-device identification; every field is optional.
# Comments only here: class/field docstrings are left untouched because the
# compatibility layer may surface them as schema descriptions.
class StaticTracker(CompatibleBaseModel):
    make: Annotated[NonBlankUTF8String | None,
                    Field(json_schema_extra={"clip_property": "tracker_make",
                                             "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None
    """Non-blank string naming tracking device manufacturer"""

    model: Annotated[NonBlankUTF8String | None,
                     Field(json_schema_extra={"clip_property": "tracker_model",
                                              "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None
    """Non-blank string identifying tracking device model"""

    serial_number: Annotated[NonBlankUTF8String | None,
                             Field(alias="serialNumber",
                                   json_schema_extra={"clip_property": "tracker_serial_number",
                                                      "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None
    """Non-blank string uniquely identifying the tracking device"""

    firmware: Annotated[NonBlankUTF8String | None,
                        Field(alias="firmwareVersion",
                              json_schema_extra={"clip_property": "tracker_firmware",
                                                 "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None
    """Non-blank string identifying tracking device firmware version"""


# Regular (sampled) tracker metadata; tuple fields presumably carry one entry
# per sample/frame — confirm against the Clip sampling model.
# NOTE(review): the field docstrings below describe a single "Non-blank string"
# / "Boolean" although the values are tuples; the text may predate the
# conversion to sampled tuples.
class Tracker(CompatibleBaseModel):
    notes: Annotated[tuple[NonBlankUTF8String, ...] | None,
                     Field(json_schema_extra={"clip_property": "tracker_notes",
                                              "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None
    """Non-blank string containing notes about tracking system"""

    recording: Annotated[tuple[bool, ...] | None,
                         Field(json_schema_extra={"clip_property": "tracker_recording",
                                                  "constraints": BOOLEAN})] = None
    """Boolean indicating whether tracking system is recording data"""

    slate: Annotated[tuple[NonBlankUTF8String, ...] | None,
                     Field(json_schema_extra={"clip_property": "tracker_slate",
                                              "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None
    """Non-blank string describing the recording slate"""

    status: Annotated[tuple[NonBlankUTF8String, ...] | None,
                      Field(json_schema_extra={"clip_property": "tracker_status",
                                               "constraints": NONBLANK_UTF8_MAX_1023_CHARS})] = None
    """Non-blank string describing status of tracking system"""
# 3-D vector; axis semantics and units come from the annotation of the field
# that uses it (e.g. Transform.translation is annotated with METER).
class Vector3(CompatibleBaseModel):
    x: Annotated[float | None, Field()] = None
    y: Annotated[float | None, Field()] = None
    z: Annotated[float | None, Field()] = None

    # class Config:
    #     json_schema_extra = {"units": METER}

    # Positional construction for camdkit 0.9-style instantiation
    # (Pydantic models are keyword-only by default).
    def __init__(self, x: float, y: float, z: float):
        super(Vector3, self).__init__(x=x, y=y, z=z)


# Pan/tilt/roll orientation; units come from the using field's annotation
# (Transform.rotation is annotated with DEGREE).
class Rotator3(CompatibleBaseModel):
    pan: Annotated[float | None, Field()] = None
    tilt: Annotated[float | None, Field()] = None
    roll: Annotated[float | None, Field()] = None

    # class Config:
    #     json_schema_extra = {"units": DEGREE}

    # Positional construction for camdkit 0.9-style instantiation.
    def __init__(self, pan: float, tilt: float, roll: float):
        super(Rotator3, self).__init__(pan=pan, tilt=tilt, roll=roll)


# A single spatial transform: required translation (meters) and rotation
# (degrees), plus optional per-axis scale and an optional identifying string.
class Transform(CompatibleBaseModel):
    translation: Annotated[Vector3, Field(json_schema_extra={"units": METER})]
    rotation: Annotated[Rotator3, Field(json_schema_extra={"units": DEGREE})]
    scale: Annotated[Vector3 | None, Field()] = None
    id: Annotated[NonBlankUTF8String | None, Field()] = None
# Exact rates recognized for common NTSC-style fractional frame rates.
_WELL_KNOWN_FRACTIONAL_FPS = set([Fraction(24000, 1001), Fraction(30000, 1001),
                                  Fraction(60000, 1001), Fraction(120000, 1001)])
# Relative tolerance when snapping an approximate rate to the nearest integer rate.
_FPS_THRESHOLD = 0.01

def guess_fps(fps: numbers.Real) -> Fraction:
    """Heuristically determine an exact fps value from an approximate one.

    Integral and integral-rational inputs are returned exactly. Other real
    inputs are first matched against well-known fractional rates (so that
    e.g. 23.976 -> 24000/1001), then snapped to the nearest integer rate
    when within _FPS_THRESHOLD (1%) of it.

    Raises ValueError for None, non-positive, or unrecognizable rates and
    TypeError for non-real input.

    Bug fix: the original checked the integer snap first using int()
    truncation, which was asymmetric (25.1 snapped to 25 but 24.9 raised)
    and, worse, mapped 119.88 to Fraction(119) instead of 120000/1001.
    """
    if fps is None:
        raise ValueError

    if isinstance(fps, numbers.Integral):
        return Fraction(fps)

    if not isinstance(fps, numbers.Real):
        raise TypeError

    if isinstance(fps, numbers.Rational) and fps.denominator == 1:
        # Normalize so the return type is consistently Fraction.
        return Fraction(fps)

    approx_fps = float(fps)
    if approx_fps <= 0.0:
        # Previously a zero value reached the relative-error division and
        # raised ZeroDivisionError; fail with the documented exception type.
        raise ValueError("FPS must be positive")

    # Well-known fractional rates take precedence over integer snapping so
    # that 23.976 maps to 24000/1001 rather than 24, and 119.88 to
    # 120000/1001 rather than 119.
    for wkfps in _WELL_KNOWN_FRACTIONAL_FPS:
        if round(float(wkfps), 2) == round(approx_fps, 2):
            return wkfps

    # Snap to the nearest integer rate within the relative threshold.
    nearest = round(approx_fps)
    if nearest > 0 and abs(nearest - approx_fps) / approx_fps < _FPS_THRESHOLD:
        return Fraction(nearest)

    raise ValueError("Not a valid FPS")
def find_value(doc: ET.ElementTree, item_name: str) -> typing.Optional[str]:
    """Return the `value` attribute of the nrt:Item named `item_name`, or None if absent."""
    item = doc.find(f".//nrt:Item[@name='{item_name}']", namespaces=NS_PREFIXES)
    if item is None:
        return None
    # Element.get already yields None for a missing attribute.
    return item.get("value")

def get_attribute_value(element: ET.Element, attr_name: str) -> typing.Optional[str]:
    """Return the whitespace-stripped value of `attr_name` on `element`.

    None is returned when the element or attribute name is missing, the
    attribute is absent, or its raw value is the empty string.
    """
    if element is None or attr_name is None:
        return None
    raw = element.get(attr_name)
    if raw is None or raw == "":
        return None
    return raw.strip()
def find_lens_info(doc: ET.ElementTree) -> typing.Tuple[str]:
  """Return (lens make, lens model, lens serial number); entries may be None."""
  elem = doc.find(".//nrt:Lens" , namespaces=NS_PREFIXES)

  # NOTE(review): the make is read from the "software" attribute, which looks
  # suspicious -- confirm against a sample Venice XML file
  lens_make = get_attribute_value(elem, "software")
  lens_model = get_attribute_value(elem, "modelName")

  lens_sn = find_value(doc, "LensAttributes")

  return (lens_make, lens_model, lens_sn)


def find_fps(doc: ET.ElementTree) -> typing.Optional[Fraction]:
  """Return the capture frame rate, or None if absent or malformed.

  The "captureFps" attribute is expected to be a decimal number followed by a
  single letter (presumably a scan-mode suffix, e.g. "23.98p" -- TODO confirm).
  """
  elem = doc.find(".//nrt:VideoFrame" , namespaces=NS_PREFIXES)

  if elem is None:
    return None

  attr = elem.get("captureFps")

  if attr is None:
    return None

  fps_match = re.fullmatch("([0-9.]+)[a-zA-Z]", attr)

  if fps_match is None:
    return None

  return Fraction(fps_match.group(1))


def find_duration(doc: ET.ElementTree) -> typing.Optional[int]:
  """Return the value of the Duration item as an integer, or None if absent
  or malformed."""
  try:
    elem = doc.find(".//nrt:Duration" , namespaces=NS_PREFIXES)

    if elem is None:
      return None

    attr = elem.get("value")

    if attr is None:
      return None

    return int(attr)

  # ValueError added: a malformed (non-integer) value now degrades to None
  # instead of propagating out of the reader
  except (TypeError, ValueError):
    return None


def find_px_dims(doc: ET.ElementTree) -> typing.Optional[camdkit.model.Dimensions]:
  """Return the active picture size in pixels, or None if absent or malformed."""
  try:
    elem = doc.find(".//nrt:VideoLayout" , namespaces=NS_PREFIXES)

    if elem is None:
      return None

    # "numOfVerticalLine" is the line (row) count, used as the height
    height_px = int(elem.get("numOfVerticalLine"))

    # "pixel" is the per-line sample count, used as the width
    width_px = int(elem.get("pixel"))

    return camdkit.model.Dimensions(height=height_px, width=width_px)

  # TypeError: attribute missing (int(None)); ValueError: non-numeric text
  except (TypeError, ValueError):
    return None
120 | 121 | m = re.fullmatch("T ([0-9]+)(?: ([0-9]/10))?", frac_stop_str) 122 | 123 | if m is None: 124 | return None 125 | 126 | aperture_value = int(m.group(1)) 127 | 128 | if m.group(2) is not None: 129 | aperture_value += Fraction(m.group(2)) 130 | 131 | return 2**(float(aperture_value) / 2) 132 | 133 | def int_or_none(value: typing.Optional[str]) -> typing.Optional[int]: 134 | return int(value) if value is not None else None 135 | 136 | def to_clip(static_file: typing.IO, dynamic_file: typing.IO) -> camdkit.model.Clip: 137 | """Read Sony Venice camera metadata into a `Clip`. 138 | `static_file`: Static camera metadata. XML file. 139 | `dynamic_file`: Per-frame camera metadata. CSV file 140 | """ 141 | 142 | # read clip metadata 143 | clip = camdkit.model.Clip() 144 | 145 | clip_metadata = ET.parse(static_file) 146 | 147 | clip.iso = int_or_none(find_value(clip_metadata, "ISOSensitivity")) 148 | 149 | clip.lens_serial_number = find_value(clip_metadata, "LensAttributes") 150 | 151 | clip.camera_make, clip.camera_model, clip.camera_serial_number, clip.camera_firmware = find_camera_info(clip_metadata) 152 | 153 | clip.lens_make, clip.lens_model, clip.lens_serial_number = find_lens_info(clip_metadata) 154 | 155 | # lens_firmware not supported 156 | 157 | shutter_angle = find_value(clip_metadata, "ShutterSpeedAngle") 158 | clip.shutter_angle = float(shutter_angle) / 100.0 159 | 160 | pixel_aspect_ratio = find_value(clip_metadata, "PixelAspectRatio") 161 | if pixel_aspect_ratio is not None: 162 | m = re.fullmatch("([0-9]+):([0-9]+)", pixel_aspect_ratio) 163 | if m is not None: 164 | clip.anamorphic_squeeze = Fraction(int(m.group(1)), int(m.group(2))) 165 | 166 | clip_fps = find_fps(clip_metadata) 167 | 168 | if clip_fps is None: 169 | raise ValueError("No valid capture fps found") 170 | 171 | clip.capture_frame_rate = utils.guess_fps(clip_fps) 172 | 173 | n_frames = find_duration(clip_metadata) 174 | 175 | if n_frames is None: 176 | raise ValueError("No valid 
duration found") 177 | 178 | pixel_pitch = 22800 / 3840 # page 5 of "VENICE v6 Ops.pdf" 179 | pix_dims = find_px_dims(clip_metadata) 180 | clip.active_sensor_physical_dimensions = camdkit.model.Dimensions( 181 | width=pix_dims.width * pixel_pitch / 1000.0, 182 | height=pix_dims.height * pixel_pitch / 1000.0 183 | ) 184 | 185 | # read frame metadata 186 | csv_data = list(csv.DictReader(dynamic_file)) 187 | 188 | if len(csv_data) != n_frames: 189 | raise ValueError(f"Inconsistent frame count between header {n_frames} and frame {len(csv_data)} files") 190 | 191 | clip.duration = len(csv_data)/clip_fps 192 | 193 | focal_lengths = set([float(m["Focal Length (mm)"]) for m in csv_data]) 194 | if len(focal_lengths) == 1: 195 | focal_length = float(focal_lengths.pop()) 196 | clip.lens_nominal_focal_length = focal_length 197 | 198 | clip.lens_focus_distance = tuple(float(m["Focus Distance (ft)"]) * 12.0 * 25.4 / 1000.0 for m in csv_data) 199 | 200 | # TODO: clip.entrance_pupil_offset 201 | 202 | clip.lens_t_number = tuple(t_number_from_frac_stop(m["Aperture"]) for m in csv_data) 203 | 204 | return clip 205 | -------------------------------------------------------------------------------- /src/main/python/camdkit/versioning_types.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | """Types for versioning protocols""" 8 | 9 | from typing import Annotated 10 | 11 | from camdkit.compatibility import CompatibleBaseModel 12 | from camdkit.numeric_types import SingleDigitInt 13 | from camdkit.string_types import NonBlankUTF8String 14 | 15 | from pydantic import Field 16 | 17 | __all__ = ['OPENTRACKIO_PROTOCOL_NAME', 'OPENTRACKIO_PROTOCOL_VERSION', 'VersionedProtocol'] 18 | 19 | OPENTRACKIO_PROTOCOL_NAME = "OpenTrackIO" 20 | OPENTRACKIO_PROTOCOL_VERSION = (1, 0, 0) 21 | 22 | 
# A single protocol-version digit, 0..9 inclusive
VersionComponent = Annotated[int, Field(ge=0, le=9)]

class VersionedProtocol(CompatibleBaseModel):
    """A protocol identified by a name and an exactly-three-component version."""
    name: NonBlankUTF8String
    version: Annotated[tuple[VersionComponent, ...], Field(min_length=3, max_length=3)]

    def __init__(self, name: NonBlankUTF8String, version: tuple[SingleDigitInt, SingleDigitInt, SingleDigitInt]):
        super(VersionedProtocol, self).__init__(name=name, version=version)
        if name != OPENTRACKIO_PROTOCOL_NAME:
            # BUG FIX: the second literal was missing its f prefix, so the
            # message printed "{OPENTRACKIO_PROTOCOL_NAME}" verbatim
            raise ValueError("The only currently accepted name for a versioned protocol"
                             f" is {OPENTRACKIO_PROTOCOL_NAME}")
vertical-align: middle; 65 | } 66 | .inner { 67 | padding:10px; 68 | max-width: 800px; 69 | margin:auto; 70 | } 71 | .header { 72 | 73 | background-color:#101010; 74 | color:#FFFFFF; 75 | text-align:center; 76 | position:relative; 77 | width:100%; 78 | } 79 | .headerInner { 80 | padding:40px; 81 | } 82 | .header::after { 83 | content:''; 84 | position:absolute; 85 | left:0; 86 | bottom:0; 87 | width:100%; 88 | height:10px; 89 | background-size:cover; 90 | background-position:center center; 91 | background-image:url(../img/download.svg); 92 | } 93 | .footer{ 94 | background-color:#101010; 95 | color:#FFFFFF; 96 | text-align:center; 97 | } 98 | .footerLogo{ 99 | width: 320px; 100 | } 101 | -------------------------------------------------------------------------------- /src/main/resources/img/RISLogoFinalwhiteColor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/img/RISLogoFinalwhiteColor.png -------------------------------------------------------------------------------- /src/main/resources/img/RTP_Transport.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |






RTP
RTP...
UDP
UDP


CBOR
CBOR...
JSON
JSON
Text is not SVG - cannot display
-------------------------------------------------------------------------------- /src/main/resources/img/download.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/main/resources/img/logo_white.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /src/main/resources/res/OpenCV_to_OpenTrackIO.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/OpenCV_to_OpenTrackIO.pdf -------------------------------------------------------------------------------- /src/main/resources/res/OpenLensIO_v1-0-0.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/OpenLensIO_v1-0-0.pdf -------------------------------------------------------------------------------- /src/main/resources/res/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/android-chrome-192x192.png -------------------------------------------------------------------------------- /src/main/resources/res/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/android-chrome-512x512.png 
-------------------------------------------------------------------------------- /src/main/resources/res/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/apple-touch-icon.png -------------------------------------------------------------------------------- /src/main/resources/res/browserconfig.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | #da532c 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /src/main/resources/res/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/favicon-16x16.png -------------------------------------------------------------------------------- /src/main/resources/res/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/favicon-32x32.png -------------------------------------------------------------------------------- /src/main/resources/res/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/favicon.ico -------------------------------------------------------------------------------- /src/main/resources/res/mstile-150x150.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/main/resources/res/mstile-150x150.png -------------------------------------------------------------------------------- /src/main/resources/res/safari-pinned-tab.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 7 | 8 | Created by potrace 1.14, written by Peter Selinger 2001-2017 9 | 10 | 12 | 28 | 44 | 57 | 70 | 83 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /src/main/resources/res/site.webmanifest: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "short_name": "", 4 | "icons": [ 5 | { 6 | "src": "/android-chrome-192x192.png", 7 | "sizes": "192x192", 8 | "type": "image/png" 9 | }, 10 | { 11 | "src": "/android-chrome-512x512.png", 12 | "sizes": "512x512", 13 | "type": "image/png" 14 | } 15 | ], 16 | "theme_color": "#ffffff", 17 | "background_color": "#ffffff", 18 | "display": "standalone" 19 | } 20 | -------------------------------------------------------------------------------- /src/test/cpp/opentrackio-parser/.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.toptal.com/developers/gitignore/api/c++,cmake 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=c++,cmake 3 | 4 | ### C++ ### 5 | # Prerequisites 6 | *.d 7 | 8 | # Compiled Object files 9 | *.slo 10 | *.lo 11 | *.o 12 | *.obj 13 | 14 | # Precompiled Headers 15 | *.gch 16 | *.pch 17 | 18 | # Compiled Dynamic libraries 19 | *.so 20 | *.dylib 21 | *.dll 22 | 23 | # Fortran module files 24 | *.mod 25 | *.smod 26 | 27 | # Compiled Static libraries 28 | *.lai 29 | *.la 30 | *.a 31 | *.lib 32 | 33 | # Executables 34 | *.exe 35 | *.out 36 | *.app 37 | 38 | ### CMake ### 39 | CMakeLists.txt.user 40 | CMakeUserPresets.json 41 | CMakeCache.txt 42 | CMakeFiles 43 | 
CMakeScripts 44 | Testing 45 | Makefile 46 | cmake_install.cmake 47 | install_manifest.txt 48 | compile_commands.json 49 | CTestTestfile.cmake 50 | _deps 51 | 52 | # OpentrackIO files. 53 | opentrackio-json/ 54 | 55 | # Python 56 | venv 57 | 58 | ### CMake Patch ### 59 | # External projects 60 | *-prefix/ 61 | 62 | # End of https://www.toptal.com/developers/gitignore/api/c++,cmake 63 | -------------------------------------------------------------------------------- /src/test/cpp/opentrackio-parser/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.30) 2 | set(CMAKE_CXX_STANDARD 20) 3 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 4 | set(CMAKE_CXX_EXTENSIONS OFF) 5 | 6 | project(opentrackio-parser) 7 | 8 | if(WIN32) 9 | set(CMAKE_PREFIX_PATH "${CMAKE_CURRENT_BINARY_DIR}/generators/" ${CMAKE_PREFIX_PATH}) 10 | else() 11 | set(CMAKE_PREFIX_PATH "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/generators/" ${CMAKE_PREFIX_PATH}) 12 | endif() 13 | 14 | find_package(opentrackio-cpp CONFIG REQUIRED) 15 | find_package(nlohmann_json REQUIRED) 16 | 17 | add_executable( 18 | ${PROJECT_NAME} 19 | src/main.cpp 20 | src/opentrackio-lib/OpenTrackIOParser.cpp) 21 | 22 | target_include_directories( 23 | ${PROJECT_NAME} 24 | PRIVATE 25 | src/opentrackio-lib 26 | src/argparse) 27 | 28 | target_link_libraries( 29 | ${PROJECT_NAME} 30 | PRIVATE 31 | opentrackio-cpp::opentrackio-cpp 32 | nlohmann_json::nlohmann_json) 33 | -------------------------------------------------------------------------------- /src/test/cpp/opentrackio-parser/README.md: -------------------------------------------------------------------------------- 1 | # OpenTrackIO Parser C++ Example 2 | 3 | ## Contents 4 | 5 | - [Overview](#overview) 6 | - [Required Software](#required-software) 7 | - [Setup Instructions](#setup-instructions) 8 | - [Windows](#windows) 9 | - [Linux/MacOS](#linuxmacos) 10 | 11 | ## Overview 12 | 13 | This C++ example 
implements a simple OpenTrackIO JSON sample parser. The parser includes methods for accessing values 14 | and scaling them to user-preferred units. 15 | 16 | ## Required Software 17 | - C++20 18 | - Python 3.8 or higher 19 | - CMake 20 | 21 | ## Setup Instructions 22 | 23 | ### Windows 24 | 25 | 1. Download OpenTrackIO schema and a sample examples. 26 | ``` 27 | get-opentrackio-json.bat 28 | ``` 29 | 30 | 2. Set up Python virtual environment 31 | ``` 32 | python -m venv venv 33 | ``` 34 | 35 | - Command Prompt: 36 | ``` 37 | .\venv\Scripts\activate.bat 38 | ``` 39 | 40 | - PowerShell: 41 | ``` 42 | .\venv\Scripts\Activate.ps1 43 | ``` 44 | 45 | 3. Set up Conan 46 | ``` 47 | pip install conan 48 | ``` 49 | 50 | Set up Conan profile (first-time only) 51 | ``` 52 | conan profile detect 53 | ``` 54 | 55 | Install Conan dependencies 56 | ``` 57 | conan install . --build=missing -s compiler.cppstd=20 -s build_type=Release 58 | conan install . --build=missing -s compiler.cppstd=20 -s build_type=Debug 59 | ``` 60 | 61 | 4. Generate and build CMake 62 | ``` 63 | cmake -S . -B ./build 64 | cmake --build ./build --target opentrackio-parser --config Release 65 | ``` 66 | 67 | 5. Run OpenTrackIO parser 68 | - Command Prompt: 69 | ``` 70 | .\build\Release\opentrackio-parser.exe -f %CD%\opentrackio-json\complete_static_example.json -s %CD%\opentrackio-json\schema.json 71 | ``` 72 | 73 | - PowerShell: 74 | ``` 75 | .\build\Release\opentrackio-parser.exe -f $PWD\opentrackio-json\complete_static_example.json -s $PWD\opentrackio-json\schema.json 76 | ``` 77 | 78 | ### Linux/MacOS 79 | 80 | 1. Download JSON files 81 | ``` 82 | sudo bash ./get-opentrackio-json.sh 83 | ``` 84 | 85 | 2. Set up Python virtual environment 86 | ``` 87 | python3 -m venv venv 88 | source ./venv/bin/activate 89 | ``` 90 | 91 | 3. 
class ExampleRecipe(ConanFile):
    """Conan recipe that fetches the dependencies of the opentrackio-parser example."""
    # Standard Conan settings so binary packages match the host toolchain.
    settings = "os", "compiler", "build_type", "arch"
    # Emit CMake find-package config files and a toolchain file for the build.
    generators = "CMakeDeps", "CMakeToolchain"

    def requirements(self):
        # opentrackio-cpp transitively brings in nlohmann_json used by the parser.
        self.requires("opentrackio-cpp/1.0.1")

    def layout(self):
        # Conventional CMake source/build directory layout.
        cmake_layout(self)
8 | powershell -Command "& {Invoke-WebRequest -Uri 'https://www.opentrackio.org/schema.json' -OutFile 'opentrackio-json\schema.json'}" 9 | echo Downloaded schema.json 10 | 11 | REM Grab example file. 12 | powershell -Command "& {Invoke-WebRequest -Uri 'https://www.opentrackio.org/examples/complete_static_example.json' -OutFile 'opentrackio-json\complete_static_example.json'}" 13 | echo Downloaded complete_static_example.json 14 | -------------------------------------------------------------------------------- /src/test/cpp/opentrackio-parser/get-opentrackio-json.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Downloading OpenTrackIO JSON files..." 4 | 5 | # Create a directory for the json files if it doesn't exist. 6 | if [ ! -d "opentrackio-json" ]; then 7 | mkdir opentrackio-json 8 | fi 9 | 10 | # Grab schema. 11 | if command -v curl > /dev/null; then 12 | curl -L "https://www.opentrackio.org/schema.json" -o "opentrackio-json/schema.json" 13 | elif command -v wget > /dev/null; then 14 | wget -O "opentrackio-json/schema.json" "https://www.opentrackio.org/schema.json" 15 | else 16 | echo "Error: Neither curl nor wget is installed. Please install one of them and try again." 17 | exit 1 18 | fi 19 | echo "Downloaded schema.json" 20 | 21 | # Grab example file. 22 | if command -v curl > /dev/null; then 23 | curl -L "https://www.opentrackio.org/examples/complete_static_example.json" -o "opentrackio-json/complete_static_example.json" 24 | elif command -v wget > /dev/null; then 25 | wget -O "opentrackio-json/complete_static_example.json" "https://www.opentrackio.org/examples/complete_static_example.json" 26 | fi 27 | echo "Downloaded complete_static_example.json" 28 | 29 | echo "Download complete! Files saved to opentrackio_json folder." 
30 | -------------------------------------------------------------------------------- /src/test/cpp/opentrackio-parser/src/main.cpp: -------------------------------------------------------------------------------- 1 | // main.cpp 2 | // 3 | // Reference code for decoding opentrackIO messages 4 | // Copyright Contributors to the SMTPE RIS OSVP Metadata Project 5 | // 6 | // License: this code is open-source under the FreeBSD License 7 | // 8 | // nlohmann JSON library licensed under the MIT License, Copyright (c) 2013-2022 Niels Lohmann 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "argparse/argparse.hpp" 16 | #include "opentrackio-lib/OpenTrackIOParser.h" 17 | 18 | 19 | int main(int argc, char* argv[]) 20 | { 21 | argparse::ArgumentParser parser("OpenTrackIOProtocol parser"); 22 | 23 | parser.add_argument("-f", "--file") 24 | .help("OpenTrackIO JSON file to parse.") 25 | .default_value(std::string()); 26 | 27 | parser.add_argument("-s", "--schema") 28 | .help("The OpenTrackIO schema JSON file.") 29 | .default_value(std::string()); 30 | 31 | parser.add_argument("-v", "--verbose") 32 | .help("Verbose logging of the parsing process.") 33 | .default_value(false) 34 | .implicit_value(true); 35 | 36 | try 37 | { 38 | parser.parse_args(argc, argv); 39 | } 40 | catch (const std::runtime_error& err) 41 | { 42 | std::cerr << err.what() << std::endl; 43 | std::cerr << parser; 44 | return 1; 45 | } 46 | 47 | std::string sampleText; 48 | std::string schemaText; 49 | bool verbose = parser.get("--verbose"); 50 | 51 | if (parser.is_used("--schema")) 52 | { 53 | if (const auto schemaPath = parser.get("--schema"); 54 | std::filesystem::exists(schemaPath)) 55 | { 56 | if (std::ifstream file(schemaPath); 57 | file.is_open()) 58 | { 59 | std::cout << "Reading OpenTrackIO schema file: " << schemaPath << std::endl; 60 | schemaText.assign( 61 | (std::istreambuf_iterator(file)), 62 | std::istreambuf_iterator()); 63 | file.close(); 64 | } 65 | 66 | if 
(!schemaText.empty()) 67 | { 68 | std::cout << "Successfully read schema." << std::endl; 69 | } 70 | } 71 | } 72 | 73 | if (parser.is_used("--file")) 74 | { 75 | if (auto filepath = parser.get("--file"); 76 | std::filesystem::exists(filepath)) 77 | { 78 | if (std::ifstream file(filepath); 79 | file.is_open()) 80 | { 81 | std::cout << "Reading OpenTrackIO sample file: " << filepath << std::endl; 82 | sampleText.assign( 83 | (std::istreambuf_iterator(file)), 84 | std::istreambuf_iterator()); 85 | 86 | file.close(); 87 | } 88 | } 89 | } 90 | 91 | OpenTrackIOSampleParser sample(sampleText, schemaText, verbose); 92 | if (!sample.isValid()) 93 | { 94 | std::cerr << "OpenTrackIO sample is invalid." << std::endl; 95 | return 1; 96 | } 97 | 98 | sample.setTranslationUnits(opentrackio_parser::PositionUnits::Millimeters); 99 | sample.setSampleTimeFormat(opentrackio_parser::SampleTimeFormat::Seconds); 100 | sample.setFocusDistanceUnits(opentrackio_parser::PositionUnits::Centimeters); 101 | sample.setRotationUnits(opentrackio_parser::RotationUnits::Degrees); 102 | std::cout << std::endl; 103 | 104 | std::string protocol = sample.getProtocol(); 105 | std::cout << "Detected protocol: " << protocol << std::endl; 106 | 107 | std::string slate = sample.getSlate(); 108 | std::cout << "On slate: " << slate << std::endl; 109 | 110 | std::string timecode = sample.getTimecode(); 111 | std::cout << "Current camera timecode: " << timecode << std::endl; 112 | 113 | double sampleRate = sample.getSampleRate(); 114 | std::cout << "At a camera frame rate of: " << std::fixed << std::setprecision(5) << sampleRate << std::endl; 115 | std::cout << std::endl; 116 | 117 | std::cout << "Sample time PTP time is: " << sample.getSampleTime() << " sec" << std::endl; 118 | sample.setSampleTimeFormat(opentrackio_parser::SampleTimeFormat::Seconds); 119 | std::cout << "Sample time PTP as a string: " << sample.getSampleTime() << std::endl; 120 | 
sample.setSampleTimeFormat(opentrackio_parser::SampleTimeFormat::Timecode); 121 | std::cout << "Sample time PTP as timecode: " << sample.getSampleTime() << std::endl; 122 | std::cout << "Sample time PTP elements: " << sample.getSampleTime("yy") << " " 123 | << sample.getSampleTime("dd") << " " 124 | << sample.getSampleTime("hh") << " " 125 | << sample.getSampleTime("mm") << " " 126 | << sample.getSampleTime("ss") << " " 127 | << sample.getSampleTime("ns") << std::endl; 128 | 129 | std::cout << std::endl; 130 | 131 | if (std::string serialNumber = sample.getTrackingDeviceSerialNumber(); 132 | !serialNumber.empty()) 133 | { 134 | std::cout << "Tracking device serial number: " << serialNumber << std::endl; 135 | } 136 | else 137 | { 138 | std::cout << "Unknown tracking device, wait for static sample to come in..." << std::endl; 139 | } 140 | 141 | auto [x, y, z] = sample.getCameraTransform(); 142 | std::cout << "Camera position is: (" << x << "," << y << "," << z << 143 | ") cm" << std::endl; 144 | 145 | auto rotation = sample.getRotation(); 146 | std::cout << "Camera rotation is: (" << rotation.pan << "," << rotation.tilt << "," << rotation.roll << ") deg" << 147 | std::endl; 148 | 149 | sample.setRotationUnits(opentrackio_parser::RotationUnits::Radians); 150 | rotation = sample.getRotation(); 151 | std::cout << "Camera rotation is: (" << std::fixed << std::setprecision(5) 152 | << rotation.pan << "," << rotation.tilt << "," << rotation.roll << ") radians" << std::endl; 153 | std::cout << std::endl; 154 | 155 | double fl = sample.getPineHoleFocalLength(); 156 | if (double height = sample.getSensoryResolutionHeight(); 157 | height != 0) 158 | { 159 | double width = sample.getSensorResolutionWidth(); 160 | std::string units = sample.getSensorDimensionsUnits(); 161 | std::cout << "Active camera sensor height: " << height << ", width: " << width << " " << units << std::endl; 162 | } 163 | else 164 | { 165 | std::cout << "Unknown camera sensor, wait for static sample to 
come in..." << std::endl; 166 | } 167 | 168 | std::cout << "Focal length is: " << fl << std::endl; 169 | 170 | double fd = sample.getFocusDistance(); 171 | std::cout << "Focus distance is: " << fd << " cm" << std::endl; 172 | 173 | sample.setFocusDistanceUnits(opentrackio_parser::PositionUnits::Inches); 174 | fd = sample.getFocusDistance(); 175 | std::cout << "Focus distance is: " << std::fixed << std::setprecision(4) << fd << " in" << std::endl; 176 | 177 | return 0; 178 | } 179 | -------------------------------------------------------------------------------- /src/test/cpp/opentrackio-parser/src/opentrackio-lib/OpenTrackIOParser.h: -------------------------------------------------------------------------------- 1 | // OpenTrackIOParser.h 2 | // 3 | // Reference code for decoding opentrackIO messages 4 | // 5 | // SPDX-License-Identifier: BSD-3-Clause 6 | // Copyright Contributors to the SMTPE RIS OSVP Metadata Project 7 | // 8 | // nlohmann JSON library licensed under the MIT License, Copyright (c) 2013-2022 Niels Lohmann 9 | 10 | #pragma once 11 | 12 | #include 13 | 14 | #include 15 | #include 16 | 17 | namespace opentrackio_parser 18 | { 19 | enum class PositionUnits 20 | { 21 | Meters, 22 | Centimeters, 23 | Millimeters, 24 | Inches 25 | }; 26 | 27 | inline std::string toString(const PositionUnits unit) 28 | { 29 | switch (unit) 30 | { 31 | case PositionUnits::Meters: return "m"; 32 | case PositionUnits::Centimeters: return "cm"; 33 | case PositionUnits::Millimeters: return "mm"; 34 | case PositionUnits::Inches: return "in"; 35 | default: 36 | assert(false && "Unsupported unit"); 37 | return "Unsupported unit"; 38 | } 39 | } 40 | 41 | enum class RotationUnits 42 | { 43 | Degrees, 44 | Radians 45 | }; 46 | 47 | inline std::string toString(const RotationUnits unit) 48 | { 49 | switch (unit) 50 | { 51 | case RotationUnits::Degrees: return "deg"; 52 | case RotationUnits::Radians: return "rad"; 53 | default: 54 | assert(false && "Unsupported unit"); 55 | return 
"Unsupported unit"; 56 | } 57 | } 58 | 59 | enum class SampleTimeFormat 60 | { 61 | Seconds, 62 | Timecode, 63 | String 64 | }; 65 | 66 | inline std::string toString(const SampleTimeFormat format) 67 | { 68 | switch (format) 69 | { 70 | case SampleTimeFormat::Seconds: return "sec"; 71 | case SampleTimeFormat::Timecode: return "timecode"; 72 | case SampleTimeFormat::String: return "string"; 73 | default: 74 | assert(false && "Unsupported format"); 75 | return "Unsupported format"; 76 | } 77 | } 78 | 79 | struct Transform 80 | { 81 | double x = 0.0; 82 | double y = 0.0; 83 | double z = 0.0; 84 | }; 85 | 86 | struct Rotation 87 | { 88 | double pan = 0.0; 89 | double tilt = 0.0; 90 | double roll = 0.0; 91 | }; 92 | } 93 | 94 | /** 95 | * OpenTrackIOSampleParser provides functionality for parsing and extracting data from OpenTrackIO JSON samples. 96 | * 97 | * This class handles the parsing, validation and access to OpenTrackIO data, with features including: 98 | * Unit conversion for position, rotation, and focus distance values. 99 | * Access to camera transform and rotation data. 100 | * Access to lens information (focal length, focus distance). 101 | * Access to timing information (timecode, sample time). 102 | * Access to camera sensor information. 103 | * 104 | * The parser supports customization of output units through dedicated setter methods, 105 | * allowing consumers to work with their preferred measurement systems. 106 | */ 107 | class OpenTrackIOSampleParser 108 | { 109 | public: 110 | /** Constructor that initializes the parser with sample data and schema. */ 111 | OpenTrackIOSampleParser(const std::string& sample, const std::string& schema, bool verbose); 112 | 113 | ~OpenTrackIOSampleParser() = default; 114 | 115 | /** Returns whether the sample and schema has successfully been parsed. */ 116 | bool isValid() const { return _isValid; } 117 | 118 | /** Gets camera transform. 
*/ 119 | opentrackio_parser::Transform getCameraTransform() const; 120 | 121 | opentrackio_parser::Rotation getRotation() const; 122 | 123 | /** Gets the slate (shot identification) information. */ 124 | std::string getSlate() const; 125 | 126 | /** Gets the camera timecode as a formatted string. */ 127 | std::string getTimecode() const; 128 | 129 | /** Gets timestamp information, optionally filtered by part (yy, dd, hh, mm, ss, ns). */ 130 | std::string getSampleTime(const std::string& part = "") const; 131 | 132 | /** Gets the camera's active sensor height in pixels. */ 133 | int getSensoryResolutionHeight() const; 134 | 135 | /** Gets the camera's active sensor width in pixels. */ 136 | int getSensorResolutionWidth() const; 137 | 138 | /** Gets the measurement units for sensor dimensions as stated in the schema. */ 139 | std::string getSensorDimensionsUnits(); 140 | 141 | /** Gets the tracking device's serial number. */ 142 | std::string getTrackingDeviceSerialNumber() const; 143 | 144 | /** Gets the lens focal length in millimeters. */ 145 | double getPineHoleFocalLength() const; 146 | 147 | /** Gets the lens focus distance in the configured units. */ 148 | double getFocusDistance() const; 149 | 150 | /** Gets the sample frame rate. */ 151 | double getSampleRate() const; 152 | 153 | /** Get the protocol name and version. */ 154 | std::string getProtocol() const; 155 | 156 | /** Set measurement units format for transforms. */ 157 | void setTranslationUnits(opentrackio_parser::PositionUnits unit); 158 | 159 | /** Set rotation units format. */ 160 | void setRotationUnits(opentrackio_parser::RotationUnits unit); 161 | 162 | /** Set sample time format. */ 163 | void setSampleTimeFormat(opentrackio_parser::SampleTimeFormat format); 164 | 165 | /** Set measurement units format for focus distance. 
*/
    void setFocusDistanceUnits(opentrackio_parser::PositionUnits unit);

private:
    bool importSchema();
    bool parse();

    opentrackio::OpenTrackIOSample _sample;
    std::string _sampleStr;
    std::string _schemaStr;

    nlohmann::json _schemaJson;

    bool _isVerbose;
    bool _isValid = false;

    double _transformMultiplier = 1.0;
    double _rotationMultiplier = 1.0;
    double _focusDistanceMultiplier = 1.0;

    std::string _sampleTimeFormat = "sec";
};
--------------------------------------------------------------------------------
/src/test/python/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/test/python/__init__.py
--------------------------------------------------------------------------------
/src/test/python/parser/README.md:
--------------------------------------------------------------------------------
# Documentation for opentrackio_parser.py

This Python reference code is for parsing "sample" JSON in OpenTrackIO format. The user API is in the methods of the class "OpenTrackIOProtocol" and includes methods for getting and scaling values to user-preferred units.

This code works with Python 3.11 and above.

Example:
```
export PYTHONPATH=src/main/python
python3 src/tools/python/generate_opentrackio_schema.py > opentrackio_schema.json
python3 src/tools/python/generate_complete_static_example.py > opentrackio_complete_static_sample.json

python3 opentrackio_parser.py --file=opentrackio_complete_static_sample.json --schema=opentrackio_schema.json

Reading OpenTrackIO schema file: opentrackio_schema.json
Reading OpenTrackIO sample file: opentrackio_complete_static_sample.json
Parsing JSON string from sample buffer...
Parsed the sample JSON successfully.
Parsed the schema JSON successfully.

Detected protocol: OpenTrackIO
Protocol version: 1.0.0
On slate: A101_A_4
Current camera timecode: 01:02:03:04
At a camera frame rate of: 23.976

Sample time PTP time is: 1718806554.5 sec
Sample time PTP as a string: year:2024 day:183 hour:14 min:15 sec:54 nsec:500000000
Sample time PTP as timecode: 14:15:54:11
Sample time PTP elements: 2024 183 14 15 54 500000000

Tracking device serial number: 1234567890A
Camera position is: (100.0,200.0,300.0) cm
Camera rotation is: (180.0,90.0,45.0) deg
Camera rotation is: (3.1416,1.5708,0.7854) radians

Active camera sensor height: 2160, width: 3840 micron
Focal length is: 24.305 millimeter
Focus distance is: 100.0 cm
Focus distance is: 39.37 in
```

Note that JSON dictionary contents and other debug info can be examined with the verbose flag: -v
--------------------------------------------------------------------------------
/src/test/python/parser/opentrackio_parser.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# opentrackio_parser.py
#
# Reference code for decoding OpenTrackIO samples
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the SMPTE RIS OSVP Metadata Project
#
# Example run: python3 opentrackio_parser.py --file=opentrackio_sample.json --schema=opentrackio_schema.json
# This is tested against the generated "complete_static_example" and "complete_dynamic_example" json

import os
import argparse
from opentrackio_lib import *

def main():
    """Parse an OpenTrackIO JSON sample against a schema file and print its contents.

    Reads --file (sample JSON) and --schema (schema JSON) from the command line,
    configures end-user preferred units on the parser, and prints protocol,
    timing, transform, rotation, sensor and lens information to stdout.
    Exits with -1 if either input file is missing.
    """
    parser = argparse.ArgumentParser(description='OpenTrackIO protocol parser')
    parser.add_argument('-f', '--file', help='The JSON input file.', default=None)
    parser.add_argument('-s', '--schema', help='The schema (JSON) input file.', default=None)
    parser.add_argument('-v', '--verbose', help='Make script more verbose',action='store_true')
    args = parser.parse_args()

    sample_text = ''
    schematext = ''
    verbose = False
    schemapath = None
    filepath = None

    # Read the schema file into one string, line by line.
    if (args.schema):
        if os.path.exists(args.schema):
            schemapath = args.schema
        if (schemapath != None):
            with open(schemapath, 'r') as fd:
                print("Reading OpenTrackIO schema file: {0}".format(schemapath))
                lines = fd.readlines()
                for line in lines:
                    schematext = schematext + line
    # Read the sample file into one string, line by line.
    if (args.file):
        if os.path.exists(args.file):
            filepath = args.file
        if (filepath != None):
            with open(filepath, 'r') as fd:
                print("Reading OpenTrackIO sample file: {0}".format(filepath))
                lines = fd.readlines()
                for line in lines:
                    sample_text = sample_text + line
    if (args.verbose):
        verbose = True

    if not filepath or not schemapath:
        print("Usage: python3 opentrackio_parser.py --file=opentrackio_sample.json --schema=opentrackio_schema.json")
        exit (-1)

    sample = OpenTrackIOProtocol(schematext,verbose) # a "Sample" is a de-serialized JSON object containing the protocol
    sample.parse_json(sample_text) # parse the actual JSON of the protocol

    sample.set_translation_units(TranslationUnit.CENTIMETER) # end-user preferred units
    sample.set_sample_time_format(TimeFormat.SECONDS)
    sample.set_focus_distance_units(FocusDistanceUnit.CENTIMETER)
    sample.set_rotation_units(RotationUnit.DEGREE)
    print()

    # Protocol identification and slate/timecode information.
    print("Detected protocol: {} version: {}".format(sample.get_protocol_name(), sample.get_protocol_version()))
    slate = sample.get_slate()
    print("On slate: {}".format(slate))
    timecode = sample.get_timecode()
    print("Current camera timecode: {}".format(timecode))
    framerate = sample.get_timecode_framerate()
    print("At a camera frame rate of: {:.5}".format(framerate))
    print()
    # The same sample time rendered in each supported format.
    print("Sample time PTP time is: {} sec".format(sample.get_sample_time()))
    print("Sample time PTP as a string: {}".format(sample.get_sample_time(TimeFormat.STRING)))
    print("Sample time PTP as timecode: {}".format(sample.get_sample_time(TimeFormat.TIMECODE)))
    print("Sample time PTP elements: {} {} {} {} {} {}".format(sample.get_sample_time(part='yy'),
                                                               sample.get_sample_time(part='dd'),
                                                               sample.get_sample_time(part='hh'),
                                                               sample.get_sample_time(part='mm'),
                                                               sample.get_sample_time(part='ss'),
                                                               sample.get_sample_time(part='ns')))
    print()

    # Static tracker metadata may arrive later than dynamic samples.
    snum = sample.get_tracking_device_serial_number()
    if snum:
        print("Tracking device serial number: {}".format(snum))
    else:
        print("Unknown tracking device, wait for static sample to come in...")
    posX = sample.get_camera_translation(Translation.X)
    posY = sample.get_camera_translation(Translation.Y)
    posZ =sample.get_camera_translation(Translation.Z)
    print("Camera position is: ({},{},{}) cm".format(posX, posY, posZ))
    rotX = sample.get_camera_rotation(Rotation.PAN)
    rotY = sample.get_camera_rotation(Rotation.TILT)
    rotZ = sample.get_camera_rotation(Rotation.ROLL)
    print("Camera rotation is: ({},{},{}) deg".format(rotX, rotY, rotZ))
    # Demonstrate unit switching: same rotation, now in radians.
    sample.set_rotation_units(RotationUnit.RADIAN)
    rotX = sample.get_camera_rotation(Rotation.PAN)
    rotY = sample.get_camera_rotation(Rotation.TILT)
    rotZ = sample.get_camera_rotation(Rotation.ROLL)
    print("Camera rotation is: ({:.5},{:.5},{:.5}) radians".format(rotX, rotY, rotZ))
    print()

    fl = sample.get_focal_length()
    height = sample.get_sensor_dimension_height()
    if height:
        width = sample.get_sensor_dimension_width()
        units = sample.get_sensor_dimension_units()
        print("Active camera sensor height: {}, width: {} {}".format(height,width,units))
    else:
        print("Unknown camera sensor, wait for
 static sample to come in...")
    # Pull the focal length units directly out of the schema dictionary.
    fl_units = sample.sd["properties"]["lens"]["properties"]["pinholeFocalLength"]["units"]
    print("Focal length is: {} {}".format(fl,fl_units))
    fd = sample.get_focus_distance()
    print("Focus distance is: {} cm".format(fd))
    # NOTE(review): a bare "in" string is passed here, whereas earlier calls use
    # the FocusDistanceUnit enum — presumably the enum's value; confirm against
    # opentrackio_lib.FocusDistanceUnit.
    sample.set_focus_distance_units("in") # end-user preferred units
    fd = sample.get_focus_distance()
    print("Focus distance is: {:.4} in".format(fd))

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/src/test/python/parser/opentrackio_receiver.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# opentrackio_receiver.py
#
# Reference code for receiving and decoding OpenTrackIO data
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the SMPTE RIS OSVP Metadata Project

import socket
import struct
import argparse
import zlib  # NOTE(review): not used directly in this file
import sys
import time
import ntplib
from cbor2 import loads  # NOTE(review): not used directly; parse_cbor is used instead
from opentrackio_lib import *
# NOTE(review): main() below calls os.path.exists but os is not imported here —
# presumably it is brought in by the star import from opentrackio_lib; confirm.

VERBOSE = False

# Module-level receiver state shared by the packet-parsing functions below.
opentrackiolib = None        # OpenTrackIOProtocol instance, created in main()
sequence_number = 0          # sequence number of the packet being processed
prev_sequence_number = 0     # last accepted sequence number (duplicate rejection)
segment_buffer = {}          # sequence number -> {segment offset -> payload bytes}

timesource = ''              # TimeSource reported by the stream; '' until known

ntpclient = ntplib.NTPClient()
ntpresponse = None
ntpoffset = 0.0              # offset between local clock and NTP server time

def init_time_source():
    """Initialize clock state for the stream's time source.

    Only NTP needs setup (querying the server for the local clock offset);
    PTP and genlock are no-ops here.
    """
    if timesource == TimeSource.PTP:
        return
    elif timesource == TimeSource.NTP:
        global ntpresponse, ntpoffset
        ntpresponse = ntpclient.request(NTPSERVER)
        ntpoffset = ntpresponse.offset
    elif timesource == TimeSource.GENLOCK:
        return
    else:
        return

def parse_opentrackio_packet(data):
    """Validate one UDP datagram and buffer its payload segment.

    Checks length, identifier, duplicate sequence number, payload length and
    checksum; returns False on any failure. When the last segment of a
    sequence arrives, reassembles and forwards the payload to process_payload.
    """
    global sequence_number, prev_sequence_number, opentrackiolib, timesource, segment_buffer

    if len(data) < OTRK_HEADER_LENGTH:
        print("Invalid packet: Packet is too short.")
        return False

    identifier
= data[:OTRK_IDENTIFIER_LENGTH] 53 | if identifier != OTRK_IDENTIFIER: 54 | print("Invalid packet: Identifier mismatch.") 55 | return False 56 | 57 | reserved = data[4] 58 | encoding = data[5] 59 | sequence_number = struct.unpack('!H', data[6:8])[0] 60 | segment_offset = struct.unpack('!I', data[8:12])[0] 61 | l_and_payload_length = struct.unpack('!H', data[12:14])[0] 62 | 63 | last_segment = bool(l_and_payload_length >> 15) 64 | payload_length = l_and_payload_length & 0x7FFF 65 | 66 | checksum_val = struct.unpack('!H', data[14:16])[0] 67 | 68 | payload = data[16:] 69 | 70 | if sequence_number == prev_sequence_number: 71 | print(f"Invalid packet: Same sequence number received twice.") 72 | return False 73 | 74 | prev_sequence_number = sequence_number 75 | 76 | if len(payload) != payload_length: 77 | print(f"Invalid packet: Expected payload length {payload_length}, but got {len(payload)}.") 78 | return False 79 | 80 | header_without_checksum = data[:14] 81 | calculated_checksum = fletcher16(header_without_checksum + payload) 82 | if calculated_checksum != struct.pack('!H', checksum_val): 83 | print("Invalid packet: Checksum mismatch.") 84 | return False 85 | 86 | if sequence_number not in segment_buffer: 87 | segment_buffer[sequence_number] = {} 88 | 89 | segment_buffer[sequence_number][segment_offset] = payload 90 | 91 | if last_segment: 92 | segments = segment_buffer[sequence_number] 93 | sorted_offsets = sorted(segments.keys()) 94 | payload_parts = [segments[offset] for offset in sorted_offsets] 95 | assembled_payload = b''.join(payload_parts) 96 | 97 | process_payload(assembled_payload, encoding) 98 | 99 | # Clear the buffer for this sequence number 100 | del segment_buffer[sequence_number] 101 | 102 | def process_payload(payload, encoding): 103 | global opentrackiolib, timesource 104 | 105 | try: 106 | payloadformat = PayloadFormat(encoding) 107 | except ValueError: 108 | print("Invalid payload format.") 109 | return False 110 | 111 | if payloadformat == 
PayloadFormat.CBOR: 112 | try: 113 | opentrackiolib.parse_cbor(payload) 114 | except OpenTrackIOException as e: 115 | print(e) 116 | return False 117 | elif payloadformat == PayloadFormat.JSON: 118 | try: 119 | opentrackiolib.parse_json(payload) 120 | except OpenTrackIOException as e: 121 | print(e) 122 | return False 123 | 124 | if not timesource: 125 | timesource = opentrackiolib.get_time_source() 126 | init_time_source() 127 | 128 | ref_timestamp = get_local_timestamp() 129 | ref_delta = ref_timestamp - opentrackiolib.get_sample_time(TimeFormat.SECONDS) 130 | 131 | try: 132 | if VERBOSE: 133 | print(f"OpenTrackIO packet:\n") 134 | print(f" Timecode: {opentrackiolib.get_timecode()}") 135 | print(f" Time source: {opentrackiolib.get_time_source().name}") 136 | print(f" Local time: {ref_timestamp}") 137 | print(f" Sample time: {opentrackiolib.get_sample_time(TimeFormat.SECONDS)}") 138 | print(f" Delta: {ref_delta}") 139 | print(f" Translation: {opentrackiolib.get_camera_translations()}") 140 | print(f" Rotation: {opentrackiolib.get_camera_rotations()}") 141 | except OpenTrackIOException as e: 142 | print(e) 143 | 144 | def get_local_timestamp(): 145 | if timesource == TimeSource.NTP: 146 | timestamp = time.time() + ntpoffset 147 | #print(f"Local time: {timestamp}") 148 | return timestamp 149 | 150 | def main(): 151 | global VERBOSE, opentrackiolib 152 | 153 | parser = argparse.ArgumentParser(description=f'OpenTrackIO {OTRK_VERSION} protocol receiver') 154 | parser.add_argument('-n', '--source', type=int, help='The Source Number (1-200) to listen for.', default=OTRK_SOURCE_NUMBER) 155 | parser.add_argument('-p', '--port', type=int, help=f'The port number (49152–65535) to listen on. Default: {OTRK_MULTICAST_PORT}', default=OTRK_MULTICAST_PORT) 156 | parser.add_argument('-s', '--schema', help='The schema (JSON) input file. 
 Default: opentrackio_schema.json', default='opentrackio_schema.json')
    parser.add_argument('-v', '--verbose', help='Make script more verbose', action='store_true')
    args = parser.parse_args()

    schematext = ''
    schemapath = None
    source_number = OTRK_SOURCE_NUMBER
    multicast_port = OTRK_MULTICAST_PORT

    # Read the schema file into one string, line by line.
    if (args.schema):
        if os.path.exists(args.schema):
            schemapath = args.schema
        if (schemapath != None):
            with open(schemapath, 'r') as fd:
                if VERBOSE:
                    print("Reading OpenTrackIO schema file: {0}".format(schemapath))
                lines = fd.readlines()
                for line in lines:
                    schematext = schematext + line

    # Validate the source number (selects the multicast group to join).
    if (args.source):
        if not (1 <= args.source <= 200):
            print("Error: Source Number must be between 1 and 200.")
            exit(-1)
        else:
            source_number = args.source

    # Validate the port (must be in the dynamic/private range).
    if (args.port):
        if not (49152 <= args.port <= 65535):
            print("Error: port number must be between 49152 and 65535.")
            exit(-1)
        else:
            multicast_port = args.port

    if (args.verbose):
        VERBOSE = True

    if not source_number or not schematext:
        parser.print_help()
        exit(-1)

    opentrackiolib = OpenTrackIOProtocol(schematext, False)

    # Join the multicast group derived from the source number.
    multicast_group = f"{OTRK_MULTICAST_PREFIX}{source_number}"
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("", multicast_port))
    mreq = struct.pack("4sl", socket.inet_aton(multicast_group), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

    print(f"Listening for OpenTrackIO packets on {multicast_group}:{multicast_port}")

    last_time = None
    packets = 0

    # Receive loop: track arrival rate, then hand each datagram to the parser.
    while True:
        data, addr = sock.recvfrom(OTRK_MTU)
        packets = packets + 1

        current_time = time.time()
        if last_time is not None:
            interval = current_time - last_time
            frequency = 1 / interval if interval > 0 else 0
            if VERBOSE:
                print(f"\n")
                print(f"Receive frequency: {frequency:.3f} Hz")
                print(f"Received packets: {packets}")

        last_time = current_time

        parse_opentrackio_packet(data)

if __name__ == "__main__":
    main()

--------------------------------------------------------------------------------
/src/test/python/test_arri_reader.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the SMPTE RIS OSVP Metadata Project

'''ARRI camera reader tests'''

from fractions import Fraction
import unittest

import camdkit.arri.reader
import camdkit.model

class ARRIReaderTest(unittest.TestCase):

    def test_reader(self):
        # Parse a known ARRI CSV export and check each extracted field
        # against the values recorded in that file.
        clip = camdkit.arri.reader.to_clip("src/test/resources/arri/B001C001_180327_R1ZA.mov.csv")

        self.assertEqual(clip.iso, 400)

        self.assertEqual(
            clip.active_sensor_physical_dimensions,
            camdkit.model.Dimensions(width=316.8, height=178.2)
        )

        self.assertEqual(clip.camera_make, "ARRI")

        self.assertEqual(clip.camera_model, "Alexa LF Plus W")

        self.assertEqual(clip.camera_serial_number, "2566")

        self.assertEqual(clip.lens_make, "ARRI")

        self.assertEqual(clip.lens_model, "SP40 T1.8")

        self.assertEqual(clip.lens_serial_number, "2")

        self.assertEqual(clip.capture_frame_rate, 24)

        self.assertEqual(clip.lens_nominal_focal_length, 40)

        self.assertEqual(clip.lens_focus_distance[0], 4.812)

        self.assertEqual(clip.anamorphic_squeeze, 1)

        # Compare at 3 decimal places to sidestep float representation noise.
        self.assertEqual(round(clip.lens_t_number[0] * 1000), 1782)

        self.assertEqual(clip.shutter_angle, 172.8)

    def test_linear_iris_value(self):
        # Round-trip a linear iris value through the converter and compare
        # at 3 decimal places.
        self.assertEqual(round(camdkit.arri.reader.t_number_from_linear_iris_value(6000) * 1000), 5657)
--------------------------------------------------------------------------------
/src/test/python/test_bmd_reader.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the SMPTE RIS OSVP Metadata Project

'''Blackmagic camera RAW reader tests'''

import unittest

import camdkit.bmd.reader
import camdkit.model

class BMDReaderTest(unittest.TestCase):

    def test_reader(self):
        # Parse a known Blackmagic RAW metadata dump and check each
        # extracted field against the values recorded in that file.
        with open("src/test/resources/bmd/metadata.txt", "r", encoding="utf-8") as fp:
            clip = camdkit.bmd.reader.to_clip(fp)

        self.assertEqual(clip.camera_make, "Blackmagic Design")

        self.assertEqual(clip.camera_model, "Blackmagic URSA Mini Pro 12K")

        self.assertEqual(clip.camera_serial_number, "7ef33b36-a5ba-4a04-b218-0afc7eb1f8b6")

        self.assertEqual(clip.camera_firmware, "7.2.1")

        self.assertEqual(clip.lens_model, "Cooke Anamorphic /i Prime Lens 50mm")

        self.assertEqual(clip.iso, 800)

        # Physical dimensions are derived by scaling the active resolution by
        # the sensor pitch recorded in the metadata.
        self.assertEqual(
            clip.active_sensor_physical_dimensions,
            camdkit.model.Dimensions(
                width=round(5120 * 270030 / 12288),
                height=round(4272 * 14250 / 6480)
            )
        )

        self.assertEqual(clip.capture_frame_rate, 48)

        self.assertEqual(clip.lens_nominal_focal_length, 50)

        self.assertEqual(clip.lens_focus_distance[0], 991)

        # self.assertEqual(clip.white_balance, 6000)

        self.assertEqual(clip.anamorphic_squeeze, 2)

        self.assertEqual(clip.lens_t_number[0], 2.3)

        self.assertEqual(clip.shutter_angle, 180)
--------------------------------------------------------------------------------
/src/test/python/test_canon_reader.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the SMPTE RIS OSVP Metadata Project

'''Canon camera reader tests'''

import unittest

import camdkit.canon.reader

class CanonReaderTest(unittest.TestCase):

    def test_reader(self):
        # The Canon reader takes two CSVs: one with static (per-clip) metadata
        # and one with per-frame metadata.
        with open("src/test/resources/canon/20221007_TNumber_CanonCameraMetadata_Static.csv", "r", encoding="utf-8") as static_csv, \
            open("src/test/resources/canon/20221007_TNumber_CanonCameraMetadata_Frames.csv", "r", encoding="utf-8") as frame_csv:
            clip = camdkit.canon.reader.to_clip(static_csv, frame_csv)

        self.assertEqual(clip.camera_make, "Canon")

        self.assertEqual(clip.iso, 1600) # ISO: 1600

        self.assertEqual(clip.lens_nominal_focal_length, 18) # focal_length: 18 mm

        self.assertEqual(clip.lens_focus_distance[0], 0.5) # focus_position: 0.5 mm

        self.assertEqual(clip.shutter_angle, 180) # shutter_angle: 180 deg

        self.assertIsNone(clip.lens_entrance_pupil_offset)

        self.assertEqual(clip.lens_t_number[0], 4.5) # t_number: 4.5

        # Fields absent from this capture should come back as None.
        self.assertIsNone(clip.capture_frame_rate)

        self.assertIsNone(clip.lens_serial_number)

        self.assertEqual(clip.anamorphic_squeeze, 1) # anamorphic_squeeze: 1

        self.assertIsNone(clip.active_sensor_physical_dimensions)
--------------------------------------------------------------------------------
/src/test/python/test_compatibility.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the SMPTE RIS OSVP Metadata Project

"""Tests for components providing compatibility with classic camdkit"""

import unittest
import json

from typing import Annotated, Any
from copy import deepcopy

from pydantic import BaseModel
from pydantic.json_schema import JsonSchemaValue

from camdkit.camera_types import StaticCamera
from camdkit.compatibility import CompatibleBaseModel

# Minimal Pydantic model with one optional field, used to pin down the exact
# schema Pydantic generates.
class PureOpt(BaseModel):
    a: int
    b: int | None = None
    c: str

EXPECTED_PURE_OPT_SCHEMA = {
    # n.b. dict order changed from BaseModel.model_json_schema() output to be sure that
    # order of elements isn't important
    "type": "object",
    "properties": {
        "a": { "title": "A", "type": "integer" },
        "b": { "anyOf": [ { "type": "integer" }, { "type": "null" } ],
               "default": None, "title": "B" },
        "c": { "title": "C", "type": "string" }
    },
    "required": [ "a", "c" ],
    "title": "PureOpt"
}


# Same shape as PureOpt, but through the compatibility base class, which
# flattens optionals and strips titles.
class CompatiblePureOpt(CompatibleBaseModel):
    a: int
    b: int | None = None
    c: str

EXPECTED_COMPATIBLE_PURE_OPT_SCHEMA = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "a": { "type": "integer" },
        "b": { "type": "integer" },
        "c": { "type": "string" }
    },
    "required": [ "a", "c" ]
}


# Like PureOpt but with an Annotated optional field; the annotation metadata
# should not affect the generated schema.
class AnnotatedOpt(BaseModel):
    class FauxField:
        def __init__(self, *_) -> None:
            pass

    a: int
    b: Annotated[int | None, FauxField("foo")] = None
    c: str

EXPECTED_ANNOTATED_OPT_SCHEMA = {
    "type": "object",
    "properties": {
        "a": { "title": "A", "type": "integer" },
        "b": { "anyOf": [ { "type": "integer" }, { "type": "null" } ],
               "default": None, "title": "B" },
        "c": { "title": "C", "type": "string" }
    },
    "required": [ "a", "c" ],
    "title": "AnnotatedOpt"
}


# Model with a required variable-length tuple field (maps to a JSON array).
class PureArray(BaseModel):
    a: int
    b: tuple[int, ...]
    c: str

EXPECTED_PURE_ARRAY_SCHEMA = {
    "properties": {
        "a": { "title": "A", "type": "integer" },
        "b": { "title": "B",
               "type": "array",
               "items": {"type": "integer" } },
        "c": { "title": "C", "type": "string" }
    },
    "required": [ "a", "b", "c" ],
    "title": "PureArray",
    "type": "object"
}


# regular POD parameters, e.g. lens entrance pupil offset
class OptArray(BaseModel):
    a: int
    b: tuple[int, ...] | None
    c: str

EXPECTED_OPT_ARRAY_SCHEMA = {
    "properties": {
        "a": { "title": "A", "type": "integer" },
        "b": { "title": "B",
               "anyOf": [
                   { "items": { "type": "integer" }, "type": "array" },
                   { "type": "null" } ] },
        "c": { "title": "C", "type": "string" }
    },
    "required": [ "a", "b", "c" ],
    "title": "OptArray",
    "type": "object"
}

# The classic (pre-Pydantic) camdkit schema for StaticCamera, reduced to the
# anamorphicSqueeze property.
CLASSIC_STATIC_CAMERA_SCHEMA_W_JUST_ANAMORPHIC_SQUEEZE = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "anamorphicSqueeze": {
            "type": "object",
            "properties": {
                "num": { "type": "integer", "minimum": 1, "maximum": 2147483647 },
                "denom": { "type": "integer", "minimum": 1, "maximum": 4294967295 } },
            "required": [ "num", "denom" ],
            "additionalProperties": False,
            "description": "Nominal ratio of height to width of the image of an axis-aligned\nsquare captured by the camera sensor. It can be used to de-squeeze\nimages but is not however an exact number over the entire captured\narea due to a lens' intrinsic analog nature.\n"
        },
    }
}

# The Pydantic-generated equivalent, where the optional property is expressed
# as an anyOf with null.
STATIC_CAMERA_SCHEMA_W_JUST_ANAMORPHIC_SQUEEZE = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "anamorphicSqueeze": {
            "anyOf": [
                { "type": "object",
                  "properties": {
                      "num": { "type": "integer", "maximum": 2147483647, "minimum": 1 },
                      "denom": { "type": "integer", "maximum": 4294967295, "minimum": 1 }
                  },
                  "required": [ "num", "denom" ],
                  "additionalProperties": False },
                { "type": "null" }
            ],
            "default": None,
            "description": "Nominal ratio of height to width of the image of an axis-aligned\nsquare captured by the camera sensor. It can be used to de-squeeze\nimages but is not however an exact number over the entire captured\narea due to a lens' intrinsic analog nature.\n"
        }
    }
}


def remove_properties_besides(schema: JsonSchemaValue, keeper: str) -> JsonSchemaValue:
    # Mutates schema in place, dropping every property except keeper,
    # and returns it for convenience.
    property_names = [k for k in schema["properties"].keys() if k != keeper]
    for property_name in property_names:
        schema["properties"].pop(property_name)
    return schema


class CompatibilityTestCases(unittest.TestCase):
    # make sure Pydantic hasn't changed its schema generator without our noticing
    def test_schema_generation(self):
        self.assertDictEqual(EXPECTED_PURE_OPT_SCHEMA, PureOpt.model_json_schema())
        self.assertDictEqual(EXPECTED_COMPATIBLE_PURE_OPT_SCHEMA, CompatiblePureOpt.make_json_schema())
        self.assertDictEqual(EXPECTED_ANNOTATED_OPT_SCHEMA, AnnotatedOpt.model_json_schema())
        self.assertDictEqual(EXPECTED_PURE_ARRAY_SCHEMA, PureArray.model_json_schema())
        self.assertDictEqual(EXPECTED_OPT_ARRAY_SCHEMA, OptArray.model_json_schema())

    def test_annotated_opt_same_as_pure_opt(self):
"""Convince ourselves Annotated leaves no trace in generated schema""" 175 | pure_opt_schema = PureOpt.model_json_schema() 176 | annotated_opt_schema = AnnotatedOpt.model_json_schema() 177 | pure_opt_schema.pop("title", None) 178 | annotated_opt_schema.pop("title", None) 179 | self.assertDictEqual(pure_opt_schema, annotated_opt_schema) 180 | 181 | 182 | if __name__ == '__main__': 183 | unittest.main() 184 | -------------------------------------------------------------------------------- /src/test/python/test_cooke_data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''Cooke lens reader tests"''' 8 | 9 | import unittest 10 | 11 | import camdkit.red.cooke 12 | 13 | _COOKE_METADATA = bytes(map(lambda i: int(i, 16), "64/40/40/46/68/48/70/B8/80/40/40/40/42/66/6D/40/40/46/5E/40/40/46/73/45/4E/41/7F/40/40/53/47/35/33/35/39/39/37/36/34/0A/0D".split("/"))) 14 | 15 | class CookeDataTest(unittest.TestCase): 16 | 17 | def test_entrance_pupil_position(self): 18 | c = camdkit.red.cooke.lens_data_from_binary_string(_COOKE_METADATA) 19 | 20 | self.assertEqual(c.entrance_pupil_position, 127) 21 | 22 | def test__position(self): 23 | c = camdkit.red.cooke.lens_data_from_binary_string(_COOKE_METADATA) 24 | 25 | self.assertEqual(c.aperture_value, 560) 26 | -------------------------------------------------------------------------------- /src/test/python/test_example_regression.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | import json 8 | import unittest 9 | 10 | from copy import deepcopy 11 | from pathlib import Path 12 | from typing import Literal 13 | 14 | 
def generify_urn_uuids(clip: JsonSchemaValue) -> None:
    """Replace URN UUID values in *clip* with a fixed placeholder, in place.

    Randomly generated UUIDs make otherwise-identical example files differ;
    overwriting them with "urn:uuid:random" lets two examples be compared
    for structural equality. A path given as a tuple names nested dict keys;
    list-valued entries are replaced element-wise. Missing paths are skipped.
    """
    paths_to_generify: tuple[str | tuple[str, ...], ...] = (
        "sampleId",
        "sourceId",
        "relatedSampleIds",
        ("static", "camera", "fdlLink"),
    )
    for path in paths_to_generify:
        containing_dict = clip
        if isinstance(path, tuple):
            if not path:
                raise RuntimeError("empty tuple of dict keys in URN generification")
            *parents, path = path
            missing = False
            for pathlet in parents:
                if pathlet not in containing_dict:
                    missing = True
                    break
                containing_dict = containing_dict[pathlet]
            if missing:
                # Was a bare `return`, which aborted ALL remaining paths when
                # one nested path was absent; skip only this path instead.
                continue
        if path in containing_dict:
            if isinstance(containing_dict[path], list):
                containing_dict[path] = ["urn:uuid:random" for _ in containing_dict[path]]
            else:
                containing_dict[path] = "urn:uuid:random"
class MoSysReaderTest(unittest.TestCase):
    """Checks the Mo-Sys F4 reader against known values from a sample capture.

    The reader is asked for 20 frames; assertions index into different
    frames of the resulting clip to sample a spread of parameters.
    """

    def test_reader(self):
        f4_path = "src/test/resources/mosys/A003_C001_01 15-03-47-01.f4"
        clip = reader.to_clip(f4_path, 20)

        # Protocol identification
        self.assertEqual(OPENTRACKIO_PROTOCOL_NAME, clip.protocol[0].name)
        self.assertEqual(OPENTRACKIO_PROTOCOL_VERSION, clip.protocol[0].version)

        # Sample IDs appear to be UUID URNs: same length as any UUID URN
        self.assertEqual(len(uuid.uuid4().urn), len(clip.sample_id[1]))

        # Tracker state
        self.assertEqual(True, clip.tracker_recording[2])
        self.assertEqual("Optical Good", clip.tracker_status[3])

        # Timing
        self.assertEqual(25.0, clip.timing_sample_rate[4])
        self.assertEqual("internal", clip.timing_mode[5])
        self.assertEqual(13, clip.timing_sequence_number[6])
        expected_sync = Synchronization(frequency=25, locked=True,
                                        source=SynchronizationSourceEnum.GENLOCK,
                                        ptp=None, present=True)
        self.assertEqual(expected_sync, clip.timing_synchronization[7])
        expected_timecode = Timecode(hours=15, minutes=3, seconds=47, frames=10,
                                     frame_rate=StrictlyPositiveRational(25, 1),
                                     sub_frame=0)
        # Compared as strings, so only the rendered timecode must match
        self.assertEqual(str(expected_timecode), str(clip.timing_timecode[8]))

        # Transforms
        self.assertEqual(Vector3(x=-8.121, y=-185.368, z=119.806),
                         clip.transforms[9][0].translation)
        self.assertEqual(Rotator3(pan=-2.969, tilt=-28.03, roll=3.1),
                         clip.transforms[10][0].rotation)

        # Lens
        self.assertEqual(FizEncoders(focus=0.7643280029296875, zoom=0.0014190673828125),
                         clip.lens_encoders[11])
        self.assertEqual((Distortion(radial=(0.15680991113185883, -0.0881580114364624)),),
                         clip.lens_distortions[12])
        self.assertEqual(ProjectionOffset(-7.783590793609619, 6.896144866943359),
                         clip.lens_projection_offset[13])
        self.assertAlmostEqual(22.35, clip.lens_pinhole_focal_length[14], 2)
        self.assertEqual(2313, int(clip.lens_focus_distance[15] * 1000))
class REDReaderTest(unittest.TestCase):
    """Checks the RED camera reader against known values from sample CSV files."""

    def test_reader(self):
        # Imported explicitly: this test references camdkit.model.Dimensions,
        # which previously resolved only because some other module happened
        # to have imported the camdkit.model submodule first.
        import camdkit.model

        with open("src/test/resources/red/A001_C066_0303LZ_001.static.csv", "r", encoding="utf-8") as type_3_file, \
            open("src/test/resources/red/A001_C066_0303LZ_001.frames.csv", "r", encoding="utf-8") as type_5_file:
            clip = camdkit.red.reader.to_clip(type_3_file, type_5_file)

        # Camera identification
        self.assertEqual(clip.camera_make, "RED")
        self.assertEqual(clip.camera_model, "RANGER MONSTRO 8K VV")
        self.assertEqual(clip.camera_serial_number, "130-27E-4B5")
        self.assertEqual(clip.camera_firmware, "7.4.1")

        # Lens identification
        self.assertEqual(clip.lens_make, "SIGMA")
        self.assertEqual(clip.lens_model, "40mm T1.5 FF | 018")
        self.assertEqual(clip.lens_serial_number, "G53599764")
        self.assertEqual(clip.lens_firmware, "1.00")

        # Exposure and optics
        self.assertEqual(clip.iso, 250)
        self.assertEqual(clip.lens_nominal_focal_length, 40)
        self.assertEqual(clip.lens_focus_distance[0], 410)
        self.assertEqual(clip.lens_entrance_pupil_offset[0], 0.127)
        self.assertEqual(clip.lens_t_number[0], 5.6)
        self.assertEqual(clip.capture_frame_rate, 24)
        self.assertEqual(clip.anamorphic_squeeze, 1)
        self.assertEqual(clip.shutter_angle, 180)

        # 4096x2160 photosites converted to mm (presumably a 5 um pitch —
        # TODO confirm against the reader implementation)
        self.assertEqual(
            clip.active_sensor_physical_dimensions,
            camdkit.model.Dimensions(width=(4096 * 5 / 1000.0), height=(2160 * 5 / 1000.0))
        )
class StringsTestCases(unittest.TestCase):
    """Validation and schema-generation tests for OpenTrackIO string types."""

    def test_non_blank_utf8_string(self):
        class NonBlankUTF8StringTestbed(CompatibleBaseModel):
            value: Optional[NonBlankUTF8String] = None

        testbed = NonBlankUTF8StringTestbed()
        self.assertIsNone(testbed.value)
        # Wrong type and blank string are both rejected
        with self.assertRaises(ValidationError):
            testbed.value = 1
        with self.assertRaises(ValidationError):
            testbed.value = ""
        # Boundary checks: 1 and 1023 characters accepted, 1024 rejected
        shortest_valid: NonBlankUTF8String = "x"
        testbed.value = shortest_valid
        self.assertEqual(shortest_valid, testbed.value)
        longest_valid: NonBlankUTF8String = "x" * 1023
        testbed.value = longest_valid
        self.assertEqual(longest_valid, testbed.value)
        shortest_too_long: NonBlankUTF8String = "x" * 1024
        with self.assertRaises(ValidationError):
            testbed.value = shortest_too_long
        # The generated schema spells out the same constraints
        expected_schema = {'type': 'string', 'minLength': 1, 'maxLength': 1023}
        value_schema = NonBlankUTF8StringTestbed.make_json_schema()["properties"]["value"]
        self.assertDictEqual(expected_schema, value_schema)

    def test_uuid_urn(self):
        class UUIDTestbed(CompatibleBaseModel):
            value: UUIDURN

        testbed = UUIDTestbed(value=VALID_SAMPLE_ID_URN_0)
        # Non-strings, blank strings, and non-URN strings are all rejected
        for invalid_value in (1, "", "fail"):
            with self.assertRaises(ValidationError):
                testbed.value = invalid_value
        # Well-formed UUID URNs round-trip through assignment
        for valid_value in (VALID_SAMPLE_ID_URN_0, VALID_SAMPLE_ID_URN_1):
            testbed.value = valid_value
            self.assertEqual(valid_value, testbed.value)
        # The generated schema anchors the canonical lowercase-hex UUID pattern
        original_re = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
        expected_schema = {
            "type": "string",
            "pattern": f"^urn:uuid:{original_re}$"
        }
        uuid_schema = UUIDTestbed.make_json_schema()["properties"]["value"]
        self.assertDictEqual(expected_schema, uuid_schema)
class UtilitiesTest(unittest.TestCase):
    """Tests for utility helpers, chiefly frame-rate guessing."""

    def test_guess_fps(self):
        cases = (
            # Exact integer and rational rates pass straight through
            (24, 24),
            (Fraction(24, 1), Fraction(24, 1)),
            (Fraction(24000, 1001), Fraction(24000, 1001)),
            # Floats equal to the NTSC-style rates snap to the exact rationals
            (Fraction(24000, 1001), float(Fraction(24000, 1001))),
            (Fraction(30000, 1001), float(Fraction(30000, 1001))),
            (Fraction(60000, 1001), float(Fraction(60000, 1001))),
            # ... as do the conventional two-decimal approximations
            (Fraction(24000, 1001), 23.98),
            (Fraction(30000, 1001), 29.97),
            (Fraction(60000, 1001), 59.94),
        )
        for expected, rate in cases:
            self.assertEqual(expected, utils.guess_fps(rate))
class VeniceReaderTest(unittest.TestCase):
    """Checks the Sony Venice reader against known values from sample files."""

    def test_frac_stop(self):
        # Fractional-stop notation: both expectations are consistent with
        # T = 2**(stops / 2), e.g. "T 2 3/10" is 2.3 stops -> ~2.219,
        # and "T 6" is 6 stops -> exactly 8.0
        self.assertEqual(round(1000 * camdkit.venice.reader.t_number_from_frac_stop("T 2 3/10")), 2219)
        self.assertEqual(round(1000 * camdkit.venice.reader.t_number_from_frac_stop("T 6")), 8000)

    def test_reader(self):
        # Imported explicitly: this test references camdkit.model.Dimensions,
        # which previously resolved only because some other module happened
        # to have imported the camdkit.model submodule first.
        import camdkit.model

        with open("src/test/resources/venice/D001C005_210716AGM01.xml", "r", encoding="utf-8") as static_file, \
            open("src/test/resources/venice/D001C005_210716AG.csv", "r", encoding="utf-8") as dynamic_file:
            clip = camdkit.venice.reader.to_clip(static_file, dynamic_file)

        # Camera identification
        self.assertEqual(clip.camera_make, "Sony")
        self.assertEqual(clip.camera_model, "MPC-3610")
        self.assertEqual(clip.camera_serial_number, "0010201")
        self.assertEqual(clip.camera_firmware, "6.10")

        # Lens identification (make is absent from the sample metadata)
        self.assertIsNone(clip.lens_make)
        self.assertEqual(clip.lens_model, "S7i-32")
        self.assertEqual(clip.lens_serial_number, "7032.0100")

        # Exposure and optics
        self.assertEqual(clip.iso, 500)
        self.assertEqual(clip.lens_nominal_focal_length, 32)
        self.assertEqual(round(1000.0 * clip.lens_t_number[0]), 2219)
        self.assertEqual(clip.capture_frame_rate, 24)
        self.assertEqual(clip.anamorphic_squeeze, 1)
        self.assertEqual(clip.shutter_angle, 103.8)

        # 5674x3192 photosites converted to mm (presumably a 5.9375 um
        # pitch — TODO confirm against the reader implementation)
        self.assertEqual(
            clip.active_sensor_physical_dimensions,
            camdkit.model.Dimensions(width=5674.0 * 5.9375 / 1000.0, height=3192.0 * 5.9375 / 1000.0)
        )
class VersioningTypesTestCases(unittest.TestCase):
    """Tests for types for versioning protocols."""

    def test_versioning_type(self):
        valid_name: str = "OpenTrackIO"
        valid_major_version = 0
        valid_minor_version = 9
        valid_patch_version = 2
        valid_version = (valid_major_version, valid_minor_version, valid_patch_version)

        # Both arguments are required
        with self.assertRaises(TypeError):
            VersionedProtocol()
        # A blank protocol name is rejected
        with self.assertRaises(ValidationError):
            VersionedProtocol("", valid_version)
        # Versions must be integer triples: strings (even well-formed ones),
        # floats, complex values (creative thinking), and tuples of the
        # wrong arity are all rejected
        for bad_version in ("foo",
                            "0.9.2",
                            "10.9.2",
                            "0.10.2",
                            "0.9.10",
                            0.92,
                            0.9 + 2j,
                            (),
                            (valid_major_version,),
                            (valid_major_version,
                             valid_minor_version),
                            (valid_major_version,
                             valid_minor_version,
                             valid_patch_version,
                             valid_patch_version)):
            with self.assertRaises(ValidationError):
                VersionedProtocol(valid_name, bad_version)
        # A non-string name fails even when the version is valid
        with self.assertRaises(ValueError):
            VersionedProtocol(reversed(valid_name), valid_version)
-------------------------------------------------------------------------------- /src/test/resources/bmd/README.txt: -------------------------------------------------------------------------------- 1 | Extracted from the Camera Original File of [Boy and the 2 | watch](https://www.blackmagicdesign.com/products/blackmagicursaminipro/gallery) 3 | using the ExtractMetadata.exe sample executable from the RAW SDK 2.7. -------------------------------------------------------------------------------- /src/test/resources/bmd/metadata.txt: -------------------------------------------------------------------------------- 1 | 2 | Clip Metadata 3 | manufacturer: Blackmagic Design 4 | camera_id: 7ef33b36-a5ba-4a04-b218-0afc7eb1f8b6 5 | camera_type: Blackmagic URSA Mini Pro 12K 6 | firmware_version: 7.2.1 7 | braw_compression_ratio: 12:1 8 | braw_codec_bitrate: 1060405248 9 | sensor_area_captured: 7680,6408, 10 | crop_origin: 16,8, 11 | crop_size: 5120,4272, 12 | analog_gain: 1 13 | analog_gain_is_constant: 1 14 | multicard_volume_number: 1 15 | multicard_volume_count: 1 16 | format_frame_rate: 1572864 17 | clip_number: A001_04190112_C102 18 | reel_name: 1 19 | scene: 1 20 | take: 99 21 | good_take: false 22 | environment: interior 23 | day_night: day 24 | shutter_type: Angle 25 | lens_type: Cooke Anamorphic /i Prime Lens 50mm 26 | camera_number: A 27 | anamorphic: 2x 28 | anamorphic_enable: 1 29 | tone_curve_contrast: 1 30 | tone_curve_saturation: 1 31 | tone_curve_midpoint: 0.409008 32 | tone_curve_highlights: 1 33 | tone_curve_shadows: 1 34 | tone_curve_video_black_level: 0 35 | post_3dlut_mode: Disabled 36 | post_3dlut_embedded_name: Ursa_Mini_Pro_Gen5_Natural_Rec709_V1.cube 37 | post_3dlut_embedded_title: Generated by Resolve 38 | post_3dlut_embedded_size: 33 39 | post_3dlut_embedded_data: 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 40 | viewing_gamma: Blackmagic Design Film 41 | viewing_gamut: Blackmagic Design 42 | viewing_bmdgen: 5 43 | date_recorded: 
2021:04:19 44 | 45 | Frame 0 Metadata 46 | sensor_rate: 48,1, 47 | shutter_value: 180° 48 | internal_nd: 6 49 | analog_gain: 1 50 | as_shot_kelvin: 0 51 | as_shot_tint: 0 52 | aperture: T2.3 53 | exposure: 0 54 | focal_length: 50mm 55 | distance: 991mm 56 | iso: 800 57 | white_balance_kelvin: 6000 58 | white_balance_tint: 0 59 | -------------------------------------------------------------------------------- /src/test/resources/canon/20221007_TNumber_CanonCameraMetadata_Frames.csv: -------------------------------------------------------------------------------- 1 | Timecode,Daylight_Saving_Time_Flag,Time_Zone_Hour_Value_Sign,Time_Zone_Minute_Value,Time_Zone_Hour_Value,Record_Date_Year,Record_Date_Month,Record_Date_Day,Record_Time_Hour,Record_Time_Minute,Record_Time_Second,Record_Time_MilliSecond,Binary_Group1,Binary_Group2,Binary_Group3,Binary_Group4,Binary_Group5,Binary_Group6,Binary_Group7,Binary_Group8,FocusPosition,FocalLength,FocalLength35mm,ElectricZoom,ApertureNumber,ExposureTime,PhotographicSensitivity,NDFilter,DynamicRange,ApertureMode,ExposureTimeMode,PhotographicSensitivityMode,NDFilterMode,WB_Mode,WB_Preset,AccurateFlag,ColorCompensation,ColorTemperature,DevelopSizeH,DevelopSizeV,WB_ShockLess,AWB_Response,IR_Status,ChromaticAberration,PeripheralIllumination,Diffraction,CP_Preset,CP_Gamma,CP_Color_Space,CP_ColorMatrix,CP_OtherSettingsActivate,CP_Master_PED,CP_Master_BLK_R,CP_Master_BLK_G,CP_Master_BLK_B,CP_Black_Gamma_Level,CP_Black_Gamma_Range,CP_Black_Gamma_Point,CP_Low_Key_SAT_Enable,CP_Low_Key_SAT_Level,CP_KneeEnable,CP_KneeSlope,CP_KneePoint,CP_Knee_SAT,CP_SharpnessLevel,CP_SharpnessH_Detail_FREQ,CP_SharpnessCoringLevel,CP_SharpnessCoringD_Offset,CP_SharpnessCoringD_Curve,CP_SharpnessCoringD_Depth,CP_Sharpness_DTL_HV_BAL,CP_SharpnessLimit,CP_SharpnessSelect,CP_SharpnessKneeAPT_Gain,CP_SharpnessKneeAPT_Slope,CP_SharpnessD_SharpLevel,CP_SharpnessD_SharpSlope,CP_SharpnessD_SharpOffset,CP_NoiseReductionSet,CP_SkinDetailEffectLevel,CP_SkinDetailHue,CP_Ski
nDetailChroma,CP_SkinDetailArea,CP_SkinDetailY_Level,CP_SelectiveNR_EffectLevel,CP_Selective_NR_Hue,CP_SelectiveNR_Chroma,CP_SelectiveNR_Area,CP_SelectiveNR_Y_Level,CP_ColorMat_Color_Gain,CP_ColorMat_Color_Phase,CP_ColorMat_R_GMatrix,CP_ColorMat_R_BMatrix,CP_ColorMat_G_RMatrix,CP_ColorMat_G_BMatrix,CP_ColorMat_B_RMatrix,CP_ColorMat_B_GMatrix,CP_White_Bal_RGain,CP_White_Bal_BGain,CP_Color_Corr_Correct,CP_Color_Corr_A_Area_SEL_Color_Phase,CP_Color_Corr_A_Area_SEL_Chroma,CP_Color_Corr_A_Area_SEL_Area,CP_Color_Corr_A_Area_SEL_Y_Level,CP_Color_Corr_A_Area_REV_Color_Level,CP_Color_Corr_A_Area_REV_Color_Phase,CP_Color_Corr_B_Area_SEL_Color_Phase,CP_Color_Corr_B_Area_SEL_Chroma,CP_Color_Corr_B_Area_SEL_Area,CP_Color_Corr_B_Area_SEL_Y_Level,CP_Color_Corr_B_Area_REV_Color_Level,CP_Color_Corr_B_Area_REV_Color_Phase,CP_OthersSetupLVL_Level,CP_OthersOver100Percent,CP_HLG_Color,CP_Sharpness_Detail_FREQ,FocusMode,ImageStabilzer,CP_LookFile,ISOMode,BaseISO 2 | 0:00:00:00,0,0,0,1,2022,1,1,0,1,46,65535,0,0,0,0,0,0,0,0,3F000000,180/10,263/10,65535/65535,45/10,18000/100,2147485248/1,65535/65535,409600,2,2,1,255,2,255,0,128,5600,65535,65535,0,1,0,0,0,0,255,2,0,5,255,50,50,50,50,50,20,20,0,50,0,35,45,10,10,255,30,255,255,255,255,50,255,255,255,255,255,255,1,0,16,16,16,16,255,255,255,255,255,50,18,50,50,50,50,50,50,50,50,0,0,16,16,16,50,18,0,16,16,16,50,18,255,0,1,8,255,0,0,0,800 3 | -------------------------------------------------------------------------------- /src/test/resources/canon/20221007_TNumber_CanonCameraMetadata_Static.csv: -------------------------------------------------------------------------------- 1 | SceneNumber,TakeNumber,ShotNumber,ReelNumber,LensSqueezeFactor,UMID,CreationTime,Timescale,Duration,Width,Height,BitDepth,AudioType,AudioChannel,AudioSampleSize,AudioSampleRate 2 | ,,A001C001,1,0,060A2B340101010501010D4313000000CD75881A79950580000085745900200003,2021/12/31 23:01,30000,135135,5952,3140,12,LPCM,4,24,48000 3 | 
-------------------------------------------------------------------------------- /src/test/resources/canon/README.txt: -------------------------------------------------------------------------------- 1 | Files provided by Hitoshi Yoshida via email on Fri, Oct 21, 2022 -------------------------------------------------------------------------------- /src/test/resources/classic/examples/complete_dynamic_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "tracker": { 3 | "notes": "Example generated sample.", 4 | "recording": false, 5 | "slate": "A101_A_4", 6 | "status": "Optical Good" 7 | }, 8 | "timing": { 9 | "mode": "internal", 10 | "recordedTimestamp": { 11 | "seconds": 1718806000, 12 | "nanoseconds": 500000000 13 | }, 14 | "sampleRate": { 15 | "num": 24000, 16 | "denom": 1001 17 | }, 18 | "sampleTimestamp": { 19 | "seconds": 1718806554, 20 | "nanoseconds": 500000000 21 | }, 22 | "sequenceNumber": 0, 23 | "synchronization": { 24 | "locked": true, 25 | "source": "ptp", 26 | "frequency": { 27 | "num": 24000, 28 | "denom": 1001 29 | }, 30 | "present": true, 31 | "ptp": { 32 | "profile": "SMPTE ST2059-2:2021", 33 | "domain": 1, 34 | "leaderIdentity": "00:11:22:33:44:55", 35 | "leaderPriorities": { 36 | "priority1": 128, 37 | "priority2": 128 38 | }, 39 | "leaderAccuracy": 5e-08, 40 | "leaderTimeSource": "GNSS", 41 | "meanPathDelay": 0.000123, 42 | "vlan": 100 43 | } 44 | }, 45 | "timecode": { 46 | "hours": 1, 47 | "minutes": 2, 48 | "seconds": 3, 49 | "frames": 4, 50 | "frameRate": { 51 | "num": 24000, 52 | "denom": 1001 53 | } 54 | } 55 | }, 56 | "lens": { 57 | "custom": [ 58 | 1.0, 59 | 2.0 60 | ], 61 | "distortion": [ 62 | { 63 | "model": "Brown-Conrady U-D", 64 | "radial": [ 65 | 1.0, 66 | 2.0, 67 | 3.0, 68 | 4.0, 69 | 5.0, 70 | 6.0 71 | ], 72 | "tangential": [ 73 | 1.0, 74 | 2.0 75 | ], 76 | "overscan": 3.0 77 | }, 78 | { 79 | "radial": [ 80 | 1.0, 81 | 2.0, 82 | 3.0, 83 | 4.0, 84 | 5.0, 85 | 6.0 86 | ], 87 | 
"tangential": [ 88 | 1.0, 89 | 2.0 90 | ], 91 | "overscan": 2.0 92 | } 93 | ], 94 | "distortionOffset": { 95 | "x": 1.0, 96 | "y": 2.0 97 | }, 98 | "encoders": { 99 | "focus": 0.1, 100 | "iris": 0.2, 101 | "zoom": 0.3 102 | }, 103 | "entrancePupilOffset": 0.123, 104 | "exposureFalloff": { 105 | "a1": 1.0, 106 | "a2": 2.0, 107 | "a3": 3.0 108 | }, 109 | "fStop": 4.0, 110 | "pinholeFocalLength": 24.305, 111 | "focusDistance": 10.0, 112 | "projectionOffset": { 113 | "x": 0.1, 114 | "y": 0.2 115 | }, 116 | "rawEncoders": { 117 | "focus": 1000, 118 | "iris": 2000, 119 | "zoom": 3000 120 | }, 121 | "tStop": 4.1 122 | }, 123 | "protocol": { 124 | "name": "OpenTrackIO", 125 | "version": [ 126 | 0, 127 | 9, 128 | 3 129 | ] 130 | }, 131 | "sampleId": "urn:uuid:a4034319-eea8-4fb5-9567-90047d82bbf1", 132 | "sourceId": "urn:uuid:fc9bc480-02bd-4bd0-96c7-bce47197f720", 133 | "sourceNumber": 1, 134 | "relatedSampleIds": [ 135 | "urn:uuid:5210332e-0292-4f8e-a606-a23ebe2ae18e", 136 | "urn:uuid:1cc04717-7d46-4d0b-ad42-5c0d8c1a5c40" 137 | ], 138 | "globalStage": { 139 | "E": 100.0, 140 | "N": 200.0, 141 | "U": 300.0, 142 | "lat0": 100.0, 143 | "lon0": 200.0, 144 | "h0": 300.0 145 | }, 146 | "transforms": [ 147 | { 148 | "translation": { 149 | "x": 1.0, 150 | "y": 2.0, 151 | "z": 3.0 152 | }, 153 | "rotation": { 154 | "pan": 180.0, 155 | "tilt": 90.0, 156 | "roll": 45.0 157 | }, 158 | "id": "Dolly" 159 | }, 160 | { 161 | "translation": { 162 | "x": 1.0, 163 | "y": 2.0, 164 | "z": 3.0 165 | }, 166 | "rotation": { 167 | "pan": 180.0, 168 | "tilt": 90.0, 169 | "roll": 45.0 170 | }, 171 | "scale": { 172 | "x": 1.0, 173 | "y": 2.0, 174 | "z": 3.0 175 | }, 176 | "id": "Crane Arm" 177 | }, 178 | { 179 | "translation": { 180 | "x": 1.0, 181 | "y": 2.0, 182 | "z": 3.0 183 | }, 184 | "rotation": { 185 | "pan": 180.0, 186 | "tilt": 90.0, 187 | "roll": 45.0 188 | }, 189 | "scale": { 190 | "x": 1.0, 191 | "y": 2.0, 192 | "z": 3.0 193 | }, 194 | "id": "Camera" 195 | } 196 | ], 197 | "custom": { 198 
| "pot1": 2435, 199 | "button1": false 200 | } 201 | } -------------------------------------------------------------------------------- /src/test/resources/classic/examples/complete_static_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "static": { 3 | "duration": { 4 | "num": 1, 5 | "denom": 25 6 | }, 7 | "camera": { 8 | "captureFrameRate": { 9 | "num": 24000, 10 | "denom": 1001 11 | }, 12 | "activeSensorPhysicalDimensions": { 13 | "height": 24.0, 14 | "width": 36.0 15 | }, 16 | "activeSensorResolution": { 17 | "height": 2160, 18 | "width": 3840 19 | }, 20 | "make": "CameraMaker", 21 | "model": "Model20", 22 | "serialNumber": "1234567890A", 23 | "firmwareVersion": "1.2.3", 24 | "label": "A", 25 | "anamorphicSqueeze": { 26 | "num": 1, 27 | "denom": 1 28 | }, 29 | "isoSpeed": 4000, 30 | "fdlLink": "urn:uuid:62ea03ac-ce56-43c6-ab8d-a9ec8a9ee7b3", 31 | "shutterAngle": 45.0 32 | }, 33 | "lens": { 34 | "distortionOverscanMax": 1.2, 35 | "undistortionOverscanMax": 1.3, 36 | "make": "LensMaker", 37 | "model": "Model15", 38 | "serialNumber": "1234567890A", 39 | "nominalFocalLength": 14.0, 40 | "calibrationHistory": [ 41 | "LensMaker 123", 42 | "TrackerMaker 123" 43 | ] 44 | }, 45 | "tracker": { 46 | "make": "TrackerMaker", 47 | "model": "Tracker", 48 | "serialNumber": "1234567890A", 49 | "firmwareVersion": "1.2.3" 50 | } 51 | }, 52 | "tracker": { 53 | "notes": "Example generated sample.", 54 | "recording": false, 55 | "slate": "A101_A_4", 56 | "status": "Optical Good" 57 | }, 58 | "timing": { 59 | "mode": "internal", 60 | "recordedTimestamp": { 61 | "seconds": 1718806000, 62 | "nanoseconds": 500000000 63 | }, 64 | "sampleRate": { 65 | "num": 24000, 66 | "denom": 1001 67 | }, 68 | "sampleTimestamp": { 69 | "seconds": 1718806554, 70 | "nanoseconds": 500000000 71 | }, 72 | "sequenceNumber": 0, 73 | "synchronization": { 74 | "locked": true, 75 | "source": "ptp", 76 | "frequency": { 77 | "num": 24000, 78 | "denom": 1001 
79 | }, 80 | "present": true, 81 | "ptp": { 82 | "profile": "SMPTE ST2059-2:2021", 83 | "domain": 1, 84 | "leaderIdentity": "00:11:22:33:44:55", 85 | "leaderPriorities": { 86 | "priority1": 128, 87 | "priority2": 128 88 | }, 89 | "leaderAccuracy": 5e-08, 90 | "leaderTimeSource": "GNSS", 91 | "meanPathDelay": 0.000123, 92 | "vlan": 100 93 | } 94 | }, 95 | "timecode": { 96 | "hours": 1, 97 | "minutes": 2, 98 | "seconds": 3, 99 | "frames": 4, 100 | "frameRate": { 101 | "num": 24000, 102 | "denom": 1001 103 | } 104 | } 105 | }, 106 | "lens": { 107 | "custom": [ 108 | 1.0, 109 | 2.0 110 | ], 111 | "distortion": [ 112 | { 113 | "model": "Brown-Conrady U-D", 114 | "radial": [ 115 | 1.0, 116 | 2.0, 117 | 3.0, 118 | 4.0, 119 | 5.0, 120 | 6.0 121 | ], 122 | "tangential": [ 123 | 1.0, 124 | 2.0 125 | ], 126 | "overscan": 3.0 127 | }, 128 | { 129 | "radial": [ 130 | 1.0, 131 | 2.0, 132 | 3.0, 133 | 4.0, 134 | 5.0, 135 | 6.0 136 | ], 137 | "tangential": [ 138 | 1.0, 139 | 2.0 140 | ], 141 | "overscan": 2.0 142 | } 143 | ], 144 | "distortionOffset": { 145 | "x": 1.0, 146 | "y": 2.0 147 | }, 148 | "encoders": { 149 | "focus": 0.1, 150 | "iris": 0.2, 151 | "zoom": 0.3 152 | }, 153 | "entrancePupilOffset": 0.123, 154 | "exposureFalloff": { 155 | "a1": 1.0, 156 | "a2": 2.0, 157 | "a3": 3.0 158 | }, 159 | "fStop": 4.0, 160 | "pinholeFocalLength": 24.305, 161 | "focusDistance": 10.0, 162 | "projectionOffset": { 163 | "x": 0.1, 164 | "y": 0.2 165 | }, 166 | "rawEncoders": { 167 | "focus": 1000, 168 | "iris": 2000, 169 | "zoom": 3000 170 | }, 171 | "tStop": 4.1 172 | }, 173 | "protocol": { 174 | "name": "OpenTrackIO", 175 | "version": [ 176 | 0, 177 | 9, 178 | 3 179 | ] 180 | }, 181 | "sampleId": "urn:uuid:e3f34dae-b68c-47d9-99af-e0bfc277723c", 182 | "sourceId": "urn:uuid:889eec71-62ca-4c7f-b1ef-4c2b16f4eacf", 183 | "sourceNumber": 1, 184 | "relatedSampleIds": [ 185 | "urn:uuid:b545b989-34b2-452a-9630-cf952d021a3f", 186 | "urn:uuid:dd8ce967-21f2-444e-a007-5eb33333ca51" 187 | ], 188 | 
"globalStage": { 189 | "E": 100.0, 190 | "N": 200.0, 191 | "U": 300.0, 192 | "lat0": 100.0, 193 | "lon0": 200.0, 194 | "h0": 300.0 195 | }, 196 | "transforms": [ 197 | { 198 | "translation": { 199 | "x": 1.0, 200 | "y": 2.0, 201 | "z": 3.0 202 | }, 203 | "rotation": { 204 | "pan": 180.0, 205 | "tilt": 90.0, 206 | "roll": 45.0 207 | }, 208 | "id": "Dolly" 209 | }, 210 | { 211 | "translation": { 212 | "x": 1.0, 213 | "y": 2.0, 214 | "z": 3.0 215 | }, 216 | "rotation": { 217 | "pan": 180.0, 218 | "tilt": 90.0, 219 | "roll": 45.0 220 | }, 221 | "scale": { 222 | "x": 1.0, 223 | "y": 2.0, 224 | "z": 3.0 225 | }, 226 | "id": "Crane Arm" 227 | }, 228 | { 229 | "translation": { 230 | "x": 1.0, 231 | "y": 2.0, 232 | "z": 3.0 233 | }, 234 | "rotation": { 235 | "pan": 180.0, 236 | "tilt": 90.0, 237 | "roll": 45.0 238 | }, 239 | "scale": { 240 | "x": 1.0, 241 | "y": 2.0, 242 | "z": 3.0 243 | }, 244 | "id": "Camera" 245 | } 246 | ], 247 | "custom": { 248 | "pot1": 2435, 249 | "button1": false 250 | } 251 | } -------------------------------------------------------------------------------- /src/test/resources/classic/examples/recommended_dynamic_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "tracker": { 3 | "notes": "Example generated sample.", 4 | "recording": false, 5 | "slate": "A101_A_4", 6 | "status": "Optical Good" 7 | }, 8 | "timing": { 9 | "mode": "external", 10 | "sampleRate": { 11 | "num": 24000, 12 | "denom": 1001 13 | }, 14 | "timecode": { 15 | "hours": 1, 16 | "minutes": 2, 17 | "seconds": 3, 18 | "frames": 4, 19 | "frameRate": { 20 | "num": 24000, 21 | "denom": 1001 22 | } 23 | } 24 | }, 25 | "lens": { 26 | "distortion": [ 27 | { 28 | "radial": [ 29 | 1.0, 30 | 2.0, 31 | 3.0 32 | ], 33 | "tangential": [ 34 | 1.0, 35 | 2.0 36 | ], 37 | "overscan": 3.1 38 | } 39 | ], 40 | "encoders": { 41 | "focus": 0.1, 42 | "iris": 0.2, 43 | "zoom": 0.3 44 | }, 45 | "entrancePupilOffset": 0.123, 46 | "fStop": 4.0, 47 | 
"pinholeFocalLength": 24.305, 48 | "focusDistance": 10.0, 49 | "projectionOffset": { 50 | "x": 0.1, 51 | "y": 0.2 52 | } 53 | }, 54 | "protocol": { 55 | "name": "OpenTrackIO", 56 | "version": [ 57 | 0, 58 | 9, 59 | 3 60 | ] 61 | }, 62 | "sampleId": "urn:uuid:cd5677eb-b18d-4746-9dc3-2890c0711d52", 63 | "sourceId": "urn:uuid:cd3713e1-cc63-4d7e-83d7-c984a6395d5f", 64 | "sourceNumber": 1, 65 | "transforms": [ 66 | { 67 | "translation": { 68 | "x": 1.0, 69 | "y": 2.0, 70 | "z": 3.0 71 | }, 72 | "rotation": { 73 | "pan": 180.0, 74 | "tilt": 90.0, 75 | "roll": 45.0 76 | }, 77 | "id": "Camera" 78 | } 79 | ] 80 | } -------------------------------------------------------------------------------- /src/test/resources/classic/examples/recommended_static_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "static": { 3 | "camera": { 4 | "activeSensorPhysicalDimensions": { 5 | "height": 24.0, 6 | "width": 36.0 7 | }, 8 | "label": "A" 9 | }, 10 | "lens": { 11 | "make": "LensMaker", 12 | "model": "Model15" 13 | } 14 | }, 15 | "tracker": { 16 | "notes": "Example generated sample.", 17 | "recording": false, 18 | "slate": "A101_A_4", 19 | "status": "Optical Good" 20 | }, 21 | "timing": { 22 | "mode": "external", 23 | "sampleRate": { 24 | "num": 24000, 25 | "denom": 1001 26 | }, 27 | "timecode": { 28 | "hours": 1, 29 | "minutes": 2, 30 | "seconds": 3, 31 | "frames": 4, 32 | "frameRate": { 33 | "num": 24000, 34 | "denom": 1001 35 | } 36 | } 37 | }, 38 | "lens": { 39 | "distortion": [ 40 | { 41 | "radial": [ 42 | 1.0, 43 | 2.0, 44 | 3.0 45 | ], 46 | "tangential": [ 47 | 1.0, 48 | 2.0 49 | ], 50 | "overscan": 3.1 51 | } 52 | ], 53 | "encoders": { 54 | "focus": 0.1, 55 | "iris": 0.2, 56 | "zoom": 0.3 57 | }, 58 | "entrancePupilOffset": 0.123, 59 | "fStop": 4.0, 60 | "pinholeFocalLength": 24.305, 61 | "focusDistance": 10.0, 62 | "projectionOffset": { 63 | "x": 0.1, 64 | "y": 0.2 65 | } 66 | }, 67 | "protocol": { 68 | "name": 
"OpenTrackIO", 69 | "version": [ 70 | 0, 71 | 9, 72 | 3 73 | ] 74 | }, 75 | "sampleId": "urn:uuid:582cbfff-b9c4-4978-8a5e-45bf519e4a89", 76 | "sourceId": "urn:uuid:327811dd-4d85-4e2e-9721-9ceb1d252a17", 77 | "sourceNumber": 1, 78 | "transforms": [ 79 | { 80 | "translation": { 81 | "x": 1.0, 82 | "y": 2.0, 83 | "z": 3.0 84 | }, 85 | "rotation": { 86 | "pan": 180.0, 87 | "tilt": 90.0, 88 | "roll": 45.0 89 | }, 90 | "id": "Camera" 91 | } 92 | ] 93 | } -------------------------------------------------------------------------------- /src/test/resources/classic/subschemas/lens.json: -------------------------------------------------------------------------------- 1 | { 2 | "additionalProperties": false, 3 | "properties": { 4 | "custom": { 5 | "description": "This list provides optional additional custom coefficients that can \nextend the existing lens model. The meaning of and how these characteristics\nare to be applied to a virtual camera would require negotiation between a\nparticular producer and consumer.\n", 6 | "items": { 7 | "type": "number" 8 | }, 9 | "type": "array" 10 | }, 11 | "distortion": { 12 | "description": "A list of Distortion objects that each define the coefficients for\ncalculating the distortion characteristics of a lens comprising radial\ndistortion coefficients of the spherical distortion (k1-N) and \n(optionally) the tangential distortion (p1-N). The key 'model'\nnames the distortion model. Typical values for 'model' include \n\"Brown-Conrady D-U\" when mapping distorted to undistorted coordinates,\nand \"Brown-Conrady U-D\" when mapping undistorted to undistorted\ncoordinates. If not provided, the default model is \"Brown-Conrady D-U\".\n", 13 | "items": { 14 | "additionalProperties": false, 15 | "properties": { 16 | "model": { 17 | "maxLength": 1023, 18 | "minLength": 1, 19 | "type": "string" 20 | }, 21 | "overscan": { 22 | "description": "Overscan factor on lens [un]distortion. 
Overscan may be provided by the\nproducer but can also be overriden or calculated by the consumer. Note\nthis should be the maximum of both projection-matrix-based and field-of-\nview-based rendering as per the OpenLensIO documentation.\n", 23 | "minimum": 1.0, 24 | "type": "number" 25 | }, 26 | "radial": { 27 | "items": { 28 | "type": "number" 29 | }, 30 | "minItems": 1, 31 | "type": "array" 32 | }, 33 | "tangential": { 34 | "items": { 35 | "type": "number" 36 | }, 37 | "minItems": 1, 38 | "type": "array" 39 | } 40 | }, 41 | "required": [ 42 | "radial" 43 | ], 44 | "type": "object" 45 | }, 46 | "minItems": 1, 47 | "type": "array" 48 | }, 49 | "distortionOffset": { 50 | "additionalProperties": false, 51 | "description": "Offset in x and y of the centre of distortion of the virtual camera", 52 | "properties": { 53 | "x": { 54 | "type": "number" 55 | }, 56 | "y": { 57 | "type": "number" 58 | } 59 | }, 60 | "required": [ 61 | "x", 62 | "y" 63 | ], 64 | "type": "object", 65 | "units": "millimeter" 66 | }, 67 | "encoders": { 68 | "additionalProperties": false, 69 | "anyOf": [ 70 | { 71 | "required": [ 72 | "focus" 73 | ] 74 | }, 75 | { 76 | "required": [ 77 | "iris" 78 | ] 79 | }, 80 | { 81 | "required": [ 82 | "zoom" 83 | ] 84 | } 85 | ], 86 | "description": "Normalised real numbers (0-1) for focus, iris and zoom.\nEncoders are represented in this way (as opposed to raw integer\nvalues) to ensure values remain independent of encoder resolution,\nminimum and maximum (at an acceptable loss of precision).\nThese values are only relevant in lenses with end-stops that\ndemarcate the 0 and 1 range.\nValue should be provided in the following directions (if known):\nFocus: 0=infinite 1=closest\nIris: 0=open 1=closed\nZoom: 0=wide angle 1=telephoto\n", 87 | "properties": { 88 | "focus": { 89 | "maximum": 1.0, 90 | "minimum": 0.0, 91 | "type": "number" 92 | }, 93 | "iris": { 94 | "maximum": 1.0, 95 | "minimum": 0.0, 96 | "type": "number" 97 | }, 98 | "zoom": { 99 | "maximum": 
1.0, 100 | "minimum": 0.0, 101 | "type": "number" 102 | } 103 | }, 104 | "type": "object" 105 | }, 106 | "entrancePupilOffset": { 107 | "description": "Offset of the entrance pupil relative to the nominal imaging plane\n(positive if the entrance pupil is located on the side of the nominal\nimaging plane that is towards the object, and negative otherwise).\nMeasured in meters as in a render engine it is often applied in the\nvirtual camera's transform chain.\n", 108 | "type": "number", 109 | "units": "meter" 110 | }, 111 | "exposureFalloff": { 112 | "additionalProperties": false, 113 | "description": "Coefficients for calculating the exposure fall-off (vignetting) of\na lens\n", 114 | "properties": { 115 | "a1": { 116 | "type": "number" 117 | }, 118 | "a2": { 119 | "type": "number" 120 | }, 121 | "a3": { 122 | "type": "number" 123 | } 124 | }, 125 | "required": [ 126 | "a1" 127 | ], 128 | "type": "object" 129 | }, 130 | "fStop": { 131 | "description": "The linear f-number of the lens, equal to the focal length divided\nby the diameter of the entrance pupil.\n", 132 | "exclusiveMinimum": 0.0, 133 | "type": "number" 134 | }, 135 | "focusDistance": { 136 | "description": "Focus distance/position of the lens", 137 | "exclusiveMinimum": 0.0, 138 | "type": "number", 139 | "units": "meter" 140 | }, 141 | "pinholeFocalLength": { 142 | "description": "Distance between the pinhole and the image plane in the simple CGI pinhole camera model.", 143 | "exclusiveMinimum": 0.0, 144 | "type": "number", 145 | "units": "millimeter" 146 | }, 147 | "projectionOffset": { 148 | "additionalProperties": false, 149 | "description": "Offset in x and y of the centre of perspective projection of the\nvirtual camera\n", 150 | "properties": { 151 | "x": { 152 | "type": "number" 153 | }, 154 | "y": { 155 | "type": "number" 156 | } 157 | }, 158 | "required": [ 159 | "x", 160 | "y" 161 | ], 162 | "type": "object", 163 | "units": "millimeter" 164 | }, 165 | "rawEncoders": { 166 | 
"additionalProperties": false, 167 | "anyOf": [ 168 | { 169 | "required": [ 170 | "focus" 171 | ] 172 | }, 173 | { 174 | "required": [ 175 | "iris" 176 | ] 177 | }, 178 | { 179 | "required": [ 180 | "zoom" 181 | ] 182 | } 183 | ], 184 | "description": "Raw encoder values for focus, iris and zoom.\nThese values are dependent on encoder resolution and before any\nhoming / ranging has taken place.\n", 185 | "properties": { 186 | "focus": { 187 | "maximum": 4294967295, 188 | "minimum": 0, 189 | "type": "integer" 190 | }, 191 | "iris": { 192 | "maximum": 4294967295, 193 | "minimum": 0, 194 | "type": "integer" 195 | }, 196 | "zoom": { 197 | "maximum": 4294967295, 198 | "minimum": 0, 199 | "type": "integer" 200 | } 201 | }, 202 | "type": "object" 203 | }, 204 | "tStop": { 205 | "description": "Linear t-number of the lens, equal to the F-number of the lens\ndivided by the square root of the transmittance of the lens.\n", 206 | "exclusiveMinimum": 0.0, 207 | "type": "number" 208 | } 209 | }, 210 | "type": "object" 211 | } -------------------------------------------------------------------------------- /src/test/resources/classic/subschemas/static_camera.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "additionalProperties": false, 4 | "properties": { 5 | "activeSensorPhysicalDimensions": { 6 | "type": "object", 7 | "additionalProperties": false, 8 | "required": [ 9 | "height", 10 | "width" 11 | ], 12 | "properties": { 13 | "height": { 14 | "type": "number", 15 | "minimum": 0.0 16 | }, 17 | "width": { 18 | "type": "number", 19 | "minimum": 0.0 20 | } 21 | }, 22 | "description": "Height and width of the active area of the camera sensor in millimeters", 23 | "units": "millimeter" 24 | }, 25 | "activeSensorResolution": { 26 | "type": "object", 27 | "additionalProperties": false, 28 | "required": [ 29 | "height", 30 | "width" 31 | ], 32 | "properties": { 33 | "height": { 34 | "type": "integer", 35 | 
"minimum": 0, 36 | "maximum": 2147483647 37 | }, 38 | "width": { 39 | "type": "integer", 40 | "minimum": 0, 41 | "maximum": 2147483647 42 | } 43 | }, 44 | "description": "Photosite resolution of the active area of the camera sensor in pixels", 45 | "units": "pixel" 46 | }, 47 | "anamorphicSqueeze": { 48 | "type": "object", 49 | "properties": { 50 | "num": { 51 | "type": "integer", 52 | "minimum": 1, 53 | "maximum": 2147483647 54 | }, 55 | "denom": { 56 | "type": "integer", 57 | "minimum": 1, 58 | "maximum": 4294967295 59 | } 60 | }, 61 | "required": [ 62 | "num", 63 | "denom" 64 | ], 65 | "additionalProperties": false, 66 | "description": "Nominal ratio of height to width of the image of an axis-aligned\nsquare captured by the camera sensor. It can be used to de-squeeze\nimages but is not however an exact number over the entire captured\narea due to a lens' intrinsic analog nature.\n" 67 | }, 68 | "firmwareVersion": { 69 | "type": "string", 70 | "minLength": 1, 71 | "maxLength": 1023, 72 | "description": "Non-blank string identifying camera firmware version" 73 | }, 74 | "label": { 75 | "type": "string", 76 | "minLength": 1, 77 | "maxLength": 1023, 78 | "description": "Non-blank string containing user-determined camera identifier" 79 | }, 80 | "make": { 81 | "type": "string", 82 | "minLength": 1, 83 | "maxLength": 1023, 84 | "description": "Non-blank string naming camera manufacturer" 85 | }, 86 | "model": { 87 | "type": "string", 88 | "minLength": 1, 89 | "maxLength": 1023, 90 | "description": "Non-blank string identifying camera model" 91 | }, 92 | "serialNumber": { 93 | "type": "string", 94 | "minLength": 1, 95 | "maxLength": 1023, 96 | "description": "Non-blank string uniquely identifying the camera" 97 | }, 98 | "captureFrameRate": { 99 | "type": "object", 100 | "properties": { 101 | "num": { 102 | "type": "integer", 103 | "minimum": 1, 104 | "maximum": 2147483647 105 | }, 106 | "denom": { 107 | "type": "integer", 108 | "minimum": 1, 109 | "maximum": 
4294967295 110 | } 111 | }, 112 | "required": [ 113 | "num", 114 | "denom" 115 | ], 116 | "additionalProperties": false, 117 | "description": "Capture frame rate of the camera", 118 | "units": "hertz" 119 | }, 120 | "fdlLink": { 121 | "type": "string", 122 | "pattern": "^urn:uuid:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", 123 | "description": "URN identifying the ASC Framing Decision List used by the camera." 124 | }, 125 | "isoSpeed": { 126 | "type": "integer", 127 | "minimum": 1, 128 | "maximum": 4294967295, 129 | "description": "Arithmetic ISO scale as defined in ISO 12232" 130 | }, 131 | "shutterAngle": { 132 | "type": "number", 133 | "minimum": 0.0, 134 | "maximum": 360.0, 135 | "description": "Shutter speed as a fraction of the capture frame rate. The shutter\nspeed (in units of 1/s) is equal to the value of the parameter divided\nby 360 times the capture frame rate.\n", 136 | "units": "degree" 137 | } 138 | } 139 | } -------------------------------------------------------------------------------- /src/test/resources/classic/subschemas/static_duration.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "num": { 5 | "type": "integer", 6 | "minimum": 1, 7 | "maximum": 2147483647 8 | }, 9 | "denom": { 10 | "type": "integer", 11 | "minimum": 1, 12 | "maximum": 4294967295 13 | } 14 | }, 15 | "required": [ 16 | "num", 17 | "denom" 18 | ], 19 | "additionalProperties": false, 20 | "description": "Duration of the clip", 21 | "units": "second" 22 | } -------------------------------------------------------------------------------- /src/test/resources/classic/subschemas/static_lens.json: -------------------------------------------------------------------------------- 1 | { 2 | "additionalProperties": false, 3 | "properties": { 4 | "calibrationHistory": { 5 | "description": "List of free strings that describe the history of calibrations of the lens.", 6 | "items": { 
7 | "maxLength": 1023, 8 | "minLength": 1, 9 | "type": "string" 10 | }, 11 | "type": "array" 12 | }, 13 | "distortionOverscanMax": { 14 | "description": "Static maximum overscan factor on lens distortion. This is an\nalternative to providing dynamic overscan values each frame. Note it\nshould be the maximum of both projection-matrix-based and\nfield-of-view-based rendering as per the OpenLensIO documentation.\n", 15 | "minimum": 1.0, 16 | "type": "number" 17 | }, 18 | "firmwareVersion": { 19 | "description": "Non-blank string identifying lens firmware version", 20 | "maxLength": 1023, 21 | "minLength": 1, 22 | "type": "string" 23 | }, 24 | "make": { 25 | "description": "Non-blank string naming lens manufacturer", 26 | "maxLength": 1023, 27 | "minLength": 1, 28 | "type": "string" 29 | }, 30 | "model": { 31 | "description": "Non-blank string identifying lens model", 32 | "maxLength": 1023, 33 | "minLength": 1, 34 | "type": "string" 35 | }, 36 | "nominalFocalLength": { 37 | "description": "Nominal focal length of the lens. The number printed on the side\nof a prime lens, e.g. 50 mm, and undefined in the case of a zoom lens.\n", 38 | "exclusiveMinimum": 0.0, 39 | "type": "number", 40 | "units": "millimeter" 41 | }, 42 | "serialNumber": { 43 | "description": "Non-blank string uniquely identifying the lens", 44 | "maxLength": 1023, 45 | "minLength": 1, 46 | "type": "string" 47 | }, 48 | "undistortionOverscanMax": { 49 | "description": "Static maximum overscan factor on lens undistortion. This is an\nalternative to providing dynamic overscan values each frame. 
Note it\nshould be the maximum of both projection-matrix-based and\nfield-of-view-based rendering as per the OpenLensIO documentation.\n", 50 | "minimum": 1.0, 51 | "type": "number" 52 | } 53 | }, 54 | "type": "object" 55 | } -------------------------------------------------------------------------------- /src/test/resources/classic/subschemas/static_tracker.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "additionalProperties": false, 4 | "properties": { 5 | "firmwareVersion": { 6 | "type": "string", 7 | "minLength": 1, 8 | "maxLength": 1023, 9 | "description": "Non-blank string identifying tracking device firmware version" 10 | }, 11 | "make": { 12 | "type": "string", 13 | "minLength": 1, 14 | "maxLength": 1023, 15 | "description": "Non-blank string naming tracking device manufacturer" 16 | }, 17 | "model": { 18 | "type": "string", 19 | "minLength": 1, 20 | "maxLength": 1023, 21 | "description": "Non-blank string identifying tracking device model" 22 | }, 23 | "serialNumber": { 24 | "type": "string", 25 | "minLength": 1, 26 | "maxLength": 1023, 27 | "description": "Non-blank string uniquely identifying the tracking device" 28 | } 29 | } 30 | } -------------------------------------------------------------------------------- /src/test/resources/classic/subschemas/tracker.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "additionalProperties": false, 4 | "properties": { 5 | "notes": { 6 | "type": "string", 7 | "minLength": 1, 8 | "maxLength": 1023, 9 | "description": "Non-blank string containing notes about tracking system" 10 | }, 11 | "recording": { 12 | "type": "boolean", 13 | "description": "Boolean indicating whether tracking system is recording data" 14 | }, 15 | "slate": { 16 | "type": "string", 17 | "minLength": 1, 18 | "maxLength": 1023, 19 | "description": "Non-blank string describing the recording slate" 20 | }, 21 | 
"status": { 22 | "type": "string", 23 | "minLength": 1, 24 | "maxLength": 1023, 25 | "description": "Non-blank string describing status of tracking system" 26 | } 27 | } 28 | } -------------------------------------------------------------------------------- /src/test/resources/classic/subschemas/transforms.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "array", 3 | "minItems": 1, 4 | "uniqueItems": false, 5 | "items": { 6 | "type": "object", 7 | "additionalProperties": false, 8 | "properties": { 9 | "translation": { 10 | "type": "object", 11 | "additionalProperties": false, 12 | "properties": { 13 | "x": { 14 | "type": "number" 15 | }, 16 | "y": { 17 | "type": "number" 18 | }, 19 | "z": { 20 | "type": "number" 21 | } 22 | }, 23 | "units": "meter" 24 | }, 25 | "rotation": { 26 | "type": "object", 27 | "additionalProperties": false, 28 | "properties": { 29 | "pan": { 30 | "type": "number" 31 | }, 32 | "tilt": { 33 | "type": "number" 34 | }, 35 | "roll": { 36 | "type": "number" 37 | } 38 | }, 39 | "units": "degree" 40 | }, 41 | "scale": { 42 | "type": "object", 43 | "additionalProperties": false, 44 | "properties": { 45 | "x": { 46 | "type": "number" 47 | }, 48 | "y": { 49 | "type": "number" 50 | }, 51 | "z": { 52 | "type": "number" 53 | } 54 | } 55 | }, 56 | "id": { 57 | "type": "string", 58 | "minLength": 1, 59 | "maxLength": 1023 60 | } 61 | }, 62 | "required": [ 63 | "translation", 64 | "rotation" 65 | ] 66 | }, 67 | "description": "A list of transforms.\nTransforms are composed in order with the last in the list representing\nthe X,Y,Z in meters of camera sensor relative to stage origin.\nThe Z axis points upwards and the coordinate system is right-handed.\nY points in the forward camera direction (when pan, tilt and roll are\nzero).\nFor example in an LED volume Y would point towards the centre of the\nLED wall and so X would point to camera-right.\nRotation expressed as euler angles in degrees of the 
camera sensor\nrelative to stage origin\nRotations are intrinsic and are measured around the axes ZXY, commonly\nreferred to as [pan, tilt, roll]\nNotes on Euler angles:\nEuler angles are human readable and unlike quarternions, provide the\nability for cycles (with angles >360 or <0 degrees).\nWhere a tracking system is providing the pose of a virtual camera,\ngimbal lock does not present the physical challenges of a robotic\nsystem.\nConversion to and from quarternions is trivial with an acceptable loss\nof precision.\n", 68 | "units": "meter / degree" 69 | } -------------------------------------------------------------------------------- /src/test/resources/mosys/A003_C001_01 15-03-47-01.f4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SMPTE/ris-osvp-metadata-camdkit/06c5de673cf32c3a3ae403da2ccfb74db881e4e5/src/test/resources/mosys/A003_C001_01 15-03-47-01.f4 -------------------------------------------------------------------------------- /src/test/resources/red/A001_C066_0303LZ_001.frames.csv: -------------------------------------------------------------------------------- 1 | FrameNo,Timecode,Timestamp,Aperture,Focus Distance,Focal Length,Acceleration X,Acceleration Y,Acceleration Z,Rotation X,Rotation Y,Rotation Z,Cooke Metadata 2 | 0,23:17:21:08,,5.600000,410,40,0.031000,0.078000,1.003000,-2.257000,4.497000,2.248000,64/40/40/46/68/48/70/B8/80/40/40/40/42/66/6D/40/40/46/5E/40/40/46/73/45/4E/41/7F/40/40/53/47/35/33/35/39/39/37/36/34/0A/0D 3 | 1,23:17:21:09,,5.600000,420,40,0.031000,0.078000,1.003000,-0.008000,2.248000,2.248000,64/40/40/46/74/48/70/B8/80/40/40/40/42/67/4B/40/40/46/69/40/40/47/40/45/4E/41/7E/40/40/53/47/35/33/35/39/39/37/36/34/0A/0D 4 | -------------------------------------------------------------------------------- /src/test/resources/red/A001_C066_0303LZ_001.static.csv: -------------------------------------------------------------------------------- 1 | Clip Name,File 
Path,File Name,Original File Name,ReelID,AltReelID,CamReelID,Camera,Camera Model,Camera Model ID,Camera Network Name,Camera PIN,Camera Position,Dropped Frame Count,Media Serial Number,Sensor Name,Sensor ID,Sensor OLPF Interchangeable,Sensor OLPF Name,Sensor Sensitivity Id,Sensor Sensitivity Name,Monitor Sharpness,Reel,Clip,UUID,Date,Timestamp,Frame Width,Frame Height,FPS,Record FPS,Total Frames,Abs TC,Edge TC,End Abs TC,End Edge TC,Color Space,Gamma Space,Kelvin,Tint,ISO,Exposure,Saturation,Contrast,Brightness,Red Gain,Green Gain,Blue Gain,Luma Curve: Black X,Luma Curve: Black Y,Luma Curve: Toe X,Luma Curve: Toe Y,Luma Curve: Mid X,Luma Curve: Mid Y,Luma Curve: Knee X,Luma Curve: Knee Y,Luma Curve: White X,Luma Curve: White Y,Shutter (ms),Shutter (1/sec),Shutter (deg),Firmware Version,Firmware Revision,Camera Audio Channels,File Segments,Flip Horizontal,Flip Vertical,Rotation,HDR Mode,HDR Stops Over,HDR Blend Mode,HDR Blend Bias,Aperture,Focal Length,Focus Distance,Lens Mount,Lens,Lens Brand,Lens Name,Lens Owner,Lens Serial Number,Lens Focus Distance,Lens Focus Distance Near,Lens Focus Distance Far,Lens Cooke /i Static,Clip In,Clip Out,Notes,Owner,Project,Scene,Shot,Take,Unit,Production Name,Location,Director Of Photography,Director,Camera Operator,Circle,Copyright,Script Notes,Camera Notes,Edit Notes,Post Notes,Lens,Lens Height,MotionMountND,MotionMountShutter,Aperture,Focal Length,Focal Distance,Filter,Scene Description,Audio Timecode,Audio,Audio Slate,Video Slate,Frame Guide X,Frame Guide Y,Frame Guide Width,Frame Guide Height,Aspect Ratio,Aspect Ratio Numerator,Aspect Ratio Denominator,Frame Guide Name,Genlock Setting,Jamsync Setting,Linked Camera Setup,Pixel Aspect Ratio,Stereo Setup,REDCODE,Look Name,DRX,FLUT,Shadow,LGG: Lift: Red,LGG Lift: Green,LGG Lift: Blue,LGG Gamma: Red,LGG Gamma: Green,LGG Gamma: Blue,LGG Gain: Red,LGG Gain: Green,LGG Gain: Blue,Red Curve: Black X,Red Curve: Black Y,Red Curve: Toe X,Red Curve: Toe Y,Red Curve: Mid X,Red Curve: Mid 
Y,Red Curve: Knee X,Red Curve: Knee Y,Red Curve: White X,Red Curve: White Y,Green Curve: Black X,Green Curve: Black Y,Green Curve: Toe X,Green Curve: Toe Y,Green Curve: Mid X,Green Curve: Mid Y,Green Curve: Knee X,Green Curve: Knee Y,Green Curve: White X,Green Curve: White Y,Blue Curve: Black X,Blue Curve: Black Y,Blue Curve: Toe X,Blue Curve: Toe Y,Blue Curve: Mid X,Blue Curve: Mid Y,Blue Curve: Knee X,Blue Curve: Knee Y,Blue Curve: White X,Blue Curve: White Y,Clip Current Image Pipeline,Clip Original Image Pipeline,Exposure Adjust,Roll Off,Output Tone Map,Flashing Pixel Adjustment,CDL Enabled:,CDL Slope: Red,CDL Slope: Green,CDL Slope: Blue,CDL Offset: Red,CDL Offset: Green,CDL Offset: Blue,CDL Power: Red,CDL Power: Green,CDL Power: Blue,CDL Saturation,Camera 3D LUT,3D LUT,3D LUT Enabled,Bypass IPP2 Output Transform,Detail,OLPF Compensation,Denoise,WAV Filename 2 | A001_C066_0303LZ,Z:/projects/misc/camera-meta/camera-meta-repo/src/test/resources/red/A001_C066_0303LZ_001.R3D,A001_C066_0303LZ_001.R3D,A001_C066_0303LZ_001.R3D,001,A001_C066_0303LZ,A001,A,RANGER MONSTRO 8K VV,22,RANGER,130-27E-4B5,C,0,20181024AAUS08390041,MONSTRO 8K VV,11,1,STANDARD,,,0,001,066,2EE4BE6E-970F-44F5-9A76-E745B8AAD967,20220303,132146,4096,2160,24,24,2,23:17:21:08,01:20:52:10,23:17:21:09,01:20:52:11,25,34,5600,0,250,0,1,0,0,1,1,1,0.00000,0.00000,0.25000,0.25000,0.50000,0.50000,0.75000,0.75000,1.00000,1.00000,21,48,180,7.4.1,153431,0,1,0,0,0.000000,No HDR,,Off,Off,F5.6,40mm,410mm,P/L,,SIGMA ,40mm T1.5 FF | 018,SIGMA,G53599764,410mm,400mm,430mm,NSG53599764OSIGMA LPN040M040UIT94 B1.00,0,1,,,,,,066,,,,,,,,,,,,,,,,,F5.6,40mm,,,,,,,,0.0151404152,0.116720706,0.969970703,0.766666651,1.89999998,1.89999998,1,,Not Genlocked,External Clock,Not Linked,1,Not Stereo,REDcode 
8:1,,0,0,0,0,0,0,1,1,1,1,1,1,0.00000,0.00000,0.25000,0.25000,0.50000,0.50000,0.75000,0.75000,1.00000,1.00000,0.00000,0.00000,0.25000,0.25000,0.50000,0.50000,0.75000,0.75000,1.00000,1.00000,0.00000,0.00000,0.25000,0.25000,0.50000,0.50000,0.75000,0.75000,1.00000,1.00000,IPP2,IPP2,0,3,1,0,false,1,1,1,0,0,0,1,1,1,1,A001_C066_0303LZ-RED_FilmBias.cube,,false,false,High,Off,Off, 3 | -------------------------------------------------------------------------------- /src/test/resources/red/README.txt: -------------------------------------------------------------------------------- 1 | Generated using REDline software retrieved at : 2 | 3 | * `REDline --silent --i A001_C066_0303LZ_001.R3D --printMeta 3` 4 | * `REDline --silent --i A001_C066_0303LZ_001.R3D --printMeta 5` 5 | 6 | Camera file provided by Kris Prygrocki at 7 | . -------------------------------------------------------------------------------- /src/test/resources/venice/D001C005_210716AGM01.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | -------------------------------------------------------------------------------- /src/test/resources/venice/README.txt: -------------------------------------------------------------------------------- 1 | Retrieved from https://www.dropbox.com/sh/rznxvipcmjfbnk3/AAAhTRJO_yFVauiXBdeNiusIa/Sony%20Venice 2 | 3 | Provided by Kamata, Haji via email on Thu, Apr 21, 2022. 
4 | -------------------------------------------------------------------------------- /src/tools/python/generate_complete_dynamic_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''CLI tool to generate and validate JSON for an example OpenTrackIO complete dynamic metadata sample''' 8 | 9 | import json 10 | from camdkit.examples import get_complete_dynamic_example 11 | 12 | if __name__ == "__main__": 13 | print(json.dumps(get_complete_dynamic_example(), indent=2)) 14 | -------------------------------------------------------------------------------- /src/tools/python/generate_complete_static_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''CLI tool to generate and validate JSON for an example OpenTrackIO complete static metadata sample''' 8 | 9 | import json 10 | from camdkit.examples import get_complete_static_example 11 | 12 | if __name__ == "__main__": 13 | print(json.dumps(get_complete_static_example(), indent=2)) 14 | -------------------------------------------------------------------------------- /src/tools/python/generate_component_schemas.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | """Individual component schema for various sections""" 8 | import json 9 | 10 | from typing import Any 11 | from pathlib import Path 12 | 13 | from camdkit.model import Clip 14 | 15 | __all__ = 'SECTIONS_AND_FILENAMES' 16 | 17 | 
SECTIONS_AND_FILENAMES = { 18 | ('static', 'camera'): 'static_camera', 19 | ('static', 'duration'): 'static_duration', 20 | ('static', 'lens'): 'static_lens', 21 | ('static', 'tracker'): 'static_tracker', 22 | ('lens',): 'lens', 23 | ('timing',): 'timing', 24 | ('tracker',): 'tracker', 25 | ('transforms',): 'transforms' 26 | } 27 | 28 | def schema_for_section(schema: dict[str, Any], 29 | prop_names: tuple[str, ...]) -> dict[str, Any]: 30 | remaining_schema = schema 31 | for prop_name in prop_names: 32 | remaining_schema = remaining_schema["properties"][prop_name] 33 | return remaining_schema 34 | 35 | def write_schema(schema: dict[str, Any], 36 | output_filename, 37 | output_dir=Path('/tmp')) -> None: 38 | with open(output_dir / f"{output_filename}.json", 'w') as f: 39 | json.dump(schema, f, indent=2, sort_keys=True) 40 | 41 | def write_schemas() -> None: 42 | full_schema = Clip.make_json_schema() 43 | for section, filename in SECTIONS_AND_FILENAMES.items(): 44 | schema = schema_for_section(full_schema, section) 45 | write_schema(schema, filename) 46 | 47 | 48 | if __name__ == '__main__': 49 | write_schemas() 50 | -------------------------------------------------------------------------------- /src/tools/python/generate_opentrackio_schema.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import json 3 | import camdkit.model 4 | 5 | if __name__ == "__main__": 6 | schema = camdkit.model.Clip.make_json_schema() 7 | json.dump(schema, sys.stdout, indent=2) 8 | -------------------------------------------------------------------------------- /src/tools/python/generate_recommended_dynamic_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''CLI tool to generate and validate JSON for an example 
OpenTrackIO recommended dynamic metadata sample''' 8 | 9 | import json 10 | from camdkit.examples import get_recommended_dynamic_example 11 | 12 | if __name__ == "__main__": 13 | print(json.dumps(get_recommended_dynamic_example(), indent=2)) 14 | -------------------------------------------------------------------------------- /src/tools/python/generate_recommended_static_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | # Copyright Contributors to the SMTPE RIS OSVP Metadata Project 6 | 7 | '''CLI tool to generate and validate JSON for an example OpenTrackIO recommended static metadata sample''' 8 | 9 | import json 10 | from camdkit.examples import get_recommended_static_example 11 | 12 | if __name__ == "__main__": 13 | print(json.dumps(get_recommended_static_example(), indent=2)) 14 | -------------------------------------------------------------------------------- /src/tools/python/make_documentation.py: -------------------------------------------------------------------------------- 1 | import typing 2 | import sys 3 | import json 4 | import camdkit.model 5 | import camdkit.red.reader 6 | import camdkit.arri.reader 7 | import camdkit.venice.reader 8 | import camdkit.canon.reader 9 | import camdkit.mosys.reader 10 | 11 | _CLIP_INTRODUCTION = """# OSVP Clip Documentation 12 | 13 | ## Introduction 14 | 15 | The OSVP Clip (clip) is a collection of metadata parameters sampled over a 16 | specified duration. Each parameter is either: 17 | 18 | * static: the parameter has at constant value over the duration of the clip 19 | * dynamic: the parameter is sampled at regular intervals over the duration of the clip 20 | 21 | Each parameter is identified by a unique name. It also has a general description 22 | as well as a specific set of constraints. 
23 | 24 | The OSVP Frame (frame) is a collection of metadata parameters that is dynamic and has a 25 | synchronous relationship with a video frame. In an OSVP environment this describes live 26 | camera position ('tracking') and lens data. 27 | 28 | ## Clip Parameters 29 | 30 | """ 31 | 32 | _COVERAGE = { 33 | "RED" : [], 34 | "ARRI" : [], 35 | "Sony" : [] 36 | } 37 | 38 | def generate_documentation(fp: typing.TextIO, doc, prefix): 39 | 40 | fp.write(prefix) 41 | 42 | for p in doc: 43 | fp.write(f"### `{p['canonical_name']}`\n") 44 | fp.write("\n") 45 | fp.write("#### Description\n") 46 | fp.write("\n") 47 | fp.write(p["description"]) 48 | fp.write("\n") 49 | fp.write("\n") 50 | 51 | fp.write("#### Units\n") 52 | fp.write("\n") 53 | fp.write(p["units"] if p["units"] is not None else "n/a") 54 | fp.write("\n") 55 | fp.write("\n") 56 | 57 | fp.write("#### Sampling\n") 58 | fp.write("\n") 59 | fp.write(p["sampling"]) 60 | fp.write("\n") 61 | fp.write("\n") 62 | 63 | if "constraints" in p and p["constraints"]: 64 | fp.write("#### Constraints\n") 65 | fp.write("\n") 66 | fp.write(p["constraints"]) 67 | fp.write("\n") 68 | fp.write("\n") 69 | 70 | def generate_schema(fp: typing.TextIO, schema, title): 71 | fp.write(f"## {title} JSON Schema\n") 72 | fp.write("\n") 73 | fp.write("```") 74 | json.dump(schema, fp, indent=2) 75 | fp.write("\n") 76 | fp.write("```") 77 | fp.write("\n") 78 | 79 | def generate_clip_reader_coverage(fp: typing.TextIO, doc): 80 | fp.write("## Reader coverage\n") 81 | fp.write("\n") 82 | fp.write("The following table indicates the camera parameters supported by each of the readers.\n") 83 | fp.write("\n") 84 | 85 | # Parameter names 86 | 87 | parameter_names = tuple(e["canonical_name"] for e in doc) 88 | fp.write(f"| Reader | {' | '.join(parameter_names)} |\n") 89 | fp.write(f"| ----------- | {'----------- |' * len(parameter_names)}\n") 90 | 91 | def _generate_reader_coverage(fp, reader_name, doc, clip): 92 | fp.write(f"| {reader_name} |") 93 | for 
p in doc: 94 | if getattr(clip, p["python_name"], None) is not None: 95 | fp.write(" + |") 96 | else: 97 | fp.write(" |") 98 | fp.write("\n") 99 | 100 | # RED reader 101 | 102 | with open("src/test/resources/red/A001_C066_0303LZ_001.static.csv", "r", encoding="utf-8") as type_3_file, \ 103 | open("src/test/resources/red/A001_C066_0303LZ_001.frames.csv", "r", encoding="utf-8") as type_5_file: 104 | clip = camdkit.red.reader.to_clip(type_3_file, type_5_file) 105 | 106 | _generate_reader_coverage(fp, "RED", doc, clip) 107 | 108 | # ARRI reader 109 | 110 | clip = camdkit.arri.reader.to_clip("src/test/resources/arri/B001C001_180327_R1ZA.mov.csv") 111 | _generate_reader_coverage(fp, "ARRI", doc, clip) 112 | 113 | # Venice reader 114 | 115 | with open("src/test/resources/venice/D001C005_210716AGM01.xml", "r", encoding="utf-8") as static_file, \ 116 | open("src/test/resources/venice/D001C005_210716AG.csv", "r", encoding="utf-8") as dynamic_file: 117 | clip = camdkit.venice.reader.to_clip(static_file, dynamic_file) 118 | 119 | _generate_reader_coverage(fp, "Venice", doc, clip) 120 | 121 | # Canon reader 122 | 123 | with open("src/test/resources/canon/20221007_TNumber_CanonCameraMetadata_Static.csv", "r", encoding="utf-8") as static_csv, \ 124 | open("src/test/resources/canon/20221007_TNumber_CanonCameraMetadata_Frames.csv", "r", encoding="utf-8") as frame_csv: 125 | clip = camdkit.canon.reader.to_clip(static_csv, frame_csv) 126 | 127 | _generate_reader_coverage(fp, "Canon", doc, clip) 128 | 129 | if __name__ == "__main__": 130 | clip_doc = camdkit.model.Clip.make_documentation() 131 | generate_documentation(sys.stdout, clip_doc, _CLIP_INTRODUCTION) 132 | generate_clip_reader_coverage(sys.stdout, clip_doc) 133 | generate_schema(sys.stdout, camdkit.model.Clip.make_json_schema(), "Clip") 134 | -------------------------------------------------------------------------------- /src/tools/python/make_opentrackio_documentation.py: 
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the SMPTE RIS OSVP Metadata Project

"""Build the OpenTrackIO documentation site: example JSON files, the JSON
schema, and the rendered HTML page with static web resources."""

import json, os, shutil
from inspect import getmembers, isfunction

import camdkit.examples
from camdkit.model import Clip, OPENTRACKIO_PROTOCOL_VERSION
from jinja2 import Environment, FileSystemLoader, select_autoescape

current_path = os.path.dirname(__file__)
templates_path = os.path.join(current_path, "templates")
resources_path = os.path.join(current_path, "..", "..", "main", "resources")
docs_path = os.path.join(current_path, "..", "..", "..", "build", "opentrackio")
examples_path = os.path.join(docs_path, "examples")
# exist_ok avoids the check-then-create race of the previous exists()/makedirs pair
os.makedirs(examples_path, exist_ok=True)

def main():
    """Generate examples, schema.json and index.html under docs_path."""
    # Serialize the schema once; it is reused for both the template and schema.json.
    schema_json = json.dumps(Clip.make_json_schema(), indent=2)
    template_data = {
        "examples": {},
        "fields": Clip.make_documentation(),
        "schema": schema_json,
        "version": ".".join(str(v) for v in OPENTRACKIO_PROTOCOL_VERSION)
    }
    # Generate all the examples from the public get_* functions in camdkit.examples
    for function_name, function in getmembers(camdkit.examples, isfunction):
        # Ignore the 'private' functions
        if function_name.startswith("_"):
            continue
        # Strip the conventional 'get_' prefix to obtain the example name
        # (removeprefix is a no-op, not a truncation, if the prefix is absent).
        example_name = function_name.removeprefix("get_")
        file_name = f"{example_name}.json"
        print(f"Generating {file_name}")
        example_json = json.dumps(function(), indent=2)
        template_data["examples"][example_name] = example_json
        # Context manager + explicit encoding: no leaked handle, stable output.
        with open(os.path.join(examples_path, file_name), "w", encoding="utf-8") as f:
            f.write(example_json)

    # Generate schema (reusing the serialization computed above)
    schema_file_name = "schema.json"
    print(f"Generating {schema_file_name}")
    with open(os.path.join(docs_path, schema_file_name), "w", encoding="utf-8") as f:
        f.write(schema_json)

    # Generate web docs
    env = Environment(
        loader=FileSystemLoader(templates_path),
        autoescape=select_autoescape()
    )
    template = env.get_template("OpenTrackIO.html")
    html = template.render(template_data)

    print("Generating index.html")
    with open(os.path.join(docs_path, "index.html"), "w", encoding="utf-8") as f:
        f.write(html)

    print("Publishing static web resources")
    for folder in ["css", "img", "res"]:
        shutil.copytree(os.path.join(resources_path, folder),
                        os.path.join(docs_path, folder), dirs_exist_ok=True)

if __name__ == "__main__":
    main()