├── .gitignore
├── New Text Document.txt
├── README.md
├── animated_drawings
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-311.pyc
│   │   ├── __init__.cpython-312.pyc
│   │   ├── __init__.cpython-38.pyc
│   │   ├── config.cpython-312.pyc
│   │   ├── config.cpython-38.pyc
│   │   ├── render.cpython-311.pyc
│   │   ├── render.cpython-312.pyc
│   │   ├── render.cpython-38.pyc
│   │   ├── utils.cpython-312.pyc
│   │   └── utils.cpython-38.pyc
│   ├── config.py
│   ├── controller
│   │   ├── __pycache__
│   │   │   ├── controller.cpython-312.pyc
│   │   │   ├── controller.cpython-38.pyc
│   │   │   ├── interactive_controller.cpython-312.pyc
│   │   │   ├── interactive_controller.cpython-38.pyc
│   │   │   ├── video_render_controller.cpython-312.pyc
│   │   │   └── video_render_controller.cpython-38.pyc
│   │   ├── controller.py
│   │   ├── interactive_controller.py
│   │   └── video_render_controller.py
│   ├── model
│   │   ├── __pycache__
│   │   │   ├── animated_drawing.cpython-312.pyc
│   │   │   ├── animated_drawing.cpython-38.pyc
│   │   │   ├── arap.cpython-312.pyc
│   │   │   ├── arap.cpython-38.pyc
│   │   │   ├── box.cpython-312.pyc
│   │   │   ├── box.cpython-38.pyc
│   │   │   ├── bvh.cpython-312.pyc
│   │   │   ├── bvh.cpython-38.pyc
│   │   │   ├── camera.cpython-312.pyc
│   │   │   ├── camera.cpython-38.pyc
│   │   │   ├── floor.cpython-312.pyc
│   │   │   ├── floor.cpython-38.pyc
│   │   │   ├── joint.cpython-312.pyc
│   │   │   ├── joint.cpython-38.pyc
│   │   │   ├── quaternions.cpython-312.pyc
│   │   │   ├── quaternions.cpython-38.pyc
│   │   │   ├── rectangle.cpython-312.pyc
│   │   │   ├── rectangle.cpython-38.pyc
│   │   │   ├── retargeter.cpython-312.pyc
│   │   │   ├── retargeter.cpython-38.pyc
│   │   │   ├── scene.cpython-312.pyc
│   │   │   ├── scene.cpython-38.pyc
│   │   │   ├── time_manager.cpython-312.pyc
│   │   │   ├── time_manager.cpython-38.pyc
│   │   │   ├── transform.cpython-312.pyc
│   │   │   ├── transform.cpython-38.pyc
│   │   │   ├── vectors.cpython-312.pyc
│   │   │   └── vectors.cpython-38.pyc
│   │   ├── animated_drawing.py
│   │   ├── arap.py
│   │   ├── box.py
│   │   ├── bvh.py
│   │   ├── camera.py
│   │   ├── floor.py
│   │   ├── joint.py
│   │   ├── quaternions.py
│   │   ├── rectangle.py
│   │   ├── retargeter.py
│   │   ├── scene.py
│   │   ├── time_manager.py
│   │   ├── transform.py
│   │   ├── transform_widget.py
│   │   └── vectors.py
│   ├── mvc_base_cfg.yaml
│   ├── render.py
│   ├── utils.py
│   └── view
│       ├── __pycache__
│       │   ├── utils.cpython-312.pyc
│       │   ├── utils.cpython-38.pyc
│       │   ├── view.cpython-312.pyc
│       │   ├── view.cpython-38.pyc
│       │   ├── window_view.cpython-312.pyc
│       │   └── window_view.cpython-38.pyc
│       ├── mesa_view.py
│       ├── shaders
│       │   ├── __pycache__
│       │   │   ├── shader.cpython-312.pyc
│       │   │   └── shader.cpython-38.pyc
│       │   ├── bvh.frag
│       │   ├── bvh.vert
│       │   ├── color.frag
│       │   ├── color.vert
│       │   ├── shader.py
│       │   ├── texture.frag
│       │   └── texture.vert
│       ├── utils.py
│       ├── view.py
│       └── window_view.py
├── annotate.py
├── annotation
│   ├── __pycache__
│   │   ├── annotation.cpython-310.pyc
│   │   ├── annotation.cpython-311.pyc
│   │   ├── annotation.cpython-312.pyc
│   │   ├── annotation.cpython-38.pyc
│   │   ├── handler.cpython-310.pyc
│   │   ├── handler.cpython-311.pyc
│   │   ├── handler.cpython-312.pyc
│   │   ├── handler.cpython-38.pyc
│   │   ├── image_to_annotations.cpython-310.pyc
│   │   ├── image_to_annotations.cpython-311.pyc
│   │   ├── image_to_annotations.cpython-312.pyc
│   │   └── image_to_annotations.cpython-38.pyc
│   ├── annotation.py
│   ├── handler.py
│   └── image_to_annotations.py
├── app.py
├── bvh_skeleton
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-311.pyc
│   │   ├── __init__.cpython-312.pyc
│   │   ├── bvh_helper.cpython-311.pyc
│   │   ├── bvh_helper.cpython-312.pyc
│   │   ├── cmu_skeleton.cpython-311.pyc
│   │   ├── cmu_skeleton.cpython-312.pyc
│   │   ├── h36m_original_skeleton.cpython-311.pyc
│   │   ├── h36m_original_skeleton.cpython-312.pyc
│   │   ├── h36m_skeleton.cpython-311.pyc
│   │   ├── h36m_skeleton.cpython-312.pyc
│   │   ├── math3d.cpython-311.pyc
│   │   ├── math3d.cpython-312.pyc
│   │   ├── openpose_skeleton.cpython-311.pyc
│   │   └── openpose_skeleton.cpython-312.pyc
│   ├── bvh_helper.py
│   ├── cmu_skeleton.py
│   ├── coco_skeleton.py
│   ├── h36m_original_skeleton.py
│   ├── h36m_skeleton.py
│   ├── math3d.py
│   └── openpose_skeleton.py
├── character
│   ├── bounding_box.yaml
│   ├── char_cfg.yaml
│   ├── image.png
│   ├── image_with_bg.png
│   ├── joint_overlay.png
│   ├── mask.png
│   └── texture.png
├── garlic.png
├── image.png
├── imagebvhtogif.py
├── input_image
│   ├── 1.png
│   ├── 2.png
│   └── garlic.png
├── input_video
│   └── second.mp4
├── output.png
├── requirements.txt
├── result
│   ├── 1.gif
│   ├── 10.gif
│   ├── 2.gif
│   ├── 3.gif
│   ├── 5.gif
│   └── video.gif
├── server.py
├── setup.py
├── torchserve
│   ├── __pycache__
│   │   ├── server.cpython-311.pyc
│   │   ├── server.cpython-312.pyc
│   │   └── server.cpython-38.pyc
│   └── server.py
└── utils
    ├── __init__.py
    ├── __pycache__
    │   ├── __init__.cpython-311.pyc
    │   ├── __init__.cpython-312.pyc
    │   ├── camera.cpython-311.pyc
    │   ├── camera.cpython-312.pyc
    │   ├── smooth.cpython-311.pyc
    │   ├── smooth.cpython-312.pyc
    │   ├── vis.cpython-311.pyc
    │   └── vis.cpython-312.pyc
    ├── camera.py
    ├── smooth.py
    └── vis.py
/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | build
3 | *.spec
4 | *.h5
5 | *.rar
6 | models
7 | bin
8 | *.pth
9 | input
10 | output
11 | output_json
12 | *.bvh
13 | config
14 |
15 | _internal
16 | .venv
17 |
--------------------------------------------------------------------------------
/New Text Document.txt:
--------------------------------------------------------------------------------
1 | conda activate mmpose
2 |
3 | numpy==1.24.4
4 |
5 | https://mmcv.readthedocs.io/en/master/get_started/installation.html#install-mmcv-full
6 | mmdet==2.27.0
7 | mmpose==0.29.0
8 |
9 |
10 | pyinstaller --onefile --hidden-import=mmcv._ext annotate.py
11 |
12 | pyinstaller --onefile --add-data "C:\Users\admin\anaconda3\envs\mmpose\Lib\site-packages\mmcv\_ext.cp38-win_amd64.pyd:mmcv" annotate.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # This is the result of this algorithm.
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/animated_drawings/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/__init__.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/__init__.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/config.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/config.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/config.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/config.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/render.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/render.cpython-311.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/render.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/render.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/render.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/render.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/utils.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/utils.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/__pycache__/utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/__pycache__/utils.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/controller/__pycache__/controller.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/controller/__pycache__/controller.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/controller/__pycache__/controller.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/controller/__pycache__/controller.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/controller/__pycache__/interactive_controller.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/controller/__pycache__/interactive_controller.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/controller/__pycache__/interactive_controller.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/controller/__pycache__/interactive_controller.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/controller/__pycache__/video_render_controller.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/controller/__pycache__/video_render_controller.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/controller/__pycache__/video_render_controller.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/controller/__pycache__/video_render_controller.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/controller/controller.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | """ Controller Abstract Base Class Module """
6 |
7 | from __future__ import annotations
8 | from typing import Optional
9 | from abc import abstractmethod
10 | import logging
11 |
12 | from animated_drawings.model.scene import Scene
13 | from animated_drawings.view.view import View
14 | from animated_drawings.config import ControllerConfig
15 |
16 |
17 | class Controller():
18 | """
19 | Base Controller class from which all other Controllers will be derived.
20 | Controllers are responsible for:
21 | - running the game loop.
22 | - handling user input and forwarding it to the view or scene.
 23 |         - triggering the scene's update method.
 24 |         - triggering the view's render method.
25 | """
26 |
27 | def __init__(self, cfg: ControllerConfig, scene: Scene) -> None:
28 | self.cfg: ControllerConfig = cfg
29 | self.scene: Scene = scene
30 | self.view: Optional[View] = None
31 |
32 | def set_scene(self, scene: Scene) -> None:
33 | """ Sets the scene attached to this controller."""
34 | self.scene = scene
35 |
36 | def set_view(self, view: View) -> None:
37 | """ Sets the view attached to this controller."""
38 | self.view = view
39 |
40 | @abstractmethod
41 | def _tick(self) -> None:
 42 |         """Subclass and add logic as necessary to progress time"""
43 |
44 | @abstractmethod
45 | def _update(self) -> None:
 46 |         """Subclass and add logic as necessary to update scene after progressing time"""
47 |
48 | @abstractmethod
49 | def _is_run_over(self) -> bool:
 50 |         """Subclass and add logic as necessary to end the scene"""
51 |
52 | @abstractmethod
53 | def _start_run_loop_iteration(self) -> None:
54 | """Subclass and add code to start run loop iteration"""
55 |
56 | @abstractmethod
57 | def _handle_user_input(self) -> None:
58 | """Subclass and add code to handle user input"""
59 |
60 | @abstractmethod
61 | def _render(self) -> None:
62 | """Subclass and add logic needed to have viewer render the scene"""
63 |
64 | @abstractmethod
65 | def _finish_run_loop_iteration(self) -> None:
66 | """Subclass and add steps necessary before starting next iteration of run loop. """
67 |
68 | @abstractmethod
69 | def _prep_for_run_loop(self) -> None:
70 | """Subclass and add anything necessary to do immediately prior to run loop. """
71 |
72 | @abstractmethod
73 | def _cleanup_after_run_loop(self) -> None:
74 | """Subclass and add anything necessary to do after run loop has finished. """
75 |
76 | def run(self) -> None:
 77 |         """ The run loop. Subclassed controllers should implement the hook methods called at each step of this function."""
78 |
79 | self._prep_for_run_loop()
80 | while not self._is_run_over():
81 | self._start_run_loop_iteration()
82 | self._update()
83 | self._render()
84 | self._tick()
85 | self._handle_user_input()
86 | self._finish_run_loop_iteration()
87 |
88 | self._cleanup_after_run_loop()
89 |
90 | @staticmethod
91 | def create_controller(controller_cfg: ControllerConfig, scene: Scene, view: View) -> Controller:
 92 |         """ Takes in a ControllerConfig (from the mvc config file), scene, and view. Constructs and returns the appropriate controller."""
93 | if controller_cfg.mode == 'video_render':
94 | from animated_drawings.controller.video_render_controller import VideoRenderController
95 | return VideoRenderController(controller_cfg, scene, view,)
96 | elif controller_cfg.mode == 'interactive':
97 | from animated_drawings.controller.interactive_controller import InteractiveController
98 | from animated_drawings.view.window_view import WindowView
99 | assert isinstance(view, WindowView) # for static analysis. checks elsewhere ensure this always passes
100 | return InteractiveController(controller_cfg, scene, view)
101 | else:
102 | msg = f'Unknown controller mode specified: {controller_cfg.mode}'
103 | logging.critical(msg)
104 | assert False, msg
105 |
--------------------------------------------------------------------------------
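Note: Controller above is a textbook template method: run() fixes the loop order (prep; then start, update, render, tick, handle input, finish on each iteration; then cleanup) and subclasses supply the hooks. A minimal sketch of that contract, assuming the animated_drawings package is importable (the CountingController class below is hypothetical, not part of this repo):

# Hypothetical sketch, not part of this repo: the smallest Controller subclass
# that satisfies run(). Assumes cfg and scene were built elsewhere.
from animated_drawings.controller.controller import Controller

class CountingController(Controller):
    """ Advances the scene for a fixed number of iterations, without a view. """

    def __init__(self, cfg, scene, iterations: int = 10) -> None:
        super().__init__(cfg, scene)
        self.iterations_left = iterations

    def _prep_for_run_loop(self) -> None:
        pass  # nothing to prepare

    def _is_run_over(self) -> bool:
        return self.iterations_left == 0

    def _start_run_loop_iteration(self) -> None:
        pass  # a rendering controller would clear the window here

    def _update(self) -> None:
        self.scene.update_transforms()

    def _render(self) -> None:
        pass  # a rendering controller would call self.view.render(self.scene)

    def _tick(self) -> None:
        self.scene.progress_time(1 / 30)  # fixed 30 fps timestep

    def _handle_user_input(self) -> None:
        pass  # no input source

    def _finish_run_loop_iteration(self) -> None:
        self.iterations_left -= 1

    def _cleanup_after_run_loop(self) -> None:
        pass  # nothing to release

VideoRenderController and InteractiveController below are the repo's two real implementations; the sketch only shows which hooks a new controller must fill in.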
/animated_drawings/controller/interactive_controller.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | """ Interactive Controller Class Module """
6 |
7 | import time
8 | from typing import Optional
9 | import glfw
10 |
11 | from animated_drawings.controller.controller import Controller
12 | from animated_drawings.model.scene import Scene
13 | from animated_drawings.view.window_view import WindowView
14 | from animated_drawings.config import ControllerConfig
15 |
16 |
17 | class InteractiveController(Controller):
18 | """ Interactive Controller Class """
19 |
20 | def __init__(self, cfg: ControllerConfig, scene: Scene, view: WindowView) -> None:
21 | super().__init__(cfg, scene)
22 |
23 | self.view: WindowView = view
24 | self.prev_time: float = 0.0 # tracks real-world time passing between run loops
25 | self.pause: bool = False # tracks whether time progresses in the scene
26 |
27 | glfw.init()
28 | glfw.set_key_callback(self.view.win, self._on_key)
29 |
30 | def _on_key(self, _win, key: int, _scancode, action, _mods) -> None: # noqa: C901
31 |
32 | if action not in (glfw.PRESS, glfw.REPEAT):
33 | return
34 |
35 | # close window
36 | if key in (glfw.KEY_ESCAPE, glfw.KEY_Q):
37 | glfw.set_window_should_close(self.view.win, True)
38 |
39 | # move camera forward
40 | elif key == glfw.KEY_W:
41 | _, _, fwd = self.view.camera.get_right_up_fwd_vectors()
42 | self.view.camera.offset(-0.1 * fwd)
43 |
44 | # move camera back
45 | elif key == glfw.KEY_S:
46 | _, _, fwd = self.view.camera.get_right_up_fwd_vectors()
47 | self.view.camera.offset(0.1 * fwd)
48 |
 49 |         # move camera left
50 | elif key == glfw.KEY_A:
51 | right, _, _ = self.view.camera.get_right_up_fwd_vectors()
52 | self.view.camera.offset(-0.1 * right)
53 |
 54 |         # move camera right
55 | elif key == glfw.KEY_D:
56 | right, _, _ = self.view.camera.get_right_up_fwd_vectors()
57 | self.view.camera.offset(0.1 * right)
58 |
59 | # move camera up
60 | elif key == glfw.KEY_E:
61 | _, up, _ = self.view.camera.get_right_up_fwd_vectors()
62 | self.view.camera.offset(0.1 * up)
63 |
64 | # move camera down
65 | elif key == glfw.KEY_R:
66 | _, up, _ = self.view.camera.get_right_up_fwd_vectors()
67 | self.view.camera.offset(-0.1 * up)
68 |
69 | # toggle start/stop time
70 | elif key == glfw.KEY_SPACE:
71 | self.pause = not self.pause
72 | self.prev_time = time.time()
73 |
74 | # step forward in time
75 | elif key == glfw.KEY_RIGHT:
76 | self._tick(self.cfg.keyboard_timestep)
77 |
78 | # step backward in time
79 | elif key == glfw.KEY_LEFT:
80 | self._tick(-self.cfg.keyboard_timestep)
81 |
 82 |     def _is_run_over(self) -> bool:
83 | return glfw.window_should_close(self.view.win)
84 |
85 | def _prep_for_run_loop(self) -> None:
86 | self.prev_time = time.time()
87 |
88 | def _start_run_loop_iteration(self) -> None:
89 | self.view.clear_window()
90 |
91 | def _tick(self, delta_t: Optional[float] = None) -> None:
92 | # if passed a specific value to progress time by, do so
93 | if delta_t:
94 | self.scene.progress_time(delta_t)
95 | # otherwise, if scene is paused, do nothing
96 | elif self.pause:
97 | pass
98 | # otherwise, calculate real time passed since last call and progress scene by that amount
99 | else:
100 | cur_time = time.time()
101 | self.scene.progress_time(cur_time - self.prev_time)
102 | self.prev_time = cur_time
103 |
104 | def _update(self) -> None:
105 | self.scene.update_transforms()
106 |
107 | def _handle_user_input(self) -> None:
108 | glfw.poll_events()
109 |
110 | def _render(self) -> None:
111 | self.view.render(self.scene)
112 |
113 | def _finish_run_loop_iteration(self) -> None:
114 | self.view.swap_buffers()
115 |
116 | def _cleanup_after_run_loop(self) -> None:
117 | self.view.cleanup()
118 |
--------------------------------------------------------------------------------
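Note: _tick above implements a standard variable-timestep clock: when unpaused, the scene advances by however much wall-clock time elapsed since the previous loop iteration, and prev_time is reset whenever the pause toggles so the paused interval is never replayed. A standalone sketch of that pattern (the WallClock class is hypothetical, for illustration only):

# Hypothetical standalone sketch of the variable-timestep pattern used by
# InteractiveController._tick (not part of this repo).
import time

class WallClock:
    def __init__(self) -> None:
        self.prev_time = time.time()
        self.paused = False
        self.scene_time = 0.0

    def toggle_pause(self) -> None:
        self.paused = not self.paused
        self.prev_time = time.time()  # drop the interval spent paused

    def tick(self, delta_t=None) -> None:
        if delta_t:                    # explicit step (arrow keys)
            self.scene_time += delta_t
        elif self.paused:              # paused: scene time stands still
            pass
        else:                          # advance by real elapsed time
            cur_time = time.time()
            self.scene_time += cur_time - self.prev_time
            self.prev_time = cur_time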
/animated_drawings/controller/video_render_controller.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | """ Video Render Controller Class Module """
6 |
7 | from __future__ import annotations
8 | import time
9 | import logging
10 | from typing import List
11 | from pathlib import Path
12 | from abc import abstractmethod
13 | import numpy as np
14 | import numpy.typing as npt
15 | import cv2
16 | from OpenGL import GL
17 | from tqdm import tqdm
18 |
19 | from animated_drawings.controller.controller import Controller
20 | from animated_drawings.model.scene import Scene
21 | from animated_drawings.model.animated_drawing import AnimatedDrawing
22 | from animated_drawings.view.view import View
23 | from animated_drawings.config import ControllerConfig
24 |
25 | NoneType = type(None) # for type checking below
26 |
27 |
28 | class VideoRenderController(Controller):
29 | """ Video Render Controller is used to non-interactively generate a video file """
30 |
31 | def __init__(self, cfg: ControllerConfig, scene: Scene, view: View) -> None:
32 | super().__init__(cfg, scene)
33 |
34 | self.view: View = view
35 |
36 | self.scene: Scene = scene
37 |
38 | self.frames_left_to_render: int # when this becomes zero, stop rendering
39 | self.delta_t: float # amount of time to progress scene between renders
40 | self._set_frames_left_to_render_and_delta_t()
41 |
42 | self.render_start_time: float # track when we started to render frames (for performance stats)
43 | self.frames_rendered: int = 0 # track how many frames we've rendered
44 |
45 | self.video_width: int
46 | self.video_height: int
47 | self.video_width, self.video_height = self.view.get_framebuffer_size()
48 |
49 | self.video_writer: VideoWriter = VideoWriter.create_video_writer(self)
50 |
51 | self.frame_data = np.empty([self.video_height, self.video_width, 4], dtype='uint8') # 4 for RGBA
52 |
53 | self.progress_bar = tqdm(total=self.frames_left_to_render)
54 |
55 | def _set_frames_left_to_render_and_delta_t(self) -> None:
56 | """
57 | Based upon the animated drawings within the scene, computes maximum number of frames in a BVH.
58 | Checks that all frame times within BVHs are equal, logs a warning if not.
59 | Uses results to determine number of frames and frame time for output video.
60 | """
61 |
62 | max_frames = 0
63 | frame_time: List[float] = []
64 | for child in self.scene.get_children():
65 | if not isinstance(child, AnimatedDrawing):
66 | continue
67 | max_frames = max(max_frames, child.retargeter.bvh.frame_max_num)
68 | frame_time.append(child.retargeter.bvh.frame_time)
69 |
70 | if not all(x == frame_time[0] for x in frame_time):
 71 |             msg = f'frame times of BVH files don\'t match. Using first value: {frame_time[0]}'
72 | logging.warning(msg)
73 |
74 | self.frames_left_to_render = max_frames
75 | self.delta_t = frame_time[0]
76 |
77 | def _prep_for_run_loop(self) -> None:
78 | self.run_loop_start_time = time.time()
79 |
80 | def _is_run_over(self) -> bool:
81 | return self.frames_left_to_render == 0
82 |
83 | def _start_run_loop_iteration(self) -> None:
84 | self.view.clear_window()
85 |
86 | def _update(self) -> None:
87 | self.scene.update_transforms()
88 |
89 | def _render(self) -> None:
90 | self.view.render(self.scene)
91 |
92 | def _tick(self) -> None:
93 | self.scene.progress_time(self.delta_t)
94 |
95 | def _handle_user_input(self) -> None:
96 | """ ignore all user input when rendering video file """
97 |
98 | def _finish_run_loop_iteration(self) -> None:
99 | # get pixel values from the frame buffer, send them to the video writer
100 | GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, 0)
101 | GL.glReadPixels(0, 0, self.video_width, self.video_height, GL.GL_BGRA, GL.GL_UNSIGNED_BYTE, self.frame_data)
102 | self.video_writer.process_frame(self.frame_data[::-1, :, :].copy())
103 |
104 | # update our counts and progress_bar
105 | self.frames_left_to_render -= 1
106 | self.frames_rendered += 1
107 | self.progress_bar.update(1)
108 |
109 | def _cleanup_after_run_loop(self) -> None:
110 | logging.info(f'Rendered {self.frames_rendered} frames in {time.time()-self.run_loop_start_time} seconds.')
111 | self.view.cleanup()
112 |
113 | _time = time.time()
114 | self.video_writer.cleanup()
 115 |         logging.info(f'Wrote video to file in {time.time()-_time} seconds.')
116 |
117 |
118 | class VideoWriter():
119 | """ Wrapper to abstract the different backends necessary for writing different video filetypes """
120 |
121 | def __init__(self) -> None:
122 | pass
123 |
124 | @abstractmethod
125 | def process_frame(self, frame: npt.NDArray[np.uint8]) -> None:
126 | """ Subclass must specify how to handle each frame of data received. """
127 | pass
128 |
129 | @abstractmethod
130 | def cleanup(self) -> None:
131 | """ Subclass must specify how to finish up after all frames have been received. """
132 | pass
133 |
134 | @staticmethod
135 | def create_video_writer(controller: VideoRenderController) -> VideoWriter:
136 |
137 | assert isinstance(controller.cfg.output_video_path, str) # for static analysis
138 |
139 | output_p = Path(controller.cfg.output_video_path)
140 | output_p.parent.mkdir(exist_ok=True, parents=True)
141 |
142 | msg = f' Writing video to: {output_p.resolve()}'
143 | logging.info(msg)
144 | print(msg)
145 |
146 | if output_p.suffix == '.gif':
147 | return GIFWriter(controller)
148 | elif output_p.suffix == '.mp4':
149 | return MP4Writer(controller)
150 | else:
151 | msg = f'Unsupported output video file extension ({output_p.suffix}). Only .gif and .mp4 are supported.'
152 | logging.critical(msg)
153 | assert False, msg
154 |
155 |
156 | class GIFWriter(VideoWriter):
157 | """ Video writer for creating transparent, animated GIFs with Pillow """
158 |
159 | def __init__(self, controller: VideoRenderController) -> None:
160 | assert isinstance(controller.cfg.output_video_path, str) # for static analysis
161 | self.output_p = Path(controller.cfg.output_video_path)
162 |
163 | self.duration = int(controller.delta_t*1000)
164 | if self.duration < 20:
 165 |             msg = f'Specified .gif frame duration ({self.duration}) is too low; replacing with 20.'
 166 |             logging.warning(msg)
167 | self.duration = 20
168 |
169 | self.frames: List[npt.NDArray[np.uint8]] = []
170 |
171 | def process_frame(self, frame: npt.NDArray[np.uint8]) -> None:
172 | """ Reorder channels and save frames as they arrive"""
173 | self.frames.append(cv2.cvtColor(frame, cv2.COLOR_BGRA2RGBA).astype(np.uint8))
174 |
175 | def cleanup(self) -> None:
176 | """ Write all frames to output path specified."""
177 | from PIL import Image
178 | self.output_p.parent.mkdir(exist_ok=True, parents=True)
179 | logging.info(f'VideoWriter will write to {self.output_p.resolve()}')
180 | ims = [Image.fromarray(a_frame) for a_frame in self.frames]
181 | ims[0].save(self.output_p, save_all=True, append_images=ims[1:], duration=self.duration, disposal=2, loop=0)
182 |
183 |
184 | class MP4Writer(VideoWriter):
185 | """ Video writer for creating mp4 videos with cv2.VideoWriter """
186 | def __init__(self, controller: VideoRenderController) -> None:
187 |
188 | # validate and prep output path
189 | if isinstance(controller.cfg.output_video_path, NoneType):
190 | msg = 'output video path not specified for mp4 video writer'
191 | logging.critical(msg)
192 | assert False, msg
193 | output_p = Path(controller.cfg.output_video_path)
194 | output_p.parent.mkdir(exist_ok=True, parents=True)
195 | logging.info(f'VideoWriter will write to {output_p.resolve()}')
196 |
197 | # validate and prep codec
198 | if isinstance(controller.cfg.output_video_codec, NoneType):
199 | msg = 'output video codec not specified for mp4 video writer'
200 | logging.critical(msg)
201 | assert False, msg
202 | fourcc = cv2.VideoWriter_fourcc(*controller.cfg.output_video_codec)
203 | logging.info(f'Using codec {controller.cfg.output_video_codec}')
204 |
205 | # calculate video writer framerate
206 | frame_rate = round(1/controller.delta_t)
207 |
208 | # initialize the video writer
209 | self.video_writer = cv2.VideoWriter(str(output_p), fourcc, frame_rate, (controller.video_width, controller.video_height))
210 |
211 | def process_frame(self, frame: npt.NDArray[np.uint8]) -> None:
212 | """ Remove the alpha channel and send to the video writer as it arrives. """
213 | self.video_writer.write(frame[:, :, :3])
214 |
215 | def cleanup(self) -> None:
216 | self.video_writer.release()
217 |
--------------------------------------------------------------------------------
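Note: GIFWriter above buffers every RGBA frame in memory and writes the whole animation once in cleanup(). A standalone sketch of the Pillow call it relies on (hypothetical demo, not part of this repo): duration is milliseconds per frame, disposal=2 restores the background after each frame so per-frame transparency survives, and loop=0 loops forever.

# Hypothetical demo: write a two-frame animated GIF the same way
# GIFWriter.cleanup does.
import numpy as np
from PIL import Image

frames = []
for shade in (64, 192):
    rgba = np.zeros((32, 32, 4), dtype=np.uint8)
    rgba[..., 0] = shade   # red channel varies per frame
    rgba[..., 3] = 255     # fully opaque
    frames.append(Image.fromarray(rgba))

frames[0].save('demo.gif', save_all=True, append_images=frames[1:],
               duration=50, disposal=2, loop=0)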
/animated_drawings/model/__pycache__/animated_drawing.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/animated_drawing.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/animated_drawing.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/animated_drawing.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/arap.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/arap.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/arap.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/arap.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/box.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/box.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/box.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/box.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/bvh.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/bvh.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/bvh.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/bvh.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/camera.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/camera.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/camera.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/camera.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/floor.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/floor.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/floor.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/floor.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/joint.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/joint.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/joint.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/joint.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/quaternions.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/quaternions.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/quaternions.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/quaternions.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/rectangle.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/rectangle.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/rectangle.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/rectangle.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/retargeter.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/retargeter.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/retargeter.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/retargeter.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/scene.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/scene.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/scene.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/scene.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/time_manager.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/time_manager.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/time_manager.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/time_manager.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/transform.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/transform.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/transform.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/transform.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/vectors.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/vectors.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/__pycache__/vectors.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/model/__pycache__/vectors.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/model/box.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | import numpy as np
6 | import OpenGL.GL as GL
7 | import ctypes
8 | from animated_drawings.model.transform import Transform
9 |
10 |
11 | class Box(Transform):
12 |
13 | def __init__(self, shader_name: str = 'color_shader') -> None:
14 | super().__init__()
15 |
16 | self.points = np.array([
17 | [ 0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
18 | [ 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
19 | [-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
20 | [-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
21 | [-0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
22 | [ 0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
23 |
24 | [-0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
25 | [ 0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
26 | [ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
27 | [ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
28 | [-0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
29 | [-0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
30 |
31 | [ 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
32 | [ 0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
33 | [ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
34 | [ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
35 | [ 0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
36 | [ 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
37 |
38 | [-0.5, 0.5, 0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
39 | [-0.5, 0.5, -0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
40 | [-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
41 | [-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
42 | [-0.5, -0.5, 0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
43 | [-0.5, 0.5, 0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
44 |
45 | [-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
46 | [ 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
47 | [ 0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
48 | [ 0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
49 | [-0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
50 | [-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
51 |
52 | [ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
53 | [ 0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
54 | [-0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
55 | [-0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
56 | [-0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
57 | [ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
58 |
59 | ], np.float32)
60 |
61 | self.indices = np.array([2, 1, 0,
62 | 5, 4, 3,
63 | 6, 7, 8,
64 | 9, 10, 11,
65 | 14, 13, 12,
66 | 17, 16, 15,
67 | 18, 19, 20,
68 | 21, 22, 23,
69 | 24, 25, 26,
70 | 27, 28, 29,
71 | 32, 31, 30,
72 | 35, 34, 33
73 | ], np.uint32)
74 |
75 | self.material = {
76 | 'ambient': np.array([0.4, 0.4, 0.4], np.float32),
77 | 'diffuse': np.array([0.4, 0.4, 0.4], np.float32),
78 | 'specular': np.array([0.3, 0.0, 0.0], np.float32),
79 | 'shininess': 32
80 | }
81 |
82 | self.shader_name: str = shader_name
83 |
84 | self._is_opengl_initialized: bool = False # keep track of whether self._initialize_opengl_resources was called.
85 |
86 | def _initialize_opengl_resources(self) -> None:
87 | """ Method to initialize the OpenGL arrays and buffers necessary to draw the object.
88 | It's better to not initialize these things unless we're definitely going to be drawing the object,
89 | as calling GL functions without calling glfw.init() first can cause a mysterious segfault.
90 | This way, unit tests and other non-rendering operations can proceed without requiring a Controller.
91 | """
92 | self.vao = GL.glGenVertexArrays(1)
93 | self.vbo = GL.glGenBuffers(1)
94 | self.ebo = GL.glGenBuffers(1)
95 |
96 | GL.glBindVertexArray(self.vao)
97 |
98 | # buffer vertex data
99 | GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
100 | GL.glBufferData(GL.GL_ARRAY_BUFFER, self.points, GL.GL_STATIC_DRAW)
101 |
102 | # buffer element index data
103 | GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
104 | GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, self.indices, GL.GL_STATIC_DRAW)
105 |
106 | vert_bytes = 4 * self.points.shape[1] # 4 is byte size of np.float32
107 | pos_offset = 4 * 0
108 | color_offset = 4 * 3
109 | normals_offset = 4 * 6
110 |
111 | # position attributes
112 | GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(pos_offset))
113 | GL.glEnableVertexAttribArray(0)
114 |
115 | # color attributes
116 | GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(color_offset))
117 | GL.glEnableVertexAttribArray(1)
118 |
119 | # normals attributes
120 | GL.glVertexAttribPointer(2, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(normals_offset))
121 | GL.glEnableVertexAttribArray(2)
122 |
123 | GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
124 | GL.glBindVertexArray(0)
125 |
126 | self._is_opengl_initialized = True
127 |
128 | def rebuffer_vertex_data(self) -> None:
129 | GL.glBindVertexArray(self.vao)
130 | GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
131 | GL.glBufferData(GL.GL_ARRAY_BUFFER, self.points, GL.GL_STATIC_DRAW)
132 |
133 | def _draw(self, **kwargs) -> None:
134 |
135 | if not self._is_opengl_initialized:
136 | self._initialize_opengl_resources()
137 |
138 | # Enable multisampling
139 | GL.glEnable(GL.GL_MULTISAMPLE)
140 |
141 | GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
142 | GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
 143 |         GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
144 |
145 | # GL.glEnable(GL.GL_ALPHA_TEST)
146 | # GL.glAlphaFunc(GL.GL_GREATER, 0.1) # Adjust the threshold as necessary
147 |
148 | # Enable blending
149 | GL.glEnable(GL.GL_BLEND)
150 | GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
151 |
152 | GL.glUseProgram(kwargs['shader_ids'][self.shader_name])
153 | model_loc = GL.glGetUniformLocation(kwargs['shader_ids'][self.shader_name], "model")
154 | GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE, self._world_transform.T)
155 |
156 | GL.glBindVertexArray(self.vao)
157 | GL.glDrawArrays(GL.GL_TRIANGLES, 0, 36)
158 |
--------------------------------------------------------------------------------
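Note: Box interleaves three vertex attributes in each 9-float row of self.points: position in columns 0-2, color in 3-5, normal in 6-8. That is why _initialize_opengl_resources computes a stride of 4 * 9 bytes and attribute offsets of 0, 12, and 24. A small numpy sketch of the same layout arithmetic (hypothetical, for illustration only):

# Hypothetical illustration of Box's interleaved vertex layout:
# 9 float32s per vertex -> position, color, normal.
import numpy as np

points = np.array([[0.5, 0.5, -0.5,  0.0, 0.0, 0.0,  0.0, 0.0, -1.0]], np.float32)

stride_bytes = 4 * points.shape[1]   # 36 bytes per vertex (4 bytes per float32)
pos_offset = 4 * 0                   # positions start at byte 0
color_offset = 4 * 3                 # colors start at byte 12
normal_offset = 4 * 6                # normals start at byte 24

position = points[0, 0:3]            # [ 0.5  0.5 -0.5]
color = points[0, 3:6]               # [0. 0. 0.]
normal = points[0, 6:9]              # [ 0.  0. -1.]
assert (stride_bytes, pos_offset, color_offset, normal_offset) == (36, 0, 12, 24)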
/animated_drawings/model/bvh.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from __future__ import annotations # so we can refer to class Type inside class
6 | import logging
7 | from pathlib import Path
8 | from typing import List, Tuple, Optional
9 |
10 | import numpy as np
11 | import numpy.typing as npt
12 |
13 | from animated_drawings.model.transform import Transform
14 | from animated_drawings.model.box import Box
15 | from animated_drawings.model.quaternions import Quaternions
16 | from animated_drawings.model.vectors import Vectors
17 | from animated_drawings.model.joint import Joint
18 | from animated_drawings.model.time_manager import TimeManager
19 | from animated_drawings.utils import resolve_ad_filepath
20 |
21 |
22 | class BVH_Joint(Joint):
23 | """
24 | Joint class with channel order attribute and specialized vis widget
25 | """
26 | def __init__(self, channel_order: List[str] = [], widget: bool = True, **kwargs) -> None:
27 | super().__init__(**kwargs)
28 |
29 | self.channel_order = channel_order
30 |
31 | self.widget: Optional[Transform] = None
32 | if widget:
33 | self.widget = Box()
34 | self.add_child(self.widget)
35 |
36 | def _draw(self, **kwargs):
37 | if self.widget:
38 | self.widget.draw(**kwargs)
39 |
40 |
41 | class BVH(Transform, TimeManager):
42 | """
43 | Class to encapsulate BVH (Biovision Hierarchy) animation data.
 44 |     Includes the single skeletal hierarchy defined in the BVH, frame count and frame time,
45 | and skeletal pos/rot data for each frame
46 | """
47 |
48 | def __init__(self,
49 | name: str,
50 | root_joint: BVH_Joint,
51 | frame_max_num: int,
52 | frame_time: float,
53 | pos_data: npt.NDArray[np.float32],
54 | rot_data: npt.NDArray[np.float32]
55 | ) -> None:
56 | """
 57 |         Calling this constructor directly is not recommended. Instead, use BVH.from_file().
58 | """
59 | super().__init__()
60 |
61 | self.name: str = name
62 | self.frame_max_num: int = frame_max_num
63 | self.frame_time: float = frame_time
64 | self.pos_data: npt.NDArray[np.float32] = pos_data
65 | self.rot_data: npt.NDArray[np.float32] = rot_data
66 |
67 | self.root_joint = root_joint
68 | self.add_child(self.root_joint)
69 | self.joint_num = self.root_joint.joint_count()
70 |
71 | self.cur_frame = 0 # initialize skeleton pose to first frame
72 | self.apply_frame(self.cur_frame)
73 |
74 | def get_joint_names(self) -> List[str]:
75 | """ Get names of joints in skeleton in the order in which BVH rotation data is stored. """
76 | return self.root_joint.get_chain_joint_names()
77 |
78 | def update(self) -> None:
79 | """Based upon internal time, determine which frame should be displayed and apply it"""
80 | cur_time: float = self.get_time()
81 | cur_frame = round(cur_time / self.frame_time) % self.frame_max_num
82 | self.apply_frame(cur_frame)
83 |
84 | def apply_frame(self, frame_num: int) -> None:
85 | """ Apply root position and joint rotation data for specified frame_num """
86 | self.root_joint.set_position(self.pos_data[frame_num])
87 | self._apply_frame_rotations(self.root_joint, frame_num, ptr=np.array(0))
88 |
89 | def _apply_frame_rotations(self, joint: BVH_Joint, frame_num: int, ptr: npt.NDArray[np.int32]) -> None:
90 | q = Quaternions(self.rot_data[frame_num, ptr])
91 | joint.set_rotation(q)
92 |
93 | ptr += 1
94 |
95 | for c in joint.get_children():
96 | if not isinstance(c, BVH_Joint):
97 | continue
98 | self._apply_frame_rotations(c, frame_num, ptr)
99 |
100 | def get_skeleton_fwd(self, forward_perp_vector_joint_names: List[Tuple[str, str]], update: bool = True) -> Vectors:
101 | """
102 | Get current forward vector of skeleton in world coords. If update=True, ensure skeleton transforms are current.
 103 |         Input forward_perp_vector_joint_names, a list of pairs of joint names (e.g. [[leftshoulder, rightshoulder], [lefthip, righthip]])
104 | Finds average of vectors between joint pairs, then returns vector perpendicular to their average.
105 | """
106 | if update:
107 | self.root_joint.update_transforms(update_ancestors=True)
108 |
109 | vectors_cw_perpendicular_to_fwd: List[Vectors] = []
110 | for (start_joint_name, end_joint_name) in forward_perp_vector_joint_names:
111 | start_joint = self.root_joint.get_transform_by_name(start_joint_name)
112 | if not start_joint:
113 | msg = f'Could not find BVH joint with name: {start_joint_name}'
114 | logging.critical(msg)
115 | assert False, msg
116 |
117 | end_joint = self.root_joint.get_transform_by_name(end_joint_name)
118 | if not end_joint:
119 | msg = f'Could not find BVH joint with name: {end_joint_name}'
120 | logging.critical(msg)
121 | assert False, msg
122 |
123 | bone_vector: Vectors = Vectors(end_joint.get_world_position()) - Vectors(start_joint.get_world_position())
124 | bone_vector.norm()
125 | vectors_cw_perpendicular_to_fwd.append(bone_vector)
126 |
127 | return Vectors(vectors_cw_perpendicular_to_fwd).average().perpendicular()
128 |
129 | @classmethod
130 | def from_file(cls, bvh_fn: str, start_frame_idx: int = 0, end_frame_idx: Optional[int] = None) -> BVH:
131 | """ Given a path to a .bvh, constructs and returns BVH object"""
132 |
133 | # search for the BVH file specified
134 | bvh_p: Path = resolve_ad_filepath(bvh_fn, 'bvh file')
135 | logging.info(f'Using BVH file located at {bvh_p.resolve()}')
136 |
137 | with open(str(bvh_p), 'r') as f:
138 | lines = f.read().splitlines()
139 |
140 | if lines.pop(0) != 'HIERARCHY':
141 | msg = f'Malformed BVH in line preceding {lines}'
142 | logging.critical(msg)
143 | assert False, msg
144 |
145 | # Parse the skeleton
146 | root_joint: BVH_Joint = BVH._parse_skeleton(lines)
147 |
148 | if lines.pop(0) != 'MOTION':
149 | msg = f'Malformed BVH in line preceding {lines}'
150 | logging.critical(msg)
151 | assert False, msg
152 |
153 | # Parse motion metadata
154 | frame_max_num = int(lines.pop(0).split(':')[-1])
155 | frame_time = float(lines.pop(0).split(':')[-1])
156 |
157 | # Parse motion data
 158 |         frames = [list(map(float, line.strip().split())) for line in lines]  # split() tolerates tabs and repeated spaces
159 | if len(frames) != frame_max_num:
160 | msg = f'framenum specified ({frame_max_num}) and found ({len(frames)}) do not match'
161 | logging.critical(msg)
162 | assert False, msg
163 |
164 | # Split logically distinct root position data from joint euler angle rotation data
165 | pos_data: npt.NDArray[np.float32]
166 | rot_data: npt.NDArray[np.float32]
167 | pos_data, rot_data = BVH._process_frame_data(root_joint, frames)
168 |
169 | # Set end_frame if not passed in
170 | if not end_frame_idx:
171 | end_frame_idx = frame_max_num
172 |
173 | # Ensure end_frame_idx <= frame_max_num
174 | if frame_max_num < end_frame_idx:
175 | msg = f'config specified end_frame_idx > bvh frame_max_num ({end_frame_idx} > {frame_max_num}). Replacing with frame_max_num.'
176 | logging.warning(msg)
177 | end_frame_idx = frame_max_num
178 |
179 | # slice position and rotation data using start and end frame indices
180 | pos_data = pos_data[start_frame_idx:end_frame_idx, :]
181 | rot_data = rot_data[start_frame_idx:end_frame_idx, :]
182 |
 183 |         # new frame_max_num is end_frame_idx minus start_frame_idx
184 | frame_max_num = end_frame_idx - start_frame_idx
185 |
186 | return BVH(bvh_p.name, root_joint, frame_max_num, frame_time, pos_data, rot_data)
187 |
188 | @classmethod
189 | def _parse_skeleton(cls, lines: List[str]) -> BVH_Joint:
190 | """
191 | Called recursively to parse and construct skeleton from BVH
192 | :param lines: partially-processed contents of BVH file. Is modified in-place.
193 | :return: Joint
194 | """
195 |
196 | # Get the joint name
197 | if lines[0].strip().startswith('ROOT'):
198 | _, joint_name = lines.pop(0).strip().split(' ')
199 | elif lines[0].strip().startswith('JOINT'):
200 | _, joint_name = lines.pop(0).strip().split(' ')
201 | elif lines[0].strip().startswith('End Site'):
202 | joint_name = lines.pop(0).strip()
203 | else:
204 | msg = f'Malformed BVH. Line: {lines[0]}'
205 | logging.critical(msg)
206 | assert False, msg
207 |
208 | if lines.pop(0).strip() != '{':
209 | msg = f'Malformed BVH in line preceding {lines}'
210 | logging.critical(msg)
211 | assert False, msg
212 |
213 | # Get offset
214 | if not lines[0].strip().startswith('OFFSET'):
215 | msg = f'Malformed BVH in line preceding {lines}'
216 | logging.critical(msg)
217 | assert False, msg
218 | _, *xyz = lines.pop(0).strip().split(' ')
219 | offset = Vectors(list(map(float, xyz)))
220 |
221 | # Get channels
222 | if lines[0].strip().startswith('CHANNELS'):
223 | channel_order = lines.pop(0).strip().split(' ')
224 | _, channel_num, *channel_order = channel_order
225 | else:
226 | channel_num, channel_order = 0, []
227 | if int(channel_num) != len(channel_order):
228 | msg = f'Malformed BVH in line preceding {lines}'
229 | logging.critical(msg)
230 | assert False, msg
231 |
232 | # Recurse for children
233 | children: List[BVH_Joint] = []
234 | while lines[0].strip() != '}':
235 | children.append(BVH._parse_skeleton(lines))
236 | lines.pop(0) # }
237 |
238 | return BVH_Joint(name=joint_name, offset=offset, channel_order=channel_order, children=children)
239 |
240 | @classmethod
241 | def _process_frame_data(cls, skeleton: BVH_Joint, frames: List[List[float]]) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]:
242 | """ Given skeleton and frame data, return root position data and joint quaternion data, separately"""
243 |
 244 |         def _get_frame_channel_order(joint: BVH_Joint) -> List[str]:
 245 |             channels: List[str] = list(joint.channel_order)  # fresh list; avoids the shared mutable default argument
 246 |             for child in [child for child in joint.get_children() if isinstance(child, BVH_Joint)]:
 247 |                 channels.extend(_get_frame_channel_order(child))
 248 |             return channels
249 | channels = _get_frame_channel_order(skeleton)
250 |
251 | # create a mask so we retain only joint rotations and root position
 252 |         mask = np.array(['rotation' in channel for channel in channels])
253 | mask[:3] = True # hack to make sure we keep root position
254 |
255 | frames = np.array(frames, dtype=np.float32)[:, mask]
256 |
257 | # split root pose data and joint euler angle data
258 | pos_data, ea_rots = np.split(np.array(frames, dtype=np.float32), [3], axis=1)
259 |
260 | # quaternion rot data will go here
261 | rot_data = np.empty([len(frames), skeleton.joint_count(), 4], dtype=np.float32)
262 | BVH._pose_ea_to_q(skeleton, ea_rots, rot_data)
263 |
264 | return pos_data, rot_data
265 |
266 | @classmethod
267 | def _pose_ea_to_q(cls, joint: BVH_Joint, ea_rots: npt.NDArray[np.float32], q_rots: npt.NDArray[np.float32], p1: int = 0, p2: int = 0) -> Tuple[int, int]:
268 | """
269 | Given joint and array of euler angle rotation data, converts to quaternions and stores in q_rots.
270 | Only called by _process_frame_data(). Modifies q_rots inplace.
271 | :param p1: pointer to find where in ea_rots to read euler angles from
272 | :param p2: pointer to determine where in q_rots to input quaternion
273 | """
274 | axis_chars = "".join([c[0].lower() for c in joint.channel_order if c.endswith('rotation')]) # e.g. 'xyz'
275 |
276 | q_rots[:, p2] = Quaternions.from_euler_angles(axis_chars, ea_rots[:, p1:p1+len(axis_chars)]).qs
277 | p1 += len(axis_chars)
278 | p2 += 1
279 |
280 | for child in joint.get_children():
281 | if isinstance(child, BVH_Joint):
282 | p1, p2 = BVH._pose_ea_to_q(child, ea_rots, q_rots, p1, p2)
283 |
284 | return p1, p2
285 |
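The channel-masking step in _process_frame_data above is easiest to see on a toy channel list. A minimal standalone sketch (numpy only; the two-joint channel layout below is hypothetical, modeling an exporter that emits 6 channels per joint, which is exactly the case where the mask matters):

import numpy as np

# hypothetical flattened channel order for a two-joint skeleton where the
# exporter emitted 6 channels (3 position + 3 rotation) for every joint
channels = ['Xposition', 'Yposition', 'Zposition', 'Zrotation', 'Xrotation', 'Yrotation',
            'Xposition', 'Yposition', 'Zposition', 'Zrotation', 'Xrotation', 'Yrotation']

mask = np.array(['rotation' in c for c in channels])
mask[:3] = True  # keep the root position channels as well

frames = np.arange(2 * len(channels), dtype=np.float32).reshape(2, -1)
kept = frames[:, mask]                           # child position channels are dropped
pos_data, ea_rots = np.split(kept, [3], axis=1)  # root xyz vs. per-joint Euler angles
print(pos_data.shape, ea_rots.shape)             # (2, 3) (2, 6)
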
--------------------------------------------------------------------------------
/animated_drawings/model/camera.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from animated_drawings.model.transform import Transform
6 | from animated_drawings.model.vectors import Vectors
7 | from typing import Union, List
8 |
9 |
10 | class Camera(Transform):
11 |
12 | def __init__(
13 | self,
14 | pos: Union[Vectors, List[Union[float, int]]] = Vectors([0.0, 0.0, 0.0]),
15 | fwd: Union[Vectors, List[Union[float, int]]] = Vectors([0.0, 0.0, 1.0])
16 | ):
17 | super().__init__()
18 |
19 | if not isinstance(pos, Vectors):
20 | pos = Vectors(pos)
21 | self.set_position(pos)
22 |
23 | if not isinstance(fwd, Vectors):
24 | fwd = Vectors(fwd)
25 | self.look_at(fwd)
26 |
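A minimal usage sketch (assuming the package is importable; the position and forward values mirror those in mvc_base_cfg.yaml below):

from animated_drawings.model.camera import Camera

# plain lists are accepted and converted to Vectors internally
camera = Camera(pos=[0.0, 0.7, 2.0], fwd=[0.0, 0.3, 2.0])
print(camera.get_world_position())  # [0.  0.7 2. ]
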
--------------------------------------------------------------------------------
/animated_drawings/model/floor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from animated_drawings.model.rectangle import Rectangle
6 | from animated_drawings.model.transform import Transform
7 | import numpy as np
8 |
9 |
10 | class Floor(Transform):
11 |
12 | def __init__(self):
13 | super().__init__()
14 |
15 | for idx in range(-5, 5):
16 | for jdx in range(-5, 5):
17 | color = 'white' if (idx + jdx) % 2 else 'black'
18 | tile = Rectangle(color=color)
19 | tile.offset(np.array([float(idx), 0, float(jdx)]))
20 | self.add_child(tile)
21 |
--------------------------------------------------------------------------------
/animated_drawings/model/joint.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from __future__ import annotations
6 | from animated_drawings.model.transform import Transform
7 | from typing import List
8 |
9 |
10 | class Joint(Transform):
11 | """
12 |     Skeletal joint used to represent character poses.
13 | """
14 |
15 | def __init__(self, **kwargs) -> None:
16 | super().__init__(**kwargs)
17 |
18 | def joint_count(self) -> int:
19 | """ Returns 1 + the number of Joint children in this joint's kinematic chain (recursive) """
20 | count: int = 1
21 | for c in self.get_children():
22 | if isinstance(c, Joint):
23 | count += c.joint_count()
24 | return count
25 |
26 | def get_chain_worldspace_positions(self) -> List[float]:
27 | """ Get xzy worldspace coordinates of all joints within the chain. """
28 | self.update_transforms(update_ancestors=True)
29 | return self._get_chain_worldspace_positions(self, [])
30 |
31 | def _get_chain_worldspace_positions(self, joint: Joint, position_list: List[float]) -> List[float]:
32 | position_list.extend(joint.get_world_position(update_ancestors=False))
33 | for c in joint.get_children():
34 | if not isinstance(c, Joint):
35 | continue
36 | self._get_chain_worldspace_positions(c, position_list)
37 | return position_list
38 |
39 | def get_chain_joint_names(self):
40 | """ Traverse through joint in depth-first order and return names of joints in the order they are visited. """
41 | joint_names: List[str] = []
42 | return self._get_chain_joint_names(self, joint_names)
43 |
44 | def _get_chain_joint_names(self, joint: Joint, joint_name_list: List[str]) -> List[str]:
45 | joint_name_list.append(str(joint.name))
46 | for c in joint.get_children():
47 | if not isinstance(c, Joint):
48 | continue
49 | self._get_chain_joint_names(c, joint_name_list)
50 | return joint_name_list
51 |
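A minimal sketch of the chain helpers (assuming the package is importable; the joint names are illustrative):

from animated_drawings.model.joint import Joint

# a three-joint chain: root -> child -> end
root = Joint(name='root', children=[Joint(name='child', children=[Joint(name='end')])])
print(root.joint_count())            # 3
print(root.get_chain_joint_names())  # ['root', 'child', 'end']
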
--------------------------------------------------------------------------------
/animated_drawings/model/quaternions.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from __future__ import annotations # so we can refer to class Type inside class
6 | import numpy as np
7 | import numpy.typing as npt
8 | import logging
9 | from typing import Union, Iterable, List, Tuple
10 | from animated_drawings.model.vectors import Vectors
11 | import math
12 | from animated_drawings.utils import TOLERANCE
13 | from functools import reduce
14 |
15 |
16 | class Quaternions:
17 | """
18 | Wrapper class around ndarray interpreted as one or more quaternions. Quaternion order is [w, x, y, z]
19 |     When passing in existing Quaternions, the new Quaternions object will share the underlying ndarray, so be careful.
20 | Strongly influenced by Daniel Holden's excellent Quaternions class.
21 | """
22 |
23 | def __init__(self, qs: Union[Iterable[Union[int, float]], npt.NDArray[np.float32], Quaternions]) -> None:
24 |
25 | self.qs: npt.NDArray[np.float32]
26 |
27 | if isinstance(qs, np.ndarray):
28 | if not qs.shape[-1] == 4:
29 | msg = f'Final dimension passed to Quaternions must be 4. Found {qs.shape[-1]}'
30 | logging.critical(msg)
31 | assert False, msg
32 |
33 | if len(qs.shape) == 1:
34 | qs = np.expand_dims(qs, axis=0)
35 | self.qs = qs
36 |
37 | elif isinstance(qs, tuple) or isinstance(qs, list):
38 | try:
39 | qs = np.array(qs)
40 | assert qs.shape[-1] == 4
41 | except Exception:
42 | msg = 'Could not convert quaternion data to ndarray with shape[-1] == 4'
43 | logging.critical(msg)
44 | assert False, msg
45 |
46 | if len(qs.shape) == 1:
47 | qs = np.expand_dims(qs, axis=0)
48 | self.qs = qs
49 |
50 | elif isinstance(qs, Quaternions):
51 | self.qs = qs.qs
52 |
53 | else:
54 | msg = 'Quaternions must be constructed from Quaternions or numpy array'
55 | logging.critical(msg)
56 | assert False, msg
57 |
58 | self.normalize()
59 |
60 | def normalize(self) -> None:
61 | self.qs = self.qs / np.expand_dims(np.sum(self.qs ** 2.0, axis=-1) ** 0.5, axis=-1)
62 |
63 | def to_rotation_matrix(self) -> npt.NDArray[np.float32]:
64 | """
65 | From Ken Shoemake
66 | https://www.ljll.math.upmc.fr/~frey/papers/scientific%20visualisation/Shoemake%20K.,%20Quaternions.pdf
67 | :return: 4x4 rotation matrix representation of quaternions
68 | """
69 | w = self.qs[..., 0].squeeze()
70 | x = self.qs[..., 1].squeeze()
71 | y = self.qs[..., 2].squeeze()
72 | z = self.qs[..., 3].squeeze()
73 |
74 | xx, yy, zz = x**2, y**2, z**2
75 |
76 | wx, wy, wz = w*x, w*y, w*z
77 |         xy, xz = x*y, x*z
78 | yz = y*z
79 |
80 | # Row 1
81 | r00 = 1 - 2 * (yy + zz)
82 | r01 = 2 * (xy - wz)
83 | r02 = 2 * (xz + wy)
84 |
85 | # Row 2
86 | r10 = 2 * (xy + wz)
87 | r11 = 1 - 2 * (xx + zz)
88 | r12 = 2 * (yz - wx)
89 |
90 | # Row 3
91 | r20 = 2 * (xz - wy)
92 | r21 = 2 * (yz + wx)
93 | r22 = 1 - 2 * (xx + yy)
94 |
95 | return np.array([[r00, r01, r02, 0.0],
96 | [r10, r11, r12, 0.0],
97 | [r20, r21, r22, 0.0],
98 | [0.0, 0.0, 0.0, 1.0]], dtype=np.float32)
99 |
100 | @classmethod
101 | def rotate_between_vectors(cls, v1: Vectors, v2: Vectors) -> Quaternions:
102 | """ Computes quaternion rotating from v1 to v2. """
103 |
104 | xyz: List[float] = v1.cross(v2).vs.squeeze().tolist()
105 | w: float = math.sqrt((v1.length**2) * (v2.length**2)) + np.dot(v1.vs.squeeze(), v2.vs.squeeze())
106 |
107 | ret_q = Quaternions([w, *xyz])
108 | ret_q.normalize()
109 | return ret_q
110 |
111 | @classmethod
112 | def from_angle_axis(cls, angles: npt.NDArray[np.float32], axes: Vectors) -> Quaternions:
113 | axes.norm()
114 |
115 | if len(angles.shape) == 1:
116 | angles = np.expand_dims(angles, axis=0)
117 |
118 | ss = np.sin(angles / 2.0)
119 | cs = np.cos(angles / 2.0)
120 | return Quaternions(np.concatenate([cs, axes.vs * ss], axis=-1))
121 |
122 | @classmethod
123 | def identity(cls, ret_shape: Tuple[int]) -> Quaternions:
124 | qs = np.broadcast_to(np.array([1.0, 0.0, 0.0, 0.0]), [*ret_shape, 4])
125 | return Quaternions(qs)
126 |
127 | @classmethod
128 | def from_euler_angles(cls, order: str, angles: npt.NDArray[np.float32]) -> Quaternions:
129 | """
130 |         Applies a series of Euler angle rotations. Angles are applied from right to left.
131 | :param order: string comprised of x, y, and/or z
132 | :param angles: angles in degrees
133 | """
134 | if len(angles.shape) == 1:
135 | angles = np.expand_dims(angles, axis=0)
136 |
137 | if len(order) != angles.shape[-1]:
138 | msg = 'length of orders and angles does not match'
139 | logging.critical(msg)
140 | assert False, msg
141 |
142 | _quats = [Quaternions.identity(angles.shape[:-1])]
143 | for axis_char, pos in zip(order, range(len(order))):
144 |
145 | angle = angles[..., pos] * np.pi / 180
146 | angle = np.expand_dims(angle, axis=1)
147 |
148 | axis_char = axis_char.lower()
149 | if axis_char not in 'xyz':
150 | msg = f'order contained unsupported char:{axis_char}'
151 | logging.critical(msg)
152 | assert False, msg
153 |
154 | axis = np.zeros([*angles.shape[:-1], 3])
155 | axis[..., ord(axis_char) - ord('x')] = 1.0
156 |
157 | _quats.insert(0, Quaternions.from_angle_axis(angle, Vectors(axis)))
158 |
159 | ret_q = reduce(lambda a, b: b * a, _quats)
160 | return ret_q
161 |
162 | @classmethod
163 | def from_rotation_matrix(cls, M: npt.NDArray[np.float32]) -> Quaternions:
164 | """
165 | As described here: https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
166 | """
167 | is_orthogonal = np.isclose(M @ M.T, np.identity(4), atol=TOLERANCE)
168 | if not is_orthogonal.all():
169 | msg = "attempted to create quaternion from non-orthogonal rotation matrix"
170 | logging.critical(msg)
171 | assert False, msg
172 |
173 | if not np.isclose(np.linalg.det(M), 1.0):
174 | msg = "attempted to create quaternion from rotation matrix with det != 1"
175 | logging.critical(msg)
176 | assert False, msg
177 |
178 |         # Note: Mike Day's article uses row vectors, whereas we use column vectors, so we work with the transpose of the matrix here
179 | MT = M.T
180 | m00, m01, m02 = MT[0, 0], MT[0, 1], MT[0, 2]
181 | m10, m11, m12 = MT[1, 0], MT[1, 1], MT[1, 2]
182 | m20, m21, m22 = MT[2, 0], MT[2, 1], MT[2, 2]
183 |
184 | if m22 < 0:
185 | if m00 > m11:
186 | t = 1 + m00 - m11 - m22
187 | q = np.array([m12-m21, t, m01+m10, m20+m02])
188 | else:
189 | t = 1 - m00 + m11 - m22
190 | q = np.array([m20-m02, m01+m10, t, m12+m21])
191 | else:
192 | if m00 < -m11:
193 | t = 1 - m00 - m11 + m22
194 | q = np.array([m01-m10, m20+m02, m12+m21, t])
195 | else:
196 | t = 1 + m00 + m11 + m22
197 | q = np.array([ t, m12-m21, m20-m02, m01-m10])
198 |
199 | q *= (0.5 / math.sqrt(t))
200 |
201 | ret_q = Quaternions(q)
202 | ret_q.normalize()
203 | return ret_q
204 |
205 | def __mul__(self, other: Quaternions):
206 | """
207 | From https://danceswithcode.net/engineeringnotes/quaternions/quaternions.html
208 | """
209 | s0 = self.qs[..., 0]
210 | s1 = self.qs[..., 1]
211 | s2 = self.qs[..., 2]
212 | s3 = self.qs[..., 3]
213 |
214 | r0 = other.qs[..., 0]
215 | r1 = other.qs[..., 1]
216 | r2 = other.qs[..., 2]
217 | r3 = other.qs[..., 3]
218 |
219 | t = np.empty(self.qs.shape)
220 |
221 | t[..., 0] = r0*s0 - r1*s1 - r2*s2 - r3*s3
222 | t[..., 1] = r0*s1 + r1*s0 - r2*s3 + r3*s2
223 | t[..., 2] = r0*s2 + r1*s3 + r2*s0 - r3*s1
224 | t[..., 3] = r0*s3 - r1*s2 + r2*s1 + r3*s0
225 |
226 | return Quaternions(t)
227 |
228 | def __neg__(self):
229 | return Quaternions(self.qs * np.array([1, -1, -1, -1]))
230 |
231 | def __str__(self):
232 | return f"Quaternions({str(self.qs)})"
233 |
234 | def __repr__(self):
235 | return f"Quaternions({str(self.qs)})"
236 |
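Two properties worth seeing concretely: from_euler_angles agrees with from_angle_axis for a single axis, and the rotation-matrix conversions round-trip (up to sign, since q and -q encode the same rotation). A minimal sketch, assuming the package is importable:

import numpy as np
from animated_drawings.model.quaternions import Quaternions
from animated_drawings.model.vectors import Vectors

# 90 degrees about y, once via angle-axis (radians) and once via Euler angles (degrees)
q1 = Quaternions.from_angle_axis(np.array([np.pi / 2]), Vectors([0.0, 1.0, 0.0]))
q2 = Quaternions.from_euler_angles('y', np.array([90.0]))
print(np.allclose(q1.qs, q2.qs))  # True

# matrix round trip: compare up to sign, since q and -q are the same rotation
M = q1.to_rotation_matrix()
q3 = Quaternions.from_rotation_matrix(M)
print(np.allclose(q1.qs, q3.qs) or np.allclose(q1.qs, -q3.qs))  # True
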
--------------------------------------------------------------------------------
/animated_drawings/model/rectangle.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | import numpy as np
6 | import OpenGL.GL as GL
7 | from animated_drawings.model.transform import Transform
8 | import ctypes
9 |
10 |
11 | class Rectangle(Transform):
12 |
13 | def __init__(self, color: str = 'white') -> None:
14 |
15 | super().__init__()
16 |
17 | if color == 'white':
18 | c = np.array([1.0, 1.0, 1.0], np.float32)
19 | elif color == 'black':
20 | c = np.array([0.3, 0.3, 0.3], np.float32)
21 | elif color == 'blue':
22 | c = np.array([0.00, 0.0, 1.0], np.float32)
23 | else:
24 | assert len(color) == 3
25 | c = np.array([*color], np.float32)
26 |
27 | self.points = np.array([
28 | [0.5, 0.0, 0.5, *c], # top right
29 | [-0.5, 0.0, -0.5, *c], # bottom left
30 | [-0.5, 0.0, 0.5, *c], # top left
31 | [0.5, 0.0, -0.5, *c], # bottom right
32 | [-0.5, 0.0, -0.5, *c], # bottom left
33 | [0.5, 0.0, 0.5, *c], # top right
34 | ], np.float32)
35 |
36 | self.vao = GL.glGenVertexArrays(1)
37 | self.vbo = GL.glGenBuffers(1)
38 |
39 | GL.glBindVertexArray(self.vao)
40 |
41 | # buffer vertex data
42 | GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
43 | GL.glBufferData(GL.GL_ARRAY_BUFFER, self.points, GL.GL_STATIC_DRAW)
44 |
45 | # position attributes
46 | GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 4 * self.points.shape[1], None)
47 | GL.glEnableVertexAttribArray(0)
48 |
49 | # color attributes
50 | GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, False, 4 * self.points.shape[1], ctypes.c_void_p(4 * 3))
51 | GL.glEnableVertexAttribArray(1)
52 |
53 | # texture attributes
54 | GL.glVertexAttribPointer(2, 2, GL.GL_FLOAT, False, 4 * self.points.shape[1], ctypes.c_void_p(4 * 6))
55 | GL.glEnableVertexAttribArray(2)
56 |
57 | GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
58 | GL.glBindVertexArray(0)
59 |
60 | def _draw(self, **kwargs) -> None:
61 |
62 | GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
63 | GL.glUseProgram(kwargs['shader_ids']['color_shader'])
64 | model_loc = GL.glGetUniformLocation(kwargs['shader_ids']['color_shader'], "model")
65 | GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE, self._world_transform.T)
66 |
67 | GL.glBindVertexArray(self.vao)
68 | GL.glDrawArrays(GL.GL_TRIANGLES, 0, 6)
69 |
--------------------------------------------------------------------------------
/animated_drawings/model/scene.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from animated_drawings.model.transform import Transform
6 | from animated_drawings.model.time_manager import TimeManager
7 | from animated_drawings.config import SceneConfig
8 | from animated_drawings.model.floor import Floor
9 | from animated_drawings.model.animated_drawing import AnimatedDrawing
10 |
11 |
12 | class Scene(Transform, TimeManager):
13 | """
14 | The scene is the singular 'world' object.
15 | It contains all objects that need to be drawn.
16 | It keeps track of global time.
17 | """
18 |
19 | def __init__(self, cfg: SceneConfig) -> None:
20 | """ Takes in the scene dictionary from an mvc config file and prepares the scene. """
21 | super().__init__()
22 |
23 | # add floor if required
24 | if cfg.add_floor:
25 | self.add_child(Floor())
26 |
27 | # Add the Animated Drawings
28 | for each in cfg.animated_characters:
29 |
30 | ad = AnimatedDrawing(*each)
31 | self.add_child(ad)
32 |
33 | # add bvh to the scene if we're going to visualize it
34 | if cfg.add_ad_retarget_bvh:
35 | self.add_child(ad.retargeter.bvh)
36 |
37 | def progress_time(self, delta_t: float) -> None:
38 | """
39 | Entry point called to update time in the scene by delta_t seconds.
40 |         Because animatable objects within the scene may have their own individual timelines,
41 |         we recursively go through objects in the scene and call tick() on each TimeManager.
42 | """
43 | self._progress_time(self, delta_t)
44 |
45 | def _progress_time(self, t: Transform, delta_t: float) -> None:
46 | """ Recursively calls tick() on all TimeManager objects. """
47 |
48 | if isinstance(t, TimeManager):
49 | t.tick(delta_t)
50 |
51 | for c in t.get_children():
52 | self._progress_time(c, delta_t)
53 |
--------------------------------------------------------------------------------
/animated_drawings/model/time_manager.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from typing import Optional
6 | from abc import abstractmethod
7 |
8 |
9 | class TimeManager():
10 | """ Mixin class designed to be used by objects that must keep track of their own time (e.g. time-varying animations) """
11 |
12 | def __init__(self, **kwargs):
13 | super().__init__(**kwargs)
14 |
15 | self._time: float = 0.0 # object's internal time, in seconds
16 | self._is_paused: bool = False
17 |
18 | def tick(self, delta_t: float) -> None:
19 | """ Progress objects interval time by delta_t seconds if not paused """
20 | if not self._is_paused:
21 | self._time += delta_t
22 | self.update()
23 |
24 | @abstractmethod
25 | def update(self):
26 | """ Contains logic needed to update subclass after tick() """
27 | pass
28 |
29 | def set_pause(self, pause: Optional[bool]) -> None:
30 | if pause is None:
31 | self._is_paused = not self._is_paused
32 | else:
33 | self._is_paused = pause
34 |
35 | def set_time(self, time: float) -> None:
36 | self._time = time
37 |
38 | def get_time(self) -> float:
39 | return self._time
40 |
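A minimal sketch of the mixin contract (Metronome is a hypothetical subclass used only for illustration):

from animated_drawings.model.time_manager import TimeManager

class Metronome(TimeManager):  # hypothetical subclass for illustration
    def update(self) -> None:
        print(f'tick! t={self.get_time():.2f}s')

m = Metronome()
m.tick(0.5)          # tick! t=0.50s
m.set_pause(None)    # passing None toggles the paused state
m.tick(0.5)          # paused: time does not advance and update() is not called
print(m.get_time())  # 0.5
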
--------------------------------------------------------------------------------
/animated_drawings/model/transform.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from __future__ import annotations # so we can refer to class Type inside class
6 | import numpy as np
7 | import numpy.typing as npt
8 | from animated_drawings.model.vectors import Vectors
9 | from animated_drawings.model.quaternions import Quaternions
10 | import logging
11 | from typing import Union, Optional, List, Tuple
12 |
13 |
14 | class Transform():
15 | """Base class from which all other scene objects descend"""
16 |
17 | def __init__(self,
18 | parent: Optional[Transform] = None,
19 | name: Optional[str] = None,
20 | children: List[Transform] = [],
21 | offset: Union[npt.NDArray[np.float32], Vectors, None] = None,
22 | **kwargs
23 | ) -> None:
24 |
25 | super().__init__(**kwargs)
26 |
27 | self._parent: Optional[Transform] = parent
28 |
29 | self._children: List[Transform] = []
30 | for child in children:
31 | self.add_child(child)
32 |
33 | self.name: Optional[str] = name
34 |
35 | self._translate_m: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
36 | self._rotate_m: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
37 | self._scale_m: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
38 |
39 | if offset is not None:
40 | self.offset(offset)
41 |
42 | self._local_transform: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
43 | self._world_transform: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
44 | self.dirty_bit: bool = True # are world/local transforms stale?
45 |
46 | def update_transforms(self, parent_dirty_bit: bool = False, recurse_on_children: bool = True, update_ancestors: bool = False) -> None:
47 | """
48 | Updates transforms if stale.
49 | If own dirty bit is set, recompute local matrix
50 | If own or parent's dirty bit is set, recompute world matrix
51 | If own or parent's dirty bit is set, recurses on children, unless param recurse_on_children is false.
52 |         If update_ancestors is true, first find the root ancestor, then call update_transforms upon it.
53 | Set dirty bit back to false.
54 | """
55 | if update_ancestors:
56 | ancestor, ancestor_parent = self, self.get_parent()
57 | while ancestor_parent is not None:
58 | ancestor, ancestor_parent = ancestor_parent, ancestor_parent.get_parent()
59 | ancestor.update_transforms()
60 |
61 | if self.dirty_bit:
62 | self.compute_local_transform()
63 | if self.dirty_bit | parent_dirty_bit:
64 | self.compute_world_transform()
65 |
66 | if recurse_on_children:
67 | for c in self.get_children():
68 | c.update_transforms(self.dirty_bit | parent_dirty_bit)
69 |
70 | self.dirty_bit = False
71 |
72 | def compute_local_transform(self) -> None:
73 | self._local_transform = self._translate_m @ self._rotate_m @ self._scale_m
74 |
75 | def compute_world_transform(self) -> None:
76 | self._world_transform = self._local_transform
77 | if self._parent:
78 | self._world_transform = self._parent._world_transform @ self._world_transform
79 |
80 | def get_world_transform(self, update_ancestors: bool = True) -> npt.NDArray[np.float32]:
81 | """
82 | Get the transform's world matrix.
83 |         If update_ancestors is true, ensure the world_transform is current before returning
84 | """
85 | if update_ancestors:
86 | self.update_transforms(update_ancestors=True)
87 | return np.copy(self._world_transform)
88 |
89 | def set_scale(self, scale: float) -> None:
90 | self._scale_m[:-1, :-1] = scale * np.identity(3, dtype=np.float32)
91 | self.dirty_bit = True
92 |
93 | def set_position(self, pos: Union[npt.NDArray[np.float32], Vectors]) -> None:
94 | """ Set the absolute values of the translational elements of transform """
95 | if isinstance(pos, Vectors):
96 | pos = pos.vs
97 |
98 | if pos.shape == (1, 3):
99 | pos = np.squeeze(pos)
100 | elif pos.shape == (3,):
101 | pass
102 | else:
103 | msg = f'bad vector dim passed to set_position. Found: {pos.shape}'
104 | logging.critical(msg)
105 | assert False, msg
106 |
107 | self._translate_m[:-1, -1] = pos
108 | self.dirty_bit = True
109 |
110 | def get_local_position(self) -> npt.NDArray[np.float32]:
111 | """ Ensure local transform is up-to-date and return local xyz coordinates """
112 | if self.dirty_bit:
113 | self.compute_local_transform()
114 | return np.copy(self._local_transform[:-1, -1])
115 |
116 | def get_world_position(self, update_ancestors: bool = True) -> npt.NDArray[np.float32]:
117 | """
118 |         Ensure all ancestor transforms are updated and return world xyz coordinates.
119 |         If update_ancestors is true, update ancestor transforms to ensure an
120 |         up-to-date world_transform before returning
121 | """
122 | if update_ancestors:
123 | self.update_transforms(update_ancestors=True)
124 |
125 | return np.copy(self._world_transform[:-1, -1])
126 |
127 | def offset(self, pos: Union[npt.NDArray[np.float32], Vectors]) -> None:
128 | """ Translational offset by the specified amount """
129 |
130 | if isinstance(pos, Vectors):
131 | pos = pos.vs[0]
132 | assert isinstance(pos, np.ndarray)
133 |
134 | self.set_position(self._translate_m[:-1, -1] + pos)
135 |
136 | def look_at(self, fwd_: Union[npt.NDArray[np.float32], Vectors, None]) -> None:
137 | """Given a forward vector, rotate the transform to face that position"""
138 | if fwd_ is None:
139 | fwd_ = Vectors(self.get_world_position())
140 | elif isinstance(fwd_, np.ndarray):
141 | fwd_ = Vectors(fwd_)
142 | fwd: Vectors = fwd_.copy() # norming will change the vector
143 |
144 | if fwd.vs.shape != (1, 3):
145 | msg = f'look_at fwd_ vector must have shape [1,3]. Found: {fwd.vs.shape}'
146 | logging.critical(msg)
147 | assert False, msg
148 |
149 | tmp: Vectors = Vectors([0.0, 1.0, 0.0])
150 |
151 |         # if fwd and tmp are the same vector, modify tmp to avoid collapse
152 | if np.isclose(fwd.vs, tmp.vs).all() or np.isclose(fwd.vs, -tmp.vs).all():
153 | tmp.vs[0] += 0.001
154 |
155 | right: Vectors = tmp.cross(fwd)
156 | up: Vectors = fwd.cross(right)
157 |
158 | fwd.norm()
159 | right.norm()
160 | up.norm()
161 |
162 | rotate_m = np.identity(4, dtype=np.float32)
163 | rotate_m[:-1, 0] = np.squeeze(right.vs)
164 | rotate_m[:-1, 1] = np.squeeze(up.vs)
165 | rotate_m[:-1, 2] = np.squeeze(fwd.vs)
166 |
167 | self._rotate_m = rotate_m
168 | self.dirty_bit = True
169 |
170 | def get_right_up_fwd_vectors(self) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32], npt.NDArray[np.float32]]:
171 | inverted: npt.NDArray[np.float32] = np.linalg.inv(self.get_world_transform())
172 | right: npt.NDArray[np.float32] = inverted[:-1, 0]
173 | up: npt.NDArray[np.float32] = inverted[:-1, 1]
174 | fwd: npt.NDArray[np.float32] = inverted[:-1, 2]
175 |
176 | return right, up, fwd
177 |
178 | def set_rotation(self, q: Quaternions) -> None:
179 | if q.qs.shape != (1, 4):
180 |             msg = f'set_rotation q must have dimension (1, 4). Found: {q.qs.shape}'
181 | logging.critical(msg)
182 | assert False, msg
183 | self._rotate_m = q.to_rotation_matrix()
184 | self.dirty_bit = True
185 |
186 | def rotation_offset(self, q: Quaternions) -> None:
187 | if q.qs.shape != (1, 4):
188 |             msg = f'rotation_offset q must have dimension (1, 4). Found: {q.qs.shape}'
189 | logging.critical(msg)
190 | assert False, msg
191 | self._rotate_m = (q * Quaternions.from_rotation_matrix(self._rotate_m)).to_rotation_matrix()
192 | self.dirty_bit = True
193 |
194 | def add_child(self, child: Transform) -> None:
195 | self._children.append(child)
196 | child.set_parent(self)
197 |
198 | def get_children(self) -> List[Transform]:
199 | return self._children
200 |
201 | def set_parent(self, parent: Transform) -> None:
202 | self._parent = parent
203 | self.dirty_bit = True
204 |
205 | def get_parent(self) -> Optional[Transform]:
206 | return self._parent
207 |
208 | def get_transform_by_name(self, name: str) -> Optional[Transform]:
209 | """ Search self and children for transform with matching name. Return it if found, None otherwise. """
210 |
211 |         # are we a match?
212 | if self.name == name:
213 | return self
214 |
215 |         # recurse to check if a child is a match
216 | for c in self.get_children():
217 | transform_or_none = c.get_transform_by_name(name)
218 | if transform_or_none: # if we found it
219 | return transform_or_none
220 |
221 | # no match
222 | return None
223 |
224 | def draw(self, recurse: bool = True, **kwargs) -> None:
225 | """ Draw this transform and recurse on children """
226 | self._draw(**kwargs)
227 |
228 | if recurse:
229 | for child in self.get_children():
230 | child.draw(**kwargs)
231 |
232 | def _draw(self, **kwargs) -> None:
233 | """Transforms default to not being drawn. Subclasses must implement how they appear"""
234 |
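A minimal sketch of parent/child transform composition (assuming the package is importable; names and values are illustrative):

import numpy as np
from animated_drawings.model.transform import Transform

parent = Transform(name='parent')
child = Transform(name='child', offset=np.array([1.0, 0.0, 0.0], dtype=np.float32))
parent.add_child(child)

parent.set_position(np.array([0.0, 2.0, 0.0], dtype=np.float32))
print(child.get_world_position())             # [1. 2. 0.] - parent translation composed with child offset
print(parent.get_transform_by_name('child'))  # finds the child Transform by name
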
--------------------------------------------------------------------------------
/animated_drawings/model/transform_widget.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from animated_drawings.model.transform import Transform
6 | import numpy as np
7 | import numpy.typing as npt
8 | import OpenGL.GL as GL
9 | import ctypes
10 |
11 |
12 | class TransformWidget(Transform):
13 | def __init__(self, shader_name: str = 'color_shader'):
14 |
15 | super().__init__()
16 |
17 | self.points: npt.NDArray[np.float32] = np.array([
18 | [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
19 | [1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
20 | [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
21 | [0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
22 | [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
23 | [0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
24 | ], np.float32)
25 |
26 | self.shader_name: str = shader_name
27 |
28 | self._is_opengl_initialized: bool = False
29 |
30 | def _initialize_opengl_resources(self):
31 | self.vao = GL.glGenVertexArrays(1)
32 | self.vbo = GL.glGenBuffers(1)
33 |
34 | GL.glBindVertexArray(self.vao)
35 |
36 | # buffer vertex data
37 | GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
38 | GL.glBufferData(GL.GL_ARRAY_BUFFER, self.points, GL.GL_STATIC_DRAW)
39 |
40 | vert_bytes = 4 * self.points.shape[1] # 4 is byte size of np.float32
41 |
42 | pos_offset = 4 * 0
43 | color_offset = 4 * 3
44 |
45 | # position attributes
46 | GL.glVertexAttribPointer(
47 | 0, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(pos_offset))
48 | GL.glEnableVertexAttribArray(0)
49 |
50 | # color attributes
51 | GL.glVertexAttribPointer(
52 | 1, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(color_offset))
53 | GL.glEnableVertexAttribArray(1)
54 |
55 | GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
56 | GL.glBindVertexArray(0)
57 |
58 | self._is_opengl_initialized = True
59 |
60 | def _draw(self, **kwargs):
61 |
62 | if not self._is_opengl_initialized:
63 | self._initialize_opengl_resources()
64 |
65 | GL.glUseProgram(kwargs['shader_ids'][self.shader_name])
66 | model_loc = GL.glGetUniformLocation(
67 | kwargs['shader_ids'][self.shader_name], "model")
68 | GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE,
69 | self._world_transform.T)
70 |
71 | GL.glBindVertexArray(self.vao)
72 | GL.glDrawArrays(GL.GL_LINES, 0, len(self.points))
73 | GL.glBindVertexArray(0)
74 |
--------------------------------------------------------------------------------
/animated_drawings/model/vectors.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from __future__ import annotations # so we can refer to class Type inside class
6 | import numpy as np
7 | import numpy.typing as npt
8 | import logging
9 | from typing import Union, Iterable, Tuple
10 | from numbers import Number
11 | from copy import copy
12 | from animated_drawings.utils import TOLERANCE
13 |
14 |
15 | class Vectors():
16 | """
17 | Wrapper class around ndarray interpreted as one or more vectors of equal dimensionality
18 |     When passing in existing Vectors, the new Vectors object will share the underlying ndarray, so be careful.
19 | """
20 |
21 | def __init__(self, vs_: Union[Iterable[Union[float, int, Vectors, npt.NDArray[np.float32]]], Vectors]) -> None: # noqa: C901
22 |
23 | self.vs: npt.NDArray[np.float32]
24 |
25 | # initialize from single ndarray
26 | if isinstance(vs_, np.ndarray):
27 | if len(vs_.shape) == 1:
28 | vs_ = np.expand_dims(vs_, axis=0)
29 | self.vs = vs_
30 |
31 | # initialize from tuple or list of numbers
32 | elif isinstance(vs_, (tuple, list)) and isinstance(vs_[0], Number):
33 | try:
34 | vs_ = np.array(vs_)
35 | if len(vs_.shape) == 1:
36 | vs_ = np.expand_dims(vs_, axis=0)
37 | except Exception as e:
38 | msg = f'Error initializing Vectors: {str(e)}'
39 | logging.critical(msg)
40 | assert False, msg
41 | self.vs = vs_
42 |
43 | # initialize from tuple or list of ndarrays
44 | elif isinstance(vs_, (tuple, list)) and isinstance(vs_[0], np.ndarray):
45 | try:
46 | vs_ = np.stack(vs_) # pyright: ignore[reportGeneralTypeIssues]
47 | except Exception as e:
48 | msg = f'Error initializing Vectors: {str(e)}'
49 | logging.critical(msg)
50 | assert False, msg
51 | self.vs = vs_ # pyright: ignore[reportGeneralTypeIssues]
52 |
53 | # initialize from tuple or list of Vectors
54 | elif isinstance(vs_, (tuple, list)) and isinstance(vs_[0], Vectors):
55 | try:
56 | vs_ = np.stack([v.vs.squeeze() for v in vs_]) # pyright: ignore[reportGeneralTypeIssues]
57 | except Exception as e:
58 | msg = f'Error initializing Vectors: {str(e)}'
59 | logging.critical(msg)
60 | assert False, msg
61 | self.vs = vs_
62 |
63 | # initialize from single Vectors
64 | elif isinstance(vs_, Vectors):
65 | self.vs = vs_.vs
66 |
67 | else:
68 | msg = 'Vectors must be constructed from Vectors, ndarray, or Tuples/List of floats/ints or Vectors'
69 | logging.critical(msg)
70 | assert False, msg
71 |
72 | def norm(self) -> None:
73 | ns: npt.NDArray[np.float64] = np.linalg.norm(self.vs, axis=-1)
74 |
75 | if np.min(ns) < TOLERANCE:
76 | logging.info(f"Encountered values close to zero in vector norm. Replacing with {TOLERANCE}")
77 | ns[ns < TOLERANCE] = TOLERANCE
78 |
79 | self.vs = self.vs / np.expand_dims(ns, axis=-1)
80 |
81 | def cross(self, v2: Vectors) -> Vectors:
82 | """ Cross product of a series of 2 or 3 dimensional vectors. All dimensions of vs must match."""
83 |
84 | if self.vs.shape != v2.vs.shape:
85 | msg = f'Cannot cross product different sized vectors: {self.vs.shape} {v2.vs.shape}.'
86 | logging.critical(msg)
87 | assert False, msg
88 |
89 | if not self.vs.shape[-1] in [2, 3]:
90 | msg = f'Cannot cross product vectors of size: {self.vs.shape[-1]}. Must be 2 or 3.'
91 | logging.critical(msg)
92 | assert False, msg
93 |
94 | return Vectors(np.cross(self.vs, v2.vs))
95 |
96 | def perpendicular(self, ccw: bool = True) -> Vectors:
97 | """
98 | Returns ndarray of vectors perpendicular to the original ones.
99 | Only 2D and 3D vectors are supported.
100 | By default returns the counter clockwise vector, but passing ccw=False returns clockwise
101 | """
102 | if not self.vs.shape[-1] in [2, 3]:
103 | msg = f'Cannot get perpendicular of vectors of size: {self.vs.shape[-1]}. Must be 2 or 3.'
104 | logging.critical(msg)
105 | assert False, msg
106 |
107 | v_up: Vectors = Vectors(np.tile([0.0, 1.0, 0.0], [*self.shape[:-1], 1]))
108 |
109 | v_perp = v_up.cross(self)
110 | v_perp.norm()
111 |
112 | if not ccw:
113 | v_perp *= -1
114 |
115 | return v_perp
116 |
117 | def average(self) -> Vectors:
118 | """ Return the average of a collection of vectors, along the first axis"""
119 | return Vectors(np.mean(self.vs, axis=0))
120 |
121 | def copy(self) -> Vectors:
122 | return copy(self)
123 |
124 | @property
125 | def shape(self) -> Tuple[int, ...]:
126 | return self.vs.shape
127 |
128 | @property
129 | def length(self) -> npt.NDArray[np.float32]:
130 | return np.linalg.norm(self.vs, axis=-1).astype(np.float32)
131 |
132 | def __mul__(self, val: float) -> Vectors:
133 | return Vectors(self.vs * val)
134 |
135 | def __truediv__(self, scale: Union[int, float]) -> Vectors:
136 | return Vectors(self.vs / scale)
137 |
138 | def __sub__(self, other: Vectors) -> Vectors:
139 | if self.vs.shape != other.vs.shape:
140 | msg = 'Attempted to subtract Vectors with different dimensions'
141 | logging.critical(msg)
142 | assert False, msg
143 | return Vectors(np.subtract(self.vs, other.vs))
144 |
145 | def __add__(self, other: Vectors) -> Vectors:
146 | if self.vs.shape != other.vs.shape:
147 | msg = 'Attempted to add Vectors with different dimensions'
148 | logging.critical(msg)
149 | assert False, msg
150 | return Vectors(np.add(self.vs, other.vs))
151 |
152 | def __copy__(self) -> Vectors:
153 | return Vectors(self)
154 |
155 | def __str__(self) -> str:
156 | return f"Vectors({str(self.vs)})"
157 |
158 | def __repr__(self) -> str:
159 | return f"Vectors({str(self.vs)})"
160 |
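A minimal usage sketch (assuming the package is importable):

from animated_drawings.model.vectors import Vectors

x = Vectors([1.0, 0.0, 0.0])
y = Vectors([0.0, 1.0, 0.0])

z = x.cross(y)
print(z)         # Vectors([[0. 0. 1.]])
print(x.length)  # [1.]

v = x + y        # elementwise add of equally-shaped Vectors
v.norm()         # normalizes in place
print(v)         # Vectors([[0.70710678 0.70710678 0.        ]])
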
--------------------------------------------------------------------------------
/animated_drawings/mvc_base_cfg.yaml:
--------------------------------------------------------------------------------
1 | scene:
2 | ADD_FLOOR: True
3 | ADD_AD_RETARGET_BVH: False
4 | view:
5 | CLEAR_COLOR: [1.0, 1.0, 1.0, 0.0]
6 | BACKGROUND_IMAGE: null
7 | WINDOW_DIMENSIONS: [500, 500]
8 | DRAW_AD_RIG: False
9 | DRAW_AD_TXTR: True
10 | DRAW_AD_COLOR: False
11 | DRAW_AD_MESH_LINES: False
12 | USE_MESA: False
13 | CAMERA_POS: [0.0, 0.7, 2.0]
14 | CAMERA_FWD: [0.0, 0.3, 2.0]
15 | controller:
16 | MODE: 'interactive'
17 | KEYBOARD_TIMESTEP: 0.0333 # only used if mode is 'interactive'
18 | OUTPUT_VIDEO_PATH: ./output_video.mp4 # only used if mode is 'video_render'
19 | OUTPUT_VIDEO_CODEC: avc1 # only used if mode is 'video_render'
20 |
--------------------------------------------------------------------------------
/animated_drawings/render.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | import logging
6 | import sys
7 |
8 |
9 | def start(user_mvc_cfg_fn: str):
10 |
11 | # build cfg
12 | from animated_drawings.config import Config
13 | cfg: Config = Config(user_mvc_cfg_fn)
14 |
15 | # create view
16 | from animated_drawings.view.view import View
17 | view = View.create_view(cfg.view)
18 |
19 | # create scene
20 | from animated_drawings.model.scene import Scene
21 | scene = Scene(cfg.scene)
22 |
23 | # create controller
24 | from animated_drawings.controller.controller import Controller
25 | controller = Controller.create_controller(cfg.controller, scene, view)
26 |
27 | # start the run loop
28 | controller.run()
29 |
30 |
31 | if __name__ == '__main__':
32 | logging.basicConfig(filename='log.txt', level=logging.DEBUG)
33 |
34 | # user-specified mvc configuration filepath. Can be absolute, relative to cwd, or relative to ${AD_ROOT_DIR}
35 | user_mvc_cfg_fn = sys.argv[1]
36 |
37 | start(user_mvc_cfg_fn)
38 |
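The same entry point can also be driven programmatically. A minimal sketch (the config path is a placeholder, not a file shipped with the repository):

import logging
from animated_drawings import render

logging.basicConfig(level=logging.INFO)
render.start('examples/my_mvc_cfg.yaml')  # hypothetical path; may be absolute, cwd-relative, or ${AD_ROOT_DIR}-relative
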
--------------------------------------------------------------------------------
/animated_drawings/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from PIL import Image, ImageOps
6 | import numpy as np
7 | import numpy.typing as npt
8 | import cv2
9 | from pathlib import Path
10 | import logging
11 | from pkg_resources import resource_filename
12 |
13 | TOLERANCE = 10**-5
14 |
15 |
16 | def resolve_ad_filepath(file_name: str, file_type: str) -> Path:
17 | """
18 |     Given an input filename, attempts to find the file: first as given (absolute or relative to cwd),
19 |     then joined to the cwd, then relative to the animated_drawings root directory.
20 |     If not found, logs a critical error indicating which file_type it is.
21 | """
22 | if Path(file_name).exists():
23 | return Path(file_name)
24 | elif Path.joinpath(Path.cwd(), file_name).exists():
25 | return Path.joinpath(Path.cwd(), file_name)
26 | elif Path(resource_filename(__name__, file_name)).exists():
27 | return Path(resource_filename(__name__, file_name))
28 |     elif Path(resource_filename(__name__, str(Path('..', file_name)))).exists():
29 | return Path(resource_filename(__name__, str(Path('..', file_name))))
30 |
31 | msg = f'Could not find the {file_type} specified: {file_name}'
32 | logging.critical(msg)
33 | assert False, msg
34 |
35 |
36 | def read_background_image(file_name: str) -> npt.NDArray[np.uint8]:
37 | """
38 | Given path to input image file, opens it, flips it based on EXIF tags, if present, and returns image with proper orientation.
39 | """
40 | # Check the file path
41 | file_path = resolve_ad_filepath(file_name, 'background_image')
42 |
43 | # Open the image and rotate as needed depending upon exif tag
44 | image = Image.open(str(file_path))
45 | image = ImageOps.exif_transpose(image)
46 |
47 | # Convert to numpy array and flip rightside up
48 | image_np = np.asarray(image)
49 | image_np = cv2.flip(image_np, 0)
50 |
51 | # Ensure we have RGBA
52 | if len(image_np.shape) == 3 and image_np.shape[-1] == 3: # if RGB
53 | image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2RGBA)
54 | if len(image_np.shape) == 2: # if grayscale
55 | image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGBA)
56 |
57 | return image_np.astype(np.uint8)
58 |
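A minimal usage sketch (the image path is a placeholder; resolve_ad_filepath() is applied internally to locate it):

from animated_drawings.utils import read_background_image

background = read_background_image('examples/background.png')  # hypothetical path
print(background.shape, background.dtype)  # (H, W, 4) uint8 - always RGBA after conversion
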
--------------------------------------------------------------------------------
/animated_drawings/view/__pycache__/utils.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/view/__pycache__/utils.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/view/__pycache__/utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/view/__pycache__/utils.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/view/__pycache__/view.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/view/__pycache__/view.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/view/__pycache__/view.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/view/__pycache__/view.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/view/__pycache__/window_view.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/view/__pycache__/window_view.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/view/__pycache__/window_view.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/view/__pycache__/window_view.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/view/mesa_view.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | import os
6 | os.environ['PYOPENGL_PLATFORM'] = "osmesa"
7 | os.environ['MESA_GL_VERSION_OVERRIDE'] = "3.3"
8 | from OpenGL import GL, osmesa
9 |
10 | from animated_drawings.model.camera import Camera
11 | from animated_drawings.model.scene import Scene
12 | from animated_drawings.model.transform import Transform
13 | from animated_drawings.view.view import View
14 | from animated_drawings.view.utils import get_projection_matrix
15 | from animated_drawings.utils import read_background_image
16 | from animated_drawings.view.shaders.shader import Shader
17 | from animated_drawings.config import ViewConfig
18 |
19 | import logging
20 | from typing import Tuple, Dict
21 | import numpy as np
22 | import numpy.typing as npt
23 | from pathlib import Path
24 | from pkg_resources import resource_filename
25 |
26 |
27 | class MesaView(View):
28 | """ Mesa View for Headless Rendering """
29 |
30 | def __init__(self, cfg: ViewConfig) -> None:
31 | super().__init__(cfg)
32 |
33 | self.camera: Camera = Camera(self.cfg.camera_pos, self.cfg.camera_fwd)
34 |
35 | self.ctx: osmesa.OSMesaContext
36 | self.buffer: npt.NDArray[np.uint8]
37 | self._initialize_mesa()
38 |
39 | self.shaders: Dict[str, Shader] = {}
40 | self.shader_ids: Dict[str, int] = {}
41 | self._prep_shaders()
42 |
43 | self._prep_background_image()
44 |
45 | self._set_shader_projections(get_projection_matrix(*self.get_framebuffer_size()))
46 |
47 | def _prep_background_image(self) -> None:
48 | """ Initialize framebuffer object for background image, if specified. """
49 |
50 | # if nothing specified, return
51 | if not self.cfg.background_image:
52 | return
53 |
54 | _txtr = read_background_image(self.cfg.background_image)
55 |
56 | self.txtr_h, self.txtr_w, _ = _txtr.shape
57 | self.txtr_id = GL.glGenTextures(1)
58 | GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 4)
59 | GL.glBindTexture(GL.GL_TEXTURE_2D, self.txtr_id)
60 | GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
61 | GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
62 | GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, self.txtr_w, self.txtr_h, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, _txtr)
63 |
64 | self.fboId: GL.GLint = GL.glGenFramebuffers(1)
65 | GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fboId)
66 | GL.glFramebufferTexture2D(GL.GL_READ_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D, self.txtr_id, 0)
67 |
68 | def _prep_shaders(self) -> None:
69 | BVH_VERT = Path(resource_filename(__name__, "shaders/bvh.vert"))
70 | BVH_FRAG = Path(resource_filename(__name__, "shaders/bvh.frag"))
71 | self._initiatize_shader('bvh_shader', str(BVH_VERT), str(BVH_FRAG))
72 |
73 | COLOR_VERT = Path(resource_filename(__name__, "shaders/color.vert"))
74 | COLOR_FRAG = Path(resource_filename(__name__, "shaders/color.frag"))
75 | self._initiatize_shader('color_shader', str(COLOR_VERT), str(COLOR_FRAG))
76 |
77 | TEXTURE_VERT = Path(resource_filename(__name__, "shaders/texture.vert"))
78 | TEXTURE_FRAG = Path(resource_filename(__name__, "shaders/texture.frag"))
79 | self._initiatize_shader('texture_shader', str(TEXTURE_VERT), str(TEXTURE_FRAG), texture=True)
80 |
81 | def _update_shaders_view_transform(self, camera: Camera) -> None:
82 | try:
83 | view_transform: npt.NDArray[np.float32] = np.linalg.inv(camera.get_world_transform())
84 | except Exception as e:
85 | msg = f'Error inverting camera world transform: {e}'
86 | logging.critical(msg)
87 | assert False, msg
88 |
89 | for shader_name in self.shaders:
90 | GL.glUseProgram(self.shader_ids[shader_name])
91 | view_loc = GL.glGetUniformLocation(self.shader_ids[shader_name], "view")
92 | GL.glUniformMatrix4fv(view_loc, 1, GL.GL_FALSE, view_transform.T)
93 |
94 | def _set_shader_projections(self, proj_m: npt.NDArray[np.float32]) -> None:
95 | for shader_id in self.shader_ids.values():
96 | GL.glUseProgram(shader_id)
97 | proj_loc = GL.glGetUniformLocation(shader_id, "proj")
98 | GL.glUniformMatrix4fv(proj_loc, 1, GL.GL_FALSE, proj_m.T)
99 |
100 | def _initiatize_shader(self, shader_name: str, vert_path: str, frag_path: str, **kwargs) -> None:
101 | self.shaders[shader_name] = Shader(vert_path, frag_path)
102 | self.shader_ids[shader_name] = self.shaders[shader_name].glid # pyright: ignore[reportGeneralTypeIssues]
103 |
104 | if 'texture' in kwargs and kwargs['texture'] is True:
105 | GL.glUseProgram(self.shader_ids[shader_name])
106 | GL.glUniform1i(GL.glGetUniformLocation(
107 | self.shader_ids[shader_name], 'texture0'), 0)
108 |
109 | def _initialize_mesa(self) -> None:
110 |
111 | width, height = self.cfg.window_dimensions
112 | self.ctx = osmesa.OSMesaCreateContext(osmesa.OSMESA_RGBA, None)
113 | self.buffer: npt.NDArray[np.uint8] = GL.arrays.GLubyteArray.zeros((height, width, 4)) # type: ignore
114 | osmesa.OSMesaMakeCurrent(self.ctx, self.buffer, GL.GL_UNSIGNED_BYTE, width, height)
115 |
116 | GL.glClearColor(*self.cfg.clear_color)
117 |
118 | def set_scene(self, scene: Scene) -> None:
119 | self.scene = scene
120 |
121 | def render(self, scene: Transform) -> None:
122 | GL.glViewport(0, 0, *self.get_framebuffer_size())
123 |
124 | # Draw the background
125 | if self.cfg.background_image:
126 | GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, 0)
127 | GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fboId)
128 | win_w, win_h = self.get_framebuffer_size()
129 | GL.glBlitFramebuffer(0, 0, self.txtr_w, self.txtr_h, 0, 0, win_w, win_h, GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
130 |
131 | self._update_shaders_view_transform(self.camera)
132 |
133 | scene.draw(shader_ids=self.shader_ids, viewer_cfg=self.cfg)
134 |
135 | def get_framebuffer_size(self) -> Tuple[int, int]:
136 | """ Return (width, height) of view's window. """
137 | return self.buffer.shape[:2][::-1]
138 |
139 | def clear_window(self) -> None:
140 | GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT) # type: ignore
141 |
142 | def cleanup(self) -> None:
143 | """ Destroy the context when it is finished. """
144 | osmesa.OSMesaDestroyContext(self.ctx)
145 |
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/__pycache__/shader.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/view/shaders/__pycache__/shader.cpython-312.pyc
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/__pycache__/shader.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/animated_drawings/view/shaders/__pycache__/shader.cpython-38.pyc
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/bvh.frag:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | // This source code is licensed under the MIT license found in the
3 | // LICENSE file in the root directory of this source tree.
4 |
5 | #version 330 core
6 |
7 | flat in int vertex_id;
8 |
9 | uniform int frame_num;
10 | uniform int joint_num;
11 |
12 | in vec3 ourColor;
13 | out vec4 FragColor;
14 |
15 |
16 | void main() {
17 | int first_vertex_of_frame = joint_num * frame_num;
18 | int final_vertex_of_frame = first_vertex_of_frame + joint_num;
19 |
20 | if (first_vertex_of_frame < vertex_id && vertex_id < final_vertex_of_frame){
21 | FragColor = vec4(ourColor, 1.0);
22 | }else{
23 | discard;
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/bvh.vert:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | // This source code is licensed under the MIT license found in the
3 | // LICENSE file in the root directory of this source tree.
4 |
5 | #version 330 core
6 | uniform mat4 model;
7 | uniform mat4 view;
8 | uniform mat4 proj;
9 |
10 | layout(location = 0) in vec3 pos;
11 | layout(location = 1) in vec3 color;
12 |
13 | flat out int vertex_id;
14 |
15 | out vec3 ourColor;
16 |
17 | void main(){
18 | gl_Position = proj * view * model * vec4(pos, 1);
19 |
20 | vertex_id = gl_VertexID;
21 |
22 | ourColor = color;
23 | }
24 |
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/color.frag:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | // This source code is licensed under the MIT license found in the
3 | // LICENSE file in the root directory of this source tree.
4 |
5 | #version 330 core
6 | out vec4 FragColor;
7 |
8 | in vec3 ourColor;
9 |
10 | void main() {
11 | FragColor = vec4(ourColor, 1.0);
12 | }
13 |
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/color.vert:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | // This source code is licensed under the MIT license found in the
3 | // LICENSE file in the root directory of this source tree.
4 |
5 | #version 330 core
6 | layout(location = 0) in vec3 pos;
7 | layout(location = 1) in vec3 color;
8 |
9 | out vec3 ourColor;
10 |
11 | uniform mat4 model;
12 | uniform mat4 view;
13 | uniform mat4 proj;
14 |
15 | uniform bool color_black;
16 |
17 | void main() {
18 | gl_Position = proj * view * model * vec4(pos, 1.0);
19 | if (color_black){
20 | ourColor = vec3(0.0, 0.0, 0.0);
21 | } else{
22 | ourColor = color;
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/shader.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | import OpenGL.GL as GL
6 | import logging
7 |
8 |
9 | class Shader:
10 | """Class to create shader programs"""
11 |
12 | @staticmethod
13 | def _compile_shader(src: str, shader_type):
14 | with open(src, 'r') as f:
15 | src = f.read()
16 | shader = GL.glCreateShader(shader_type)
17 |
18 | GL.glShaderSource(shader, src)
19 | GL.glCompileShader(shader)
20 |
21 | status: bool = GL.glGetShaderiv(shader, GL.GL_COMPILE_STATUS)
22 | if not status:
23 | log = GL.glGetShaderInfoLog(shader).decode('ascii')
24 |
25 | src = '\n'.join([f'{idx + 1}: {l}' for idx, l in enumerate(src.splitlines())])
26 |
27 | msg = f'Compile failed for {shader_type}\n{log}\n{src}'
28 | logging.critical(msg)
29 | assert False, msg
30 |
31 | return shader
32 |
33 | def __init__(self, vertex_source, fragment_source):
34 | """Takes paths to shader code"""
35 | vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)
36 | frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)
37 |
38 | if not (vert and frag):
39 | msg = 'Error compiling shaders'
40 | logging.critical(msg)
41 | assert False, msg
42 |
43 | self.glid = GL.glCreateProgram()
44 |
45 | GL.glAttachShader(self.glid, vert)
46 | GL.glAttachShader(self.glid, frag)
47 | GL.glLinkProgram(self.glid)
48 | GL.glDeleteShader(vert)
49 | GL.glDeleteShader(frag)
50 |
51 | status: bool = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)
52 | if not status:
53 | msg = f'Error creating shader program: {GL.glGetProgramInfoLog(self.glid).decode("ascii")}'
54 | logging.critical(msg)
55 | assert False, msg
56 |
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/texture.frag:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | // This source code is licensed under the MIT license found in the
3 | // LICENSE file in the root directory of this source tree.
4 |
5 | #version 330 core
6 | out vec4 FragColor;
7 |
8 | in vec2 TexCoord;
9 |
10 | uniform sampler2D texture0;
11 |
12 | void main() {
13 | vec4 color = texture(texture0, TexCoord);
14 |
15 | if (color.a < 0.1){
16 | discard;
17 | }
18 | // Apply a smooth step for edges
19 | // float alpha = smoothstep(0.1, 0.5, color.a); // Adjust parameters as needed
20 | // FragColor = vec4(color.rgb, alpha); // Use the original color with modified alpha
21 | FragColor = color;
22 | }
23 |
--------------------------------------------------------------------------------
/animated_drawings/view/shaders/texture.vert:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | // This source code is licensed under the MIT license found in the
3 | // LICENSE file in the root directory of this source tree.
4 |
5 | #version 330 core
6 | layout(location = 0) in vec3 pos;
7 | layout(location = 2) in vec2 texCoord;
8 |
9 | out vec3 ourColor;
10 | out vec2 TexCoord;
11 |
12 | uniform mat4 model;
13 | uniform mat4 view;
14 | uniform mat4 proj;
15 |
16 | void main() {
17 | gl_Position = proj * view * model * vec4(pos, 1.0);
18 | TexCoord = texCoord;
19 | }
20 |
--------------------------------------------------------------------------------
/animated_drawings/view/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | import numpy as np
6 | import numpy.typing as npt
7 | import logging
8 |
9 |
10 | def get_projection_matrix(buffer_w: int, buffer_h: int, type_: str = 'perspective') -> npt.NDArray[np.float32]:
11 |
12 | if type_ == 'perspective':
13 |
14 | fov = 35.0
15 | near = 0.1
16 | aspect = buffer_w / buffer_h
17 | top = near * np.tan(fov * np.pi / 360)
18 | right = top * aspect
19 | far = 10000.0
20 | bottom = -top
21 | left = -right
22 |
23 | M_0_0 = (2 * near) / (right - left)
24 | M_0_2 = (left + right) / (left - right)
25 | M_1_1 = (2 * near) / (top - bottom)
26 |         M_1_2 = (bottom + top) / (bottom - top)
27 | M_2_2 = (far + near) / (near - far)
28 | M_2_3 = (2 * far * near) / (near - far)
29 | M_3_2 = -1
30 |
31 | M: npt.NDArray[np.float32] = np.zeros([4, 4], dtype=np.float32)
32 | M[0, 0] = M_0_0
33 | M[0, 2] = M_0_2
34 | M[1, 1] = M_1_1
35 | M[1, 2] = M_1_2
36 | M[2, 2] = M_2_2
37 | M[2, 3] = M_2_3
38 | M[3, 2] = M_3_2
39 | return M
40 |
41 | else:
42 | logging.critical(f'unsupported camera type specified: {type_}')
43 | assert False
44 |
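A quick sanity check of the matrix above (it follows the standard OpenGL perspective frustum; because the frustum is symmetric, the [0, 2] and [1, 2] terms come out zero):

    import numpy as np
    from animated_drawings.view.utils import get_projection_matrix

    M = get_projection_matrix(640, 480)  # 35 degree vertical fov, near=0.1, far=10000
    # a point on the near plane should map to NDC depth -1
    p = M @ np.array([0.0, 0.0, -0.1, 1.0], dtype=np.float32)
    print(p[2] / p[3])  # ~ -1.0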
--------------------------------------------------------------------------------
/animated_drawings/view/view.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from __future__ import annotations
6 | from abc import abstractmethod
7 | from typing import Tuple
8 | from animated_drawings.config import ViewConfig
9 |
10 |
11 | class View:
12 | """
13 |     Base View class from which all other Views must be derived.
14 | Views are responsible for controlling what is and isn't visible to them.
15 | Views are responsible for initiating the 'draw' methods for each object which they want to render.
16 | """
17 |
18 | def __init__(self, cfg: ViewConfig):
19 | self.cfg: ViewConfig = cfg
20 | pass
21 |
22 | @abstractmethod
23 | def render(self, scene) -> None: # pyright: ignore[reportUnknownParameterType,reportMissingParameterType]
24 | """ Called by the controller to render the scene. """
25 |
26 | @abstractmethod
27 | def clear_window(self) -> None:
28 | """ Clear output from previous render loop. """
29 |
30 | @abstractmethod
31 | def cleanup(self) -> None:
32 | """ Cleanup after render loop is finished. """
33 |
34 | @abstractmethod
35 | def get_framebuffer_size(self) -> Tuple[int, int]:
36 | """ Return (width, height) of framebuffer. """
37 |
38 | @staticmethod
39 | def create_view(view_cfg: ViewConfig) -> View:
40 |         """ Takes a ViewConfig from the mvc config file and returns the appropriate View. """
41 | # create view
42 | if view_cfg.use_mesa:
43 | from animated_drawings.view.mesa_view import MesaView
44 | return MesaView(view_cfg)
45 | else:
46 | from animated_drawings.view.window_view import WindowView
47 | return WindowView(view_cfg)
48 |
--------------------------------------------------------------------------------
/animated_drawings/view/window_view.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from animated_drawings.view.view import View
6 | from animated_drawings.view.utils import get_projection_matrix
7 | from animated_drawings.utils import read_background_image
8 | from animated_drawings.model.camera import Camera
9 | from animated_drawings.model.transform import Transform
10 | from animated_drawings.config import ViewConfig
11 | import glfw
12 | import OpenGL.GL as GL
13 | from animated_drawings.view.shaders.shader import Shader
14 | from animated_drawings.model.scene import Scene
15 | import logging
16 | from typing import Tuple, Dict
17 | import numpy as np
18 | import numpy.typing as npt
19 | from pathlib import Path
20 | from pkg_resources import resource_filename
21 |
22 |
23 | class WindowView(View):
24 | """Window View for rendering into a visible window"""
25 |
26 | def __init__(self, cfg: ViewConfig) -> None:
27 | super().__init__(cfg)
28 |
29 | glfw.init()
30 |
31 | self.camera: Camera = Camera(cfg.camera_pos, cfg.camera_fwd)
32 |
33 | self.win: glfw._GLFWwindow
34 | self._create_window(*cfg.window_dimensions) # pyright: ignore[reportGeneralTypeIssues]
35 |
36 | self.shaders: Dict[str, Shader] = {}
37 | self.shader_ids: Dict[str, int] = {}
38 | self._prep_shaders()
39 |
40 | self.fboId: GL.GLint
41 | self._prep_background_image()
42 |
43 | self._set_shader_projections(get_projection_matrix(*self.get_framebuffer_size()))
44 |
45 | def _prep_background_image(self) -> None:
46 | """ Initialize framebuffer object for background image, if specified. """
47 |
48 | # if nothing specified, return
49 | if not self.cfg.background_image:
50 | return
51 |
52 | # load background image
53 | _txtr: npt.NDArray[np.uint8] = read_background_image(self.cfg.background_image)
54 |
55 | # create the opengl texture and send it data
56 | self.txtr_h, self.txtr_w, _ = _txtr.shape
57 | self.txtr_id = GL.glGenTextures(1)
58 | GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 4)
59 | GL.glBindTexture(GL.GL_TEXTURE_2D, self.txtr_id)
60 | GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
61 | GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
62 | GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, self.txtr_w, self.txtr_h, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, _txtr)
63 |
64 | # make framebuffer object
65 | self.fboId: GL.GLint = GL.glGenFramebuffers(1)
66 | GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fboId)
67 | GL.glFramebufferTexture2D(GL.GL_READ_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D, self.txtr_id, 0)
68 |
69 | def _prep_shaders(self) -> None:
70 | BVH_VERT = Path(resource_filename(__name__, "shaders/bvh.vert"))
71 | BVH_FRAG = Path(resource_filename(__name__, "shaders/bvh.frag"))
72 |         self._initialize_shader('bvh_shader', str(BVH_VERT), str(BVH_FRAG))
73 |
74 | COLOR_VERT = Path(resource_filename(__name__, "shaders/color.vert"))
75 | COLOR_FRAG = Path(resource_filename(__name__, "shaders/color.frag"))
76 |         self._initialize_shader('color_shader', str(COLOR_VERT), str(COLOR_FRAG))
77 |
78 | TEXTURE_VERT = Path(resource_filename(__name__, "shaders/texture.vert"))
79 | TEXTURE_FRAG = Path(resource_filename(__name__, "shaders/texture.frag"))
80 |         self._initialize_shader('texture_shader', str(TEXTURE_VERT), str(TEXTURE_FRAG), texture=True)
81 |
82 | def _update_shaders_view_transform(self, camera: Camera) -> None:
83 | try:
84 | view_transform: npt.NDArray[np.float32] = np.linalg.inv(camera.get_world_transform())
85 | except Exception as e:
86 | msg = f'Error inverting camera world transform: {e}'
87 | logging.critical(msg)
88 | assert False, msg
89 |
90 | for shader_name in self.shaders:
91 | GL.glUseProgram(self.shader_ids[shader_name])
92 | view_loc = GL.glGetUniformLocation(self.shader_ids[shader_name], "view")
93 | GL.glUniformMatrix4fv(view_loc, 1, GL.GL_FALSE, view_transform.T)
94 |
95 | def _set_shader_projections(self, proj_m: npt.NDArray[np.float32]) -> None:
96 | for shader_id in self.shader_ids.values():
97 | GL.glUseProgram(shader_id)
98 | proj_loc = GL.glGetUniformLocation(shader_id, "proj")
99 | GL.glUniformMatrix4fv(proj_loc, 1, GL.GL_FALSE, proj_m.T)
100 |
101 |     def _initialize_shader(self, shader_name: str, vert_path: str, frag_path: str, **kwargs) -> None:
102 | self.shaders[shader_name] = Shader(vert_path, frag_path)
103 | self.shader_ids[shader_name] = self.shaders[shader_name].glid # pyright: ignore[reportGeneralTypeIssues]
104 |
105 | if 'texture' in kwargs and kwargs['texture'] is True:
106 | GL.glUseProgram(self.shader_ids[shader_name])
107 | GL.glUniform1i(GL.glGetUniformLocation(
108 | self.shader_ids[shader_name], 'texture0'), 0)
109 |
110 | def _create_window(self, width: int, height: int) -> None:
111 |
112 | glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
113 | glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
114 | glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
115 | glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
116 | glfw.window_hint(glfw.RESIZABLE, False)
117 |
118 | self.win = glfw.create_window(width, height, 'Viewer', None, None)
119 |
120 | glfw.make_context_current(self.win)
121 |
122 | GL.glEnable(GL.GL_CULL_FACE)
123 | GL.glEnable(GL.GL_DEPTH_TEST)
124 | # GL.glClearColor(*self.cfg.clear_color)
125 |
126 | logging.info(f'OpenGL Version: {GL.glGetString(GL.GL_VERSION).decode()}') # pyright: ignore[reportGeneralTypeIssues]
127 |         logging.info(f'GLSL: {GL.glGetString(GL.GL_SHADING_LANGUAGE_VERSION).decode()}') # pyright: ignore[reportGeneralTypeIssues]
128 | logging.info(f'Renderer: {GL.glGetString(GL.GL_RENDERER).decode()}') # pyright: ignore[reportGeneralTypeIssues]
129 |
130 | def set_scene(self, scene: Scene) -> None:
131 | self.scene = scene
132 |
133 | def render(self, scene: Transform) -> None:
134 | GL.glViewport(0, 0, *self.get_framebuffer_size())
135 |
136 | # draw the background image if exists
137 | if self.cfg.background_image:
138 | GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, 0)
139 | GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fboId)
140 | win_w, win_h = self.get_framebuffer_size()
141 | GL.glBlitFramebuffer(0, 0, self.txtr_w, self.txtr_h, 0, 0, win_w, win_h, GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
142 |
143 | self._update_shaders_view_transform(self.camera)
144 |
145 | scene.draw(shader_ids=self.shader_ids, viewer_cfg=self.cfg)
146 |
147 | def get_framebuffer_size(self) -> Tuple[int, int]:
148 | """ Return (width, height) of view's window. """
149 | return glfw.get_framebuffer_size(self.win)
150 |
151 | def swap_buffers(self) -> None:
152 | glfw.swap_buffers(self.win)
153 |
154 | def clear_window(self) -> None:
155 | GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT) # type: ignore
156 |
157 | def cleanup(self) -> None:
158 | """ Destroy the window when it's no longer being used. """
159 | glfw.destroy_window(self.win)
160 |
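For orientation, a sketch of the per-frame loop a controller would drive against this view (assumes view_cfg is a ViewConfig with use_mesa False, so create_view returns a WindowView, and that scene was built elsewhere):

    from animated_drawings.view.view import View

    def run(view_cfg, scene, num_frames=300):
        view = View.create_view(view_cfg)  # WindowView when cfg.use_mesa is False
        for _ in range(num_frames):
            view.clear_window()            # clear color + depth buffers
            view.render(scene)             # blit background, update view uniforms, draw
            view.swap_buffers()            # present the frame
        view.cleanup()                     # destroy the glfw window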
--------------------------------------------------------------------------------
/annotate.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import shutil
4 | import argparse
5 | from pathlib import Path
6 |
7 | # Add the _internal directory to the system path before importing local packages
8 | sys.path.append(os.path.join(os.path.dirname(__file__), '_internal'))
9 |
10 | from annotation import annotation
11 | annotation_dir = Path('character')
12 | if annotation_dir.exists():
13 | shutil.rmtree(annotation_dir)
14 |
15 | parser = argparse.ArgumentParser()
16 |
17 | parser.add_argument("-i", "--image", type=str, help="The input image file.")
18 | args = parser.parse_args()
19 | image_file = args.image
20 |
21 | annotation.run(image_file, annotation_dir)
22 |
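The same entry point can be driven programmatically (the image filename here is illustrative):

    from pathlib import Path
    from annotation import annotation

    # populates the character/ directory (see char_cfg.yaml and bounding_box.yaml below)
    annotation.run('drawing.png', Path('character'))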
--------------------------------------------------------------------------------
/annotation/__pycache__/annotation.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/annotation.cpython-310.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/annotation.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/annotation.cpython-311.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/annotation.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/annotation.cpython-312.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/annotation.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/annotation.cpython-38.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/handler.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/handler.cpython-310.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/handler.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/handler.cpython-311.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/handler.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/handler.cpython-312.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/handler.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/handler.cpython-38.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/image_to_annotations.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/image_to_annotations.cpython-310.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/image_to_annotations.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/image_to_annotations.cpython-311.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/image_to_annotations.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/image_to_annotations.cpython-312.pyc
--------------------------------------------------------------------------------
/annotation/__pycache__/image_to_annotations.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/annotation/__pycache__/image_to_annotations.cpython-38.pyc
--------------------------------------------------------------------------------
/annotation/annotation.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from .image_to_annotations import image_to_annotations
6 |
7 |
8 | def run(img_fn, char_anno_dir):
9 |
10 | image_to_annotations(img_fn, char_anno_dir)
--------------------------------------------------------------------------------
/annotation/handler.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | from mmdet.apis import inference_detector, init_detector
4 | from mmpose.apis import (inference_bottom_up_pose_model,
5 | inference_top_down_pose_model, init_pose_model)
6 | from mmpose.models.detectors import AssociativeEmbedding, TopDown
7 |
8 | class MMdetHandler:
9 | def __init__(self, model_dir):
10 | self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
11 | self.device = torch.device(self.map_location)
12 |
13 | self.config_file = os.path.join(model_dir, 'detector_config.py')
14 | self.checkpoint = os.path.join(model_dir, 'detector_model.pth') # Adjust for your detection model
15 |
16 | self.model = init_detector(self.config_file, self.checkpoint, self.device)
17 |
18 | def inference(self, data):
19 | results = []
20 | for image in data:
21 | result = inference_detector(self.model, image)
22 | results.append(result)
23 | return results
24 |
25 | def postprocess(self, data):
26 | output = []
27 | for image_index, image_result in enumerate(data):
28 | output.append([])
29 | bbox_result = image_result if not isinstance(image_result, tuple) else image_result[0]
30 | for class_index, class_result in enumerate(bbox_result):
31 | class_name = self.model.CLASSES[class_index]
32 | for bbox in class_result:
33 | bbox_coords = bbox[:-1].tolist()
34 | score = float(bbox[-1])
35 | if score >= 0.5: # Set your score threshold
36 | output[image_index].append({
37 | 'class_name': class_name,
38 | 'bbox': bbox_coords,
39 | 'score': score
40 | })
41 | return output
42 |
43 | # Initialize the MMPoseHandler for pose estimation
44 | class MMPoseHandler:
45 | def __init__(self, model_dir):
46 | self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
47 | self.device = torch.device(self.map_location)
48 |
49 | self.config_file = os.path.join(model_dir, 'pose_config.py')
50 | self.checkpoint = os.path.join(model_dir, 'pose_model.pth') # Adjust for your pose model
51 |
52 | self.model = init_pose_model(self.config_file, self.checkpoint, self.device)
53 |
54 | def inference(self, data):
55 | if isinstance(self.model, TopDown):
56 | return self._inference_top_down_pose_model(data)
57 | elif isinstance(self.model, AssociativeEmbedding):
58 | return self._inference_bottom_up_pose_model(data)
59 | else:
60 | raise NotImplementedError(f'Model type {type(self.model)} is not supported.')
61 |
62 | def _inference_top_down_pose_model(self, data):
63 | results = []
64 | for image in data:
65 | preds, _ = inference_top_down_pose_model(self.model, image, person_results=None)
66 | results.append(preds)
67 | return results
68 |
69 | def _inference_bottom_up_pose_model(self, data):
70 | results = []
71 | for image in data:
72 | preds, _ = inference_bottom_up_pose_model(self.model, image)
73 | results.append(preds)
74 | return results
75 |
76 | def postprocess(self, data):
77 | output = [[{'keypoints': pred['keypoints'].tolist()} for pred in preds] for preds in data]
78 | return output
79 |
80 | # Create instances of both handlers
81 | detector_handler = MMdetHandler('models') # Use the model directory for detection
82 | pose_handler = MMPoseHandler('models') # Use the model directory for pose estimation
83 |
84 | def detect(image):
85 | try:
86 | images = [image]
87 |
88 | # Run detection inference
89 | results = detector_handler.inference(images)
90 |
91 | # Postprocess results
92 | output = detector_handler.postprocess(results)
93 |
94 | return output[0]
95 |
96 | except Exception as e:
97 |         return e  # note: returns the Exception instance rather than raising
98 |
99 | def pose(image):
100 | try:
101 |
102 | # Preprocess the image
103 | images = [image]
104 |
105 | # Run pose estimation inference
106 | results = pose_handler.inference(images)
107 |
108 | # Postprocess results
109 | output = pose_handler.postprocess(results)
110 |
111 | return output[0]
112 |
113 | except Exception as e:
114 |         return e  # note: returns the Exception instance rather than raising
115 |
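A usage sketch (importing this module loads both models immediately, so models/ must contain detector_config.py, detector_model.pth, pose_config.py and pose_model.pth; the image path is illustrative):

    import cv2
    from annotation import handler

    img = cv2.imread('drawing.png')  # BGR ndarray, as mmdet/mmpose expect
    boxes = handler.detect(img)      # [{'class_name', 'bbox', 'score'}, ...], score >= 0.5
    joints = handler.pose(img)       # [{'keypoints': [[x, y, score], ...]}, ...]
    # on failure both helpers hand back the Exception instance instead of raising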
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | from pose_estimator_3d import estimator_3d
2 | from utils import smooth, camera
3 | from bvh_skeleton import cmu_skeleton
4 |
5 | import cv2
6 | import numpy as np
7 | import os
8 | from pathlib import Path
9 | import json
10 | import pathlib
11 | import shutil
12 | import subprocess
13 | import argparse
14 | from animated_drawings import render
15 |
16 | # workaround: checkpoints pickled on Linux store PosixPath objects; remap so they load on Windows
17 | pathlib.PosixPath = pathlib.WindowsPath
18 |
19 | parser = argparse.ArgumentParser()
20 |
21 | parser.add_argument("-v", "--video", type=str, help="The input video file.")
22 | parser.add_argument("-i", "--image", type=str, help="The input image file.")
23 |
24 | args = parser.parse_args()
25 |
26 | video_file = args.video
27 | image_file = args.image
28 |
29 | json_dir = Path('output_json')
30 |
31 | output_dir = Path('output')
32 | if output_dir.exists():
33 |     shutil.rmtree(output_dir)
34 |
35 | os.makedirs(output_dir)
36 | if json_dir.exists():
37 |     shutil.rmtree(json_dir)
38 |
39 | os.system(rf'annotate.exe -i {image_file}')
40 | os.system(rf'bin\OpenPoseDemo.exe --video {video_file} --write_json output_json/')
41 |
42 | cap = cv2.VideoCapture(str(video_file))
43 | # read one frame to get the video dimensions
44 | ret, frame = cap.read()
45 | assert ret, 'could not read a frame from the input video'
46 | img_height = frame.shape[0]
47 | img_width = frame.shape[1]
48 | cap.release()
49 |
50 | keypoints_list = []
51 | for json_file in sorted(os.listdir("output_json")):  # sorted so frames stay in order
52 |     with open(rf'output_json\{json_file}', 'r') as file:
53 |         data = json.load(file)["people"][0]['pose_keypoints_2d']
54 |         keypoints_list.append(data)
55 | keypoints_list = np.array(keypoints_list).reshape((len(keypoints_list), 25, 3)).astype("float32")
56 |
57 | keypoints_list = smooth.filter_missing_value(
58 | keypoints_list=keypoints_list,
59 | method='ignore'
60 | )
61 |
62 | pose2d = np.stack(keypoints_list)[:, :, :2]
63 | pose2d_file = Path(output_dir / '2d_pose.npy')
64 | np.save(pose2d_file, pose2d)
65 |
66 | e3d = estimator_3d.Estimator3D(
67 | config_file='models/openpose_video_pose_243f/video_pose.yaml',
68 | checkpoint_file='models/openpose_video_pose_243f/best_58.58.pth'
69 | )
70 |
71 | pose2d = np.load(pose2d_file)
72 | pose3d = e3d.estimate(pose2d, image_width=img_width, image_height=img_height)
73 |
74 | subject = 'S1'
75 | cam_id = '55011271'
76 | cam_params = camera.load_camera_params('./cameras.h5')[subject][cam_id]
77 | R = cam_params['R']
78 | T = 0  # ignore camera translation: only the rotation R is applied below
79 |
80 | pose3d_world = camera.camera2world(pose=pose3d, R=R, T=T)
81 | pose3d_world[:, :, 2] -= np.min(pose3d_world[:, :, 2])  # shift so the lowest point sits on the floor (z=0)
82 |
83 | pose3d_file = output_dir / '3d_pose.npy'
84 | np.save(pose3d_file, pose3d_world)
85 |
86 | bvh_file = output_dir / 'result.bvh'
87 | cmu_skel = cmu_skeleton.CMUSkeleton()
88 | channels, header = cmu_skel.poses2bvh(pose3d_world, output_file=bvh_file)
89 |
90 | render.start('./config/config/mvc/interactive_window_example.yaml')
91 |
--------------------------------------------------------------------------------
/bvh_skeleton/__init__.py:
--------------------------------------------------------------------------------
1 | from . import h36m_original_skeleton
2 | from . import h36m_skeleton
3 | from . import openpose_skeleton
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/__init__.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/__init__.cpython-312.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/bvh_helper.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/bvh_helper.cpython-311.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/bvh_helper.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/bvh_helper.cpython-312.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/cmu_skeleton.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/cmu_skeleton.cpython-311.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/cmu_skeleton.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/cmu_skeleton.cpython-312.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/h36m_original_skeleton.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/h36m_original_skeleton.cpython-311.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/h36m_original_skeleton.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/h36m_original_skeleton.cpython-312.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/h36m_skeleton.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/h36m_skeleton.cpython-311.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/h36m_skeleton.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/h36m_skeleton.cpython-312.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/math3d.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/math3d.cpython-311.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/math3d.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/math3d.cpython-312.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/openpose_skeleton.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/openpose_skeleton.cpython-311.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/__pycache__/openpose_skeleton.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/bvh_skeleton/__pycache__/openpose_skeleton.cpython-312.pyc
--------------------------------------------------------------------------------
/bvh_skeleton/bvh_helper.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 |
4 |
5 | class BvhNode(object):
6 | def __init__(
7 | self, name, offset, rotation_order,
8 | children=None, parent=None, is_root=False, is_end_site=False
9 | ):
10 | if not is_end_site and \
11 | rotation_order not in ['xyz', 'xzy', 'yxz', 'yzx', 'zxy', 'zyx']:
12 |             raise ValueError(f'Rotation order invalid: {rotation_order}')
13 | self.name = name
14 | self.offset = offset
15 | self.rotation_order = rotation_order
16 | self.children = children
17 | self.parent = parent
18 | self.is_root = is_root
19 | self.is_end_site = is_end_site
20 |
21 |
22 | class BvhHeader(object):
23 | def __init__(self, root, nodes):
24 | self.root = root
25 | self.nodes = nodes
26 |
27 |
28 | def write_header(writer, node, level):
29 | indent = ' ' * 4 * level
30 | if node.is_root:
31 | writer.write(f'{indent}ROOT {node.name}\n')
32 | channel_num = 6
33 | elif node.is_end_site:
34 | writer.write(f'{indent}End Site\n')
35 | channel_num = 0
36 | else:
37 | writer.write(f'{indent}JOINT {node.name}\n')
38 | channel_num = 3
39 | writer.write(f'{indent}{"{"}\n')
40 |
41 | indent = ' ' * 4 * (level + 1)
42 | writer.write(
43 | f'{indent}OFFSET '
44 | f'{node.offset[0]} {node.offset[1]} {node.offset[2]}\n'
45 | )
46 | if channel_num:
47 | channel_line = f'{indent}CHANNELS {channel_num} '
48 | if node.is_root:
49 |             channel_line += 'Xposition Yposition Zposition '
50 | channel_line += ' '.join([
51 | f'{axis.upper()}rotation'
52 | for axis in node.rotation_order
53 | ])
54 | writer.write(channel_line + '\n')
55 |
56 | for child in node.children:
57 | write_header(writer, child, level + 1)
58 |
59 | indent = ' ' * 4 * level
60 | writer.write(f'{indent}{"}"}\n')
61 |
62 |
63 | def write_bvh(output_file, header, channels, frame_rate=30):
64 | output_file = Path(output_file)
65 | if not output_file.parent.exists():
66 | os.makedirs(output_file.parent)
67 |
68 | with output_file.open('w') as f:
69 | f.write('HIERARCHY\n')
70 | write_header(writer=f, node=header.root, level=0)
71 |
72 | f.write('MOTION\n')
73 | f.write(f'Frames: {len(channels)}\n')
74 | f.write(f'Frame Time: {1 / frame_rate}\n')
75 |
76 | for channel in channels:
77 | f.write(' '.join([f'{element}' for element in channel]) + '\n')
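
A toy example of the API: build a two-bone chain by hand and write a one-frame BVH (names and offsets are made up for illustration):

    from bvh_skeleton import bvh_helper

    end = bvh_helper.BvhNode('HandEndSite', offset=[0, 0, 1], rotation_order='',
                             children=[], is_end_site=True)
    arm = bvh_helper.BvhNode('Arm', offset=[0, 0, 1], rotation_order='zxy', children=[end])
    root = bvh_helper.BvhNode('Root', offset=[0, 0, 0], rotation_order='zxy',
                              children=[arm], is_root=True)
    end.parent, arm.parent = arm, root

    header = bvh_helper.BvhHeader(root=root, nodes={n.name: n for n in (root, arm, end)})
    # one frame: 6 root channels (position + rotation) and 3 rotation channels for Arm
    bvh_helper.write_bvh('toy.bvh', header, channels=[[0, 0, 0, 0, 0, 0, 0, 0, 0]])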
--------------------------------------------------------------------------------
/bvh_skeleton/cmu_skeleton.py:
--------------------------------------------------------------------------------
1 | from . import math3d
2 | from . import bvh_helper
3 |
4 | import numpy as np
5 |
6 |
7 |
8 | class CMUSkeleton(object):
9 |
10 | def __init__(self):
11 | self.root = 'Hips'
12 | self.keypoint2index = {
13 | 'Hips': 0,
14 | 'RightUpLeg': 1,
15 | 'RightLeg': 2,
16 | 'RightFoot': 3,
17 | 'LeftUpLeg': 4,
18 | 'LeftLeg': 5,
19 | 'LeftFoot': 6,
20 | 'Spine': 7,
21 | 'Spine1': 8,
22 | 'Neck1': 9,
23 | 'HeadEndSite': 10,
24 | 'LeftArm': 11,
25 | 'LeftForeArm': 12,
26 | 'LeftHand': 13,
27 | 'RightArm': 14,
28 | 'RightForeArm': 15,
29 | 'RightHand': 16,
30 | 'RightHipJoint': -1,
31 | 'RightFootEndSite': -1,
32 | 'LeftHipJoint': -1,
33 | 'LeftFootEndSite': -1,
34 | 'LeftShoulder': -1,
35 | 'LeftHandEndSite': -1,
36 | 'RightShoulder': -1,
37 | 'RightHandEndSite': -1,
38 | 'LowerBack': -1,
39 | 'Neck': -1
40 | }
41 | self.index2keypoint = {v: k for k, v in self.keypoint2index.items()}
42 | self.keypoint_num = len(self.keypoint2index)
43 |
44 | self.children = {
45 | 'Hips': ['LeftHipJoint', 'LowerBack', 'RightHipJoint'],
46 | 'LeftHipJoint': ['LeftUpLeg'],
47 | 'LeftUpLeg': ['LeftLeg'],
48 | 'LeftLeg': ['LeftFoot'],
49 | 'LeftFoot': ['LeftFootEndSite'],
50 | 'LeftFootEndSite': [],
51 | 'LowerBack': ['Spine'],
52 | 'Spine': ['Spine1'],
53 | 'Spine1': ['LeftShoulder', 'Neck', 'RightShoulder'],
54 | 'LeftShoulder': ['LeftArm'],
55 | 'LeftArm': ['LeftForeArm'],
56 | 'LeftForeArm': ['LeftHand'],
57 | 'LeftHand': ['LeftHandEndSite'],
58 | 'LeftHandEndSite': [],
59 | 'Neck': ['Neck1'],
60 | 'Neck1': ['HeadEndSite'],
61 | 'HeadEndSite': [],
62 | 'RightShoulder': ['RightArm'],
63 | 'RightArm': ['RightForeArm'],
64 | 'RightForeArm': ['RightHand'],
65 | 'RightHand': ['RightHandEndSite'],
66 | 'RightHandEndSite': [],
67 | 'RightHipJoint': ['RightUpLeg'],
68 | 'RightUpLeg': ['RightLeg'],
69 | 'RightLeg': ['RightFoot'],
70 | 'RightFoot': ['RightFootEndSite'],
71 | 'RightFootEndSite': [],
72 | }
73 | self.parent = {self.root: None}
74 | for parent, children in self.children.items():
75 | for child in children:
76 | self.parent[child] = parent
77 |
78 | self.left_joints = [
79 | joint for joint in self.keypoint2index
80 | if 'Left' in joint
81 | ]
82 | self.right_joints = [
83 | joint for joint in self.keypoint2index
84 | if 'Right' in joint
85 | ]
86 |
87 | # T-pose
88 | self.initial_directions = {
89 | 'Hips': [0, 0, 0],
90 | 'LeftHipJoint': [1, 0, 0],
91 | 'LeftUpLeg': [1, 0, 0],
92 | 'LeftLeg': [0, 0, -1],
93 | 'LeftFoot': [0, 0, -1],
94 | 'LeftFootEndSite': [0, -1, 0],
95 | 'LowerBack': [0, 0, 1],
96 | 'Spine': [0, 0, 1],
97 | 'Spine1': [0, 0, 1],
98 | 'LeftShoulder': [1, 0, 0],
99 | 'LeftArm': [1, 0, 0],
100 | 'LeftForeArm': [1, 0, 0],
101 | 'LeftHand': [1, 0, 0],
102 | 'LeftHandEndSite': [1, 0, 0],
103 | 'Neck': [0, 0, 1],
104 | 'Neck1': [0, 0, 1],
105 | 'HeadEndSite': [0, 0, 1],
106 | 'RightShoulder': [-1, 0, 0],
107 | 'RightArm': [-1, 0, 0],
108 | 'RightForeArm': [-1, 0, 0],
109 | 'RightHand': [-1, 0, 0],
110 | 'RightHandEndSite': [-1, 0, 0],
111 | 'RightHipJoint': [-1, 0, 0],
112 | 'RightUpLeg': [-1, 0, 0],
113 | 'RightLeg': [0, 0, -1],
114 | 'RightFoot': [0, 0, -1],
115 | 'RightFootEndSite': [0, -1, 0]
116 | }
117 |
118 |
119 | def get_initial_offset(self, poses_3d):
120 | # TODO: RANSAC
121 | bone_lens = {self.root: [0]}
122 | stack = [self.root]
123 | while stack:
124 | parent = stack.pop()
125 | p_idx = self.keypoint2index[parent]
126 | p_name = parent
127 | while p_idx == -1:
128 | # find real parent
129 | p_name = self.parent[p_name]
130 | p_idx = self.keypoint2index[p_name]
131 | for child in self.children[parent]:
132 | stack.append(child)
133 |
134 | if self.keypoint2index[child] == -1:
135 | bone_lens[child] = [0.1]
136 | else:
137 | c_idx = self.keypoint2index[child]
138 | bone_lens[child] = np.linalg.norm(
139 | poses_3d[:, p_idx] - poses_3d[:, c_idx],
140 | axis=1
141 | )
142 |
143 | bone_len = {}
144 | for joint in self.keypoint2index:
145 | if 'Left' in joint or 'Right' in joint:
146 | base_name = joint.replace('Left', '').replace('Right', '')
147 | left_len = np.mean(bone_lens['Left' + base_name])
148 | right_len = np.mean(bone_lens['Right' + base_name])
149 | bone_len[joint] = (left_len + right_len) / 2
150 | else:
151 | bone_len[joint] = np.mean(bone_lens[joint])
152 |
153 | initial_offset = {}
154 | for joint, direction in self.initial_directions.items():
155 | direction = np.array(direction) / max(np.linalg.norm(direction), 1e-12)
156 | initial_offset[joint] = direction * bone_len[joint]
157 |
158 | return initial_offset
159 |
160 |
161 | def get_bvh_header(self, poses_3d):
162 | initial_offset = self.get_initial_offset(poses_3d)
163 |
164 | nodes = {}
165 | for joint in self.keypoint2index:
166 | is_root = joint == self.root
167 | is_end_site = 'EndSite' in joint
168 | nodes[joint] = bvh_helper.BvhNode(
169 | name=joint,
170 | offset=initial_offset[joint],
171 | rotation_order='zxy' if not is_end_site else '',
172 | is_root=is_root,
173 | is_end_site=is_end_site,
174 | )
175 | for joint, children in self.children.items():
176 | nodes[joint].children = [nodes[child] for child in children]
177 | for child in children:
178 | nodes[child].parent = nodes[joint]
179 |
180 | header = bvh_helper.BvhHeader(root=nodes[self.root], nodes=nodes)
181 | return header
182 |
183 |
184 | def pose2euler(self, pose, header):
185 | channel = []
186 | quats = {}
187 | eulers = {}
188 | stack = [header.root]
189 | while stack:
190 | node = stack.pop()
191 | joint = node.name
192 | joint_idx = self.keypoint2index[joint]
193 |
194 | if node.is_root:
195 | channel.extend(pose[joint_idx])
196 |
197 | index = self.keypoint2index
198 | order = None
199 | if joint == 'Hips':
200 | x_dir = pose[index['LeftUpLeg']] - pose[index['RightUpLeg']]
201 | y_dir = None
202 | z_dir = pose[index['Spine']] - pose[joint_idx]
203 | order = 'zyx'
204 | elif joint in ['RightUpLeg', 'RightLeg']:
205 | child_idx = self.keypoint2index[node.children[0].name]
206 | x_dir = pose[index['Hips']] - pose[index['RightUpLeg']]
207 | y_dir = None
208 | z_dir = pose[joint_idx] - pose[child_idx]
209 | order = 'zyx'
210 | elif joint in ['LeftUpLeg', 'LeftLeg']:
211 | child_idx = self.keypoint2index[node.children[0].name]
212 | x_dir = pose[index['LeftUpLeg']] - pose[index['Hips']]
213 | y_dir = None
214 | z_dir = pose[joint_idx] - pose[child_idx]
215 | order = 'zyx'
216 | elif joint == 'Spine':
217 | x_dir = pose[index['LeftUpLeg']] - pose[index['RightUpLeg']]
218 | y_dir = None
219 | z_dir = pose[index['Spine1']] - pose[joint_idx]
220 | order = 'zyx'
221 | elif joint == 'Spine1':
222 | x_dir = pose[index['LeftArm']] - \
223 | pose[index['RightArm']]
224 | y_dir = None
225 | z_dir = pose[joint_idx] - pose[index['Spine']]
226 | order = 'zyx'
227 | elif joint == 'Neck1':
228 | x_dir = None
229 | y_dir = pose[index['Spine1']] - pose[joint_idx]
230 | z_dir = pose[index['HeadEndSite']] - pose[index['Spine1']]
231 | order = 'zxy'
232 | elif joint == 'LeftArm':
233 | x_dir = pose[index['LeftForeArm']] - pose[joint_idx]
234 | y_dir = pose[index['LeftForeArm']] - pose[index['LeftHand']]
235 | z_dir = None
236 | order = 'xzy'
237 | elif joint == 'LeftForeArm':
238 | x_dir = pose[index['LeftHand']] - pose[joint_idx]
239 | y_dir = pose[joint_idx] - pose[index['LeftArm']]
240 | z_dir = None
241 | order = 'xzy'
242 | elif joint == 'RightArm':
243 | x_dir = pose[joint_idx] - pose[index['RightForeArm']]
244 | y_dir = pose[index['RightForeArm']] - pose[index['RightHand']]
245 | z_dir = None
246 | order = 'xzy'
247 | elif joint == 'RightForeArm':
248 | x_dir = pose[joint_idx] - pose[index['RightHand']]
249 | y_dir = pose[joint_idx] - pose[index['RightArm']]
250 | z_dir = None
251 | order = 'xzy'
252 |
253 | if order:
254 | dcm = math3d.dcm_from_axis(x_dir, y_dir, z_dir, order)
255 | quats[joint] = math3d.dcm2quat(dcm)
256 | else:
257 | quats[joint] = quats[self.parent[joint]].copy()
258 |
259 | local_quat = quats[joint].copy()
260 | if node.parent:
261 | local_quat = math3d.quat_divide(
262 | q=quats[joint], r=quats[node.parent.name]
263 | )
264 |
265 | euler = math3d.quat2euler(
266 | q=local_quat, order=node.rotation_order
267 | )
268 | euler = np.rad2deg(euler)
269 | eulers[joint] = euler
270 | channel.extend(euler)
271 |
272 | for child in node.children[::-1]:
273 | if not child.is_end_site:
274 | stack.append(child)
275 |
276 | return channel
277 |
278 |
279 | def poses2bvh(self, poses_3d, header=None, output_file=None):
280 | if not header:
281 | header = self.get_bvh_header(poses_3d)
282 |
283 | channels = []
284 | for frame, pose in enumerate(poses_3d):
285 | channels.append(self.pose2euler(pose, header))
286 |
287 | if output_file:
288 | bvh_helper.write_bvh(output_file, header, channels)
289 |
290 | return channels, header
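
End to end, the class is driven exactly as app.py does it; any input of shape (num_frames, 17, 3), with joints ordered by the keypoint2index indices 0-16, produces a valid file (random data gives meaningless but well-formed motion):

    import numpy as np
    from bvh_skeleton import cmu_skeleton

    poses_3d = np.random.rand(2, 17, 3)  # 2 frames x 17 joints x xyz
    skel = cmu_skeleton.CMUSkeleton()
    channels, header = skel.poses2bvh(poses_3d, output_file='demo.bvh')
    print(len(channels))  # 2: one channel row per frame (3 root positions + 3 angles per traversed joint)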
--------------------------------------------------------------------------------
/bvh_skeleton/coco_skeleton.py:
--------------------------------------------------------------------------------
1 | class COCOSkeleton(object):
2 |
3 | def __init__(self):
4 | self.root = 'Neck' # median of left shoulder and right shoulder
5 | self.keypoint2index = {
6 | 'Nose': 0,
7 | 'LeftEye': 1,
8 | 'RightEye': 2,
9 | 'LeftEar': 3,
10 | 'RightEar': 4,
11 | 'LeftShoulder': 5,
12 | 'RightShoulder': 6,
13 | 'LeftElbow': 7,
14 | 'RightElbow': 8,
15 | 'LeftWrist': 9,
16 | 'RightWrist': 10,
17 | 'LeftHip': 11,
18 | 'RightHip': 12,
19 | 'LeftKnee': 13,
20 | 'RightKnee': 14,
21 | 'LeftAnkle': 15,
22 | 'RightAnkle': 16,
23 | 'Neck': 17
24 | }
25 | self.index2keypoint = {v: k for k, v in self.keypoint2index.items()}
26 | self.keypoint_num = len(self.keypoint2index)
27 |
28 | self.children = {
29 | 'Neck': [
30 | 'Nose', 'LeftShoulder', 'RightShoulder', 'LeftHip', 'RightHip'
31 | ],
32 | 'Nose': ['LeftEye', 'RightEye'],
33 | 'LeftEye': ['LeftEar'],
34 | 'LeftEar': [],
35 | 'RightEye': ['RightEar'],
36 | 'RightEar': [],
37 | 'LeftShoulder': ['LeftElbow'],
38 | 'LeftElbow': ['LeftWrist'],
39 | 'LeftWrist': [],
40 | 'RightShoulder': ['RightElbow'],
41 | 'RightElbow': ['RightWrist'],
42 | 'RightWrist': [],
43 | 'LeftHip': ['LeftKnee'],
44 | 'LeftKnee': ['LeftAnkle'],
45 | 'LeftAnkle': [],
46 | 'RightHip': ['RightKnee'],
47 | 'RightKnee': ['RightAnkle'],
48 | 'RightAnkle': []
49 | }
50 | self.parent = {self.root: None}
51 | for parent, children in self.children.items():
52 | for child in children:
53 | self.parent[child] = parent
--------------------------------------------------------------------------------
/bvh_skeleton/h36m_original_skeleton.py:
--------------------------------------------------------------------------------
1 | class H36mOriginalSkeleton(object):
2 |
3 | def __init__(self):
4 | self.root = 'Hip'
5 | self.keypoint2index = {
6 | 'Hip': 0,
7 | 'RightUpLeg': 1,
8 | 'RightLeg': 2,
9 | 'RightFoot': 3,
10 | 'RightToeBase': 4,
11 | 'RightToeBaseEndSite': 5,
12 | 'LeftUpLeg': 6,
13 | 'LeftLeg': 7,
14 | 'LeftFoot': 8,
15 | 'LeftToeBase': 9,
16 | 'LeftToeBaseEndSite': 10,
17 | 'Spine': 11,
18 | 'Spine1': 12,
19 | 'Neck': 13,
20 | 'Head': 14,
21 | 'HeadEndSite': 15,
22 | 'LeftShoulder': 16,
23 | 'LeftArm': 17,
24 | 'LeftForeArm': 18,
25 | 'LeftHand': 19,
26 | 'LeftHandThumb': 20,
27 | 'LeftHandThumbEndSite': 21,
28 | 'LeftWristEnd': 22,
29 | 'LeftWristEndEndSite': 23,
30 | 'RightShoulder': 24,
31 | 'RightArm': 25,
32 | 'RightForeArm': 26,
33 | 'RightHand': 27,
34 | 'RightHandThumb': 28,
35 | 'RightHandThumbEndSite': 29,
36 | 'RightWristEnd': 30,
37 | 'RightWristEndEndSite': 31
38 | }
39 | self.index2keypoint = {v: k for k, v in self.keypoint2index.items()}
40 | self.keypoint_num = len(self.keypoint2index)
41 |
42 | self.children = {
43 | 'Hip': ['RightUpLeg', 'LeftUpLeg', 'Spine'],
44 | 'RightUpLeg': ['RightLeg'],
45 | 'RightLeg': ['RightFoot'],
46 | 'RightFoot': ['RightToeBase'],
47 | 'RightToeBase': ['RightToeBaseEndSite'],
48 | 'RightToeBaseEndSite': [],
49 | 'LeftUpLeg': ['LeftLeg'],
50 | 'LeftLeg': ['LeftFoot'],
51 | 'LeftFoot': ['LeftToeBase'],
52 | 'LeftToeBase': ['LeftToeBaseEndSite'],
53 | 'LeftToeBaseEndSite': [],
54 | 'Spine': ['Spine1'],
55 | 'Spine1': ['Neck', 'LeftShoulder', 'RightShoulder'],
56 | 'Neck': ['Head'],
57 | 'Head': ['HeadEndSite'],
58 | 'HeadEndSite': [],
59 | 'LeftShoulder': ['LeftArm'],
60 | 'LeftArm': ['LeftForeArm'],
61 | 'LeftForeArm': ['LeftHand'],
62 | 'LeftHand': ['LeftHandThumb', 'LeftWristEnd'],
63 | 'LeftHandThumb': ['LeftHandThumbEndSite'],
64 | 'LeftHandThumbEndSite': [],
65 | 'LeftWristEnd': ['LeftWristEndEndSite'],
66 | 'LeftWristEndEndSite': [],
67 | 'RightShoulder': ['RightArm'],
68 | 'RightArm': ['RightForeArm'],
69 | 'RightForeArm': ['RightHand'],
70 | 'RightHand': ['RightHandThumb', 'RightWristEnd'],
71 | 'RightHandThumb': ['RightHandThumbEndSite'],
72 | 'RightHandThumbEndSite': [],
73 | 'RightWristEnd': ['RightWristEndEndSite'],
74 | 'RightWristEndEndSite': [],
75 | }
76 | self.parent = {self.root: None}
77 | for parent, children in self.children.items():
78 | for child in children:
79 | self.parent[child] = parent
80 |
81 | self.left_joints = [
82 | joint for joint in self.keypoint2index
83 | if 'Left' in joint
84 | ]
85 | self.right_joints = [
86 | joint for joint in self.keypoint2index
87 | if 'Right' in joint
88 | ]
--------------------------------------------------------------------------------
/bvh_skeleton/h36m_skeleton.py:
--------------------------------------------------------------------------------
1 | from . import math3d
2 | from . import bvh_helper
3 |
4 | import numpy as np
5 |
6 |
7 | class H36mSkeleton(object):
8 |
9 | def __init__(self):
10 | self.root = 'Hip'
11 | self.keypoint2index = {
12 | 'Hip': 0,
13 | 'RightHip': 1,
14 | 'RightKnee': 2,
15 | 'RightAnkle': 3,
16 | 'LeftHip': 4,
17 | 'LeftKnee': 5,
18 | 'LeftAnkle': 6,
19 | 'Spine': 7,
20 | 'Thorax': 8,
21 | 'Neck': 9,
22 | 'HeadEndSite': 10,
23 | 'LeftShoulder': 11,
24 | 'LeftElbow': 12,
25 | 'LeftWrist': 13,
26 | 'RightShoulder': 14,
27 | 'RightElbow': 15,
28 | 'RightWrist': 16,
29 | 'RightAnkleEndSite': -1,
30 | 'LeftAnkleEndSite': -1,
31 | 'LeftWristEndSite': -1,
32 | 'RightWristEndSite': -1
33 | }
34 | self.index2keypoint = {v: k for k, v in self.keypoint2index.items()}
35 | self.keypoint_num = len(self.keypoint2index)
36 |
37 | self.children = {
38 | 'Hip': ['RightHip', 'LeftHip', 'Spine'],
39 | 'RightHip': ['RightKnee'],
40 | 'RightKnee': ['RightAnkle'],
41 | 'RightAnkle': ['RightAnkleEndSite'],
42 | 'RightAnkleEndSite': [],
43 | 'LeftHip': ['LeftKnee'],
44 | 'LeftKnee': ['LeftAnkle'],
45 | 'LeftAnkle': ['LeftAnkleEndSite'],
46 | 'LeftAnkleEndSite': [],
47 | 'Spine': ['Thorax'],
48 | 'Thorax': ['Neck', 'LeftShoulder', 'RightShoulder'],
49 | 'Neck': ['HeadEndSite'],
50 | 'HeadEndSite': [], # Head is an end site
51 | 'LeftShoulder': ['LeftElbow'],
52 | 'LeftElbow': ['LeftWrist'],
53 | 'LeftWrist': ['LeftWristEndSite'],
54 | 'LeftWristEndSite': [],
55 | 'RightShoulder': ['RightElbow'],
56 | 'RightElbow': ['RightWrist'],
57 | 'RightWrist': ['RightWristEndSite'],
58 | 'RightWristEndSite': []
59 | }
60 | self.parent = {self.root: None}
61 | for parent, children in self.children.items():
62 | for child in children:
63 | self.parent[child] = parent
64 |
65 | self.left_joints = [
66 | joint for joint in self.keypoint2index
67 | if 'Left' in joint
68 | ]
69 | self.right_joints = [
70 | joint for joint in self.keypoint2index
71 | if 'Right' in joint
72 | ]
73 |
74 | # T-pose
75 | self.initial_directions = {
76 | 'Hip': [0, 0, 0],
77 | 'RightHip': [-1, 0, 0],
78 | 'RightKnee': [0, 0, -1],
79 | 'RightAnkle': [0, 0, -1],
80 | 'RightAnkleEndSite': [0, -1, 0],
81 | 'LeftHip': [1, 0, 0],
82 | 'LeftKnee': [0, 0, -1],
83 | 'LeftAnkle': [0, 0, -1],
84 | 'LeftAnkleEndSite': [0, -1, 0],
85 | 'Spine': [0, 0, 1],
86 | 'Thorax': [0, 0, 1],
87 | 'Neck': [0, 0, 1],
88 | 'HeadEndSite': [0, 0, 1],
89 | 'LeftShoulder': [1, 0, 0],
90 | 'LeftElbow': [1, 0, 0],
91 | 'LeftWrist': [1, 0, 0],
92 | 'LeftWristEndSite': [1, 0, 0],
93 | 'RightShoulder': [-1, 0, 0],
94 | 'RightElbow': [-1, 0, 0],
95 | 'RightWrist': [-1, 0, 0],
96 | 'RightWristEndSite': [-1, 0, 0]
97 | }
98 |
99 |
100 | def get_initial_offset(self, poses_3d):
101 | # TODO: RANSAC
102 | bone_lens = {self.root: [0]}
103 | stack = [self.root]
104 | while stack:
105 | parent = stack.pop()
106 | p_idx = self.keypoint2index[parent]
107 | for child in self.children[parent]:
108 | if 'EndSite' in child:
109 | bone_lens[child] = 0.4 * bone_lens[parent]
110 | continue
111 | stack.append(child)
112 |
113 | c_idx = self.keypoint2index[child]
114 | bone_lens[child] = np.linalg.norm(
115 | poses_3d[:, p_idx] - poses_3d[:, c_idx],
116 | axis=1
117 | )
118 |
119 | bone_len = {}
120 | for joint in self.keypoint2index:
121 | if 'Left' in joint or 'Right' in joint:
122 | base_name = joint.replace('Left', '').replace('Right', '')
123 | left_len = np.mean(bone_lens['Left' + base_name])
124 | right_len = np.mean(bone_lens['Right' + base_name])
125 | bone_len[joint] = (left_len + right_len) / 2
126 | else:
127 | bone_len[joint] = np.mean(bone_lens[joint])
128 |
129 | initial_offset = {}
130 | for joint, direction in self.initial_directions.items():
131 | direction = np.array(direction) / max(np.linalg.norm(direction), 1e-12)
132 | initial_offset[joint] = direction * bone_len[joint]
133 |
134 | return initial_offset
135 |
136 |
137 | def get_bvh_header(self, poses_3d):
138 | initial_offset = self.get_initial_offset(poses_3d)
139 |
140 | nodes = {}
141 | for joint in self.keypoint2index:
142 | is_root = joint == self.root
143 | is_end_site = 'EndSite' in joint
144 | nodes[joint] = bvh_helper.BvhNode(
145 | name=joint,
146 | offset=initial_offset[joint],
147 | rotation_order='zxy' if not is_end_site else '',
148 | is_root=is_root,
149 | is_end_site=is_end_site,
150 | )
151 | for joint, children in self.children.items():
152 | nodes[joint].children = [nodes[child] for child in children]
153 | for child in children:
154 | nodes[child].parent = nodes[joint]
155 |
156 | header = bvh_helper.BvhHeader(root=nodes[self.root], nodes=nodes)
157 | return header
158 |
159 |
160 | def pose2euler(self, pose, header):
161 | channel = []
162 | quats = {}
163 | eulers = {}
164 | stack = [header.root]
165 | while stack:
166 | node = stack.pop()
167 | joint = node.name
168 | joint_idx = self.keypoint2index[joint]
169 |
170 | if node.is_root:
171 | channel.extend(pose[joint_idx])
172 |
173 | index = self.keypoint2index
174 | order = None
175 | if joint == 'Hip':
176 | x_dir = pose[index['LeftHip']] - pose[index['RightHip']]
177 | y_dir = None
178 | z_dir = pose[index['Spine']] - pose[joint_idx]
179 | order = 'zyx'
180 | elif joint in ['RightHip', 'RightKnee']:
181 | child_idx = self.keypoint2index[node.children[0].name]
182 | x_dir = pose[index['Hip']] - pose[index['RightHip']]
183 | y_dir = None
184 | z_dir = pose[joint_idx] - pose[child_idx]
185 | order = 'zyx'
186 | elif joint in ['LeftHip', 'LeftKnee']:
187 | child_idx = self.keypoint2index[node.children[0].name]
188 | x_dir = pose[index['LeftHip']] - pose[index['Hip']]
189 | y_dir = None
190 | z_dir = pose[joint_idx] - pose[child_idx]
191 | order = 'zyx'
192 | elif joint == 'Spine':
193 | x_dir = pose[index['LeftHip']] - pose[index['RightHip']]
194 | y_dir = None
195 | z_dir = pose[index['Thorax']] - pose[joint_idx]
196 | order = 'zyx'
197 | elif joint == 'Thorax':
198 | x_dir = pose[index['LeftShoulder']] - \
199 | pose[index['RightShoulder']]
200 | y_dir = None
201 | z_dir = pose[joint_idx] - pose[index['Spine']]
202 | order = 'zyx'
203 | elif joint == 'Neck':
204 | x_dir = None
205 | y_dir = pose[index['Thorax']] - pose[joint_idx]
206 | z_dir = pose[index['HeadEndSite']] - pose[index['Thorax']]
207 | order = 'zxy'
208 | elif joint == 'LeftShoulder':
209 | x_dir = pose[index['LeftElbow']] - pose[joint_idx]
210 | y_dir = pose[index['LeftElbow']] - pose[index['LeftWrist']]
211 | z_dir = None
212 | order = 'xzy'
213 | elif joint == 'LeftElbow':
214 | x_dir = pose[index['LeftWrist']] - pose[joint_idx]
215 | y_dir = pose[joint_idx] - pose[index['LeftShoulder']]
216 | z_dir = None
217 | order = 'xzy'
218 | elif joint == 'RightShoulder':
219 | x_dir = pose[joint_idx] - pose[index['RightElbow']]
220 | y_dir = pose[index['RightElbow']] - pose[index['RightWrist']]
221 | z_dir = None
222 | order = 'xzy'
223 | elif joint == 'RightElbow':
224 | x_dir = pose[joint_idx] - pose[index['RightWrist']]
225 | y_dir = pose[joint_idx] - pose[index['RightShoulder']]
226 | z_dir = None
227 | order = 'xzy'
228 | if order:
229 | dcm = math3d.dcm_from_axis(x_dir, y_dir, z_dir, order)
230 | quats[joint] = math3d.dcm2quat(dcm)
231 | else:
232 | quats[joint] = quats[self.parent[joint]].copy()
233 |
234 | local_quat = quats[joint].copy()
235 | if node.parent:
236 | local_quat = math3d.quat_divide(
237 | q=quats[joint], r=quats[node.parent.name]
238 | )
239 |
240 | euler = math3d.quat2euler(
241 | q=local_quat, order=node.rotation_order
242 | )
243 | euler = np.rad2deg(euler)
244 | eulers[joint] = euler
245 | channel.extend(euler)
246 |
247 | for child in node.children[::-1]:
248 | if not child.is_end_site:
249 | stack.append(child)
250 |
251 | return channel
252 |
253 |
254 | def poses2bvh(self, poses_3d, header=None, output_file=None):
255 | if not header:
256 | header = self.get_bvh_header(poses_3d)
257 |
258 | channels = []
259 | for frame, pose in enumerate(poses_3d):
260 | channels.append(self.pose2euler(pose, header))
261 |
262 | if output_file:
263 | bvh_helper.write_bvh(output_file, header, channels)
264 |
265 | return channels, header
--------------------------------------------------------------------------------
/bvh_skeleton/math3d.py:
--------------------------------------------------------------------------------
1 | """
2 | ! left handed coordinate, z-up, y-forward
3 | ! left to right rotation matrix multiply: v'=vR
4 | ! non-standard quaternion multiply
5 | """
6 |
7 | import numpy as np
8 |
9 |
10 | def normalize(x):
11 | return x / max(np.linalg.norm(x), 1e-12)
12 |
13 |
14 | def dcm_from_axis(x_dir, y_dir, z_dir, order):
15 | assert order in ['yzx', 'yxz', 'xyz', 'xzy', 'zxy', 'zyx']
16 |
17 | axis = {'x': x_dir, 'y': y_dir, 'z': z_dir}
18 | name = ['x', 'y', 'z']
19 | idx0 = name.index(order[0])
20 | idx1 = name.index(order[1])
21 | idx2 = name.index(order[2])
22 |
23 | axis[order[0]] = normalize(axis[order[0]])
24 | axis[order[1]] = normalize(np.cross(
25 | axis[name[(idx1 + 1) % 3]], axis[name[(idx1 + 2) % 3]]
26 | ))
27 | axis[order[2]] = normalize(np.cross(
28 | axis[name[(idx2 + 1) % 3]], axis[name[(idx2 + 2) % 3]]
29 | ))
30 |
31 | dcm = np.asarray([axis['x'], axis['y'], axis['z']])
32 |
33 | return dcm
34 |
35 |
36 | def dcm2quat(dcm):
37 | q = np.zeros([4])
38 | tr = np.trace(dcm)
39 |
40 | if tr > 0:
41 | sqtrp1 = np.sqrt(tr + 1.0)
42 | q[0] = 0.5 * sqtrp1
43 | q[1] = (dcm[1, 2] - dcm[2, 1]) / (2.0 * sqtrp1)
44 | q[2] = (dcm[2, 0] - dcm[0, 2]) / (2.0 * sqtrp1)
45 | q[3] = (dcm[0, 1] - dcm[1, 0]) / (2.0 * sqtrp1)
46 | else:
47 | d = np.diag(dcm)
48 | if d[1] > d[0] and d[1] > d[2]:
49 | sqdip1 = np.sqrt(d[1] - d[0] - d[2] + 1.0)
50 | q[2] = 0.5 * sqdip1
51 |
52 | if sqdip1 != 0:
53 | sqdip1 = 0.5 / sqdip1
54 |
55 | q[0] = (dcm[2, 0] - dcm[0, 2]) * sqdip1
56 | q[1] = (dcm[0, 1] + dcm[1, 0]) * sqdip1
57 | q[3] = (dcm[1, 2] + dcm[2, 1]) * sqdip1
58 |
59 | elif d[2] > d[0]:
60 | sqdip1 = np.sqrt(d[2] - d[0] - d[1] + 1.0)
61 | q[3] = 0.5 * sqdip1
62 |
63 | if sqdip1 != 0:
64 | sqdip1 = 0.5 / sqdip1
65 |
66 | q[0] = (dcm[0, 1] - dcm[1, 0]) * sqdip1
67 | q[1] = (dcm[2, 0] + dcm[0, 2]) * sqdip1
68 | q[2] = (dcm[1, 2] + dcm[2, 1]) * sqdip1
69 |
70 | else:
71 | sqdip1 = np.sqrt(d[0] - d[1] - d[2] + 1.0)
72 | q[1] = 0.5 * sqdip1
73 |
74 | if sqdip1 != 0:
75 | sqdip1 = 0.5 / sqdip1
76 |
77 | q[0] = (dcm[1, 2] - dcm[2, 1]) * sqdip1
78 | q[2] = (dcm[0, 1] + dcm[1, 0]) * sqdip1
79 | q[3] = (dcm[2, 0] + dcm[0, 2]) * sqdip1
80 |
81 | return q
82 |
83 |
84 | def quat_dot(q0, q1):
85 | original_shape = q0.shape
86 | q0 = np.reshape(q0, [-1, 4])
87 | q1 = np.reshape(q1, [-1, 4])
88 |
89 | w0, x0, y0, z0 = q0[:, 0], q0[:, 1], q0[:, 2], q0[:, 3]
90 | w1, x1, y1, z1 = q1[:, 0], q1[:, 1], q1[:, 2], q1[:, 3]
91 |     q_product = w0 * w1 + x0 * x1 + y0 * y1 + z0 * z1
92 | q_product = np.expand_dims(q_product, axis=1)
93 | q_product = np.tile(q_product, [1, 4])
94 |
95 | return np.reshape(q_product, original_shape)
96 |
97 |
98 | def quat_inverse(q):
99 | original_shape = q.shape
100 | q = np.reshape(q, [-1, 4])
101 |
102 | q_conj = [q[:, 0], -q[:, 1], -q[:, 2], -q[:, 3]]
103 | q_conj = np.stack(q_conj, axis=1)
104 | q_inv = np.divide(q_conj, quat_dot(q_conj, q_conj))
105 |
106 | return np.reshape(q_inv, original_shape)
107 |
108 |
109 | def quat_mul(q0, q1):
110 | original_shape = q0.shape
111 | q1 = np.reshape(q1, [-1, 4, 1])
112 | q0 = np.reshape(q0, [-1, 1, 4])
113 | terms = np.matmul(q1, q0)
114 | w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]
115 | x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]
116 | y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]
117 | z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]
118 |
119 | q_product = np.stack([w, x, y, z], axis=1)
120 | return np.reshape(q_product, original_shape)
121 |
122 |
123 | def quat_divide(q, r):
124 | return quat_mul(quat_inverse(r), q)
125 |
126 |
127 | def quat2euler(q, order='zxy', eps=1e-8):
128 | original_shape = list(q.shape)
129 | original_shape[-1] = 3
130 | q = np.reshape(q, [-1, 4])
131 |
132 | q0 = q[:, 0]
133 | q1 = q[:, 1]
134 | q2 = q[:, 2]
135 | q3 = q[:, 3]
136 |
137 | if order == 'zxy':
138 | x = np.arcsin(np.clip(2 * (q0 * q1 + q2 * q3), -1 + eps, 1 - eps))
139 | y = np.arctan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
140 | z = np.arctan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3))
141 | euler = np.stack([z, x, y], axis=1)
142 | else:
143 | raise ValueError('Not implemented')
144 |
145 | return np.reshape(euler, original_shape)
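
As a quick sanity check of the chain these helpers implement (orthonormal axes -> direction-cosine matrix -> w-first quaternion -> Euler angles in radians), an identity basis should come out as a zero rotation; a minimal sketch:

    import numpy as np

    x_dir, y_dir, z_dir = np.eye(3)
    dcm = dcm_from_axis(x_dir, y_dir, z_dir, order='zyx')   # -> identity matrix
    q = dcm2quat(dcm)                                       # -> [1, 0, 0, 0], w first
    euler = quat2euler(q, order='zxy')                      # -> [0, 0, 0] radians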
--------------------------------------------------------------------------------
/bvh_skeleton/openpose_skeleton.py:
--------------------------------------------------------------------------------
1 | class OpenPoseSkeleton(object):
2 |
3 | def __init__(self):
4 | self.root = 'MidHip'
5 | self.keypoint2index = {
6 | 'Nose': 0,
7 | 'Neck': 1,
8 | 'RShoulder': 2,
9 | 'RElbow': 3,
10 | 'RWrist': 4,
11 | 'LShoulder': 5,
12 | 'LElbow': 6,
13 | 'LWrist': 7,
14 | 'MidHip': 8,
15 | 'RHip': 9,
16 | 'RKnee': 10,
17 | 'RAnkle': 11,
18 | 'LHip': 12,
19 | 'LKnee': 13,
20 | 'LAnkle': 14,
21 | 'REye': 15,
22 | 'LEye': 16,
23 | 'REar': 17,
24 | 'LEar': 18,
25 | 'LBigToe': 19,
26 | 'LSmallToe': 20,
27 | 'LHeel': 21,
28 | 'RBigToe': 22,
29 | 'RSmallToe': 23,
30 | 'RHeel': 24
31 | }
32 | self.index2keypoint = {v: k for k, v in self.keypoint2index.items()}
33 | self.keypoint_num = len(self.keypoint2index)
34 |
35 | self.children = {
36 | 'MidHip': ['Neck', 'RHip', 'LHip'],
37 | 'Neck': ['Nose', 'RShoulder', 'LShoulder'],
38 | 'Nose': ['REye', 'LEye'],
39 | 'REye': ['REar'],
40 | 'REar': [],
41 | 'LEye': ['LEar'],
42 | 'LEar': [],
43 | 'RShoulder': ['RElbow'],
44 | 'RElbow': ['RWrist'],
45 | 'RWrist': [],
46 | 'LShoulder': ['LElbow'],
47 | 'LElbow': ['LWrist'],
48 | 'LWrist': [],
49 | 'RHip': ['RKnee'],
50 | 'RKnee': ['RAnkle'],
51 | 'RAnkle': ['RBigToe', 'RSmallToe', 'RHeel'],
52 | 'RBigToe': [],
53 | 'RSmallToe': [],
54 | 'RHeel': [],
55 | 'LHip': ['LKnee'],
56 | 'LKnee': ['LAnkle'],
57 | 'LAnkle': ['LBigToe', 'LSmallToe', 'LHeel'],
58 | 'LBigToe': [],
59 | 'LSmallToe': [],
60 | 'LHeel': [],
61 | }
62 | self.parent = {self.root: None}
63 | for parent, children in self.children.items():
64 | for child in children:
65 | self.parent[child] = parent
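
The class only encodes the OpenPose BODY_25 topology; the parent table is derived from the children table in the constructor. A small sanity check:

    skel = OpenPoseSkeleton()
    assert skel.keypoint_num == 25
    assert skel.index2keypoint[8] == 'MidHip'
    assert skel.parent['RKnee'] == 'RHip'
    assert skel.parent['Neck'] == 'MidHip'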
--------------------------------------------------------------------------------
/character/bounding_box.yaml:
--------------------------------------------------------------------------------
1 | bottom: 708
2 | left: 201
3 | right: 646
4 | top: 80
5 |
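The four values are pixel coordinates with the origin at the image's top-left corner (so top < bottom). A sketch of cropping the character region with OpenCV; the image path is illustrative:

    import cv2
    import yaml

    with open('character/bounding_box.yaml') as f:
        bb = yaml.safe_load(f)
    img = cv2.imread('character/image.png')
    crop = img[bb['top']:bb['bottom'], bb['left']:bb['right']]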
--------------------------------------------------------------------------------
/character/char_cfg.yaml:
--------------------------------------------------------------------------------
1 | height: 628
2 | skeleton:
3 | - loc:
4 | - 232
5 | - 366
6 | name: root
7 | parent: null
8 | - loc:
9 | - 232
10 | - 366
11 | name: hip
12 | parent: root
13 | - loc:
14 | - 229
15 | - 234
16 | name: torso
17 | parent: hip
18 | - loc:
19 | - 226
20 | - 72
21 | name: neck
22 | parent: torso
23 | - loc:
24 | - 207
25 | - 231
26 | name: right_shoulder
27 | parent: torso
28 | - loc:
29 | - 115
30 | - 231
31 | name: right_elbow
32 | parent: right_shoulder
33 | - loc:
34 | - 11
35 | - 237
36 | name: right_hand
37 | parent: right_elbow
38 | - loc:
39 | - 250
40 | - 237
41 | name: left_shoulder
42 | parent: torso
43 | - loc:
44 | - 336
45 | - 237
46 | name: left_elbow
47 | parent: left_shoulder
48 | - loc:
49 | - 440
50 | - 237
51 | name: left_hand
52 | parent: left_elbow
53 | - loc:
54 | - 219
55 | - 366
56 | name: right_hip
57 | parent: root
58 | - loc:
59 | - 158
60 | - 477
61 | name: right_knee
62 | parent: right_hip
63 | - loc:
64 | - 85
65 | - 611
66 | name: right_foot
67 | parent: right_knee
68 | - loc:
69 | - 244
70 | - 366
71 | name: left_hip
72 | parent: root
73 | - loc:
74 | - 293
75 | - 483
76 | name: left_knee
77 | parent: left_hip
78 | - loc:
79 | - 354
80 | - 611
81 | name: left_foot
82 | parent: left_knee
83 | width: 445
84 |
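The skeleton here is a flat list of joints, each with a pixel-space loc and the name of its parent (null for the root). A sketch of walking one joint's chain back to the root with PyYAML:

    import yaml

    with open('character/char_cfg.yaml') as f:
        cfg = yaml.safe_load(f)

    joints = {j['name']: j for j in cfg['skeleton']}
    name = 'left_foot'
    while name is not None:
        print(name, joints[name]['loc'])
        name = joints[name]['parent']
    # prints left_foot -> left_knee -> left_hip -> root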
--------------------------------------------------------------------------------
/character/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/character/image.png
--------------------------------------------------------------------------------
/character/image_with_bg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/character/image_with_bg.png
--------------------------------------------------------------------------------
/character/joint_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/character/joint_overlay.png
--------------------------------------------------------------------------------
/character/mask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/character/mask.png
--------------------------------------------------------------------------------
/character/texture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/character/texture.png
--------------------------------------------------------------------------------
/garlic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/garlic.png
--------------------------------------------------------------------------------
/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/image.png
--------------------------------------------------------------------------------
/imagebvhtogif.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import os
4 | from pathlib import Path
5 | import json
6 | import pathlib
7 | import shutil
8 | from annotation import annotation
9 | import argparse
10 | import sys
11 |
12 |
13 |
14 | # Add the _internal directory to the system path
15 | sys.path.append(os.path.join(os.path.dirname(__file__), '_internal'))
16 |
17 | temp = pathlib.PosixPath
18 | pathlib.PosixPath = pathlib.WindowsPath  # workaround so POSIX-pickled paths load on Windows
19 |
20 | json_dir = Path('output_json')
21 |
22 | # image_dir = Path(f'input_image')
23 | # file = '1.png'
24 | # image_file = os.path.join(image_dir, file)
25 |
26 | parser = argparse.ArgumentParser()
27 |
28 | parser.add_argument("-i", "--image", type=str, help="The input image file.")
29 |
30 | parser.add_argument("-b", "--bvh", type=str, help="The input bvh file.")
31 |
32 | args = parser.parse_args()
33 |
34 | image_file = args.image
35 |
36 | bvh_file = args.bvh
37 |
38 | annotation_dir = Path('character')
39 |
40 | output_dir = Path('output')
41 | if output_dir.exists():
42 | shutil.rmtree(output_dir)
43 | os.makedirs(output_dir)
44 | if json_dir.exists():
45 | shutil.rmtree(json_dir)
46 |
47 | annotation.run(image_file, annotation_dir)
48 |
49 | shutil.copy(bvh_file, os.path.join(output_dir, 'result.bvh'))
50 | from animated_drawings import render
51 | render.start('./config/config/mvc/interactive_window_example.yaml')
52 |
53 |
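The script regenerates the character/ annotations from the input image, copies the BVH to output/result.bvh, and hands rendering off to animated_drawings. A hedged invocation example (paths illustrative):

    python imagebvhtogif.py -i input_image/1.png -b motion.bvh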
--------------------------------------------------------------------------------
/input_image/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/input_image/1.png
--------------------------------------------------------------------------------
/input_image/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/input_image/2.png
--------------------------------------------------------------------------------
/input_image/garlic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/input_image/garlic.png
--------------------------------------------------------------------------------
/input_video/second.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/input_video/second.mp4
--------------------------------------------------------------------------------
/output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/output.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | addict==2.4.0
2 | altgraph==0.17.4
3 | anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1726753373685/work
4 | arrow @ file:///home/conda/feedstock_root/build_artifacts/arrow_1696128962909/work
5 | asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1733175639022/work
6 | attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1722977137225/work
7 | backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
8 | binaryornot==0.4.4
9 | Brotli @ file:///C:/b/abs_3d36mno480/croot/brotli-split_1714483178642/work
10 | cachetools @ file:///home/conda/feedstock_root/build_artifacts/cachetools_1724028158384/work
11 | certifi @ file:///C:/b/abs_1fw_exq1si/croot/certifi_1725551736618/work/certifi
12 | chardet @ file:///D:/bld/chardet_1695468786478/work
13 | charset-normalizer @ file:///croot/charset-normalizer_1721748349566/work
14 | chumpy==0.70
15 | click @ file:///D:/bld/click_1692312014553/work
16 | colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work
17 | comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1710320294760/work
18 | contourpy @ file:///D:/bld/contourpy_1695554279172/work
19 | cookiecutter @ file:///home/conda/feedstock_root/build_artifacts/cookiecutter_1708608886262/work
20 | cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1696677705766/work
21 | Cython==3.0.12
22 | debugpy @ file:///D:/bld/debugpy_1722923915790/work
23 | decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
24 | exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1720869315914/work
25 | executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1725214404607/work
26 | fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1718477020893/work/dist
27 | filelock @ file:///home/conda/feedstock_root/build_artifacts/filelock_1726613473834/work
28 | fonttools @ file:///D:/bld/fonttools_1720359137189/work
29 | glfw==2.5.5
30 | glm==0.4.4
31 | gmpy2 @ file:///C:/ci/gmpy2_1645456279018/work
32 | h11 @ file:///home/conda/feedstock_root/build_artifacts/h11_1664132893548/work
33 | humanize @ file:///home/conda/feedstock_root/build_artifacts/humanize_1720456301165/work
34 | idna @ file:///C:/b/abs_aad84bnnw5/croot/idna_1714398896795/work
35 | imageio==2.35.1
36 | importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1726082825846/work
37 | importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1725921340658/work
38 | ipykernel @ file:///D:/bld/ipykernel_1719845595208/work
39 | ipython @ file:///D:/bld/ipython_1683289169355/work
40 | ipyvue @ file:///home/conda/feedstock_root/build_artifacts/ipyvue_1731616800342/work
41 | ipyvuetify @ file:///home/conda/feedstock_root/build_artifacts/ipyvuetify_1740581263055/work
42 | ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1724334859652/work
43 | jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work
44 | Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1715127149914/work
45 | joblib==1.4.2
46 | json-tricks==3.17.3
47 | jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1720529478715/work
48 | jsonschema-specifications @ file:///tmp/tmpvslgxhz5/src
49 | jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1726610684920/work
50 | jupyter_core @ file:///D:/bld/jupyter_core_1727163532151/work
51 | jupyterlab_widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1724331334887/work
52 | kiwisolver @ file:///D:/bld/kiwisolver_1695380176938/work
53 | lazy_loader==0.4
54 | Markdown @ file:///home/conda/feedstock_root/build_artifacts/markdown_1710435156458/work
55 | markdown-it-py @ file:///home/conda/feedstock_root/build_artifacts/markdown-it-py_1686175045316/work
56 | MarkupSafe @ file:///D:/bld/markupsafe_1706900107493/work
57 | matplotlib @ file:///D:/bld/matplotlib-suite_1695077059522/work
58 | matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1713250518406/work
59 | mdurl @ file:///home/conda/feedstock_root/build_artifacts/mdurl_1704317613764/work
60 | Mesa @ file:///home/conda/feedstock_root/build_artifacts/mesa_1727233829772/work
61 | Mesa-Viz-Tornado @ file:///home/conda/feedstock_root/build_artifacts/mesa-viz-tornado_1702488965922/work
62 | mkl-fft @ file:///C:/b/abs_19i1y8ykas/croot/mkl_fft_1695058226480/work
63 | mkl-random @ file:///C:/b/abs_edwkj1_o69/croot/mkl_random_1695059866750/work
64 | mkl-service==2.4.0
65 | mmcv-full==1.7.0
66 | mmdet==2.27.0
67 | mmengine==0.10.7
68 | mmpose==0.29.0
69 | mpmath @ file:///C:/b/abs_7833jrbiox/croot/mpmath_1690848321154/work
70 | munkres==1.1.4
71 | nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1712238998817/work
72 | nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work
73 | networkx @ file:///home/conda/feedstock_root/build_artifacts/networkx_1680692919326/work
74 | numpy @ file:///C:/Users/dev-admin/mkl/numpy_and_numpy_base_1682982345978/work
75 | opencv-python==4.11.0.86
76 | packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work
77 | pandas @ file:///D:/bld/pandas_1688740647960/work
78 | parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1712320355065/work
79 | pefile==2023.2.7
80 | pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
81 | pillow @ file:///C:/b/abs_32o8er3uqp/croot/pillow_1721059447598/work
82 | pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1694617248815/work
83 | platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1726613481435/work
84 | prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1727341649933/work
85 | psutil @ file:///D:/bld/psutil_1719274704414/work
86 | pure_eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1721585709575/work
87 | pycocotools==2.0.7
88 | Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1714846767233/work
89 | pyinstaller==6.12.0
90 | pyinstaller-hooks-contrib==2025.1
91 | pymdown-extensions @ file:///home/conda/feedstock_root/build_artifacts/pymdown-extensions_1730190036531/work
92 | PyOpenGL==3.1.1a1
93 | pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work
94 | PySocks @ file:///C:/ci/pysocks_1605287845585/work
95 | python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work
96 | python-slugify @ file:///home/conda/feedstock_root/build_artifacts/python-slugify-split_1707425621764/work
97 | pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work
98 | PyWavelets==1.4.1
99 | pywin32==306
100 | pywin32-ctypes==0.2.3
101 | PyYAML @ file:///D:/bld/pyyaml_1723018320093/work
102 | pyzmq @ file:///D:/bld/pyzmq_1724399214825/work
103 | reacton @ file:///home/conda/feedstock_root/build_artifacts/reacton_1734528158621/work
104 | referencing @ file:///home/conda/feedstock_root/build_artifacts/referencing_1714619483868/work
105 | regex==2024.11.6
106 | requests @ file:///C:/b/abs_9frifg92q2/croot/requests_1721410901096/work
107 | rich @ file:///home/conda/feedstock_root/build_artifacts/rich_1730592237829/work/dist
108 | rich-click @ file:///home/conda/feedstock_root/build_artifacts/rich-click_1718026092578/work
109 | rpds-py @ file:///D:/bld/rpds-py_1723039254776/work
110 | scikit-image==0.21.0
111 | scikit-learn==1.3.2
112 | scipy==1.10.1
113 | shapely==2.0.7
114 | six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
115 | sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1708952932303/work
116 | solara @ file:///home/conda/feedstock_root/build_artifacts/solara-suite_1734683618346/work/solara
117 | solara-server @ file:///home/conda/feedstock_root/build_artifacts/solara-suite_1734683618346/work/solara_server
118 | solara-ui @ file:///home/conda/feedstock_root/build_artifacts/solara-suite_1734683618346/work/solara_ui
119 | stack-data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1669632077133/work
120 | starlette @ file:///home/conda/feedstock_root/build_artifacts/starlette_1730305708644/work
121 | sympy @ file:///C:/b/abs_4ay0cgwu2w/croot/sympy_1734629241647/work
122 | termcolor==2.4.0
123 | terminaltables==3.1.10
124 | text-unidecode @ file:///home/conda/feedstock_root/build_artifacts/text-unidecode_1694707102786/work
125 | threadpoolctl==3.5.0
126 | tifffile==2023.7.10
127 | tomli==2.2.1
128 | torch==1.13.1
129 | torchvision==0.14.1
130 | tornado @ file:///D:/bld/tornado_1717722870081/work
131 | tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1732497199771/work
132 | traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1713535121073/work
133 | types-python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/types-python-dateutil_1727940235703/work
134 | typing_extensions @ file:///C:/b/abs_0as9mdbkfl/croot/typing_extensions_1715268906610/work
135 | tzdata @ file:///home/conda/feedstock_root/build_artifacts/python-tzdata_1727140567071/work
136 | unicodedata2 @ file:///D:/bld/unicodedata2_1695848058846/work
137 | urllib3 @ file:///C:/b/abs_9a_f8h_bn2/croot/urllib3_1727769836930/work
138 | uvicorn @ file:///D:/bld/uvicorn_1732185154543/work
139 | watchdog @ file:///D:/bld/watchdog_1716561935652/work
140 | watchfiles @ file:///D:/bld/watchfiles_1723066568671/work
141 | wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1704731205417/work
142 | websockets @ file:///D:/bld/websockets_1724222240678/work
143 | widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1724331337528/work
144 | win-inet-pton @ file:///C:/ci/win_inet_pton_1605306167264/work
145 | window==0.0.3
146 | xtcocotools==1.14.3
147 | yapf==0.43.0
148 | zipp==3.20.2
149 |
--------------------------------------------------------------------------------
/result/1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/result/1.gif
--------------------------------------------------------------------------------
/result/10.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/result/10.gif
--------------------------------------------------------------------------------
/result/2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/result/2.gif
--------------------------------------------------------------------------------
/result/3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/result/3.gif
--------------------------------------------------------------------------------
/result/5.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/result/5.gif
--------------------------------------------------------------------------------
/result/video.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/result/video.gif
--------------------------------------------------------------------------------
/server.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, request, jsonify
2 | import torch
3 | import base64
4 | import mmcv
5 | import os
6 | from mmdet.apis import inference_detector, init_detector
7 | from mmpose.apis import (inference_bottom_up_pose_model,
8 | inference_top_down_pose_model, init_pose_model)
9 | from mmpose.models.detectors import AssociativeEmbedding, TopDown
10 |
11 | app = Flask(__name__)
12 |
13 | # Initialize the MMdetHandler for detection
14 | class MMdetHandler:
15 | def __init__(self, model_dir):
16 | self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
17 | self.device = torch.device(self.map_location)
18 |
19 | self.config_file = os.path.join(model_dir, 'detector_config.py')
20 | self.checkpoint = os.path.join(model_dir, 'detector_model.pth') # Adjust for your detection model
21 |
22 | self.model = init_detector(self.config_file, self.checkpoint, self.device)
23 |
24 | def preprocess(self, data):
25 | images = []
26 | for row in data:
27 | image = row.get('data') or row.get('body')
28 | if isinstance(image, str):
29 | image = base64.b64decode(image)
30 | image = mmcv.imfrombytes(image)
31 | images.append(image)
32 | return images
33 |
34 | def inference(self, data):
35 | results = []
36 | for image in data:
37 | result = inference_detector(self.model, image)
38 | results.append(result)
39 | return results
40 |
41 | def postprocess(self, data):
42 | output = []
43 | for image_index, image_result in enumerate(data):
44 | output.append([])
45 | bbox_result = image_result if not isinstance(image_result, tuple) else image_result[0]
46 | for class_index, class_result in enumerate(bbox_result):
47 | class_name = self.model.CLASSES[class_index]
48 | for bbox in class_result:
49 | bbox_coords = bbox[:-1].tolist()
50 | score = float(bbox[-1])
51 | if score >= 0.5: # Set your score threshold
52 | output[image_index].append({
53 | 'class_name': class_name,
54 | 'bbox': bbox_coords,
55 | 'score': score
56 | })
57 | return output
58 |
59 | # Initialize the MMPoseHandler for pose estimation
60 | class MMPoseHandler:
61 | def __init__(self, model_dir):
62 | self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
63 | self.device = torch.device(self.map_location)
64 |
65 | self.config_file = os.path.join(model_dir, 'pose_config.py')
66 | self.checkpoint = os.path.join(model_dir, 'pose_model.pth') # Adjust for your pose model
67 |
68 | self.model = init_pose_model(self.config_file, self.checkpoint, self.device)
69 |
70 | def preprocess(self, data):
71 | images = []
72 | for row in data:
73 | image = row.get('data') or row.get('body')
74 | if isinstance(image, str):
75 | image = base64.b64decode(image)
76 | image = mmcv.imfrombytes(image)
77 | images.append(image)
78 | return images
79 |
80 | def inference(self, data):
81 | if isinstance(self.model, TopDown):
82 | return self._inference_top_down_pose_model(data)
83 | elif isinstance(self.model, AssociativeEmbedding):
84 | return self._inference_bottom_up_pose_model(data)
85 | else:
86 | raise NotImplementedError(f'Model type {type(self.model)} is not supported.')
87 |
88 | def _inference_top_down_pose_model(self, data):
89 | results = []
90 | for image in data:
91 | preds, _ = inference_top_down_pose_model(self.model, image, person_results=None)
92 | results.append(preds)
93 | return results
94 |
95 | def _inference_bottom_up_pose_model(self, data):
96 | results = []
97 | for image in data:
98 | preds, _ = inference_bottom_up_pose_model(self.model, image)
99 | results.append(preds)
100 | return results
101 |
102 | def postprocess(self, data):
103 | output = [[{'keypoints': pred['keypoints'].tolist()} for pred in preds] for preds in data]
104 | return output
105 |
106 | # Create instances of both handlers
107 | detector_handler = MMdetHandler('models') # Use the model directory for detection
108 | pose_handler = MMPoseHandler('models') # Use the model directory for pose estimation
109 |
110 | @app.route('/detect', methods=['POST'])
111 | def detect():
112 | try:
113 | # Get input data from request
114 | input_data = request.files['image'].read()
115 |
116 | # Preprocess the image
117 | images = detector_handler.preprocess([{'data': base64.b64encode(input_data).decode('utf-8')}])
118 |
119 | # Run detection inference
120 | results = detector_handler.inference(images)
121 |
122 | # Postprocess results
123 | output = detector_handler.postprocess(results)
124 |
125 | return jsonify(output[0])
126 |
127 | except Exception as e:
128 | return jsonify({'error': str(e)}), 500
129 |
130 | @app.route('/pose', methods=['POST'])
131 | def pose():
132 | try:
133 | # Get input data from request
134 | input_data = request.files['image'].read()
135 |
136 | # Preprocess the image
137 | images = pose_handler.preprocess([{'data': base64.b64encode(input_data).decode('utf-8')}])
138 |
139 | # Run pose estimation inference
140 | results = pose_handler.inference(images)
141 |
142 | # Postprocess results
143 | output = pose_handler.postprocess(results)
144 |
145 | return jsonify(output[0])
146 |
147 | except Exception as e:
148 | return jsonify({'error': str(e)}), 500
149 |
150 | def run():
151 | app.run(debug=True)
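
run() starts Flask's development server, which defaults to http://127.0.0.1:5000. Both routes expect a multipart form field named 'image'. A minimal client sketch using requests (host and port are the Flask defaults, an assumption since this file never calls run() itself):

    import requests

    with open('input_image/1.png', 'rb') as f:
        r = requests.post('http://127.0.0.1:5000/detect', files={'image': f})
    print(r.json())   # list of {'class_name', 'bbox', 'score'} dicts above the 0.5 threshold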
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # This source code is licensed under the MIT license found in the
3 | # LICENSE file in the root directory of this source tree.
4 |
5 | from setuptools import find_packages, setup
6 |
7 | setup(
8 | name='animated_drawings',
9 | description="Companion code for `A Method For Automatically Animating Children's Drawings of the Human Form.`",
10 | author='FAIR',
11 | author_email='jesse.smith@meta.com',
12 | python_requires='>=3.8.13',
13 | install_requires=[
14 | 'numpy==1.24.4',
15 | 'scipy==1.10.0',
16 | 'scikit-image==0.19.3',
17 | 'scikit-learn==1.1.2',
18 | 'shapely==1.8.5.post1',
19 | 'opencv-python==4.6.0.66',
20 | 'Pillow==10.1.0',
21 | 'glfw==2.5.5',
22 | 'PyOpenGL==3.1.6',
23 | 'PyYAML==6.0.1',
24 | 'requests==2.28.2',
25 | 'torchserve==0.7.0',
26 | 'tqdm==4.65.0',
27 | 'Flask==2.3.2'
28 | ],
29 | packages=find_packages(),
30 | )
31 |
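With these exact pins, a local editable install from the repo root is the usual path; note the versions here differ from requirements.txt above (e.g. scipy 1.10.0 vs 1.10.1, opencv-python 4.6.0.66 vs 4.11.0.86), so one environment cannot satisfy both exactly:

    pip install -e .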
--------------------------------------------------------------------------------
/torchserve/__pycache__/server.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/torchserve/__pycache__/server.cpython-311.pyc
--------------------------------------------------------------------------------
/torchserve/__pycache__/server.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/torchserve/__pycache__/server.cpython-312.pyc
--------------------------------------------------------------------------------
/torchserve/__pycache__/server.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/torchserve/__pycache__/server.cpython-38.pyc
--------------------------------------------------------------------------------
/torchserve/server.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, request, jsonify
2 | import torch
3 | import base64
4 | import mmcv
5 | import os
6 | from mmdet.apis import inference_detector, init_detector
7 | from mmpose.apis import (inference_bottom_up_pose_model,
8 | inference_top_down_pose_model, init_pose_model)
9 | from mmpose.models.detectors import AssociativeEmbedding, TopDown
10 |
11 | app = Flask(__name__)
12 |
13 | # Initialize the MMdetHandler for detection
14 | class MMdetHandler:
15 | def __init__(self, model_dir):
16 | self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
17 | self.device = torch.device(self.map_location)
18 |
19 | self.config_file = os.path.join(model_dir, 'detector_config.py')
20 | self.checkpoint = os.path.join(model_dir, 'detector_model.pth') # Adjust for your detection model
21 |
22 | self.model = init_detector(self.config_file, self.checkpoint, self.device)
23 |
24 | def preprocess(self, data):
25 | images = []
26 | for row in data:
27 | image = row.get('data') or row.get('body')
28 | if isinstance(image, str):
29 | image = base64.b64decode(image)
30 | image = mmcv.imfrombytes(image)
31 | images.append(image)
32 | return images
33 |
34 | def inference(self, data):
35 | results = []
36 | for image in data:
37 | result = inference_detector(self.model, image)
38 | results.append(result)
39 | return results
40 |
41 | def postprocess(self, data):
42 | output = []
43 | for image_index, image_result in enumerate(data):
44 | output.append([])
45 | bbox_result = image_result if not isinstance(image_result, tuple) else image_result[0]
46 | for class_index, class_result in enumerate(bbox_result):
47 | class_name = self.model.CLASSES[class_index]
48 | for bbox in class_result:
49 | bbox_coords = bbox[:-1].tolist()
50 | score = float(bbox[-1])
51 | if score >= 0.5: # Set your score threshold
52 | output[image_index].append({
53 | 'class_name': class_name,
54 | 'bbox': bbox_coords,
55 | 'score': score
56 | })
57 | return output
58 |
59 | # Initialize the MMPoseHandler for pose estimation
60 | class MMPoseHandler:
61 | def __init__(self, model_dir):
62 | self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
63 | self.device = torch.device(self.map_location)
64 |
65 | self.config_file = os.path.join(model_dir, 'pose_config.py')
66 | self.checkpoint = os.path.join(model_dir, 'pose_model.pth') # Adjust for your pose model
67 |
68 | self.model = init_pose_model(self.config_file, self.checkpoint, self.device)
69 |
70 | def preprocess(self, data):
71 | images = []
72 | for row in data:
73 | image = row.get('data') or row.get('body')
74 | if isinstance(image, str):
75 | image = base64.b64decode(image)
76 | image = mmcv.imfrombytes(image)
77 | images.append(image)
78 | return images
79 |
80 | def inference(self, data):
81 | if isinstance(self.model, TopDown):
82 | return self._inference_top_down_pose_model(data)
83 | elif isinstance(self.model, AssociativeEmbedding):
84 | return self._inference_bottom_up_pose_model(data)
85 | else:
86 | raise NotImplementedError(f'Model type {type(self.model)} is not supported.')
87 |
88 | def _inference_top_down_pose_model(self, data):
89 | results = []
90 | for image in data:
91 | preds, _ = inference_top_down_pose_model(self.model, image, person_results=None)
92 | results.append(preds)
93 | return results
94 |
95 | def _inference_bottom_up_pose_model(self, data):
96 | results = []
97 | for image in data:
98 | preds, _ = inference_bottom_up_pose_model(self.model, image)
99 | results.append(preds)
100 | return results
101 |
102 | def postprocess(self, data):
103 | output = [[{'keypoints': pred['keypoints'].tolist()} for pred in preds] for preds in data]
104 | return output
105 |
106 | # Create instances of both handlers
107 | detector_handler = MMdetHandler('model-store') # Use the model directory for detection
108 | pose_handler = MMPoseHandler('model-store') # Use the model directory for pose estimation
109 |
110 | @app.route('/detect', methods=['POST'])
111 | def detect():
112 | try:
113 | # Get input data from request
114 | input_data = request.files['image'].read()
115 |
116 | # Preprocess the image
117 | images = detector_handler.preprocess([{'data': base64.b64encode(input_data).decode('utf-8')}])
118 |
119 | # Run detection inference
120 | results = detector_handler.inference(images)
121 |
122 | # Postprocess results
123 | output = detector_handler.postprocess(results)
124 |
125 | return jsonify(output[0])
126 |
127 | except Exception as e:
128 | return jsonify({'error': str(e)}), 500
129 |
130 | @app.route('/pose', methods=['POST'])
131 | def pose():
132 | try:
133 | # Get input data from request
134 | input_data = request.files['image'].read()
135 |
136 | # Preprocess the image
137 | images = pose_handler.preprocess([{'data': base64.b64encode(input_data).decode('utf-8')}])
138 |
139 | # Run pose estimation inference
140 | results = pose_handler.inference(images)
141 |
142 | # Postprocess results
143 | output = pose_handler.postprocess(results)
144 |
145 | return jsonify(output[0])
146 |
147 | except Exception as e:
148 | return jsonify({'error': str(e)}), 500
149 |
150 | def run():
151 | app.run(debug=True)
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from . import smooth, camera, vis
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/utils/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/utils/__pycache__/__init__.cpython-312.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/camera.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/utils/__pycache__/camera.cpython-311.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/camera.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/utils/__pycache__/camera.cpython-312.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/smooth.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/utils/__pycache__/smooth.cpython-311.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/smooth.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/utils/__pycache__/smooth.cpython-312.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/vis.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/utils/__pycache__/vis.cpython-311.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/vis.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WesleyCodeMast/mmpose-bvh-detection/fbe1d76d27528716022c97bd320ec33924785646/utils/__pycache__/vis.cpython-312.pyc
--------------------------------------------------------------------------------
/utils/camera.py:
--------------------------------------------------------------------------------
1 | import h5py
2 | import numpy as np
3 | from pathlib import Path
4 |
5 | def load_camera_params(file):
6 | cam_file = Path(file)
7 | cam_params = {}
8 | azimuth = {
9 | '54138969': 70, '55011271': -70, '58860488': 110, '60457274': -100
10 | }
11 | with h5py.File(cam_file) as f:
12 | subjects = [1, 5, 6, 7, 8, 9, 11]
13 | for s in subjects:
14 | cam_params[f'S{s}'] = {}
15 | for _, params in f[f'subject{s}'].items():
16 | name = params['Name'][:]
17 | name = "".join( [chr(int(item)) for item in name] )
18 | val = {}
19 | val['R'] = np.array(params['R'])
20 | val['T'] = np.array(params['T'])
21 | val['c'] = np.array(params['c'])
22 | val['f'] = np.array(params['f'])
23 | val['k'] = np.array(params['k'])
24 | val['p'] = np.array(params['p'])
25 | val['azimuth'] = azimuth[name]
26 | cam_params[f'S{s}'][name] = val
27 |
28 | return cam_params
29 |
30 | def world2camera(pose, R, T):
31 | """
32 | Args:
33 | pose: numpy array with shape (..., 3)
34 | R: numpy array with shape (3, 3)
35 | T: numpy array with shape (3, 1)
36 | """
37 | assert pose.shape[-1] == 3
38 | original_shape = pose.shape
39 | pose_world = pose.copy().reshape((-1, 3)).T
40 | pose_cam = np.matmul(R.T, pose_world - T)
41 | pose_cam = pose_cam.T.reshape(original_shape)
42 | return pose_cam
43 |
44 |
45 | def camera2world(pose, R, T):
46 | """
47 | Args:
48 | pose: numpy array with shape (..., 3)
49 | R: numpy array with shape (3, 3)
50 | T: numpy array with shape (3, 1)
51 | """
52 | assert pose.shape[-1] == 3
53 | original_shape = pose.shape
54 | pose_cam = pose.copy().reshape((-1, 3)).T
55 | pose_world = np.matmul(R, pose_cam) + T
56 | pose_world = pose_world.T.reshape(original_shape)
57 | return pose_world
58 |
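world2camera and camera2world are exact inverses whenever R is orthonormal; a round-trip sketch on synthetic data:

    import numpy as np

    rng = np.random.default_rng(0)
    R, _ = np.linalg.qr(rng.normal(size=(3, 3)))   # orthonormal 3x3
    T = rng.normal(size=(3, 1))
    pose = rng.normal(size=(17, 3))
    assert np.allclose(camera2world(world2camera(pose, R, T), R, T), pose)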
--------------------------------------------------------------------------------
/utils/smooth.py:
--------------------------------------------------------------------------------
1 | def filter_missing_value(keypoints_list, method='ignore'):
2 | # TODO: implement the 'interpolate' method.
3 | """Filter missing values in a pose list.
4 | Args:
5 | keypoints_list: Estimation results returned by the 2D estimator; missing
6 | values are None.
7 | method: 'ignore' -> drop missing values.
8 | Return:
9 | Keypoints list without missing values.
10 | """
11 |
12 | result = []
13 | if method == 'ignore':
14 | result = [pose for pose in keypoints_list if pose is not None]
15 | else:
16 | raise ValueError(f'{method} is not a valid method.')
17 |
18 | return result
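A minimal check of the 'ignore' path:

    import numpy as np

    kps = [np.zeros((17, 3)), None, np.ones((17, 3))]
    assert len(filter_missing_value(kps, method='ignore')) == 2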
--------------------------------------------------------------------------------
/utils/vis.py:
--------------------------------------------------------------------------------
1 | from . import camera
2 |
3 | import cv2
4 | import numpy as np
5 | import os
6 | from pathlib import Path
7 |
8 | import matplotlib.pyplot as plt
9 | import mpl_toolkits.mplot3d.axes3d
10 | from matplotlib.animation import FuncAnimation, writers
11 |
12 |
13 | def vis_2d_keypoints(
14 | keypoints, img, skeleton, kp_thresh,
15 | alpha=0.7, output_file=None, show_name=False):
16 |
17 | # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
18 | cmap = plt.get_cmap('rainbow')
19 | colors = [cmap(i) for i in np.linspace(0, 1, skeleton.keypoint_num)]
20 | colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]
21 |
22 | mask = img.copy()
23 | root = skeleton.root
24 | stack = [root]
25 | while stack:
26 | parent = stack.pop()
27 | p_idx = skeleton.keypoint2index[parent]
28 | p_pos = int(keypoints[p_idx, 0]), int(keypoints[p_idx, 1])
29 | p_score = keypoints[p_idx, 2] if kp_thresh is not None else None
30 | if kp_thresh is None or p_score > kp_thresh:
31 | cv2.circle(
32 | mask, p_pos, radius=3,
33 | color=colors[p_idx], thickness=-1, lineType=cv2.LINE_AA)
34 | if show_name:
35 | cv2.putText(mask, parent, p_pos, cv2.FONT_HERSHEY_SIMPLEX,
36 | 0.5, (0, 255, 0))
37 | for child in skeleton.children[parent]:
38 | if child not in skeleton.keypoint2index or \
39 | skeleton.keypoint2index[child] < 0:
40 | continue
41 | stack.append(child)
42 | c_idx = skeleton.keypoint2index[child]
43 | c_pos = int(keypoints[c_idx, 0]), int(keypoints[c_idx, 1])
44 | c_score = keypoints[c_idx, 2] if kp_thresh is not None else None
45 | if kp_thresh is None or \
46 | (p_score > kp_thresh and c_score > kp_thresh):
47 | cv2.line(
48 | mask, p_pos, c_pos,
49 | color=colors[c_idx], thickness=2, lineType=cv2.LINE_AA)
50 |
51 | vis_result = cv2.addWeighted(img, 1.0 - alpha, mask, alpha, 0)
52 | if output_file:
53 | file = Path(output_file)
54 | if not file.parent.exists():
55 | os.makedirs(file.parent)
56 | cv2.imwrite(str(output_file), vis_result)
57 |
58 | return vis_result
59 |
60 |
61 | def vis_3d_keypoints(keypoints, skeleton, azimuth, elev=15):
62 | x_max, x_min = np.max(keypoints[:, 0]), np.min(keypoints[:, 0])
63 | y_max, y_min = np.max(keypoints[:, 1]), np.min(keypoints[:, 1])
64 | z_max, z_min = np.max(keypoints[:, 2]), np.min(keypoints[:, 2])
65 | radius = max(x_max - x_min, y_max - y_min, z_max - z_min) / 2
66 |
67 | fig = plt.figure()
68 | ax = fig.add_subplot(111, projection='3d')
69 | ax.view_init(elev=elev, azim=azimuth)
70 | ax.set_xlim3d([-radius, radius])
71 | ax.set_ylim3d([-radius, radius])
72 | ax.set_zlim3d([0, 2 * radius])
73 |
74 | root = skeleton.root
75 | stack = [root]
76 | while stack:
77 | parent = stack.pop()
78 | p_idx = skeleton.keypoint2index[parent]
79 | p_pos = keypoints[p_idx]
80 | for child in skeleton.children[parent]:
81 | if skeleton.keypoint2index.get(child, -1) == -1:
82 | continue
83 | stack.append(child)
84 | c_idx = skeleton.keypoint2index[child]
85 | c_pos = keypoints[c_idx]
86 | if child in skeleton.left_joints:
87 | color = 'b'
88 | elif child in skeleton.right_joints:
89 | color = 'r'
90 | else:
91 | color = 'k'
92 | ax.plot(
93 | xs=[p_pos[0], c_pos[0]],
94 | ys=[p_pos[1], c_pos[1]],
95 | zs=[p_pos[2], c_pos[2]],
96 | c=color, marker='.', zdir='z'
97 | )
98 |
99 | return
100 |
101 |
102 | def vis_3d_keypoints_sequence(
103 | keypoints_sequence, skeleton, azimuth,
104 | fps=30, elev=15, output_file=None
105 | ):
106 | kps_sequence = keypoints_sequence
107 | x_max, x_min = np.max(kps_sequence[:, :, 0]), np.min(kps_sequence[:, :, 0])
108 | y_max, y_min = np.max(kps_sequence[:, :, 1]), np.min(kps_sequence[:, :, 1])
109 | z_max, z_min = np.max(kps_sequence[:, :, 2]), np.min(kps_sequence[:, :, 2])
110 | radius = max(x_max - x_min, y_max - y_min, z_max - z_min) / 2
111 |
112 | fig = plt.figure()
113 | ax = fig.add_subplot(111, projection='3d')
114 | ax.view_init(elev=elev, azim=azimuth)
115 | ax.set_xlim3d([-radius, radius])
116 | ax.set_ylim3d([-radius, radius])
117 | ax.set_zlim3d([0, 2 * radius])
118 |
119 | initialized = False
120 | lines = []
121 |
122 | def update(frame):
123 | nonlocal initialized
124 |
125 | if not initialized:
126 | root = skeleton.root
127 | stack = [root]
128 | while stack:
129 | parent = stack.pop()
130 | p_idx = skeleton.keypoint2index[parent]
131 | p_pos = kps_sequence[0, p_idx]
132 | for child in skeleton.children[parent]:
133 | if skeleton.keypoint2index.get(child, -1) == -1:
134 | continue
135 | stack.append(child)
136 | c_idx = skeleton.keypoint2index[child]
137 | c_pos = kps_sequence[0, c_idx]
138 | if child in skeleton.left_joints:
139 | color = 'b'
140 | elif child in skeleton.right_joints:
141 | color = 'r'
142 | else:
143 | color = 'k'
144 | line = ax.plot(
145 | xs=[p_pos[0], c_pos[0]],
146 | ys=[p_pos[1], c_pos[1]],
147 | zs=[p_pos[2], c_pos[2]],
148 | c=color, marker='.', zdir='z'
149 | )
150 | lines.append(line)
151 | initialized = True
152 | else:
153 | line_idx = 0
154 | root = skeleton.root
155 | stack = [root]
156 | while stack:
157 | parent = stack.pop()
158 | p_idx = skeleton.keypoint2index[parent]
159 | p_pos = kps_sequence[frame, p_idx]
160 | for child in skeleton.children[parent]:
161 | if skeleton.keypoint2index.get(child, -1) == -1:
162 | continue
163 | stack.append(child)
164 | c_idx = skeleton.keypoint2index[child]
165 | c_pos = kps_sequence[frame, c_idx]
166 | if child in skeleton.left_joints:
167 | color = 'b'
168 | elif child in skeleton.right_joints:
169 | color = 'r'
170 | else:
171 | color = 'k'
172 | lines[line_idx][0].set_xdata([p_pos[0], c_pos[0]])
173 | lines[line_idx][0].set_ydata([p_pos[1], c_pos[1]])
174 | lines[line_idx][0].set_3d_properties([p_pos[2], c_pos[2]])
175 | line_idx += 1
176 |
177 | anim = FuncAnimation(
178 | fig=fig, func=update, frames=kps_sequence.shape[0], interval=1000 / fps
179 | )
180 |
181 | if output_file:
182 | output_file = Path(output_file)
183 | if not output_file.parent.exists():
184 | os.makedirs(output_file.parent)
185 | if output_file.suffix == '.mp4':
186 | Writer = writers['ffmpeg']
187 | writer = Writer(fps=fps, metadata={}, bitrate=3000)
188 | anim.save(output_file, writer=writer)
189 | elif output_file.suffix == '.gif':
190 | anim.save(output_file, dpi=80, writer='imagemagick')
191 | else:
192 | raise ValueError('Unsupported output format. '
193 | 'Only mp4 and gif are supported.')
194 |
195 | return anim
196 |
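A sketch of driving vis_2d_keypoints with the OpenPose skeleton defined earlier in this repo (synthetic keypoints; the output path is illustrative):

    import numpy as np
    from bvh_skeleton.openpose_skeleton import OpenPoseSkeleton
    from utils.vis import vis_2d_keypoints

    skel = OpenPoseSkeleton()
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    kps = np.zeros((skel.keypoint_num, 3))              # (x, y, score) per joint
    kps[:, 0] = np.linspace(100, 540, skel.keypoint_num)
    kps[:, 1] = np.linspace(80, 400, skel.keypoint_num)
    kps[:, 2] = 1.0
    vis_2d_keypoints(kps, img, skel, kp_thresh=0.5, output_file='vis/kp.png')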
--------------------------------------------------------------------------------