├── .github
│   └── workflows
│       └── python-package.yml
├── .gitignore
├── Changelog.md
├── LICENSE
├── Readme.md
├── benchmarks
│   └── benchmark.py
├── etc
│   ├── blendtorch_intro_v3.svg
│   ├── export_paths.bat
│   └── result_physics.png
├── examples
│   ├── compositor_normals_depth
│   │   ├── Readme.md
│   │   ├── compositor_normals_depth.blend
│   │   ├── compositor_normals_depth.blend.py
│   │   ├── etc
│   │   │   └── normals_depth.png
│   │   ├── generate.py
│   │   └── scene_helpers.py
│   ├── control
│   │   ├── Readme.md
│   │   ├── cartpole.py
│   │   ├── cartpole_gym
│   │   │   ├── __init__.py
│   │   │   └── envs
│   │   │       ├── __init__.py
│   │   │       ├── cartpole.blend
│   │   │       ├── cartpole.blend.py
│   │   │       └── cartpole_env.py
│   │   └── etc
│   │       ├── blendtorch_gym.svg
│   │       └── capture.gif
│   ├── datagen
│   │   ├── Readme.md
│   │   ├── cube.blend
│   │   ├── cube.blend.py
│   │   ├── cube_compositor.blend
│   │   ├── cube_compositor.blend.py
│   │   ├── etc
│   │   │   ├── blendtorch_datagen.svg
│   │   │   └── result_physics.png
│   │   ├── falling_cubes.blend
│   │   ├── falling_cubes.blend.py
│   │   ├── generate.py
│   │   ├── minimal.py
│   │   └── tmp
│   │       └── __keep__
│   └── densityopt
│       ├── Readme.md
│       ├── densityopt.py
│       ├── etc
│       │   ├── blendtorch_stochopt.pdf
│       │   ├── densityopt.gif
│       │   ├── real.png
│       │   ├── sim_samples_010.png
│       │   ├── sim_samples_020.png
│       │   ├── sim_samples_030.png
│       │   ├── sim_samples_040.png
│       │   ├── sim_samples_050.png
│       │   ├── sim_samples_060.png
│       │   ├── sim_samples_070.png
│       │   └── sim_samples_080.png
│       ├── supershape.blend
│       ├── supershape.blend.py
│       └── tmp
│           └── __keep__
├── pkg_blender
│   ├── blendtorch
│   │   └── btb
│   │       ├── __init__.py
│   │       ├── animation.py
│   │       ├── arguments.py
│   │       ├── camera.py
│   │       ├── constants.py
│   │       ├── duplex.py
│   │       ├── env.py
│   │       ├── materials.py
│   │       ├── offscreen.py
│   │       ├── paths.py
│   │       ├── publisher.py
│   │       ├── renderer.py
│   │       ├── signal.py
│   │       ├── utils.py
│   │       └── version.py
│   ├── requirements.txt
│   └── setup.py
├── pkg_pytorch
│   ├── blendtorch
│   │   └── btt
│   │       ├── __init__.py
│   │       ├── apps
│   │       │   ├── __init__.py
│   │       │   └── launch.py
│   │       ├── colors.py
│   │       ├── constants.py
│   │       ├── dataset.py
│   │       ├── duplex.py
│   │       ├── env.py
│   │       ├── env_rendering.py
│   │       ├── file.py
│   │       ├── finder.py
│   │       ├── launch_info.py
│   │       ├── launcher.py
│   │       ├── utils.py
│   │       └── version.py
│   ├── requirements.txt
│   └── setup.py
├── pytest.ini
├── requirements_dev.txt
├── scripts
│   ├── install_blender.sh
│   └── install_btb.py
└── tests
    ├── __init__.py
    ├── blender
    │   ├── anim.blend.py
    │   ├── cam.blend
    │   ├── cam.blend.py
    │   ├── compositor.blend
    │   ├── compositor.blend.py
    │   ├── dataset.blend.py
    │   ├── dataset_robust.blend.py
    │   ├── duplex.blend.py
    │   ├── env.blend
    │   ├── env.blend.py
    │   └── launcher.blend.py
    ├── test_animation.py
    ├── test_camera.py
    ├── test_compositor.py
    ├── test_dataset.py
    ├── test_duplex.py
    ├── test_env.py
    ├── test_file.py
    └── test_launcher.py
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package
5 |
6 | on:
7 | push:
8 | branches: [develop]
9 | pull_request:
10 | branches: [develop]
11 |
12 | jobs:
13 | build:
14 | runs-on: ubuntu-latest
15 | strategy:
16 | fail-fast: false
17 | matrix:
18 | python-version: ["3.8", "3.9", "3.10"]
19 |
20 | steps:
21 | - uses: actions/checkout@v3
22 | - name: Set up Python ${{ matrix.python-version }}
23 | uses: actions/setup-python@v3
24 | with:
25 | python-version: ${{ matrix.python-version }}
26 | cache: "pip"
27 | - name: Prepare
28 | run: |
29 | sudo apt-get update
30 | sudo apt-get install -y mesa-utils xvfb libgl1-mesa-dev
31 | - name: Download Blender
32 | run: |
33 | scripts/install_blender.sh
34 | source .envs
35 | - name: Install Blender dependencies
36 | run: |
37 | source .envs
38 | blender --background --python scripts/install_btb.py
39 | blender --background --python-use-system-env --python-expr "import blendtorch.btb as btb; print (btb.__version__)"
40 | - name: Install dependencies
41 | run: |
42 | python -m pip install --upgrade pip
43 | python -m pip install flake8 pytest
44 | python -m pip install -e pkg_pytorch
45 | python -m pip install -r requirements_dev.txt
46 | python -c "import blendtorch.btt as btt; print(btt.__version__)"
47 | - name: Lint with flake8
48 | run: |
49 | # stop the build if there are Python syntax errors or undefined names
50 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
51 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
52 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
53 | - name: Test with pytest
54 | run: |
55 | source .envs
56 | xvfb-run pytest tests -m background
57 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | **/tmp/*
107 | *.blend1
108 | **/tmp/*
109 | !__keep__
--------------------------------------------------------------------------------
/Changelog.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | All notable changes to this project will be documented in this file.
3 |
4 | ## [Unreleased]
5 |
6 | ## [0.2.0] - 2020-08-04
7 | - Support for training RL agents in OpenAI environments defined in Blender. See `examples/control` for details.
8 | - Support for dataset transformations when `num_workers > 0`.
9 | - Support for message recording when `num_workers > 0`.
10 | - Made `blendtorch.btb` and `blendtorch.btt` installable packages. See `Readme.md` for details.
11 | - Remote data streaming now uses PyTorch IterableDataset and
12 | hence simplifies the interface. See `examples/datagen` for details.
13 | - Added unit tests and CI.
14 |
15 | ## [0.1.0] - 2020-07-10
16 | - added Blender Eevee 2.8 support
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Christoph Heindl
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | [![Build Status](https://github.com/cheind/pytorch-blender/actions/workflows/python-package.yml/badge.svg)](https://github.com/cheind/pytorch-blender/actions/workflows/python-package.yml)
2 |
3 | # blendtorch
4 |
5 | **blendtorch** is a Python framework to seamlessly integrate Blender into PyTorch for deep learning from artificial visual data. We utilize Eevee, a new physically based real-time renderer, to synthesize images and annotations in real-time and thus avoid stalling model training in many cases.
6 |
7 | If you find the project helpful, please consider [citing](#cite_anchor) it.
8 |
9 | Feature summary
10 | - ***Data Generation***: Stream distributed Blender renderings directly into PyTorch data pipelines in real-time for supervised learning and domain randomization applications. Supports arbitrary pickle-able objects to be sent alongside images/videos. Built-in recording capability to replay data without Blender. Bi-directional communication channels allow Blender simulations to adapt during network training. More info [\[examples/datagen\]](examples/datagen), [\[examples/compositor_normals_depth\]](examples/compositor_normals_depth), [\[examples/densityopt\]](examples/densityopt)
11 | - ***OpenAI Gym Support***: Create and run remotely controlled Blender gyms to train reinforcement agents. Blender serves as simulation, visualization, and interactive live manipulation environment.
12 | More info [\[examples/control\]](examples/control)
13 |
14 | The figure below visualizes the basic concept of **blendtorch** used in the context of generating artificial training data for a real-world detection task.
15 |
16 |
17 | 
18 |
19 | Fig 1: With Blendtorch, you are able to train your PyTorch modules on massively randomized artificial data generated by Blender simulations.
20 |
21 | ## Getting started
22 | 1. Read the installation instructions below
23 | 1. To get started with **blendtorch** for generating training data, read [\[examples/datagen\]](examples/datagen).
24 | 1. To learn about using **blendtorch** for creating reinforcement training environments read [\[examples/control\]](examples/control).
25 |
26 | ## Prerequisites
27 | This package has been tested with
28 | - [Blender](https://www.blender.org/) >= 2.83/2.91/3.0/3.1 (Python >= 3.7)
29 | - [PyTorch](http://pytorch.org) >= 1.5/1.10 (Python >= 3.7)
30 |
31 | running on Windows 10 and Linux. Other versions might work as well, but have not been tested.
32 |
33 | ## Installation
34 |
35 | **blendtorch** is composed of two distinct sub-packages:
36 | - `blendtorch.btt` located in [pkg_pytorch](./pkg_pytorch) and
37 | - `blendtorch.btb` located in [pkg_blender](./pkg_blender),
38 |
39 | providing the PyTorch and Blender views on **blendtorch**. `blendtorch.btt` will be installed to your local Python environment, while `blendtorch.btb` will be installed to the Python environment that ships with Blender.
40 |
41 | 1. Clone this repository
42 |
43 | ```
44 | git clone https://github.com/cheind/pytorch-blender.git
45 | ```
46 | 1. Extend `PATH`
47 |
48 | Ensure the Blender executable is in your environment's lookup `PATH`. On Windows this can be accomplished by
49 | ```
50 | set PATH=c:\Program Files\Blender Foundation\Blender 2.91;%PATH%
51 | ```
52 | On Ubuntu, when Blender is [installed using snap](https://snapcraft.io/install/blender/ubuntu), the path can be included by adding the following line to your `~/.bashrc`,
53 | ```
54 | export PATH=/snap/blender/current/${PATH:+:${PATH}}
55 | ```
56 | 1. Complete Blender settings
57 |
58 | Open Blender at least once and complete the initial settings. If this step is skipped, some of the tests (especially the tests related to RL) will fail (Blender 2.91).
59 | 1. Install `blendtorch.btb`
60 |
61 | Run
62 | ```
63 | blender --background --python scripts/install_btb.py
64 | ```
65 | from the repository root to install `blendtorch-btb` into the Python environment bundled with Blender.
66 | 1. Install `blendtorch.btt`
67 |
68 | Run
69 | ```
70 | pip install -e pkg_pytorch
71 | ```
72 | from the repository root to install `blendtorch-btt` into the Python environment that you intend to run PyTorch from.
73 | 1. Install `gym` [optional]
74 |
75 | While not required, it is advised to install OpenAI gym if you intend to use **blendtorch** for reinforcement learning
76 | ```
77 | pip install gym
78 | ```
79 | 1. Install dev requirements [optional]
80 |
81 | If you plan to run the unit tests, install the development requirements and run
82 | ```
83 | pip install -r requirements_dev.txt
84 | pytest tests/
85 | ```
86 |
87 | ## Troubleshooting
88 | Run
89 | ```
90 | blender --version
91 | ```
92 | and check if the correct Blender version (>=2.83) is written to the console. Next, ensure that `blendtorch-btb` was installed correctly
93 | ```
94 | blender --background --python-use-system-env --python-expr "import blendtorch.btb as btb; print(btb.__version__)"
95 | ```
96 | which should print the **blendtorch** version number on success. Next, ensure that `blendtorch-btt` was installed correctly
97 | ```
98 | python -c "import blendtorch.btt as btt; print(btt.__version__)"
99 | ```
100 | which should print the **blendtorch** version number on success.
101 |
102 | ## Architecture
103 | Please see [\[examples/datagen\]](examples/datagen) and [\[examples/control\]](examples/control) for an in-depth architectural discussion. Bi-directional communication is explained in [\[examples/densityopt\]](examples/densityopt).
104 |
105 | ## Runtimes
106 |
107 | The following tables show the mean runtimes per batch (8) and per image for a simple Cube scene (640x480xRGBA). See [benchmarks/benchmark.py](./benchmarks/benchmark.py) for details. The timings include rendering, transfer, decoding and batch collating. Reported timings are for Blender 2.8. Blender 2.9 performs equally well on this scene, but is usually faster for more complex renderings.
108 |
109 | | Blender Instances | Runtime sec/batch | Runtime sec/image | Arguments|
110 | |:-:|:-:|:-:|:-:|
111 | | 1 | 0.236 | 0.030| UI refresh|
112 | | 2 | 0.14 | 0.018| UI refresh|
113 | | 4 | 0.099 | 0.012| UI refresh|
114 | | 5 | 0.085 | 0.011| no UI refresh|
115 |
116 | Note: If no image transfer is needed, e.g., in reinforcement learning of physical simulations, 2000 Hz are easily achieved.
117 |
118 |
119 | ## Cite
120 | The code accompanies our academic work [[1]](https://arxiv.org/abs/1907.01879),[[2]](https://arxiv.org/abs/2010.11696) in the field of machine learning from artificial images. Please consider the following publications when citing **blendtorch**
121 | ```
122 | @inproceedings{blendtorch_icpr2020_cheind,
123 | author = {Christoph Heindl, Lukas Brunner, Sebastian Zambal and Josef Scharinger},
124 | title = {BlendTorch: A Real-Time, Adaptive Domain Randomization Library},
125 | booktitle = {
126 | 1st Workshop on Industrial Machine Learning
127 | at International Conference on Pattern Recognition (ICPR2020)
128 | },
129 | year = {2020},
130 | }
131 |
132 | @inproceedings{robotpose_etfa2019_cheind,
133 | author={Christoph Heindl, Sebastian Zambal, Josef Scharinger},
134 | title={Learning to Predict Robot Keypoints Using Artificially Generated Images},
135 | booktitle={
136 | 24th IEEE International Conference on
137 | Emerging Technologies and Factory Automation (ETFA)
138 | },
139 | year={2019}
140 | }
141 | ```
142 |
143 | ## Caveats
144 | - Although offscreen rendering is supported in Blender 2.8x, it requires a UI frontend and thus cannot run in `--background` mode. If your application does not require offscreen renderings, you may enable background usage (see [tests/](tests/) for examples).
145 | - The renderings produced by Blender are by default in linear color space and thus will appear darker than expected when displayed. A gamma transformation, as sketched below, corrects for this.
146 |
--------------------------------------------------------------------------------
/benchmarks/benchmark.py:
--------------------------------------------------------------------------------
1 | import time
2 | import argparse
3 | from pathlib import Path
4 | import torch.utils.data as data
5 | import matplotlib.pyplot as plt
6 | import numpy as np
7 |
8 | from blendtorch import btt
9 |
10 | BATCH = 8
11 | INSTANCES = 4
12 | WORKER_INSTANCES = 4
13 | NUM_ITEMS = 512
14 | EXAMPLES_DIR = Path(__file__).parent / ".." / "examples" / "datagen"
15 |
16 |
17 | def main():
18 | parser = argparse.ArgumentParser()
19 | parser.add_argument("scene", help="Blender scene name to run", default="cube")
20 | args = parser.parse_args()
21 |
22 | launch_args = dict(
23 | scene=EXAMPLES_DIR / f"{args.scene}.blend",
24 | script=EXAMPLES_DIR / f"{args.scene}.blend.py",
25 | num_instances=INSTANCES,
26 | named_sockets=["DATA"],
27 | )
28 |
29 | with btt.BlenderLauncher(**launch_args) as bl:
30 | ds = btt.RemoteIterableDataset(bl.launch_info.addresses["DATA"])
31 | ds.stream_length(NUM_ITEMS)
32 | dl = data.DataLoader(
33 | ds, batch_size=BATCH, num_workers=WORKER_INSTANCES, shuffle=False
34 | )
35 |
36 | # Wait to avoid timing startup times of Blender
37 | time.sleep(5)
38 |
39 | t0 = None
40 | tlast = None
41 | imgshape = None
42 |
43 | elapsed = []
44 | n = 0
45 | for item in dl:
46 | n += len(item["image"])
47 | if t0 is None: # 1st is warmup
48 | t0 = time.time()
49 | tlast = t0
50 | imgshape = item["image"].shape
51 | elif n % (50 * BATCH) == 0:
52 | t = time.time()
53 | elapsed.append(t - tlast)
54 | tlast = t
55 | print(".", end="")
56 | assert n == NUM_ITEMS
57 |
58 | t1 = time.time()
59 | N = NUM_ITEMS - BATCH
60 | B = NUM_ITEMS // BATCH - 1
61 | print(
62 | f"Time {(t1-t0)/N:.3f}sec/image, {(t1-t0)/B:.3f}sec/batch, shape {imgshape}"
63 | )
64 |
65 | fig, _ = plt.subplots()
66 | plt.plot(np.arange(len(elapsed)), elapsed)
67 | plt.title("Receive times between 50 consecutive batches")
68 | save_path = EXAMPLES_DIR / "tmp" / "batches_elapsed.png"
69 | fig.savefig(str(save_path))
70 | plt.close(fig)
71 | print(f"Figure saved to {save_path}")
72 |
73 |
74 | if __name__ == "__main__":
75 | main()
76 |
--------------------------------------------------------------------------------
/etc/export_paths.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | set PATH=c:\Program Files\Blender Foundation\Blender 2.90;%PATH%
3 | @echo on
--------------------------------------------------------------------------------
/etc/result_physics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/etc/result_physics.png
--------------------------------------------------------------------------------
/examples/compositor_normals_depth/Readme.md:
--------------------------------------------------------------------------------
1 | ## Compositor Render Support
2 |
3 | This directory showcases synthetic data generation using **blendtorch** for supervised machine learning. In particular, we use composite rendering to extract normals and depths from a randomized scene. The scene is composed of a fixed plane and a number of parametric 3D supershapes. Using physics, we drop a random initial constellation of objects onto the plane. Once the objects come to rest (we speed up the physics, so this roughly happens after a single frame), we publish dense camera depth and normal information.
4 |
5 |
6 |
7 |
8 |
9 | ### Composite rendering
10 | This sample uses the compositor to access different render passes. Unfortunately, Blender (2.9) does not offer a straightforward way to access the result of various render passes in memory. Therefore, `btb.CompositeRenderer` requires `FileOutput` nodes for temporary storage of data. For this purpose, a fast OpenEXR reader, [py-minexr](https://github.com/cheind/py-minexr), was developed and integrated into **blendtorch**.
11 |
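The setup, condensed from [compositor_normals_depth.blend.py](./compositor_normals_depth.blend.py), looks roughly as follows; the `Out1` node name and the `Normals`/`Depth` slots are specific to the scene in this directory.

```python
import blendtorch.btb as btb

btargs, _ = btb.parse_blendtorch_args()

# Each CompositeSelection maps a published key to a FileOutput node,
# the slot on that node and the channels to extract.
render = btb.CompositeRenderer(
    [
        btb.CompositeSelection("normals", "Out1", "Normals", "RGB"),
        btb.CompositeSelection("depth", "Out1", "Depth", "V"),
    ],
    btid=btargs.btid,
    camera=btb.Camera(),
)
imgs = render.render()  # dict with 'normals' and 'depth' arrays
```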
12 | ### Normals
13 | Camera normals are generated by a custom geometry-based material. Since colors must be in the range (0,1) but normals are in (-1,1), a transformation is applied to make them compatible with color ranges. Hence, in PyTorch, apply the following transformation to recover the true normals
14 | ```python
15 | true_normals = (normals - 0.5)*np.array([2., 2., -2.]).reshape(1,1,1,-1) # BxHxWx3
16 | ```
17 |
18 | ### Run
19 |
20 | To recreate these results run [generate.py](./generate.py)
21 | ```
22 | python generate.py
23 | ```
--------------------------------------------------------------------------------
/examples/compositor_normals_depth/compositor_normals_depth.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/compositor_normals_depth/compositor_normals_depth.blend
--------------------------------------------------------------------------------
/examples/compositor_normals_depth/compositor_normals_depth.blend.py:
--------------------------------------------------------------------------------
1 | import blendtorch.btb as btb
2 | import bpy
3 |
4 | SHAPE = (30, 30)
5 | NSHAPES = 70
6 |
7 |
8 | def main():
9 | # Update python-path with current blend file directory
10 | btb.add_scene_dir_to_path()
11 | import scene_helpers as scene
12 |
13 | def pre_anim(meshes):
14 | # Called before each animation
15 | # Randomize supershapes
16 | for m in meshes:
17 | scene.update_mesh(m, sshape_res=SHAPE)
18 |
19 | def post_frame(render, pub, animation):
20 | # After frame
21 | if anim.frameid == 1:
22 | imgs = render.render()
23 | pub.publish(normals=imgs["normals"], depth=imgs["depth"])
24 |
25 | # Parse script arguments passed via blendtorch launcher
26 | btargs, _ = btb.parse_blendtorch_args()
27 |
28 | # Fetch camera
29 | cam = bpy.context.scene.camera
30 |
31 | bpy.context.scene.rigidbody_world.time_scale = 100
32 | bpy.context.scene.rigidbody_world.substeps_per_frame = 300
33 |
34 | # Setup supershapes
35 | meshes = scene.prepare(NSHAPES, sshape_res=SHAPE)
36 |
37 | # Data source
38 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid)
39 |
40 | # Setup default image rendering
41 | cam = btb.Camera()
42 | render = btb.CompositeRenderer(
43 | [
44 | btb.CompositeSelection("normals", "Out1", "Normals", "RGB"),
45 | btb.CompositeSelection("depth", "Out1", "Depth", "V"),
46 | ],
47 | btid=btargs.btid,
48 | camera=cam,
49 | )
50 |
51 | # Setup the animation and run endlessly
52 | anim = btb.AnimationController()
53 | anim.pre_animation.add(pre_anim, meshes)
54 | anim.post_frame.add(post_frame, render, pub, anim)
55 | anim.play(
56 | frame_range=(0, 1), num_episodes=-1, use_offline_render=False, use_physics=True
57 | )
58 |
59 |
60 | main()
61 |
--------------------------------------------------------------------------------
/examples/compositor_normals_depth/etc/normals_depth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/compositor_normals_depth/etc/normals_depth.png
--------------------------------------------------------------------------------
/examples/compositor_normals_depth/generate.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import blendtorch.btt as btt
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | from torch.utils import data
7 |
8 |
9 | def main():
10 | # Define how we want to launch Blender
11 | launch_args = dict(
12 | scene=Path(__file__).parent / "compositor_normals_depth.blend",
13 | script=Path(__file__).parent / "compositor_normals_depth.blend.py",
14 | num_instances=1,
15 | named_sockets=["DATA"],
16 | )
17 |
18 | # Launch Blender
19 | with btt.BlenderLauncher(**launch_args) as bl:
20 | # Create remote dataset and limit max length to 16 elements.
21 | addr = bl.launch_info.addresses["DATA"]
22 | ds = btt.RemoteIterableDataset(addr, max_items=4)
23 | dl = data.DataLoader(ds, batch_size=4, num_workers=0)
24 |
25 | for item in dl:
26 | normals = item["normals"]
27 | # Note, normals are color-coded (0..1), to convert back to original
28 | # range (-1..1) use
29 | # true_normals = (normals - 0.5) * \
30 | # torch.tensor([2., 2., -2.]).view(1, 1, 1, -1)
31 | depth = item["depth"]
32 | print("Received", normals.shape, depth.shape, depth.dtype, np.ptp(depth))
33 |
34 | fig, axs = plt.subplots(2, 2)
35 | axs = np.asarray(axs).reshape(-1)
36 | for i in range(4):
37 | axs[i].imshow(depth[i, :, :, 0], vmin=1, vmax=2.5)
38 | fig, axs = plt.subplots(2, 2)
39 | axs = np.asarray(axs).reshape(-1)
40 | for i in range(4):
41 | axs[i].imshow(normals[i, :, :])
42 | plt.show()
43 |
44 |
45 | if __name__ == "__main__":
46 | main()
47 |
--------------------------------------------------------------------------------
/examples/compositor_normals_depth/scene_helpers.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import numpy as np
3 | import blendtorch.btb as btb
4 | import supershape as sshape
5 |
6 | SCN = bpy.context.scene
7 |
8 |
9 | def create_sshape_mesh(shape, material=None, fake_user=False):
10 | new_obj = sshape.make_bpy_mesh(shape, name="sshape", coll=False, weld=True)
11 | new_obj.data.use_fake_user = fake_user
12 | new_obj.use_fake_user = fake_user
13 | if material is not None:
14 | new_obj.data.materials.append(material)
15 | new_obj.active_material_index = 0
16 | return new_obj
17 |
18 |
19 | def prepare(n_sshapes, sshape_res=(100, 100), collection="Generated", fake_user=False):
20 | coll = SCN.collection.children[collection]
21 |
22 | # The following material renders camera-space normals
23 | mat = btb.materials.create_normal_material("normals")
24 |
25 | plane = bpy.data.objects["Plane"]
26 | plane.active_material = mat
27 |
28 | sshapes = [
29 | create_sshape_mesh(sshape_res, material=mat, fake_user=fake_user)
30 | for _ in range(n_sshapes)
31 | ]
32 |
33 | # Setup physics
34 | for s in sshapes:
35 | coll.objects.link(s)
36 | SCN.rigidbody_world.collection.objects.link(s)
37 | # Rigid body settings
38 | s.rigid_body.enabled = True
39 | s.rigid_body.collision_shape = "BOX"
40 | s.rigid_body.friction = 0.7
41 | s.rigid_body.linear_damping = 0.3
42 | s.rigid_body.angular_damping = 0.4
43 | s.rigid_body.type = "ACTIVE"
44 |
45 | return sshapes
46 |
47 |
48 | def update_mesh(mesh, sshape_res=(100, 100)):
49 | params = np.random.uniform(
50 | low=[1.00, 1, 1, 6.0, 6.0, 6.0],
51 | high=[4.00, 1, 1, 10.0, 10.0, 10.0],
52 | size=(2, 6),
53 | )
54 | scale = np.abs(np.random.normal(0.05, 0.05, size=3))
55 | x, y, z = sshape.supercoords(params, shape=sshape_res)
56 | sshape.update_bpy_mesh(x * scale[0], y * scale[1], z * scale[2], mesh)
57 | mesh.location = np.random.uniform(low=[-0.5, -0.5, 1], high=[0.5, 0.5, 3], size=(3))
58 | mesh.rotation_euler = np.random.uniform(low=-np.pi, high=np.pi, size=(3))
59 |
--------------------------------------------------------------------------------
/examples/control/Readme.md:
--------------------------------------------------------------------------------
1 | ## Classic Control
2 |
3 | This directory contains a recreation of OpenAI's `CartPole-v0` environment running in a remote Blender process. In contrast to OpenAI's version, this environment leverages Blender's built-in physics engine to simulate the cartpole. The agent operates the cart by applying forces to it from a separate process.
4 |
5 | All communication is handled by **blendtorch** in the background, so it appears like any other native OpenAI environment for the agent.
6 |
7 |
8 |
9 |
10 |
11 | ### Code
12 |
13 | ```python
14 | import gym
15 | import cartpole_gym
16 |
17 | KAPPA = 30
18 |
19 | def control(obs):
20 | xcart, xpole, _ = obs
21 | return (xpole-xcart)*KAPPA
22 |
23 | def main():
24 | env = gym.make('blendtorch-cartpole-v0', real_time=False)
25 |
26 | obs = env.reset()
27 | while True:
28 | obs, reward, done, info = env.step(control(obs))
29 | if done:
30 | obs = env.reset()
31 | env.done()
32 | ```
33 | Related code: environment [cartpole_gym](./cartpole_gym), agent [cartpole.py](cartpole.py)
34 |
35 | ### Run it
36 | Make sure you have Blender, **blendtorch** (see main [Readme](/Readme.md)), and OpenAI gym (`pip install gym`) installed. Navigate to `examples/control` and run
37 | ```
38 | python cartpole.py
39 | ```
40 |
41 | ### Real-time vs. non real-time
42 | Environments running via **blendtorch** support a real-time execution mode `real_time=True`. When enabled, the simulation continues independent of the time it takes the agent to respond. Enabling this mode will require your agent to take into account any latency that occurs from network transmission to action computation.
43 |
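For example, with the cartpole environment from this directory:

```python
import gym
import cartpole_gym  # registers blendtorch-cartpole-v0

# The Blender simulation keeps advancing even while the agent
# is still computing its next action.
env = gym.make("blendtorch-cartpole-v0", real_time=True)
```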
44 | ### Environment rendering
45 | We consider Blender itself as the main tool to view and (interactively) manipulate the state of the environment. In case you want a separate viewer call `env.render()` during your training loop.
46 |
47 | ### Architecture
48 | In Reinforcement Learning (RL) we are interested in training an agent, embedded in an environment, to solve a specific task through experience.
49 |
50 | OpenAI's [gym](https://gym.openai.com/) offers a well-established API and a set of predefined environments/tasks to work with RL challenges. Our goal with **blendtorch** is to integrate Blender as an ecosystem into OpenAI's gym. With Blender, the RL community gains a tool that allows them to easily model, simulate and manipulate an environment.
51 |
52 | Our design separates the agent from the Blender environment. The figure below shows the architecture for a simple cartpole environment. While the actual environment is designed and implemented in Blender, the agent lives in a separate Python process. The agent interacts through a proxy cartpole environment with the actual environment running in Blender. The environment interface exposed to the agent follows standard OpenAI recommendations.
53 |
54 |
55 |
56 |
57 |
58 | Adding a new environment usually requires the following steps (see [cartpole_gym](./cartpole_gym) for details):
59 | - Create a new Python package `cartpole_gym`.
60 | - Add a Blender scene `cartpole.blend` and model the entities required.
61 | - Add a script `cartpole.blend.py` to contain
62 | - The environment implementation `CartpoleEnv` by inheriting from `btb.env.BaseEnv` (see the sketch after this list).
63 | - Instantiate `CartpoleEnv` and use an instance of `btb.env.RemoteControlledAgent` upon startup.
64 | - Add a `cartpole_env.py` and expose `CartpoleEnv` by inheriting from `btt.env.OpenAIRemoteEnv`. Additionally define OpenAI action and observation spaces.
65 | - Register your gym `blendtorch-cartpole-v0` with OpenAI in `__init__.py`.
66 | - Optionally provide a `setup.py` to make your package installable.
67 |
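A condensed sketch of the Blender-side part, adapted from [cartpole.blend.py](./cartpole_gym/envs/cartpole.blend.py) with the method bodies abbreviated:

```python
import bpy
from blendtorch import btb


class CartpoleEnv(btb.env.BaseEnv):
    def __init__(self, agent):
        super().__init__(agent)
        self.cart = bpy.data.objects["Cart"]

    def _env_reset(self):
        # Reset the simulation state and return the initial observation.
        self.cart.location = (0.0, 0.0, 1.2)
        return self._env_post_step()

    def _env_prepare_step(self, action):
        # Apply the agent's action before the next physics frame.
        pass

    def _env_post_step(self):
        # Observation, reward and done flag after the frame has been simulated.
        return dict(obs=self.cart.matrix_world.translation[0], reward=0.0, done=False)
```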
68 | You may use your environment as follows
69 | ```python
70 | import gym
71 | import cartpole_gym
72 |
73 | env = gym.make('blendtorch-cartpole-v0')
74 | ...
75 | ```
76 |
77 | In the background, `btt.env.OpenAIRemoteEnv` starts a single Blender instance which executes the scene `cartpole.blend` and the script `cartpole.blend.py`. Resetting `env.reset()` and stepping `env.step()` are automatically converted to remote service calls to Blender that are invisible to the agent.
78 |
79 |
80 |
81 |
82 |
83 |
84 |
--------------------------------------------------------------------------------
/examples/control/cartpole.py:
--------------------------------------------------------------------------------
1 | """Cartpole controller that interacts with Blender environment through OpenAI.
2 |
3 | Run
4 | python cartpole.py
5 | from this directory.
6 |
7 | Note, the custom environment is registered in `cartpole_gym/__init__.py`. The
8 | file `cartpole_gym/cartpole_env.py` defines a OpenAI compatible Blender environment.
9 | The actual environment logic is implemented in Blender script
10 | `cartpole_gym/cartpole.blend.py` and scene `cartpole_gym/cartpole.blend`.
11 | """
12 |
13 |
14 | import gym
15 | import cartpole_gym
16 |
17 | KAPPA = 30
18 |
19 |
20 | def control(obs):
21 | # Simple P controller defining the error as xpole-xcart
22 | xcart, xpole, _ = obs
23 | return (xpole - xcart) * KAPPA
24 |
25 |
26 | def main():
27 | # Create the environment. The environment is registered in
28 | # `cartpole_gym/__init__.py`. Set `real_time=True` when you
29 | # want to keep the simulation running until the agent response.
30 | env = gym.make("blendtorch-cartpole-v0", real_time=False)
31 |
32 | obs = env.reset()
33 | while True:
34 | obs, reward, done, info = env.step(control(obs))
35 | env.render()
36 | if done:
37 | obs = env.reset()
38 | env.done()
39 |
40 |
41 | if __name__ == "__main__":
42 | main()
43 |
--------------------------------------------------------------------------------
/examples/control/cartpole_gym/__init__.py:
--------------------------------------------------------------------------------
1 | from gym.envs.registration import register
2 |
3 | register(id="blendtorch-cartpole-v0", entry_point="cartpole_gym.envs:CartpoleEnv")
4 |
--------------------------------------------------------------------------------
/examples/control/cartpole_gym/envs/__init__.py:
--------------------------------------------------------------------------------
1 | from .cartpole_env import CartpoleEnv # noqa
2 |
--------------------------------------------------------------------------------
/examples/control/cartpole_gym/envs/cartpole.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/control/cartpole_gym/envs/cartpole.blend
--------------------------------------------------------------------------------
/examples/control/cartpole_gym/envs/cartpole.blend.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import numpy as np
3 |
4 | from blendtorch import btb
5 |
6 |
7 | class CartpoleEnv(btb.env.BaseEnv):
8 | def __init__(self, agent):
9 | super().__init__(agent)
10 | self.cart = bpy.data.objects["Cart"]
11 | self.pole = bpy.data.objects["Pole"]
12 | self.polerot = bpy.data.objects["PoleRotHelp"]
13 | self.motor = bpy.data.objects["Motor"].rigid_body_constraint
14 | # Note, ensure that physics run at same speed.
15 | self.fps = bpy.context.scene.render.fps
16 | self.total_mass = self.cart.rigid_body.mass + self.pole.rigid_body.mass
17 |
18 | def _env_prepare_step(self, action):
19 | self._apply_motor_force(action)
20 |
21 | def _env_reset(self):
22 | self.motor.motor_lin_target_velocity = 0.0
23 | self.cart.location = np.array([0.0, 0, 1.2])
24 | self.polerot.rotation_euler[1] = np.random.uniform(-0.6, 0.6)
25 | return self._env_post_step()
26 |
27 | def _env_post_step(self):
28 | c = self.cart.matrix_world.translation[0]
29 | p = self.pole.matrix_world.translation[0]
30 | a = self.pole.matrix_world.to_euler("XYZ")[1]
31 | return dict(obs=(c, p, a), reward=0.0, done=bool(abs(a) > 0.6 or abs(c) > 4.0))
32 |
33 | def _apply_motor_force(self, f):
34 | # a = f/m
35 | # assuming constant acceleration between two steps we have
36 | # v_(t+1) = v(t) + a*dt, from which we get
37 | # v_(t+1) = v(t) + (f/m)*dt
38 | self.motor.motor_lin_target_velocity = (
39 | self.motor.motor_lin_target_velocity + f / self.total_mass / self.fps
40 | )
41 |
42 |
43 | def main():
44 | args, remainder = btb.parse_blendtorch_args()
45 | import argparse
46 |
47 | parser = argparse.ArgumentParser()
48 | parser.add_argument("--render-every", default=None, type=int)
49 | parser.add_argument("--real-time", dest="realtime", action="store_true")
50 | parser.add_argument("--no-real-time", dest="realtime", action="store_false")
51 | envargs = parser.parse_args(remainder)
52 |
53 | agent = btb.env.RemoteControlledAgent(
54 | args.btsockets["GYM"], real_time=envargs.realtime
55 | )
56 | env = CartpoleEnv(agent)
57 | env.attach_default_renderer(every_nth=envargs.render_every)
58 | env.run(frame_range=(1, 10000), use_animation=True)
59 |
60 |
61 | main()
62 |
--------------------------------------------------------------------------------
/examples/control/cartpole_gym/envs/cartpole_env.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import numpy as np
3 | from gym import spaces
4 | from blendtorch import btt
5 |
6 |
7 | class CartpoleEnv(btt.env.OpenAIRemoteEnv):
8 | def __init__(self, render_every=10, real_time=False):
9 |
10 | super().__init__(version="0.0.1")
11 | self.launch(
12 | scene=Path(__file__).parent / "cartpole.blend",
13 | script=Path(__file__).parent / "cartpole.blend.py",
14 | real_time=real_time,
15 | render_every=render_every,
16 | )
17 |
18 | self.action_space = spaces.Box(np.float32(-100), np.float32(100), shape=(1,))
19 | self.observation_space = spaces.Box(np.float32(-10), np.float32(10), shape=(1,))
20 |
--------------------------------------------------------------------------------
/examples/control/etc/capture.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/control/etc/capture.gif
--------------------------------------------------------------------------------
/examples/datagen/Readme.md:
--------------------------------------------------------------------------------
1 | ## Supervised Training Data Generation
2 |
3 | This directory showcases synthetic data generation using **blendtorch** for supervised machine learning. In particular, several Blender processes render randomized scene configurations and stream images as well as annotations into a PyTorch dataset used in training neural networks.
4 |
5 | The figure below visualizes a single image/label batch received by PyTorch from four parallel Blender instances. Each Blender process repeatedly performs motion simulations of randomized cubes.
6 |
7 |
8 |
9 |
10 |
11 | ### Run
12 |
13 | To recreate these results run [generate.py](./generate.py) using the [falling_cubes](./falling_cubes.blend) scene as follows
14 | ```
15 | python generate.py falling_cubes
16 | ```
17 | which will generate output images in `./tmp/output_##.png`.
18 |
19 | ### Record and replay
20 |
21 | See [generate.py](./generate.py) for usage of recording and replay features.
22 | Saving images is only done for demonstration purposes. **blendtorch** does not require intermediate disk storage to run.
23 |
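A condensed sketch of both features, adapted from [generate.py](./generate.py); `addresses` is assumed to come from a running `BlenderLauncher` and the recording path is only an example.

```python
import blendtorch.btt as btt

# While streaming from Blender, also record the raw messages to disk ...
ds = btt.RemoteIterableDataset(addresses)
ds.enable_recording("./tmp/record_falling_cubes")

# ... later, replay the recorded data without any Blender instance running.
ds = btt.FileDataset("./tmp/record_falling_cubes")
```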
24 | ### Minimal example
25 | The following snippets show the minimal necessary code to use **blendtorch** for connecting PyTorch datasets to Blender renderings/annotations. To run the example, invoke
26 | ```
27 | python minimal.py
28 | ```
29 |
30 | #### PyTorch part
31 | On the PyTorch side, [minimal.py](./minimal.py) performs the following steps:
32 |
33 | ```python
34 | from pathlib import Path
35 | from torch.utils import data
36 |
37 | import blendtorch.btt as btt
38 |
39 | def main():
40 | launch_args = dict(
41 | scene=Path(__file__).parent/'cube.blend',
42 | script=Path(__file__).parent/'cube.blend.py',
43 | num_instances=2,
44 | named_sockets=['DATA'],
45 | )
46 |
47 | # Launch Blender
48 | with btt.BlenderLauncher(**launch_args) as bl:
49 | # Create remote dataset and limit max length to 16 elements.
50 | addr = bl.launch_info.addresses['DATA']
51 | ds = btt.RemoteIterableDataset(addr, max_items=16)
52 | dl = data.DataLoader(ds, batch_size=4, num_workers=4)
53 |
54 | for item in dl:
55 | # item is a dict with custom content (see cube.blend.py)
56 | img, xy = item['image'], item['xy']
57 | print('Received', img.shape, xy.shape)
58 |
59 | if __name__ == '__main__':
60 | main()
61 | ```
62 | #### Blender part
63 | When [minimal.py](./minimal.py) launches Blender, each instance will be running
64 | scene [cube.blend](./cube.blend) and script [cube.blend.py](./cube.blend.py) containing:
65 |
66 | ```python
67 | import bpy
68 | import numpy as np
69 |
70 | import blendtorch.btb as btb
71 |
72 | def main():
73 | # Parse script arguments passed via blendtorch
74 | btargs, remainder = btb.parse_blendtorch_args()
75 |
76 | cam = bpy.context.scene.camera
77 | cube = bpy.data.objects["Cube"]
78 |
79 | def pre_frame():
80 | # Randomize cube rotation
81 | cube.rotation_euler = np.random.uniform(0,np.pi,size=3)
82 |
83 | def post_frame(off, pub, anim, cam):
84 | # Called every after Blender finished processing a frame.
85 | pub.publish(
86 | image=off.render(),
87 | xy=cam.object_to_pixel(cube),
88 | frameid=anim.frameid
89 | )
90 |
91 | # Data source
92 | pub = btb.DataPublisher(btargs.btsockets['DATA'], btargs.btid)
93 |
94 | # Setup default image rendering
95 | cam = btb.Camera()
96 | off = btb.OffScreenRenderer(camera=cam, mode='rgb')
97 | off.set_render_style(shading='RENDERED', overlays=False)
98 |
99 | # Setup the animation and run endlessly
100 | anim = btb.AnimationController()
101 | anim.pre_frame.add(pre_frame)
102 | anim.post_frame.add(post_frame, off, pub, anim, cam)
103 | anim.play(frame_range=(0,100), num_episodes=-1)
104 |
105 | main()
106 | ```
107 |
108 | ### Launching and connecting to remote instances
109 | Often you will find it convenient to launch Blender instances on a machine *A* while model training is supposed to happen on machine *B*. To facilitate this use case, **blendtorch** comes with a set of supporting tools.
110 |
111 | From here on we assume that *A* has `pkg_pytorch` and `pkg_blender` installed, while *B* has at least `pkg_pytorch` installed.
112 |
113 | On *A* run
114 | ```
115 | blendtorch-launch launch.json
116 | ```
117 | where `launch.json` contains a dictionary of keyword arguments for `btt.BlenderLauncher`. For example
118 | ```json
119 | {
120 | "scene": "",
121 | "script": "/tests/blender/launcher.blend.py",
122 | "num_instances": 2,
123 | "named_sockets": [
124 | "DATA",
125 | "GYM"
126 | ],
127 | "background": true,
128 | "seed": 10,
129 | "bind_addr": "primaryip"
130 | }
131 | ```
132 | Upon launch, `blendtorch-launch` writes connection information to `launch_info.json`. For example
133 | ```json
134 | {
135 | "addresses": {
136 | "DATA": [
137 | "tcp://192.168.20.148:11000",
138 | "tcp://192.168.20.148:11001"
139 | ],
140 | "GYM": [
141 | "tcp://192.168.20.148:11002",
142 | "tcp://192.168.20.148:11003"
143 | ]
144 | }
145 | }
146 | ```
147 | Notice that `primaryip` was automatically resolved to the IP address of the interface with the default route configured. Now, to connect from *B*, ensure the machine has access to `launch_info.json` and connect as follows
148 |
149 | ```python
150 | import blendtorch.btt as btt
151 | launch_info = btt.LaunchInfo.load_json('launch_info.json')
152 | ds = btt.RemoteIterableDataset(launch_info.addresses['DATA'], max_items=2)
153 | item = next(iter(ds))
154 | print(item.keys())
155 | #...
156 | ```
157 |
158 | ### Architecture
159 |
160 | **blendtorch** is composed of two distinct sub-packages: `blendtorch.btt` (in [pkg_pytorch](./pkg_pytorch)) and `blendtorch.btb` (in [pkg_blender](./pkg_blender)), providing the PyTorch and Blender views on **blendtorch**.
161 |
162 | In data streaming, we are interested in sending supervised image data from multiple Blender processes to a Python process running model training. This process is depicted below.
163 |
164 |
165 |
166 |
167 |
168 | Typically a Python script, e.g. `train.py`, launches and maintains one or more Blender instances using `blendtorch.btt.BlenderLauncher`. Each Blender instance is instructed to run a particular scene and script, e.g. `blend.py`. Next, `train.py` creates a `RemoteIterableDataset` to listen for incoming network messages from Blender instances. We use a `PUSH/PULL` pipeline pattern that supports fair queuing and will stall Blender instances when `train.py` is too slow to process all messages.
169 |
170 | Each Blender instance, running `blend.py`, meanwhile creates a `blendtorch.btb.DataPublisher` to send outward messages. The addresses are taken from command-line arguments and are automatically provided by `blendtorch.btt.BlenderLauncher`. Next, `blend.py` registers the necessary animation hooks and usually creates one or more `blendtorch.btb.OffScreenRenderer` to capture offscreen images. Usually the scene is randomized in `pre_frame` callbacks, and during `post_frame` the resulting frame is rendered and sent via the output channel alongside any (pickle-able) meta information desired.
171 |
172 | #### Distributed computing
173 | **blendtorch** supports two kinds of data parallelism: Blender instances and PyTorch workers. We use a [PUSH/PULL pattern](https://learning-0mq-with-pyzmq.readthedocs.io/en/latest/pyzmq/patterns/pushpull.html) that allows us to fan out from multiple Blender instances and distribute the workload to any number of PyTorch workers.
174 |
175 | It is guaranteed that only one PyTorch worker receives a particular message and that no message is lost, but the order in which messages are received is not guaranteed. When PyTorch is too slow to process all messages in time, the Blender instances will eventually block until new slots are available. When the number of PyTorch workers is one (i.e., `num_workers=0` in DataLoader), all messages will be received in the order they have been generated.
176 |
177 | Every PyTorch worker interleaves messages from all connected Blender instances in a fair manner. You may use the `btid` message field to determine which Blender instance sent which message, as sketched below.
178 |
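For instance, a small (hypothetical) check that counts how many samples each Blender instance contributed to a run; `dl` is assumed to be a DataLoader wrapping a `RemoteIterableDataset` as in the minimal example above.

```python
from collections import Counter

counts = Counter()
for item in dl:
    # `btid` identifies the Blender instance that produced each sample.
    counts.update(item["btid"].tolist())
print(counts)  # e.g. Counter({0: 34, 1: 30})
```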
179 |
180 |
181 |
--------------------------------------------------------------------------------
/examples/datagen/cube.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/datagen/cube.blend
--------------------------------------------------------------------------------
/examples/datagen/cube.blend.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import numpy as np
3 |
4 | import blendtorch.btb as btb
5 |
6 |
7 | def main():
8 | # Parse script arguments passed via blendtorch launcher
9 | btargs, remainder = btb.parse_blendtorch_args()
10 |
11 | cam = bpy.context.scene.camera
12 | cube = bpy.data.objects["Cube"]
13 |
14 | def pre_frame():
15 | # Randomize cube rotation
16 | cube.rotation_euler = np.random.uniform(0, np.pi, size=3)
17 |
18 | def post_frame(off, pub, anim, cam):
19 | # Called every after Blender finished processing a frame.
20 | # Will be sent to one of the remote dataset listener connected.
21 | pub.publish(
22 | image=off.render(), xy=cam.object_to_pixel(cube), frameid=anim.frameid
23 | )
24 |
25 | # Data source
26 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid)
27 |
28 | # Setup default image rendering
29 | cam = btb.Camera()
30 | off = btb.OffScreenRenderer(camera=cam, mode="rgb")
31 | off.set_render_style(shading="RENDERED", overlays=False)
32 |
33 | # Setup the animation and run endlessly
34 | anim = btb.AnimationController()
35 | anim.pre_frame.add(pre_frame)
36 | anim.post_frame.add(post_frame, off, pub, anim, cam)
37 | anim.play(frame_range=(0, 100), num_episodes=-1)
38 |
39 |
40 | main()
41 |
--------------------------------------------------------------------------------
/examples/datagen/cube_compositor.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/datagen/cube_compositor.blend
--------------------------------------------------------------------------------
/examples/datagen/cube_compositor.blend.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import numpy as np
3 |
4 | import blendtorch.btb as btb
5 |
6 |
7 | def main():
8 | # Parse script arguments passed via blendtorch launcher
9 | btargs, remainder = btb.parse_blendtorch_args()
10 |
11 | cam = bpy.context.scene.camera
12 | cube = bpy.data.objects["Cube"]
13 |
14 | def pre_frame():
15 | # Randomize cube rotation
16 | cube.rotation_euler = np.random.uniform(0, np.pi, size=3)
17 |
18 | def post_frame(off, pub, anim, cam):
19 | # Called every after Blender finished processing a frame.
20 | # Will be sent to one of the remote dataset listener connected.
21 | pub.publish(
22 | image=off.render(), xy=cam.object_to_pixel(cube), frameid=anim.frameid
23 | )
24 |
25 | # Data source
26 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid)
27 |
28 | # Setup default image rendering
29 | cam = btb.Camera()
30 | off = btb.Renderer(btargs.btid, camera=cam, mode="rgb")
31 |
32 | # Setup the animation and run endlessly
33 | anim = btb.AnimationController()
34 | anim.pre_frame.add(pre_frame)
35 | anim.post_frame.add(post_frame, off, pub, anim, cam)
36 | anim.play(frame_range=(0, 100), num_episodes=-1, use_offline_render=False)
37 |
38 |
39 | main()
40 |
--------------------------------------------------------------------------------
/examples/datagen/etc/result_physics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/datagen/etc/result_physics.png
--------------------------------------------------------------------------------
/examples/datagen/falling_cubes.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/datagen/falling_cubes.blend
--------------------------------------------------------------------------------
/examples/datagen/falling_cubes.blend.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import bpy
3 |
4 | import blendtorch.btb as btb
5 |
6 |
7 | def main():
8 | args, remainder = btb.parse_blendtorch_args()
9 | np.random.seed(args.btseed)
10 | cam = bpy.context.scene.camera
11 |
12 | # Random colors for all cubes
13 | cubes = list(bpy.data.collections["Cubes"].objects)
14 | for idx, c in enumerate(cubes):
15 | mat = bpy.data.materials.new(name=f"random{idx}")
16 | mat.diffuse_color = np.concatenate((np.random.random(size=3), [1.0]))
17 | c.data.materials.append(mat)
18 | c.active_material = mat
19 |
20 | def pre_anim():
21 | xyz = np.random.uniform((-3, -3, 6), (3, 3, 12.0), size=(len(cubes), 3))
22 | rot = np.random.uniform(-np.pi, np.pi, size=(len(cubes), 3))
23 | for idx, c in enumerate(cubes):
24 | c.location = xyz[idx]
25 | c.rotation_euler = rot[idx]
26 |
27 | def post_frame(anim, off, pub, cam):
28 | pub.publish(
29 | image=off.render(), xy=cam.object_to_pixel(*cubes), frameid=anim.frameid
30 | )
31 |
32 | pub = btb.DataPublisher(args.btsockets["DATA"], args.btid)
33 |
34 | cam = btb.Camera()
35 | off = btb.OffScreenRenderer(camera=cam, mode="rgb")
36 | off.set_render_style(shading="RENDERED", overlays=False)
37 |
38 | anim = btb.AnimationController()
39 | anim.pre_animation.add(pre_anim)
40 | anim.post_frame.add(post_frame, anim, off, pub, cam)
41 | anim.play(frame_range=(0, 100), num_episodes=-1)
42 |
43 |
44 | main()
45 |
--------------------------------------------------------------------------------
/examples/datagen/generate.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import DataLoader
2 | from contextlib import ExitStack
3 | from pathlib import Path
4 | import matplotlib.pyplot as plt
5 |
6 | import blendtorch.btt as btt
7 |
8 |
9 | def item_transform(item):
10 | item["image"] = btt.colors.gamma(item["image"])
11 | return item
12 |
13 |
14 | def iterate(dl):
15 | DPI = 96
16 | for step, item in enumerate(dl):
17 | img, xy, btid, fid = item["image"], item["xy"], item["btid"], item["frameid"]
18 | print(
19 | f"Received batch from Blender processes {btid.numpy()}, frames {fid.numpy()}"
20 | )
21 | H, W = img.shape[1], img.shape[2]
22 | fig = plt.figure(frameon=False, figsize=(W * 2 / DPI, H * 2 / DPI), dpi=DPI)
23 | axs = [
24 | fig.add_axes([0, 0, 0.5, 0.5]),
25 | fig.add_axes([0.5, 0.0, 0.5, 0.5]),
26 | fig.add_axes([0.0, 0.5, 0.5, 0.5]),
27 | fig.add_axes([0.5, 0.5, 0.5, 0.5]),
28 | ]
29 | for i in range(img.shape[0]):
30 | axs[i].imshow(img[i], origin="upper")
31 | axs[i].scatter(xy[i, :, 0], xy[i, :, 1], s=15)
32 | axs[i].set_axis_off()
33 | axs[i].set_xlim(0, W - 1)
34 | axs[i].set_ylim(H - 1, 0)
35 | fig.savefig(f"./tmp/output_{step}.png")
36 | plt.close(fig)
37 |
38 |
39 | BATCH = 4
40 | BLENDER_INSTANCES = 4
41 | WORKER_INSTANCES = 4
42 |
43 |
44 | def main():
45 | import logging
46 |
47 | logging.basicConfig(level=logging.INFO)
48 |
49 | import argparse
50 |
51 | parser = argparse.ArgumentParser()
52 | parser.add_argument("scene", help="Blender scene name to run")
53 | parser.add_argument(
54 | "--replay",
55 | action="store_true",
56 | help="Replay from disc instead of launching from Blender",
57 | )
58 | parser.add_argument("--record", action="store_true", help="Record raw blender data")
59 | args = parser.parse_args()
60 |
61 | with ExitStack() as es:
62 | if not args.replay:
63 | # Launch Blender instance. Upon exit of this script all Blender instances will be closed.
64 | bl = es.enter_context(
65 | btt.BlenderLauncher(
66 | scene=Path(__file__).parent / f"{args.scene}.blend",
67 | script=Path(__file__).parent / f"{args.scene}.blend.py",
68 | num_instances=BLENDER_INSTANCES,
69 | named_sockets=["DATA"],
70 | )
71 | )
72 |
73 | # Setup a streaming dataset
74 | ds = btt.RemoteIterableDataset(
75 | bl.launch_info.addresses["DATA"], item_transform=item_transform
76 | )
77 | # Iterable datasets do not support shuffle
78 | shuffle = False
79 |
80 | # Limit the total number of streamed elements
81 | ds.stream_length(64)
82 |
83 | # Setup raw recording if desired
84 | if args.record:
85 | ds.enable_recording(f"./tmp/record_{args.scene}")
86 | else:
87 | # Otherwise we replay from file.
88 | ds = btt.FileDataset(
89 | f"./tmp/record_{args.scene}", item_transform=item_transform
90 | )
91 | shuffle = True
92 |
93 | # Setup DataLoader and iterate
94 | dl = DataLoader(
95 | ds, batch_size=BATCH, num_workers=WORKER_INSTANCES, shuffle=shuffle
96 | )
97 | iterate(dl)
98 |
99 |
100 | if __name__ == "__main__":
101 | main()
102 |
--------------------------------------------------------------------------------
/examples/datagen/minimal.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from torch.utils import data
3 |
4 | import blendtorch.btt as btt
5 |
6 |
7 | def main():
8 | # Define how we want to launch Blender
9 | launch_args = dict(
10 | scene=Path(__file__).parent / "cube.blend",
11 | script=Path(__file__).parent / "cube.blend.py",
12 | num_instances=2,
13 | named_sockets=["DATA"],
14 | )
15 |
16 | # Launch Blender
17 | with btt.BlenderLauncher(**launch_args) as bl:
18 | # Create remote dataset and limit max length to 16 elements.
19 | addr = bl.launch_info.addresses["DATA"]
20 | ds = btt.RemoteIterableDataset(addr, max_items=16)
21 | dl = data.DataLoader(ds, batch_size=4, num_workers=4)
22 |
23 | for item in dl:
24 | # item is a dict with custom content (see cube.blend.py)
25 | img, xy = item["image"], item["xy"]
26 | print("Received", img.shape, xy.shape)
27 | # Will get here after 16/BATCH=4 iterations.
28 |
29 |
30 | if __name__ == "__main__":
31 | main()
32 |
--------------------------------------------------------------------------------
/examples/datagen/tmp/__keep__:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/datagen/tmp/__keep__
--------------------------------------------------------------------------------
/examples/densityopt/Readme.md:
--------------------------------------------------------------------------------
1 | ## Density Estimation
2 |
3 | This example demonstrates optimizing simulation parameters towards target distribution parameters by minimizing the expected loss between target and simulation images measured by a learnable discriminator function. In this particular example we assume to be given a probabilistic model + a _non-differentiable_ render function to create synthetic images. Our goal is to find the distributional parametrization that created the given set of target images.
4 |
5 | For this demo, we consider synthetic images of 3D parametric [supershapes](https://en.wikipedia.org/wiki/Superformula). A supershape can be described by 12 parameters and its appearance varies greatly along with the parameters (spheres, cubes, flower-like,...). In particular, the optimization tries to adjust the parameters `m1` and `m2` in such a way that the discriminator is not able to distinguish whether the given image is more likely to come from the simulation distribution or the target distribution. Once the discriminator reaches this state, the optimization parameters have usually converged towards the true target parameters.
6 |
7 | Note that we can frame this example in a GAN framework: the generator consists of the probabilistic model governing the shape parameters plus the rendering function that generates synthetic supershape images. The discriminator is a neural network attempting to distinguish between images of the target distribution and those of the simulation distribution.
8 |
9 | However, there are two **key differences** to GANs:
10 | 1. *non-differentiable renderer*: the render function (Blender) is considered a black-box function whose gradients are not accessible.
11 | 1. *distributional parameters*: the parameters of the generator are distributional parameters, in contrast to GANs, which model them as structural parameters (e.g. the weights of a neural network).
12 |
13 | For both reasons, we frame our optimization as the minimization of an expected discriminator loss, using gradient estimators that do not require the gradients of the render function to be known. We explain our optimization idea in [this article](etc/blendtorch_stochopt.pdf).
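 |
 | The core update is a score-function (log-derivative/REINFORCE) estimator, which only needs samples and their log-probabilities. A minimal, self-contained sketch of this estimator (illustrative only; `black_box_loss`, `mu` and `log_std` are placeholders, and the actual training loop lives in [densityopt.py](./densityopt.py)):
 |
 | ```
 | import torch
 | from torch.distributions import LogNormal
 |
 | # Distributional parameters subject to optimization (mean and log-scale).
 | mu = torch.tensor([1.2, 3.0], requires_grad=True)
 | log_std = torch.log(torch.tensor([0.4, 0.4])).requires_grad_()
 |
 | def black_box_loss(theta):
 |     # Stand-in for rendering with Blender and scoring with the discriminator;
 |     # it is treated as a black box and never differentiated.
 |     with torch.no_grad():
 |         return (theta - 2.5).abs().sum(-1)
 |
 | dist = LogNormal(mu, log_std.exp())
 | theta = dist.sample((64,))            # no gradient flows through sampling/rendering
 | loss = black_box_loss(theta)          # per-sample scalar loss
 | baseline = loss.mean()                # control variate to reduce variance
 | surrogate = (dist.log_prob(theta).sum(-1) * (loss - baseline)).mean()
 | surrogate.backward()                  # gradients end up in mu.grad and log_std.grad
 | ```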
14 |
15 | ### Results
16 |
17 | The image series below shows images from the target distribution (left) and optimization progress as animated GIF (right). In the animation the start solution is marked with a purple border, the final iteration is shown with a green border. Notice how the images on the right side gradually start to look like the shapes of the target distribution on the left.
18 |
19 | ![Samples from the target distribution](etc/real.png)
20 | ![Optimization progress](etc/densityopt.gif)
21 |
22 |
23 | ### Run
24 |
25 | To recreate these results run [densityopt.py](./densityopt.py) as follows
26 | ```
27 | python densityopt.py
28 | ```
29 | which will generate output images in `./tmp/` and output the optimization progress on the console
30 | ```
31 | D step: mean real 0.7307174801826477 mean sim 0.6040560007095337
32 | D step: mean real 0.6737353205680847 mean sim 0.29735732078552246
33 | D step: mean real 0.825548529624939 mean sim 0.2806171178817749
34 | D step: mean real 0.8810040950775146 mean sim 0.1778283715248108
35 | S step: [1.25 2.95] [0.4205084443092346, 0.4205084443092346] mean sim 0.13082975149154663
36 | D step: mean real 0.9086431860923767 mean sim 0.21175092458724976
37 | S step: [1.2831218 2.9007795] [0.4404144883155823, 0.4418575167655945] mean sim 0.14274010062217712
38 | ...
39 | ```
40 | The true parameters are around 2.5/2.5 with a standard deviation of 0.1/0.1.
41 |
42 | ### Dependencies
43 |
44 | Besides **blendtorch**, this example requires [this tiny supershape](https://github.com/cheind/supershape) library to be accessible from within Blender (update `PYTHONPATH` prior to executing the example).
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/examples/densityopt/densityopt.py:
--------------------------------------------------------------------------------
1 | """Demonstrates adapting simulation parameters to match an empirical target distribution.
2 | """
3 |
4 | from pathlib import Path
5 | import numpy as np
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.utils import data
10 | from torch.distributions import LogNormal
11 | import torchvision.utils as vutils
12 |
13 | from blendtorch import btt
14 |
15 | """Batch size"""
16 | BATCH = 64
17 | """Target label at discriminator"""
18 | TARGET_LABEL = 1
19 | """Simulation label at discriminator"""
20 | SIM_LABEL = 0
21 | """Number of Blender instances."""
22 | SIM_INSTANCES = 4
23 | """Long./Lat. log-normal supershape frequency (m1,m2) target mean"""
24 | DEFAULT_MEAN_TARGET = 2.25 # rough sample range: 8.5-10.5
25 | """Long./Lat. log-normal supershape frequency (m1,m2) target standard deviation"""
26 | DEFAULT_STD_TARGET = 0.1
27 | """Baseline (control variate) smoothing factor"""
28 | BASELINE_ALPHA = 0.9
29 |
30 |
31 | class ProbModel(nn.Module):
32 | """Probabilistic model governing supershape parameters.
33 |
34 | In this example, we model the shape m1/m2 as random variables. We assume
35 |     independence and associate a log-normal distribution with each of them. We choose
36 |     log-normals in order to avoid +/- parameter ambiguities that yield the same shape.
37 |
38 | p(m1,m2) = p(m1)p(m2) with
39 | p(m1) = LogNormal(mu_m1, std_m1),
40 | p(m2) = LogNormal(mu_m2, std_m2)
41 |
42 | We consider the mean/scale of each distribution to be parameters subject to
43 | optimization. Note, we model the scale parameter as log-scale to allow
44 | unconstrained (scale > 0) optimization.
45 | """
46 |
47 | def __init__(self, m1m2_mean, m1m2_std):
48 | super().__init__()
49 |
50 | self.m1m2_mean = nn.Parameter(
51 | torch.as_tensor(m1m2_mean).float(), requires_grad=True
52 | )
53 | self.m1m2_log_std = nn.Parameter(
54 | torch.log(torch.as_tensor(m1m2_std).float()), requires_grad=True
55 | )
56 |
57 | def sample(self, n):
58 | """Returns n samples."""
59 | m1, m2 = self.dists
60 | return {
61 | "m1": m1.sample((n,)),
62 | "m2": m2.sample((n,)),
63 | }
64 |
65 | def log_prob(self, samples):
66 | """Returns the joint log-probabilities of the given samples."""
67 | m1, m2 = self.dists
68 | return m1.log_prob(samples["m1"]) + m2.log_prob(samples["m2"])
69 |
70 | @property
71 | def dists(self):
72 | """Returns the parametrized distributions for m1/m2."""
73 | # Creating the distributions always on the fly, otherwise we get
74 | # PyTorch warnings about differentiating a second time.
75 | return (
76 | LogNormal(self.m1m2_mean[0], torch.exp(self.m1m2_log_std[0])),
77 | LogNormal(self.m1m2_mean[1], torch.exp(self.m1m2_log_std[1])),
78 | )
79 |
80 | def readable_params(self):
81 | """Helper function to access params as vector."""
82 | return torch.cat(
83 | [self.m1m2_mean.detach(), torch.exp(self.m1m2_log_std).detach()]
84 | )
85 |
86 | @staticmethod
87 | def to_supershape(samples):
88 | """Converts m1/m2 samples to full supershape parameters.
89 |
90 |         We assume all parameters except for m1/m2 to be fixed in this
91 | example.
92 | """
93 | N = samples["m1"].shape[0]
94 | params = (
95 | samples["m1"]
96 | .new_tensor(
97 | [
98 | [0, 1, 1, 3, 3, 3],
99 | [0, 1, 1, 3, 3, 3],
100 | ]
101 | )
102 | .float()
103 | .view(1, 2, 6)
104 | .repeat(N, 1, 1)
105 | )
106 | params[:, 0, 0] = samples["m1"].detach()
107 | params[:, 1, 0] = samples["m2"].detach()
108 | return params
109 |
110 |
111 | def update_simulations(remote_sims, params):
112 | """Updates all remote simulations with new supershape samples.
113 |
114 | We split N parameter samples into N/R chunks where R is the number of
115 | simulation instances. Besides the parameters, we send subset indices
116 |     to the simulation instances, which will be returned to us along
117 | with the rendered images. The subset indices allow us to associate
118 | parameters with images in the optimization.
119 | """
120 | ids = torch.arange(params.shape[0]).long()
121 | R = len(remote_sims)
122 | for remote, subset, subset_ids in zip(
123 | remote_sims, torch.chunk(params, R), torch.chunk(ids, R)
124 | ):
125 | remote.send(shape_params=subset.cpu().numpy(), shape_ids=subset_ids.numpy())
126 |
127 |
128 | def item_transform(item):
129 | """Transformation applied to each received simulation item.
130 |
131 | Extract the image, normalize it and return it together
132 | with meta-data. In particular we return the `shape_id` which
133 | allows us to associate the received item with a parameter
134 | sample generated earlier.
135 | """
136 | x = item["image"].astype(np.float32)
137 | x = (x - 127.5) / 127.5
138 | return np.transpose(x, (2, 0, 1)), item["shape_id"]
139 |
140 |
141 | def get_target_images(dl, remotes, mu_m1m2, std_m1m2, n):
142 | """Returns a set of images from the target distribution."""
143 | pm = ProbModel(mu_m1m2, std_m1m2)
144 | samples = pm.sample(n)
145 | update_simulations(remotes, ProbModel.to_supershape(samples))
146 | images = []
147 | gen = iter(dl)
148 | for _ in range(n // BATCH):
149 | (img, shape_id) = next(gen)
150 | images.append(img)
151 | return data.TensorDataset(torch.tensor(np.concatenate(images, 0)))
152 |
153 |
154 | def infinite_batch_generator(dl):
155 |     """Generate an infinite number of batches from a dataloader."""
156 | while True:
157 | for d in dl:
158 | yield d
159 |
160 |
161 | class Discriminator(nn.Module):
162 |     """Image discriminator.
163 |
164 | The task of the discriminator is to distinguish images from the target
165 | distribution from those of the simulator distribution. In the beginning
166 | this is easy, as the target distribution is quite narrow, while the
167 |     simulator is producing images of supershapes from a large spectrum. During
168 |     optimization of the simulation parameters the classification of images
169 |     will get continuously harder as the simulation parameters are tuned
170 |     towards the (unknown) target distribution parameters.
171 |
172 | The discriminator weights are trained via backpropagation.
173 | """
174 |
175 | def __init__(self):
176 | super().__init__()
177 | ndf = 32
178 | self.features = nn.Sequential(
179 | # state size. (ndf) x 64 x 64
180 | nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
181 | nn.BatchNorm2d(ndf),
182 | nn.LeakyReLU(0.2, inplace=True),
183 | # state size. (ndf*2) x 32 x 32
184 | nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
185 | nn.BatchNorm2d(ndf * 2),
186 | nn.LeakyReLU(0.2, inplace=True),
187 | # state size. (ndf*4) x 16 x 16
188 | nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
189 | nn.BatchNorm2d(ndf * 4),
190 | nn.LeakyReLU(0.2, inplace=True),
191 | # state size. (ndf*4) x 8 x 8
192 | nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
193 | nn.BatchNorm2d(ndf * 8),
194 | nn.LeakyReLU(0.2, inplace=True),
195 | # state size. (ndf*8) x 4 x 4
196 | nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
197 | nn.Sigmoid(),
198 | )
199 | self.apply(self._weights_init)
200 |
201 | def _weights_init(self, m):
202 | classname = m.__class__.__name__
203 | if classname.find("Conv") != -1:
204 | torch.nn.init.normal_(m.weight, 0.0, 0.02)
205 | elif classname.find("BatchNorm") != -1:
206 | torch.nn.init.normal_(m.weight, 1.0, 0.02)
207 | torch.nn.init.zeros_(m.bias)
208 |
209 | def forward(self, x):
210 | x = self.features(x)
211 | return x.view(-1, 1).squeeze(1)
212 |
213 |
214 | def run(args):
215 |
216 | # Define how we want to launch Blender
217 | launch_args = dict(
218 | scene=Path(__file__).parent / "supershape.blend",
219 | script=Path(__file__).parent / "supershape.blend.py",
220 | num_instances=SIM_INSTANCES,
221 | named_sockets=["DATA", "CTRL"],
222 | )
223 |
224 | # Create an untrained discriminator.
225 | dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
226 | netD = Discriminator().to(dev)
227 |
228 | # Launch Blender
229 | with btt.BlenderLauncher(**launch_args) as bl:
230 | # Create remote dataset
231 | addr = bl.launch_info.addresses["DATA"]
232 | sim_ds = btt.RemoteIterableDataset(addr, item_transform=item_transform)
233 | sim_dl = data.DataLoader(sim_ds, batch_size=BATCH, num_workers=0, shuffle=False)
234 |
235 | # Create a control channel to each Blender instance. We use this channel to
236 | # communicate new shape parameters to be rendered.
237 | addr = bl.launch_info.addresses["CTRL"]
238 | remotes = [btt.DuplexChannel(a) for a in addr]
239 |
240 | # Fetch images of the target distribution. In the following we assume the
241 | # target distribution to be unknown.
242 | if args.random_start:
243 | mu_m1m2_target = np.random.uniform(0.0, 3, size=2).astype(np.float32)
244 | else:
245 | mu_m1m2_target = [DEFAULT_MEAN_TARGET, DEFAULT_MEAN_TARGET]
246 | std_m1m2_target = [DEFAULT_STD_TARGET, DEFAULT_STD_TARGET]
247 | print("Target params:", mu_m1m2_target, std_m1m2_target)
248 |
249 | target_ds = get_target_images(
250 | sim_dl, remotes, mu_m1m2_target, std_m1m2_target, n=BATCH
251 | )
252 | target_dl = data.DataLoader(
253 | target_ds, batch_size=BATCH, num_workers=0, shuffle=True
254 | )
255 |
256 | # Initial simulation parameters. The parameters in mean and std are off from the target
257 | # distribution parameters. Note that we especially enlarge the scale of the initial
258 | # distribution to get explorative behaviour in the beginning.
259 | if args.random_start:
260 | mu_m1m2 = np.asarray(mu_m1m2_target) + np.random.randn(2)
261 | else:
262 | mu_m1m2 = [1.2, 3.0]
263 | std_m1m2 = [0.4, 0.4]
264 | pm = ProbModel(mu_m1m2, std_m1m2)
265 |
266 | # Setup discriminator and simulation optimizer
267 | optD = optim.Adam(netD.parameters(), lr=5e-5, betas=(0.5, 0.999))
268 | optS = optim.Adam(pm.parameters(), lr=5e-2, betas=(0.7, 0.999))
269 |
270 | # Get generators for image batches from target and simulation.
271 | gen_real = infinite_batch_generator(target_dl)
272 | gen_sim = infinite_batch_generator(sim_dl)
273 | crit = nn.BCELoss(reduction="none")
274 |
275 | epoch = 0
276 | b = 0.7 # Baseline to reduce variance of gradient estimator.
277 | first = True
278 | param_history = []
279 |
280 | # Send instructions to render supershapes from the starting point.
281 | samples = pm.sample(BATCH)
282 | update_simulations(remotes, pm.to_supershape(samples))
283 | for (real, sim) in zip(gen_real, gen_sim):
284 | # Train the discriminator from target and simulation images.
285 | label = torch.full((BATCH,), TARGET_LABEL, dtype=torch.float32, device=dev)
286 | netD.zero_grad()
287 | target_img = real[0].to(dev)
288 | output = netD(target_img)
289 | errD_real = crit(output, label)
290 | errD_real.mean().backward()
291 | D_real = output.mean().item()
292 |
293 | sim_img, sim_shape_id = sim
294 | sim_img = sim_img.to(dev)
295 | label.fill_(SIM_LABEL)
296 | output = netD(sim_img)
297 | errD_sim = crit(output, label)
298 | errD_sim.mean().backward()
299 | D_sim = output.mean().item()
300 | if (D_real - D_sim) < 0.7:
301 | optD.step()
302 | print("D step: mean real", D_real, "mean sim", D_sim)
303 |
304 | # Optimize the simulation parameters.
305 | # We update the simulation parameters once the discriminator
306 |             # has started to converge. Note that unlike GANs, the generator
307 | # (simulation) is giving meaningful output from the very beginning, so we
308 | # give the discriminator some time to adjust and avoid spurious signals
309 | # in gradient estimation of the simulation parameters.
310 | #
311 | # Note, the rendering function is considered a black-box and we cannot
312 |             # propagate through it. Therefore we reformulate the optimization as the
313 |             # minimization of an expectation, where the parameters to be optimized appear in
314 |             # the distribution the expectation is taken over. Using score-function gradients permits gradient
315 | # based optimization _without_ access to gradients of the render function.
316 | if not first or (D_real - D_sim) >= 0.7:
317 | optS.zero_grad()
318 | label.fill_(TARGET_LABEL)
319 | with torch.no_grad():
320 | output = netD(sim_img)
321 | errS_sim = crit(output, label)
322 | GD_sim = output.mean().item()
323 |
324 | log_probs = pm.log_prob(samples)
325 | loss = log_probs[sim_shape_id] * (errS_sim.cpu() - b)
326 | loss.mean().backward()
327 | optS.step()
328 |
329 | if first:
330 | b = errS_sim.mean()
331 | else:
332 | b = BASELINE_ALPHA * errS_sim.mean() + (1 - BASELINE_ALPHA) * b
333 |
334 | print(
335 | "S step:",
336 | pm.m1m2_mean.detach().numpy(),
337 | torch.exp(pm.m1m2_log_std).detach().numpy(),
338 | "mean sim",
339 | GD_sim,
340 | )
341 | first = False
342 | del log_probs, loss
343 |
344 | # Generate shapes/images according to updated parameters.
345 | samples = pm.sample(BATCH)
346 | update_simulations(remotes, pm.to_supershape(samples))
347 |
348 | # Bookkeeping
349 | param_history.append(pm.readable_params())
350 | epoch += 1
351 | if epoch % 5 == 0:
352 | vutils.save_image(
353 | target_img, "tmp/real_%03d.png" % (epoch), normalize=True
354 | )
355 | vutils.save_image(
356 | sim_img, "tmp/sim_samples_%03d.png" % (epoch), normalize=True
357 | )
358 | if epoch > args.num_epochs:
359 | # Append true target
360 | target = torch.tensor(
361 | np.concatenate((mu_m1m2_target, std_m1m2_target))
362 | ).float()
363 | print("Abs.Diff to true params", abs(target - param_history[-1]))
364 | param_history.append(target)
365 | break
366 |
367 | param_history = torch.stack(param_history)
368 | return param_history
369 |
370 |
371 | if __name__ == "__main__":
372 | import argparse
373 | import time
374 |
375 | parser = argparse.ArgumentParser()
376 | parser.add_argument("--random-start", action="store_true")
377 | parser.add_argument("--num-runs", default=1, type=int)
378 | parser.add_argument("--num-epochs", default=70, type=int)
379 | args = parser.parse_args()
380 |
381 | timestr = time.strftime("%Y%m%d_%H%M%S")
382 | for i in range(args.num_runs):
383 | param_history = run(args)
384 | np.savetxt(
385 | f"tmp/run_{timestr}_{i:02d}_densityopt.txt",
386 | param_history,
387 |             header="mu_m1, mu_m2, std_m1, std_m2",
388 |             footer="last entry corresponds to target params",
389 | )
390 |
--------------------------------------------------------------------------------
/examples/densityopt/etc/blendtorch_stochopt.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/blendtorch_stochopt.pdf
--------------------------------------------------------------------------------
/examples/densityopt/etc/densityopt.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/densityopt.gif
--------------------------------------------------------------------------------
/examples/densityopt/etc/real.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/real.png
--------------------------------------------------------------------------------
/examples/densityopt/etc/sim_samples_010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/sim_samples_010.png
--------------------------------------------------------------------------------
/examples/densityopt/etc/sim_samples_020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/sim_samples_020.png
--------------------------------------------------------------------------------
/examples/densityopt/etc/sim_samples_030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/sim_samples_030.png
--------------------------------------------------------------------------------
/examples/densityopt/etc/sim_samples_040.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/sim_samples_040.png
--------------------------------------------------------------------------------
/examples/densityopt/etc/sim_samples_050.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/sim_samples_050.png
--------------------------------------------------------------------------------
/examples/densityopt/etc/sim_samples_060.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/sim_samples_060.png
--------------------------------------------------------------------------------
/examples/densityopt/etc/sim_samples_070.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/sim_samples_070.png
--------------------------------------------------------------------------------
/examples/densityopt/etc/sim_samples_080.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/etc/sim_samples_080.png
--------------------------------------------------------------------------------
/examples/densityopt/supershape.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/supershape.blend
--------------------------------------------------------------------------------
/examples/densityopt/supershape.blend.py:
--------------------------------------------------------------------------------
1 | import bpy # noqa
2 | from blendtorch import btb
3 |
4 | # See https://github.com/cheind/supershape and this example's readme.
5 | import supershape as sshape
6 |
7 |
8 | def generate_supershape(msg, shape=(100, 100)):
9 | for params, shape_id in zip(msg["shape_params"], msg["shape_ids"]):
10 | yield (params, shape_id, sshape.supercoords(params, shape=shape))
11 |
12 |
13 | def main():
14 | btargs, remainder = btb.parse_blendtorch_args()
15 |
16 | uvshape = (100, 100)
17 | obj = sshape.make_bpy_mesh(uvshape)
18 | idx = None
19 | coords = None
20 | params = None
21 | gen = None
22 |
23 | def pre_frame(duplex):
24 | nonlocal gen, params, coords, idx
25 | msg = duplex.recv(timeoutms=0)
26 | if msg is not None:
27 | gen = generate_supershape(msg, shape=uvshape)
28 | if gen is not None:
29 | try:
30 | params, idx, coords = next(gen)
31 | sshape.update_bpy_mesh(*coords, obj)
32 | except StopIteration:
33 | gen = None
34 |
35 | def post_frame(off, pub):
36 | if gen is not None:
37 | pub.publish(image=off.render(), shape_id=idx)
38 |
39 | # Data source
40 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid)
41 | duplex = btb.DuplexChannel(btargs.btsockets["CTRL"], btargs.btid)
42 |
43 | # Setup default image rendering
44 | cam = btb.Camera()
45 | off = btb.OffScreenRenderer(camera=cam, mode="rgb")
46 | off.set_render_style(shading="SOLID", overlays=False)
47 |
48 | # Setup the animation and run endlessly
49 | anim = btb.AnimationController()
50 | anim.pre_frame.add(pre_frame, duplex)
51 | anim.post_frame.add(post_frame, off, pub)
52 | anim.play(frame_range=(0, 10000), num_episodes=-1)
53 |
54 |
55 | main()
56 |
--------------------------------------------------------------------------------
/examples/densityopt/tmp/__keep__:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/examples/densityopt/tmp/__keep__
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | from .animation import AnimationController
3 | from .offscreen import OffScreenRenderer
4 | from .renderer import CompositeRenderer, CompositeSelection
5 | from .arguments import parse_blendtorch_args
6 | from .paths import add_scene_dir_to_path
7 | from .publisher import DataPublisher
8 | from .camera import Camera
9 | from .duplex import DuplexChannel
10 | from . import env, utils, materials
11 | from .version import __version__
12 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/animation.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import bpy
3 | from contextlib import contextmanager
4 |
5 | from .signal import Signal
6 | from .utils import find_first_view3d
7 |
8 |
9 | class AnimationController:
10 | """Provides an interface to Blender's animation system with fine-grained callbacks.
11 |
12 | To play nice with Blender, blendtorch provides a callback based class for interacting
13 |     with Blender's animation and rendering system. The usual way to interact with
14 | this class is through an object of AnimationController. Depending on the required
15 | callbacks, one or more signals are connected to Python functions.
16 | `AnimationController.play` starts the animation loop.
17 |
18 | By default `AnimationController.play` is non-blocking and therefore requires a
19 |     non-background instance of Blender. In case `--background` is required,
20 |     `AnimationController.play` also supports a blocking animation loop variant. In blocking
21 | execution, offscreen rendering works but may crash Blender once the loop is exited (2.83.2),
22 | and is therefore not recommended when image data is required.
23 |
24 | `AnimationController` exposes the following signals
25 | - pre_play() invoked before playing starts
26 | - pre_animation() invoked before first frame of animation range is processed
27 | - pre_frame() invoked before a frame begins
28 | - post_frame() invoked after a frame is finished
29 | - post_animation() invoked after the last animation frame has completed
30 | - post_play() invoked after playing ends
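 |
 |     A minimal usage sketch (assuming a Blender-side script launched via
 |     `btt.BlenderLauncher`; `off` and `pub` denote an `OffScreenRenderer` and a
 |     `DataPublisher` created elsewhere):
 |
 |         def post_frame(off, pub):
 |             pub.publish(image=off.render())
 |
 |         anim = AnimationController()
 |         anim.post_frame.add(post_frame, off, pub)
 |         anim.play(frame_range=(0, 100), num_episodes=-1)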
31 | """
32 |
33 | def __init__(self):
34 | """Create a new instance."""
35 | self.pre_animation = Signal()
36 | self.pre_frame = Signal()
37 | self.post_frame = Signal()
38 | self.post_animation = Signal()
39 | self.pre_play = Signal()
40 | self.post_play = Signal()
41 | self._plyctx = None
42 |
43 | class _PlayContext:
44 |         """Internal bookkeeping of animation variables."""
45 |
46 | def __init__(
47 | self, frame_range, num_episodes, use_animation, use_offline_render
48 | ):
49 | self.frame_range = frame_range
50 | self.use_animation = use_animation
51 | self.use_offline_render = use_offline_render
52 | self.episode = 0
53 | self.num_episodes = num_episodes
54 | self.pending_post_frame = False
55 | self.draw_handler = None
56 | self.draw_space = None
57 | self.last_post_frame = 0
58 | self._allow_events = True
59 |
60 | def skip_post_frame(self, current_frame):
61 | return (
62 | not self.allow_events
63 | or not self.pending_post_frame
64 | or self.last_post_frame == current_frame
65 | or (
66 | self.use_animation
67 | and self.use_offline_render
68 | and bpy.context.space_data != self.draw_space
69 | )
70 | )
71 |
72 | @contextmanager
73 | def disable_events(self):
74 | old = self._allow_events
75 | self._allow_events = False
76 | yield
77 | self._allow_events = old
78 |
79 | @contextmanager
80 | def enable_events(self):
81 | old = self._allow_events
82 | self._allow_events = True
83 | yield
84 | self._allow_events = old
85 |
86 | @property
87 | def allow_events(self):
88 | return self._allow_events
89 |
90 | @property
91 | def frameid(self):
92 | """Returns the current frame id."""
93 | return bpy.context.scene.frame_current
94 |
95 | def play(
96 | self,
97 | frame_range=None,
98 | num_episodes=-1,
99 | use_animation=True,
100 | use_offline_render=True,
101 | use_physics=True,
102 | ):
103 | """Start the animation loop.
104 |
105 | Params
106 | ------
107 | frame_range: tuple
108 | Start and end of frame range to play. Note that start and end are inclusive.
109 | num_episodes: int
110 | The number of loops to play. -1 loops forever.
111 | use_animation: bool
112 | Whether to use Blender's non-blocking animation system or use a blocking variant.
113 |             By default True. When True, allows the Blender UI to refresh and be responsive. The animation
114 |             will run at the target FPS. When False, the Blender UI does not refresh and the animation
115 |             runs as fast as it can.
116 | use_offline_render: bool
117 | Whether offline rendering should be supported. By default True. When True, calls to
118 | `OffscreenRenderer` are safe inside the `post_frame` callback.
119 | use_physics: bool
120 | Whether physics should be enabled. Default is True. When True, sets the simulation range
121 | to match the animation frame range.
122 | """
123 | assert self._plyctx is None, "Animation already running"
124 |
125 | self._plyctx = AnimationController._PlayContext(
126 | frame_range=AnimationController.setup_frame_range(
127 | frame_range, physics=use_physics
128 | ),
129 | num_episodes=(num_episodes if num_episodes >= 0 else sys.maxsize),
130 | use_animation=use_animation,
131 | use_offline_render=use_offline_render,
132 | )
133 |
134 | if use_animation:
135 | self._play_animation()
136 | else:
137 | self._play_manual()
138 |
139 | @staticmethod
140 | def setup_frame_range(frame_range, physics=True):
141 | """Setup the animation and physics frame range.
142 |
143 | Params
144 | ------
145 | frame_range: tuple
146 | Start and end (inclusive) frame range to be animated.
147 |             Can be None, in which case the scene's frame range is used.
148 | physics: bool
149 | Whether or not to apply the frame range settings to the rigid body
150 | simulation.
151 |
152 | Returns
153 | -------
154 | frame_range: tuple
155 | the updated frame range.
156 | """
157 |
158 | if frame_range is None:
159 | frame_range = (bpy.context.scene.frame_start, bpy.context.scene.frame_end)
160 | bpy.context.scene.frame_start = frame_range[0]
161 | bpy.context.scene.frame_end = frame_range[1]
162 | if physics and bpy.context.scene.rigidbody_world:
163 | bpy.context.scene.rigidbody_world.point_cache.frame_start = frame_range[0]
164 | bpy.context.scene.rigidbody_world.point_cache.frame_end = frame_range[1]
165 | return frame_range
166 |
167 | def _play_animation(self):
168 | """Setup and start Blender animation loop."""
169 | with self._plyctx.disable_events():
170 | self.pre_play.invoke()
171 | bpy.app.handlers.frame_change_pre.append(self._on_pre_frame)
172 | if self._plyctx.use_offline_render:
173 |                 # To be safe, we need to draw from `POST_PIXEL`, not `frame_change_post`.
174 |                 # However `POST_PIXEL` might be called more than once per frame. We therefore
175 |                 # set and release `pending_post_frame` to match things up.
176 | _, self._plyctx.draw_space, _ = find_first_view3d()
177 | self._plyctx.draw_handler = bpy.types.SpaceView3D.draw_handler_add(
178 | self._on_post_frame, (), "WINDOW", "POST_PIXEL"
179 | )
180 | else:
181 | bpy.app.handlers.frame_change_post.append(self._on_post_frame)
182 | # Set to first frame.
183 | bpy.context.scene.frame_set(self._plyctx.frame_range[0])
184 |             # The following does not block. Note, in --background mode this does nothing.
185 | bpy.ops.screen.animation_play()
186 |
187 | def _play_manual(self):
188 | """Setup and start blocking animation loop."""
189 | with self._plyctx.disable_events():
190 | self.pre_play.invoke()
191 | bpy.app.handlers.frame_change_pre.append(self._on_pre_frame)
192 | bpy.app.handlers.frame_change_post.append(self._on_post_frame)
193 |
194 | while self._plyctx.episode < self._plyctx.num_episodes:
195 | bpy.context.scene.frame_set(self._plyctx.frame_range[0])
196 | while self.frameid < self._plyctx.frame_range[1]:
197 | bpy.context.scene.frame_set(self.frameid + 1)
198 | if (
199 | self._plyctx is None
200 | ): # The above frame_set might have called _cancel,
201 | return # which in turn deletes _plyctx
202 |
203 | def rewind(self):
204 | """Request resetting the animation to first frame."""
205 | if self._plyctx is not None:
206 | self._set_frame(self._plyctx.frame_range[0])
207 |
208 | def _set_frame(self, frame_index):
209 | """Step to a specific frame."""
210 | with self._plyctx.enable_events(): # needed for env support?
211 | bpy.context.scene.frame_set(frame_index)
212 |
213 | def _on_pre_frame(self, scene, *args):
214 | """Handle pre-frame events internally."""
215 | if not self._plyctx.allow_events:
216 | return
217 |
218 | pre_first = self.frameid == self._plyctx.frame_range[0]
219 |
220 | with self._plyctx.disable_events():
221 | if pre_first:
222 | self.pre_animation.invoke()
223 | self.pre_frame.invoke()
224 | # The following guards us from multiple calls to `_on_post_frame`
225 | # when we hooked into `POST_PIXEL`
226 | self._plyctx.pending_post_frame = True
227 |
228 | def _on_post_frame(self, *args):
229 | """Handle post-frame events internally."""
230 | if self._plyctx.skip_post_frame(self.frameid):
231 | return
232 | self._plyctx.pending_post_frame = False
233 | self._plyctx.last_post_frame = self.frameid
234 |
235 | with self._plyctx.disable_events():
236 | self.post_frame.invoke()
237 | post_last = self.frameid == self._plyctx.frame_range[1]
238 | if post_last:
239 | self.post_animation.invoke()
240 | self._plyctx.episode += 1
241 | if self._plyctx.episode == self._plyctx.num_episodes:
242 | self._cancel()
243 |
244 | def _cancel(self):
245 | """Stop the animation."""
246 | bpy.app.handlers.frame_change_pre.remove(self._on_pre_frame)
247 | if self._plyctx.draw_handler is not None:
248 | bpy.types.SpaceView3D.draw_handler_remove(
249 | self._plyctx.draw_handler, "WINDOW"
250 | )
251 | self._plyctx.draw_handler = None
252 | else:
253 | bpy.app.handlers.frame_change_post.remove(self._on_post_frame)
254 | bpy.ops.screen.animation_cancel(restore_frame=False)
255 | self.post_play.invoke()
256 | del self._plyctx
257 | self._plyctx = None
258 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/arguments.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import argparse
3 |
4 |
5 | def parse_blendtorch_args(argv=None):
6 | """Parses blendtorch instance parameters and returns the remainder arguments.
7 |
8 | This method is intended to be used with Blender instances launched via
9 | `btt.BlenderLauncher`. It parses specific command line arguments that
10 | - identify the Blender process `btid`,
11 | - provide a random number seed `btseed`, and
12 | - lists a number of named socket addresses to connect to.
13 |
14 |     This function parses command-line arguments that appear after the special
15 |     end-of-command-line marker `--`.
16 |
17 | Params
18 | ------
19 | argv: list-like, None
20 | The command line arguments to be parsed.
21 |
22 | Returns
23 | -------
24 | args: argparse.Namespace
25 | The parsed command line arguments
26 | remainder: list-like
27 | The remaining command line arguments.
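 |
 |     Example
 |     -------
 |     Parsing an illustrative argument vector (the socket address below is made up;
 |     real values are generated by `btt.BlenderLauncher`):
 |
 |         argv = ["--", "-btid", "0", "-btseed", "42", "-btsockets", "DATA=tcp://localhost:11000"]
 |         args, remainder = parse_blendtorch_args(argv)
 |         # args.btid == 0, args.btsockets == {"DATA": "tcp://localhost:11000"}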
28 | """
29 | argv = argv or sys.argv
30 | if "--" in argv:
31 | argv = argv[argv.index("--") + 1 :] # noqa
32 | else:
33 | raise ValueError("No script arguments found; missing `--`?")
34 |
35 | def addrsplit(x):
36 | return tuple(x.split("="))
37 |
38 | parser = argparse.ArgumentParser()
39 | parser.add_argument("-btid", type=int, help="Identifier for this Blender instance")
40 | parser.add_argument("-btseed", type=int, help="Random number seed")
41 | parser.add_argument(
42 | "-btsockets",
43 | metavar="NAME=ADDRESS",
44 | nargs="*",
45 | type=addrsplit,
46 | help="Set a number of named address pairs.",
47 | )
48 | args, remainder = parser.parse_known_args(argv)
49 | args.btsockets = dict(args.btsockets)
50 | return args, remainder
51 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/camera.py:
--------------------------------------------------------------------------------
1 | """Provides helper functions to deal with Blender cameras."""
2 | import bpy # noqa
3 | from mathutils import Vector # noqa
4 | import numpy as np
5 |
6 | from . import utils
7 |
8 |
9 | class Camera:
10 | """Camera related settings and functions.
11 |
12 | An instance of `Camera` is a shallow wrapper around `bpy.types.Camera`
13 | that provides additional convenience functions as well as intrinsic
14 | and extrinsic parameters. `Camera` is mainly to be used together with
15 | `btb.OffScreenRenderer` to create scene renderings and to convert world
16 | coordinates to pixel coordinates and linear depth measurements.
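 |
 |     A minimal usage sketch (assumes the scene contains an object named "Cube";
 |     the name is only illustrative):
 |
 |         cam = Camera()
 |         xy, z = cam.object_to_pixel(bpy.data.objects["Cube"], return_depth=True)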
17 | """
18 |
19 | def __init__(self, bpy_camera=None, shape=None):
20 | """Initialize camera object
21 |
22 | Params
23 | ------
24 | bpy_camera: bpy.types.Camera, None
25 |             Blender camera to attach to. When None, uses the scene's
26 | default camera.
27 | shape: tuple, None
28 | (H,W) of image to create. When None, uses the default
29 | render settings.
30 | """
31 | self.bpy_camera = bpy_camera or bpy.context.scene.camera
32 | self.shape = shape or Camera.shape_from_bpy()
33 | self.view_matrix = Camera.view_from_bpy(self.bpy_camera)
34 | self.proj_matrix = Camera.proj_from_bpy(self.bpy_camera, self.shape)
35 |
36 | def update_view_matrix(self):
37 | """Update the view matrix of the camera."""
38 | self.view_matrix = Camera.view_from_bpy(self.bpy_camera)
39 |
40 | def update_proj_matrix(self):
41 | """Update the projection matrix of the camera."""
42 | self.proj_matrix = Camera.proj_from_bpy(self.bpy_camera, self.shape)
43 |
44 | @property
45 | def type(self):
46 | """Returns the Blender type of this camera."""
47 | return self.bpy_camera.type
48 |
49 | @property
50 | def clip_range(self):
51 | """Returns the camera clip range."""
52 | return (self.bpy_camera.data.clip_start, self.bpy_camera.data.clip_end)
53 |
54 | @staticmethod
55 | def shape_from_bpy(bpy_render=None):
56 | """Returns the image shape as (HxW) from the given render settings."""
57 | render = bpy_render or bpy.context.scene.render
58 | scale = render.resolution_percentage / 100.0
59 | shape = (int(render.resolution_y * scale), int(render.resolution_x * scale))
60 | return shape
61 |
62 | @staticmethod
63 | def view_from_bpy(bpy_camera):
64 | """Returns 4x4 view matrix from the specified Blender camera."""
65 | camera = bpy_camera or bpy.context.scene.camera
66 | return camera.matrix_world.normalized().inverted()
67 |
68 | @staticmethod
69 | def proj_from_bpy(bpy_camera, shape):
70 | """Returns 4x4 projection matrix from the specified Blender camera."""
71 | camera = bpy_camera or bpy.context.scene.camera
72 | shape = shape or Camera.shape_from_bpy()
73 | return camera.calc_matrix_camera(
74 | bpy.context.evaluated_depsgraph_get(), x=shape[1], y=shape[0]
75 | )
76 |
77 | def world_to_ndc(self, xyz_world, return_depth=False):
78 | """Returns normalized device coordinates (NDC) and optionally linear depth for the given world coordinates.
79 |
80 | Params
81 | ------
82 | xyz_world: Nx3 array
83 | World coordinates given as numpy compatible array.
84 | return_depth: bool
85 | Whether or not to return depths w.r.t camera frame.
86 |
87 | Returns
88 | -------
89 | ndc: Nx3 array
90 | Normalized device coordinates.
91 | z: N array
92 | Linear depth in camera space. Returned when `return_depth`
93 | is True.
94 | """
95 |
96 | xyz = np.atleast_2d(xyz_world)
97 | xyzw = utils.hom(xyz, 1.0)
98 | if return_depth:
99 | xyzw = xyzw @ np.asarray(self.view_matrix).T
100 | d = -xyzw[:, -2].copy()
101 | xyzw = xyzw @ np.asarray(self.proj_matrix).T
102 | return utils.dehom(xyzw), d
103 | else:
104 | m = np.asarray(self.proj_matrix @ self.view_matrix)
105 | return utils.dehom(xyzw @ m.T)
106 |
107 | def ndc_to_pixel(self, ndc, origin="upper-left"):
108 | """Converts NDC coordinates to pixel values
109 |
110 | Params
111 | ------
112 | ndc: Nx3 array
113 | Normalized device coordinates.
114 | origin: str
115 |             Pixel coordinate origin. Supported values are `upper-left` (OpenCV) and `lower-left` (OpenGL)
116 |
117 | Returns
118 | -------
119 | xy: Nx2 array
120 | Camera pixel coordinates
121 | """
122 | assert origin in ["upper-left", "lower-left"]
123 | h, w = self.shape
124 | xy = np.atleast_2d(ndc)[:, :2]
125 | xy = (xy + 1) * 0.5
126 | if origin == "upper-left":
127 | xy[:, 1] = 1.0 - xy[:, 1]
128 | return xy * np.array([[w, h]])
129 |
130 | def object_to_pixel(self, *objs, return_depth=False):
131 | """Convenience composition of `ndc_to_pixel(world_to_ndc(utils.world_coordinates(*objs)))`
132 |
133 | Params
134 | ------
135 | objs: array of bpy.types.Object
136 | Collection of objects whose vertices to convert to camera pixel coordinates.
137 | return_depth: bool
138 | When True, returns the linear depth in camera space of each coordinate.
139 |
140 | Returns
141 | -------
142 | xy : Mx2 array
143 |             Concatenated list of object vertex coordinates expressed in camera pixels.
144 | z: array, optional
145 | Linear depth in camera space when `return_depth==True`
146 | """
147 | if return_depth:
148 | ndc, z = self.world_to_ndc(
149 | utils.world_coordinates(*objs), return_depth=True
150 | )
151 | px = self.ndc_to_pixel(ndc)
152 | return px, z
153 | else:
154 | ndc = self.world_to_ndc(utils.world_coordinates(*objs))
155 | px = self.ndc_to_pixel(ndc)
156 | return px
157 |
158 | def bbox_object_to_pixel(self, *objs, return_depth=False):
159 | """Convenience composition of `ndc_to_pixel(world_to_ndc(utils.bbox_world_coordinates(*objs)))`
160 |
161 | Params
162 | ------
163 | objs: array of bpy.types.Object
164 | Collection of objects whose vertices to convert to camera pixel coordinates.
165 | return_depth: bool
166 | When True, returns the linear depth in camera space of each coordinate.
167 |
168 | Returns
169 | -------
170 | xy : Mx2 array
171 |             Concatenated list of object vertex coordinates expressed in camera pixels.
172 | z: array, optional
173 | Linear depth in camera space when `return_depth==True`
174 | """
175 | if return_depth:
176 | ndc, z = self.world_to_ndc(
177 | utils.bbox_world_coordinates(*objs), return_depth=True
178 | )
179 | px = self.ndc_to_pixel(ndc)
180 | return px, z
181 | else:
182 | ndc = self.world_to_ndc(utils.bbox_world_coordinates(*objs))
183 | px = self.ndc_to_pixel(ndc)
184 | return px
185 |
186 | def look_at(self, look_at=None, look_from=None):
187 | """Helper function to look at specific location."""
188 | if look_from is None:
189 | look_from = self.bpy_camera.location
190 | if look_at is None:
191 | look_at = Vector([0, 0, 0])
192 |
193 | direction = Vector(look_at) - Vector(look_from)
194 | # point the cameras '-Z' and use its 'Y' as up
195 | rot_quat = direction.to_track_quat("-Z", "Y")
196 | self.bpy_camera.rotation_euler = rot_quat.to_euler()
197 | self.bpy_camera.location = look_from
198 | # bpy.context.evaluated_depsgraph_get().update()
199 | self.update_view_matrix()
200 |
201 | def set_as_active_camera(self, scene=None):
202 | """Make this camera the active scene camera."""
203 | scene = scene or bpy.context.scene
204 | scene.camera = self.bpy_camera
205 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/constants.py:
--------------------------------------------------------------------------------
1 | """Commonly used constants."""
2 |
3 | """Default socket timeout 5 sec."""
4 | DEFAULT_TIMEOUTMS = 5000
5 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/duplex.py:
--------------------------------------------------------------------------------
1 | import zmq
2 | import os
3 | import sys
4 |
5 | from .constants import DEFAULT_TIMEOUTMS
6 |
7 |
8 | class DuplexChannel:
9 | """Provides generic bidirectional communication with a single PyTorch instance."""
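 |
 |     # Illustrative usage from a Blender-side script, where `btargs` is the result
 |     # of btb.parse_blendtorch_args():
 |     #   duplex = DuplexChannel(btargs.btsockets["CTRL"], btargs.btid)
 |     #   msg = duplex.recv(timeoutms=0)  # None when no message is pending
 |     #   duplex.send(status="ready")     # keyword arguments form the message dict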
10 |
11 | def __init__(self, address, btid=None, lingerms=0, timeoutms=DEFAULT_TIMEOUTMS):
12 | self.ctx = zmq.Context()
13 | self.sock = self.ctx.socket(zmq.PAIR)
14 | self.sock.setsockopt(zmq.LINGER, lingerms)
15 | self.sock.setsockopt(zmq.RCVHWM, 10)
16 | self.sock.setsockopt(zmq.SNDHWM, 10)
17 | self.sock.setsockopt(zmq.SNDTIMEO, timeoutms)
18 | self.sock.setsockopt(zmq.RCVTIMEO, timeoutms)
19 | self.sock.bind(address)
20 |
21 | self.poller = zmq.Poller()
22 | self.poller.register(self.sock, zmq.POLLIN)
23 | self.btid = btid
24 |
25 | def recv(self, timeoutms=None):
26 | """Return next message or None.
27 |
28 | Kwargs
29 | ------
30 | timeoutms: int
31 | Max time to spend waiting for messages. If None, blocks until
32 | at least one message is available.
33 |
34 | Returns
35 | -------
36 | msg: dict, None
37 | Message received or None.
38 | """
39 | socks = dict(self.poller.poll(timeoutms))
40 | if self.sock in socks:
41 | return self.sock.recv_pyobj()
42 | else:
43 | return None
44 |
45 | def send(self, **kwargs):
46 | """Send a message to remote Blender process.
47 |
48 | Automatically attaches the process identifier `btid` and
49 | a unique message id `btmid` to the dictionary.
50 |
51 | Params
52 | ------
53 | kwargs: dict
54 | Message to send.
55 |
56 | Returns
57 | -------
58 | messageid: integer
59 | Message id attached to dictionary
60 | """
61 | mid = int.from_bytes(os.urandom(4), sys.byteorder)
62 | data = {"btid": self.btid, "btmid": mid, **kwargs}
63 | self.sock.send_pyobj(data)
64 | return mid
65 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/env.py:
--------------------------------------------------------------------------------
1 | import zmq
2 | import numpy as np
3 |
4 | from .animation import AnimationController
5 | from .offscreen import OffScreenRenderer
6 | from .constants import DEFAULT_TIMEOUTMS
7 | from .camera import Camera
8 |
9 |
10 | class BaseEnv:
11 | """Abstract base class for environments to be interacted with by agents.
12 |
13 | This class is what `gym.Env` is to OpenAI gym: it defines the basic interface
14 | required to be implemented by all Blender environments.
15 |
16 | Blender defines a callback based animation system. This also affects `BaseEnv` in that it
17 |     requires the agent to be given by a callable method having the following signature:
18 | cmd, action = agent(env, **kwargs)
19 | The arguments passed via **kwargs are at least `obs`, `reward`, `done`. Other variables
20 | correspond to additional information (`info` dict in OpenAI). The agent is expected to
21 | return a command (BaseEnv.CMD_RESTART or BaseEnv.CMD_STEP) with an optional `action`
22 | to perform. Note, `action`, `obs`, `reward` and `info` depend on the actual environment
23 | implementation.
24 |
25 |     Since a callback based agent is unusual for OpenAI Gym users, blendtorch offers a
26 |     `RemoteControlledAgent` that communicates with a remotely implemented agent. The remote
27 |     agent can then be implemented with the common blocking calls `agent.step()`, `agent.restart()`.
28 | See `blendtorch.btt` for details.
29 |
30 |     Each environment inheriting from `BaseEnv` needs to implement the following three methods
31 |     - `BaseEnv._env_reset()` to reset the environment state to its initial value.
32 |     - `BaseEnv._env_prepare_step(action)` to apply an action in a pre-frame manner.
33 |     - `BaseEnv._env_post_step()` to gather the environment state, reward and other variables
34 |     after the frame has completed (i.e. after physics and animation have computed their values).
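 |
 |     For illustration, a trivial callable agent compatible with the signature above
 |     (a sketch only; in practice agents are typically provided via `RemoteControlledAgent`):
 |
 |         def agent(env, obs=None, reward=None, done=False, **info):
 |             return BaseEnv.CMD_STEP, None  # keep stepping without an explicit action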
35 | """
36 |
37 | STATE_INIT = object()
38 | STATE_RUN = object()
39 | CMD_RESTART = object()
40 | CMD_STEP = object()
41 |
42 | def __init__(self, agent):
43 | """Initialize the environment."""
44 | self.events = AnimationController()
45 | self.events.pre_frame.add(self._pre_frame)
46 | self.events.pre_animation.add(self._pre_animation)
47 | self.events.post_frame.add(self._post_frame)
48 | self.agent = agent
49 | self.ctx = None
50 | self.renderer = None
51 | self.render_every = None
52 | self.frame_range = None
53 | self.state = BaseEnv.STATE_INIT
54 |
55 | def run(self, frame_range=None, use_animation=True):
56 | """Run the environment.
57 |
58 | This hooks with animation system to receive callbacks. The callbacks eventually
59 | will invoke the actual environments interface methods as described above.
60 |
61 | Params
62 | ------
63 | frame_range: tuple, None
64 |             Start and end (inclusive; usually starts at 1 in Blender). When None,
65 | uses the configured scene frame range.
66 | use_animation: bool
67 | Whether to use Blender's non-blocking animation system or a blocking
68 |             variant. Set this to True when you want to see the agent's actions rendered
69 |             live in Blender. When set to False, the Blender UI does not refresh, but the
70 |             loop may run at much higher FPS. Consider this when training the agent.
71 | """
72 | self.frame_range = AnimationController.setup_frame_range(frame_range)
73 | self.events.play(
74 | # we allow playing the simulation past end.
75 | (self.frame_range[0], 2147483647),
76 | num_episodes=-1,
77 | use_animation=use_animation,
78 | use_offline_render=True,
79 | )
80 |
81 | def attach_default_renderer(self, every_nth=1):
82 | """Attach a default renderer to the environment.
83 |
84 | Convenience function to provide render images for remotely controlled agents (i.e `env.render()`). Uses the default camera perspective for image generation.
85 |
86 | The image rendered will be provided in the `rgb_array` field of the context provided to the agent.
87 |
88 | Params
89 | ------
90 | every_nth: int
91 | Render every nth frame of the simulation.
92 | """
93 |
94 | self.renderer = OffScreenRenderer(camera=Camera(), mode="rgb")
95 | self.render_every = every_nth
96 |
97 | def _pre_frame(self):
98 | """Internal pre-frame callback."""
99 | self.ctx["time"] = self.events.frameid
100 | self.ctx["done"] |= self.events.frameid >= self.frame_range[1]
101 | if self.events.frameid > self.frame_range[0]:
102 | cmd, action = self.agent(self, **self.ctx)
103 | if cmd == BaseEnv.CMD_RESTART:
104 | self._restart()
105 | elif cmd == BaseEnv.CMD_STEP:
106 | if np.all(action is not None):
107 | self._env_prepare_step(action)
108 | self.ctx["prev_action"] = action
109 | self.state = BaseEnv.STATE_RUN
110 |
111 | def _pre_animation(self):
112 | """Internal pre-animation callback."""
113 | self.state = BaseEnv.STATE_INIT
114 | self.ctx = {"prev_action": None, "done": False}
115 | init_ctx = self._env_reset()
116 | if init_ctx is not None:
117 | self.ctx = {**self.ctx, **init_ctx}
118 |
119 | def _post_frame(self):
120 | """Internal post-frame callback."""
121 | self._render(self.ctx)
122 | next_ctx = self._env_post_step()
123 | self.ctx = {**self.ctx, **next_ctx}
124 |
125 | def _render(self, ctx):
126 | """Internal render command."""
127 | cur, start = self.events.frameid, self.frame_range[0]
128 | render = bool(self.renderer and ((cur - start) % self.render_every) == 0)
129 | if render:
130 | ctx["rgb_array"] = self.renderer.render()
131 |
132 | def _restart(self):
133 | """Restart the environment internally."""
134 | self.events.rewind()
135 |
136 | def _env_reset(self):
137 | """Reset the environment state.
138 |
139 | To be implemented by actual environments. Returns nothing.
140 | """
141 | raise NotImplementedError()
142 |
143 | def _env_prepare_step(self, action):
144 | """Prepare environment with action.
145 |
146 | Due to the callback system in Blender, the agents `step` method
147 |         is best split into two distinct functions: one that takes the action
148 |         before a frame is animated/rendered and one that collects the new
149 |         state/reward once the frame has completed. Doing so allows
150 |         the physics system to take the action into consideration before the frame is
151 |         simulated and thus work out the physical state at the end of the frame.
152 |
153 | In case a remote controlled agent is used, make sure the action is pickle-able.
154 |
155 | Returns nothing.
156 | """
157 |
158 | raise NotImplementedError()
159 |
160 | def _env_post_step(self):
161 | """Return the environments new state as dict.
162 |
163 | Requires at least the following fields to be set: `obs`, `reward`. You might
164 | also want to specify `done`. All other fields set will be passed to the agent
165 | as well.
166 |
167 | In case a remote controlled agent is used, make sure all items are pickle-able.
168 |
169 | Returns
170 | -------
171 | ddict: dict
172 | dictionary of key-values describing the new environment state as well as
173 |             any reward and auxiliary information.
174 | """
175 | raise NotImplementedError()
176 |
177 |
178 | class RemoteControlledAgent:
179 | """Agent implementation that receives commands from a remote peer.
180 |
181 | Uses a request(remote-agent)/reply(self) pattern to model a [blocking]
182 | service call. The agent is expected to initiate a request using a dictionary:
183 | - `cmd` field set either to `'reset'` or `'step'`.
184 | - `action` field set when `cmd=='step'`.
185 | The remote agent will then be passed a dictionary response that contains
186 | all kwargs passed from the environment to `RemoteControlledAgent`.
187 |
188 |     By default, request/response pairs will block Blender. That allows
189 | the remote agent to process each frame of the simulation, independent of the time
190 | it takes to generate an answer. However, this class also supports a special
191 | `real_time` flag, in which case the environment continues the simulation. Once an
192 | agent request arrives, it will be applied to the current simulation time.
193 |
194 | Params
195 | ------
196 | address: str
197 | ZMQ remote address to bind to.
198 | real_time: bool
199 | Whether or not to continue simulation while waiting for a
200 | new remote agent request. Default False.
201 | timeoutms: int
202 | Default timeout in milliseconds to wait for new agent requests,
203 | only applies when `real_time=True`.
204 | """
205 |
206 | STATE_REQ = 0
207 | STATE_REP = 1
208 |
209 | def __init__(self, address, real_time=False, timeoutms=DEFAULT_TIMEOUTMS):
210 | """Initialize the remote controlled agent."""
211 | self.context = zmq.Context()
212 | self.socket = self.context.socket(zmq.REP)
213 | self.socket.setsockopt(zmq.LINGER, 0)
214 | self.socket.setsockopt(zmq.SNDTIMEO, timeoutms)
215 | self.socket.setsockopt(zmq.RCVTIMEO, timeoutms)
216 | self.socket.bind(address)
217 | self.real_time = real_time
218 | self.state = RemoteControlledAgent.STATE_REQ
219 |
220 | def __call__(self, env, **ctx):
221 | """Process agent environment callback."""
222 | flags = 0
223 | if self.real_time and (env.state == BaseEnv.STATE_RUN):
224 | flags = zmq.NOBLOCK
225 |
226 | if self.state == RemoteControlledAgent.STATE_REP:
227 | try:
228 | self.socket.send_pyobj(ctx, flags=flags)
229 | self.state = RemoteControlledAgent.STATE_REQ
230 | except zmq.error.Again:
231 | if not self.real_time:
232 | raise ValueError("Failed to send to remote agent.")
233 | return BaseEnv.CMD_STEP, None
234 |
235 | if self.state == RemoteControlledAgent.STATE_REQ:
236 | try:
237 | rcv = self.socket.recv_pyobj(flags=flags)
238 | assert rcv["cmd"] in ["reset", "step"]
239 | self.state = RemoteControlledAgent.STATE_REP
240 |
241 | if rcv["cmd"] == "reset":
242 | cmd = BaseEnv.CMD_RESTART
243 | action = None
244 | if env.state == BaseEnv.STATE_INIT:
245 | # Already reset
246 | cmd, action = self.__call__(env, **ctx)
247 | elif rcv["cmd"] == "step":
248 | cmd = BaseEnv.CMD_STEP
249 | action = rcv["action"]
250 | return cmd, action
251 | except zmq.error.Again:
252 | return BaseEnv.CMD_STEP, None
253 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/materials.py:
--------------------------------------------------------------------------------
1 | import bpy
2 |
3 |
4 | def create_normal_material(name):
5 | """Returns a surface material to render camera-space normals.
6 |
7 | The normal vectors range (-1,1) is transformed to match the color range (0,1) as follows:
8 | c = n*(0.5,0.5,-0.5) + 0.5
9 |     To recover the original normal, compute
10 | n = (c - 0.5)/(0.5,0.5,-0.5)
11 | where multiplication and division is component-wise.
12 |
13 | Params
14 | ------
15 | name : str
16 |         Name of material
17 |
18 | Returns
19 | -------
20 | mat : Material
21 | Blender material
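 |
 |     Example
 |     -------
 |     An illustrative sketch: assign the material to some mesh object `obj` and decode
 |     normals from a rendered float image `img` in [0, 1] (`obj`, `img` and `np` are
 |     placeholders for the caller's own object, image and numpy import):
 |
 |         mat = create_normal_material("normals")
 |         obj.data.materials.append(mat)
 |         n = (img - 0.5) / np.array([0.5, 0.5, -0.5])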
22 | """
23 | m = bpy.data.materials.new(name)
24 | m.use_nodes = True
25 | t = m.node_tree
26 | for n in t.nodes:
27 | t.nodes.remove(n)
28 |
29 | out = t.nodes.new(type="ShaderNodeOutputMaterial")
30 | geo = t.nodes.new(type="ShaderNodeNewGeometry")
31 | vt = t.nodes.new(type="ShaderNodeVectorTransform")
32 | mul = t.nodes.new(type="ShaderNodeVectorMath")
33 | add = t.nodes.new(type="ShaderNodeVectorMath")
34 |
35 | # Transform normal from world to camera
36 | vt.convert_from = "WORLD"
37 | vt.convert_to = "CAMERA"
38 | vt.vector_type = "VECTOR"
39 | t.links.new(geo.outputs["Normal"], vt.inputs["Vector"])
40 |
41 | # Shift and scale to [0..1] range required for colors
42 | mul.operation = "MULTIPLY"
43 | mul.inputs[1].default_value = (0.5, 0.5, -0.5)
44 | t.links.new(vt.outputs["Vector"], mul.inputs[0])
45 |
46 | add.operation = "ADD"
47 | add.inputs[1].default_value = (0.5, 0.5, 0.5)
48 | t.links.new(mul.outputs["Vector"], add.inputs[0])
49 |
50 | # Use the output vector as color of the surface
51 | t.links.new(add.outputs["Vector"], out.inputs["Surface"])
52 |
53 | return m
54 |
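As a small illustration of the transform documented above, camera-space normals can be recovered from a rendered color image (values in [0,1]) with plain numpy. The `color` array below is a hypothetical placeholder for such a render.

```python
import numpy as np

color = np.random.rand(480, 640, 3)  # placeholder for a rendered image in [0, 1]
# Invert c = n*(0.5, 0.5, -0.5) + 0.5 component-wise.
normals = (color - 0.5) / np.array([0.5, 0.5, -0.5])
```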
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/offscreen.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import bpy # noqa
3 | import gpu # noqa
4 | import bgl # noqa
5 | from OpenGL.GL import glGetTexImage
6 |
7 | from .camera import Camera
8 | from .utils import find_first_view3d
9 |
10 |
11 | class OffScreenRenderer:
12 | """Provides offscreen scene rendering using Eevee.
13 |
14 |     Rendering reuses the first 3D Space View found in Blender. The way this view is configured also defines what the resulting image looks like. Use the helper method `set_render_style` to adjust the appearance from within Python.
15 |
16 |     This class' `render` method is expected to be called from a `POST_PIXEL` callback, which `AnimationController` takes care of. That is, invoking `render()` from within `post_frame` is considered safe.
17 |
18 | Note, color images are in linear color spaces and may appear darker than
19 | expected when visualized. Use `btt.colors.gamma` to gamma encode before
20 | visualizing.
21 |
22 | Params
23 | ------
24 | camera: btb.Camera, None
25 | Camera view to be rendered. When None, default camera is used.
26 | origin: str
27 |         When 'upper-left', flip the rendered data to match the OpenCV image coordinate
28 |         system. When 'lower-left', the image is created using the OpenGL coordinate system. Defaults to 'upper-left'.
29 | mode: str
30 |         Defines the number of color channels. Either 'rgba' or 'rgb'. Defaults to 'rgba'.
31 | """
32 |
33 | def __init__(self, camera=None, mode="rgba", origin="upper-left"):
34 | assert mode in ["rgba", "rgb"]
35 | assert origin in ["upper-left", "lower-left"]
36 | self.camera = camera or Camera()
37 | self.offscreen = gpu.types.GPUOffScreen(self.shape[1], self.shape[0])
38 | self.area, self.space, self.region = find_first_view3d()
39 | self.handle = None
40 | self.origin = origin
41 | channels = 4 if mode == "rgba" else 3
42 | self.buffer = np.zeros((self.shape[0], self.shape[1], channels), dtype=np.uint8)
43 | self.mode = bgl.GL_RGBA if mode == "rgba" else bgl.GL_RGB
44 |
45 | @property
46 | def shape(self):
47 | return self.camera.shape
48 |
49 | def render(self):
50 | """Render the scene and return image as buffer.
51 |
52 | Returns
53 | -------
54 | image: HxWxD array
55 |             where D is 4 when `mode=='rgba'` else 3.
56 | """
57 | with self.offscreen.bind():
58 | self.offscreen.draw_view3d(
59 | bpy.context.scene,
60 | bpy.context.view_layer,
61 | self.space, # bpy.context.space_data
62 | self.region, # bpy.context.region
63 | self.camera.view_matrix,
64 | self.camera.proj_matrix,
65 | )
66 |
67 | bgl.glActiveTexture(bgl.GL_TEXTURE0)
68 | bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.offscreen.color_texture)
69 |
70 | # np.asarray seems slow, because bgl.buffer does not support the python buffer protocol
71 | # bgl.glGetTexImage(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGB, bgl.GL_UNSIGNED_BYTE, self.buffer)
72 | # https://docs.blender.org/api/blender2.8/gpu.html
73 | # That's why we use PyOpenGL at this point instead.
74 | glGetTexImage(
75 | bgl.GL_TEXTURE_2D, 0, self.mode, bgl.GL_UNSIGNED_BYTE, self.buffer
76 | )
77 |
78 | buffer = self.buffer
79 | if self.origin == "upper-left":
80 | buffer = np.flipud(buffer)
81 | return buffer
82 |
83 | def set_render_style(self, shading="RENDERED", overlays=False):
84 | self.space.shading.type = shading
85 | self.space.overlay.show_overlays = overlays
86 |
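A minimal Blender-side sketch of how this renderer might be used. It assumes a UI session (offscreen rendering needs a 3D view) and that the surrounding scene script registers `post_frame` with `btb.AnimationController`, as in the repository examples; the function below is purely illustrative.

```python
# Hedged sketch; assumes it runs inside a Blender scene script with a UI.
from blendtorch.btb.camera import Camera
from blendtorch.btb.offscreen import OffScreenRenderer

cam = Camera()  # default scene camera
off = OffScreenRenderer(camera=cam, mode="rgb", origin="upper-left")
off.set_render_style(shading="RENDERED", overlays=False)

def post_frame():
    rgb = off.render()  # HxWx3 uint8, linear color space
    # ... publish or otherwise process `rgb` here ...
```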
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/paths.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import sys
3 |
4 |
5 | def add_scene_dir_to_path():
6 | """Adds directory of scene file to Python path"""
7 | p = bpy.path.abspath("//")
8 | if p not in sys.path:
9 | sys.path.append(p)
10 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/publisher.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import zmq
3 |
4 |
5 | class DataPublisher:
6 | """Publish rendered images and auxilary data.
7 |
8 | This class acts as data source for `btt.RemoteIterableDataset`.
9 | Keyword arguments to `publish` must be pickle-able.
10 |
11 | Params
12 | ------
13 | bind_address: str
14 | ZMQ remote address to bind to.
15 | btid: integer
16 | blendtorch process identifier when available. Will be auto-added
17 | to the send dictionary. Defaults to None.
18 | send_hwm: integer
19 | Max send queue size before blocking caller of `publish`.
20 | """
21 |
22 | def __init__(self, bind_address, btid=None, send_hwm=10, lingerms=0):
23 | self.ctx = zmq.Context()
24 | self.sock = self.ctx.socket(zmq.PUSH)
25 | self.sock.setsockopt(zmq.SNDHWM, send_hwm)
26 | self.sock.setsockopt(zmq.LINGER, lingerms)
27 | self.sock.setsockopt(zmq.IMMEDIATE, 1)
28 | self.sock.bind(bind_address)
29 | self.btid = btid
30 |
31 | def publish(self, **kwargs):
32 | """Publish a message.
33 |
34 |         This method will implicitly add the blendtorch instance id `btid`
35 | to the send dictionary.
36 |
37 | Params
38 | ------
39 | kwargs: optional
40 |             Dictionary of pickle-able objects composing the message.
41 | """
42 |
43 | data = {"btid": self.btid, **kwargs}
44 | self.sock.send_pyobj(data)
45 |
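A minimal Blender-side sketch of publishing a message. The bind address and `btid` would normally come from the command-line arguments that `BlenderLauncher` passes to the scene script; both values and the `image` array below are placeholders.

```python
import numpy as np
from blendtorch.btb.publisher import DataPublisher

pub = DataPublisher(bind_address="tcp://127.0.0.1:11000", btid=0)
image = np.zeros((64, 64, 3), dtype=np.uint8)  # placeholder render result
pub.publish(image=image, frameid=1)            # any pickle-able keyword arguments
```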
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/renderer.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | from dataclasses import dataclass
4 | from functools import partial
5 | from itertools import groupby
6 | from pathlib import Path
7 |
8 | import bpy
9 | import minexr
10 | import numpy as np
11 |
12 | from .camera import Camera
13 |
14 |
15 | @dataclass
16 | class CompositeSelection:
17 | """Name of given to this element, serves as key in the result dict."""
18 |
19 | key: str
20 | """Name of the target FileOutput node in the compositing tree."""
21 | node: str
22 | """Name of the slot to select."""
23 | slot: str
24 | """Selection of channels to extract from slot.
25 | Note, use 'RGBA', 'RGB', 'BGR' for color information and 'V' for depth information."""
26 | channels: str
27 |
28 |
29 | @dataclass
30 | class _EXRSelection:
31 | key: str
32 | channels: list
33 |
34 |
35 | class CompositeRenderer:
36 | """Provides composite rendering support for EEVEE.
37 |
38 | Rendering with compositor support. This implementation currently assumes that at least one FileOutput node is present in the compositing tree. Each FileOutput node has to be configured as follows:
39 |     - Filename: has to include hash placeholders #### for the frame index.
40 | - Format: OPEN_EXR_MULTILAYER,
41 | - Codec: None
42 | - Precision: Half
43 | The initialization is passed an array of `CompositeSelection` elements that basically define which elements should be returned after a call to `render`.
44 |
45 | Note, color images are in linear color spaces and may appear darker than
46 | expected when visualized. Use `btt.colors.gamma` to gamma encode before
47 | visualizing.
48 |
49 | Params
50 | ------
51 | slots: Iterable[CompositeSelection]
52 | Slots to be selected and returned by `render`.
53 | camera: btb.Camera, None
54 | Camera view to be rendered. When None, default camera is used.
55 | delete_render_files: bool, optional
56 | Delete intermediate render files. Defaults to true.
57 | btid: int, optional
58 | Blendtorch worker index. Required to avoid file collisions for multiple workers.
59 | """
60 |
61 | def __init__(self, slots, camera=None, delete_render_files=True, btid=1):
62 | self.btid = btid
63 | self.camera = camera or Camera()
64 | self.delete_render_files = delete_render_files
65 | self._scene = bpy.context.scene
66 | assert self._scene.use_nodes, "CompositeRenderer requires compositing nodes"
67 | assert len(slots) > 0, "No slots selected"
68 |
69 | self.outnodes = self._find_output_nodes()
70 | self.outnodes = self._update_output_paths(self.outnodes)
71 | self._outpath_re = re.compile(r"((#)+)")
72 |
73 | def node_from_name(name):
74 | nodes = [n for n in self.outnodes if n.name == name]
75 | assert len(nodes) > 0, f"Could not find output node {name}"
76 | return nodes[0]
77 |
78 | def exr_from_slots(slots):
79 | exr = []
80 | for s in slots:
81 | channels = [f"{s.slot}.{c}" for c in s.channels]
82 | exr.append(_EXRSelection(s.key, channels))
83 | return exr
84 |
85 | self.mapped_slots = {
86 | node_from_name(k): exr_from_slots(g)
87 | for k, g in groupby(slots, key=lambda s: s.node)
88 | }
89 |
90 | def _find_output_nodes(self):
91 | tree = self._scene.node_tree
92 | outnodes = [n for n in tree.nodes if n.type == "OUTPUT_FILE"]
93 |
94 | def is_compatible(n):
95 | return (
96 | n.format.file_format == "OPEN_EXR_MULTILAYER"
97 | and n.format.exr_codec == "NONE"
98 | and n.format.color_depth == "16" # currently, minexr assumes fp16
99 | )
100 |
101 | outnodes_ok = [n for n in outnodes if is_compatible(n)]
102 | # outnodes_dropped = [n for n in outnodes if not is_compatible(n)]
103 | assert (
104 | len(outnodes_ok) > 0
105 | ), "Could not find a single compatible output filenode"
106 | return outnodes_ok
107 |
108 | def _update_output_paths(self, outnodes):
109 | for n in outnodes:
110 | path = Path(bpy.path.abspath(n.base_path))
111 | path = path.parent / f"{path.stem}_{self.btid:02d}"
112 | path = path.with_suffix(".exr")
113 | n.base_path = str(path)
114 | return outnodes
115 |
116 | def _actual_path(self, fidx, base_path):
117 | def _replicate(g, fidx):
118 |             width = g.span()[1] - g.span()[0]
119 |             return str(fidx).zfill(width)
120 |
121 | newpath, cnt = self._outpath_re.subn(partial(_replicate, fidx=fidx), base_path)
122 | assert (
123 | cnt > 0
124 | ), "Composite renderer requires hash placeholders in output paths to identify frame number."
125 | path = Path(bpy.path.abspath(newpath))
126 | assert path.exists(), f"Could not find output file {path}"
127 | return path
128 |
129 | def render(self):
130 | """Render the scene and return data of selected slots.
131 |
132 | Returns
133 | -------
134 | data: dict
135 | Mapping from name to np.array
136 | """
137 |
138 | fidx = self._scene.frame_current
139 | self.camera.set_as_active_camera(self._scene)
140 | bpy.ops.render.render(animation=False, write_still=False, use_viewport=True)
141 |
142 | key_data = {}
143 | for node, exrsel in self.mapped_slots.items():
144 | path = self._actual_path(fidx, node.base_path)
145 | with open(path, "rb") as fp:
146 | reader = minexr.load(fp)
147 | # print(reader.attrs)
148 | for exr in exrsel:
149 | data = reader.select(exr.channels).astype(np.float32)
150 | key_data[exr.key] = data
151 | if self.delete_render_files:
152 | os.remove(path)
153 |
154 | return key_data
155 |
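A minimal Blender-side sketch of selecting slots and rendering. The node name `'FileOutput'` and the slot names `'Image'`/`'Depth'` are assumptions about the compositing tree and will differ per scene.

```python
from blendtorch.btb.renderer import CompositeRenderer, CompositeSelection

slots = [
    CompositeSelection(key="color", node="FileOutput", slot="Image", channels="RGB"),
    CompositeSelection(key="depth", node="FileOutput", slot="Depth", channels="V"),
]
renderer = CompositeRenderer(slots, btid=1)
data = renderer.render()  # e.g. {'color': HxWx3 float32, 'depth': HxWx1 float32}
```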
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/signal.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 |
4 | class Signal:
5 | """Distribute messages to multiple callbacks.
6 |
7 | Usage
8 | >>> from blendtorch.btb.signal import Signal
9 | >>> def mul(a,b):
10 | ... print(a*b)
11 | ...
12 | >>> sig = Signal()
13 | >>> h = sig.add(mul, b=3)
14 | >>> sig.invoke(4)
15 | 12
16 | """
17 |
18 | def __init__(self):
19 | self.slots = []
20 |
21 | def add(self, fn, *args, **kwargs):
22 | """Register `fn` as callback.
23 |
24 | Params
25 | ------
26 | *args: optional
27 | Additional positional arguments to provide callback with
28 | **kwargs: optional
29 | Additional keyword arguments to provide callback with
30 |
31 | Returns
32 | -------
33 | handle: object
34 | Handle that can be used to unregister callback.
35 | """
36 | fnp = partial(fn, *args, **kwargs)
37 | self.slots.append(fnp)
38 | return fnp
39 |
40 | def remove(self, handle):
41 | """Unregister callback using handle returned from `add`."""
42 | self.slots.remove(handle)
43 |
44 | def invoke(self, *args, **kwargs):
45 | """Invoke the signal.
46 |
47 | Params
48 | ------
49 | *args: optional
50 | Positional arguments to send to all callbacks
51 | **kwargs: optional
52 | Keyword arguments to send to all callbacks
53 | """
54 | for s in self.slots:
55 | s(*args, **kwargs)
56 |
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/utils.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import numpy as np
3 | from mathutils import Vector
4 |
5 |
6 | def find_first_view3d():
7 | """Helper function to find first space view 3d and associated window region.
8 |
9 | The three returned objects are useful for setting up offscreen rendering in
10 | Blender.
11 |
12 | Returns
13 | -------
14 |     area: object
15 |         Area associated with space view.
16 |     space: bpy.types.SpaceView3D
17 |         Space view.
18 |     region: object
19 |         Window region associated with space view.
20 |
21 | """
22 | areas = [a for a in bpy.context.screen.areas if a.type == "VIEW_3D"]
23 | assert len(areas) > 0
24 | area = areas[0]
25 | region = sorted(
26 | [r for r in area.regions if r.type == "WINDOW"],
27 | key=lambda x: x.width,
28 | reverse=True,
29 | )[0]
30 | spaces = [s for s in areas[0].spaces if s.type == "VIEW_3D"]
31 | assert len(spaces) > 0
32 | return area, spaces[0], region
33 |
34 |
35 | def object_coordinates(*objs, depsgraph=None):
36 | """Returns XYZ object coordinates of all objects in positional *args.
37 |
38 | Params
39 | ------
40 | objs: list-like of bpy.types.Object
41 |         Objects to return vertices for
42 | depsgraph: bpy.types.Depsgraph, None
43 | Dependency graph
44 |
45 | Returns
46 | -------
47 | xyz: Nx3 array
48 |         Object coordinates of object vertices
49 | """
50 |
51 | # To be on the safe side, we use the evaluated object after
52 | # all modifiers etc. applied (done internally by bmesh)
53 |
54 | dg = depsgraph or bpy.context.evaluated_depsgraph_get()
55 | xyz = []
56 | for obj in objs:
57 | eval_obj = obj.evaluated_get(dg)
58 | xyz_obj = [v.co for v in eval_obj.data.vertices]
59 | xyz.extend(xyz_obj)
60 | return np.stack(xyz)
61 |
62 |
63 | def world_coordinates(*objs, depsgraph=None):
64 | """Returns XYZ world coordinates of all objects in positional *args.
65 |
66 | Params
67 | ------
68 | objs: list-like of bpy.types.Object
69 |         Objects to return vertices for
70 | depsgraph: bpy.types.Depsgraph, None
71 | Dependency graph
72 |
73 | Returns
74 | -------
75 | xyz: Nx3 array
76 | World coordinates of object vertices
77 | """
78 |
79 | # To be on the safe side, we use the evaluated object after
80 | # all modifiers etc. applied (done internally by bmesh)
81 |
82 | dg = depsgraph or bpy.context.evaluated_depsgraph_get()
83 | xyz = []
84 | for obj in objs:
85 | eval_obj = obj.evaluated_get(dg)
86 | xyz_obj = [(eval_obj.matrix_world @ v.co) for v in eval_obj.data.vertices]
87 | xyz.extend(xyz_obj)
88 | return np.stack(xyz)
89 |
90 |
91 | def bbox_world_coordinates(*objs, depsgraph=None):
92 | """Returns XYZ world coordinates of all bounding box corners of each object in *objs.
93 |
94 | Params
95 | ------
96 | objs: list-like of bpy.types.Object
97 |         Objects to return vertices for
98 | depsgraph: bpy.types.Depsgraph, None
99 | Dependency graph
100 |
101 | Returns
102 | -------
103 | xyz: Nx3 array
104 | World coordinates of object vertices
105 | """
106 |
107 | # To be on the safe side, we use the evaluated object after
108 | # all modifiers etc. applied (done internally by bmesh)
109 |
110 | dg = depsgraph or bpy.context.evaluated_depsgraph_get()
111 | xyz = []
112 | for obj in objs:
113 | eval_obj = obj.evaluated_get(dg)
114 | xyz_obj = [(eval_obj.matrix_world @ Vector(c)) for c in eval_obj.bound_box]
115 | xyz.extend(xyz_obj)
116 | return np.stack(xyz)
117 |
118 |
119 | def hom(x, v=1.0):
120 | """Convert to homogeneous coordinates in the last dimension."""
121 | return np.concatenate((x, np.full((x.shape[0], 1), v, dtype=x.dtype)), -1)
122 |
123 |
124 | def dehom(x):
125 | """Return de-homogeneous coordinates by perspective division."""
126 | return x[..., :-1] / x[..., -1:]
127 |
128 |
129 | def random_spherical_loc(radius_range=None, theta_range=None, phi_range=None):
130 | """Return random locations on sphere.
131 |
132 | Params
133 | ------
134 | radius_range: tuple
135 | min/max radius of sphere. Defaults to (1,1)
136 |     theta_range: tuple
137 |         min/max inclination range. Defaults to (0, pi)
138 |     phi_range: tuple
139 |         min/max azimuth range. Defaults to (0, 2*pi)
140 |
141 | Returns
142 | -------
143 | xyz : array
144 | location on sphere
145 | """
146 | if radius_range is None:
147 | radius_range = (1, 1)
148 | if theta_range is None:
149 | theta_range = (0, np.pi)
150 | if phi_range is None:
151 | phi_range = (0, 2 * np.pi)
152 |
153 | # Not really uniform on sphere, but fine for us.
154 | r = np.random.uniform(radius_range[0], radius_range[1]) # radii
155 | t = np.random.uniform(theta_range[0], theta_range[1]) # inclination
156 | p = np.random.uniform(phi_range[0], phi_range[1]) # azimuth
157 |
158 | return np.array([np.sin(t) * np.cos(p), np.sin(t) * np.sin(p), np.cos(t)]) * r
159 |
160 |
161 | def compute_object_visibility(obj, cam, N=25, scene=None, view_layer=None, dist=None):
162 | """Computes object visibility using Monte Carlo ray-tracing."""
163 | scene = scene or bpy.context.scene
164 | vl = view_layer or bpy.context.view_layer
165 | src = cam.bpy_camera.matrix_world.translation
166 | dist = dist or 1.70141e38
167 |
168 | caminv = cam.bpy_camera.matrix_world.inverted()
169 |
170 | ids = np.random.choice(len(obj.data.vertices), size=N)
171 | vis = 0
172 | for idx in ids:
173 | dst_world = obj.matrix_world @ obj.data.vertices[idx].co
174 | d = (dst_world - src).normalized()
175 | dst_cam = caminv @ dst_world
176 | if dst_cam.z <= 0.0 and np.isfinite(d).all(): # view towards neg. z
177 | res, x, n, face, object, m = scene.ray_cast(vl, src, d, distance=dist)
178 | if res and object == obj:
179 | vis += 1
180 | del object, m, x, n, res
181 | del d, dst_world, dst_cam
182 | return vis / N
183 |
184 |
185 | def scene_stats():
186 | """Returns debug information on the current scene."""
187 | stats = {}
188 | for attr in dir(bpy.data):
189 | if isinstance(attr, bpy.types.Collection):
190 | objs = getattr(bpy.data, attr).all_objects
191 | if len(objs) == 0:
192 | continue
193 | orphaned = [o for o in objs if o.users == 0]
194 | active = [o for o in objs if o.users > 0]
195 | stats[attr] = (len(active), len(orphaned))
196 | return stats
197 |
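A small numpy sketch of the `hom`/`dehom` helpers. Note the module imports `bpy` and `mathutils`, so this only runs inside Blender; the projection-view matrix `PV` is a placeholder.

```python
import numpy as np
from blendtorch.btb.utils import hom, dehom

xyz = np.random.rand(10, 3)    # placeholder world coordinates
PV = np.eye(4)                 # placeholder 4x4 projection @ view matrix
pts = dehom(hom(xyz) @ PV.T)   # Nx3 after perspective division
```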
--------------------------------------------------------------------------------
/pkg_blender/blendtorch/btb/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.4.0"
2 |
--------------------------------------------------------------------------------
/pkg_blender/requirements.txt:
--------------------------------------------------------------------------------
1 | pyzmq>=18.1.1
2 | numpy>=1.18.2
3 | pyopengl>=3.1.5
4 | minexr>=1.0.0
5 | supershape>=1.1.0
--------------------------------------------------------------------------------
/pkg_blender/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from pathlib import Path
3 |
4 | THISDIR = Path(__file__).parent
5 |
6 | with open(THISDIR / "requirements.txt") as f:
7 | required = f.read().splitlines()
8 |
9 | with open(THISDIR / ".." / "Readme.md", encoding="utf-8") as f:
10 | long_description = f.read()
11 |
12 | main_ns = {}
13 | with open(THISDIR / "blendtorch" / "btb" / "version.py") as ver_file:
14 | exec(ver_file.read(), main_ns)
15 |
16 | setup(
17 | name="blendtorch-btb",
18 | author="Christoph Heindl and Sebastian Zambal",
19 | description="Blender part of project blendtorch. See also blendtorch-btt.",
20 | url="https://github.com/cheind/pytorch-blender",
21 | license="MIT",
22 | long_description=long_description,
23 | long_description_content_type="text/markdown",
24 | version=main_ns["__version__"],
25 | packages=["blendtorch.btb"],
26 | install_requires=required,
27 | zip_safe=False,
28 | )
29 |
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from .launcher import BlenderLauncher, LaunchInfo
4 | from .dataset import RemoteIterableDataset, FileDataset
5 | from .finder import discover_blender
6 | from .file import FileRecorder, FileReader
7 | from .duplex import DuplexChannel
8 | from . import env
9 | from . import colors
10 | from .version import __version__
11 |
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/apps/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/pkg_pytorch/blendtorch/btt/apps/__init__.py
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/apps/launch.py:
--------------------------------------------------------------------------------
1 | """Launch Blender instances from console.
2 |
3 | This application loads a json-serialized message from `jsonargs` argument. The content
4 | of the file has to match the keyword arguments of `btt.BlenderLauncher`. Example
5 | {
6 | "scene": "",
7 | "script": "C:\\dev\\pytorch-blender\\tests\\blender\\launcher.blend.py",
8 | "num_instances": 2,
9 | "named_sockets": [
10 | "DATA",
11 | "GYM"
12 | ],
13 | "background": true,
14 | "seed": 10
15 | }
16 | The application then invokes `btt.BlenderLauncher` using these arguments and waits
17 | for the spawned Blender processes to exit. The launch-information is written to
18 | `--out-launch-info` in json format, so that one can connect to the launched instances
19 | from a remote location using LaunchInfo.
20 | """
21 |
22 | from ..launcher import BlenderLauncher
23 | from ..launch_info import LaunchInfo
24 | import json
25 |
26 |
27 | def main(inargs=None):
28 | import argparse
29 | from argparse import RawTextHelpFormatter
30 |
31 | parser = argparse.ArgumentParser(
32 | "Blender Launcher", description=__doc__, formatter_class=RawTextHelpFormatter
33 | )
34 | parser.add_argument(
35 | "--out-launch-info",
36 | help="Path to save connection info to.",
37 | default="launch_info.json",
38 | )
39 | parser.add_argument(
40 | "jsonargs",
41 | type=str,
42 | help="JSON Dict of arguments for blendtorch.btt.BlenderLauncher",
43 | )
44 | args = parser.parse_args(inargs)
45 |
46 | with open(args.jsonargs, "r") as fp:
47 | launch_args = json.load(fp)
48 |
49 | # print(json.dumps(launch_args, indent=4))
50 |
51 | with BlenderLauncher(**launch_args) as bl:
52 | LaunchInfo.save_json(args.out_launch_info, bl.launch_info)
53 | bl.wait()
54 |
55 |
56 | if __name__ == "__main__":
57 | main()
58 |
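A hedged usage sketch: with a JSON arguments file like the one shown in the module docstring and Blender installed, `main` can be called programmatically (equivalent to running the module from the console). File names here are illustrative.

```python
from blendtorch.btt.apps.launch import main

# Same as: python -m blendtorch.btt.apps.launch --out-launch-info launch_info.json launch_args.json
main(["--out-launch-info", "launch_info.json", "launch_args.json"])
```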
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/colors.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def gamma(x, coeff=2.2):
5 | """Return sRGB (gamme encoded o=i**(1/coeff)) image.
6 |
7 | This gamma encodes linear colorspaces as produced by Blender
8 | renderings.
9 |
10 | Params
11 | ------
12 | x: HxWxC uint8 array
13 | C either 3 (RGB) or 4 (RGBA)
14 | coeff: scalar
15 | correction coefficient
16 |
17 | Returns
18 | -------
19 | y: HxWxC uint8 array
20 | Gamma encoded array.
21 | """
22 | y = x[..., :3].astype(np.float32) / 255
23 |     y = np.uint8(255.0 * y ** (1 / coeff))
24 | if x.shape[-1] == 3:
25 | return y
26 | else:
27 | return np.concatenate((y, x[..., 3:4]), axis=-1)
28 |
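A small sketch of gamma-encoding a linear-space render before display; `img` is a hypothetical placeholder for an image received from Blender.

```python
import numpy as np
from blendtorch.btt.colors import gamma

img = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder linear-space image
srgb = gamma(img, coeff=2.2)                   # gamma-encoded uint8 image
```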
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/constants.py:
--------------------------------------------------------------------------------
1 | """Commonly used constants."""
2 |
3 | """Default socket timeout 10 sec."""
4 | DEFAULT_TIMEOUTMS = 10000
5 |
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/dataset.py:
--------------------------------------------------------------------------------
1 | import zmq
2 | import pickle
3 | from contextlib import ExitStack
4 | from glob import glob
5 | import torch.utils as utils
6 |
7 | from .file import FileRecorder, FileReader
8 | from .constants import DEFAULT_TIMEOUTMS
9 |
10 |
11 | def _identity_item_transform(x):
12 | return x
13 |
14 |
15 | class RemoteIterableDataset(utils.data.IterableDataset):
16 | """Base class for iteratable datasets that receive data from remote Blender instances.
17 |
18 | To support multiple DataLoader workers in PyTorch, this class lazily constructs the data stream upon start of iteration. Each received message is represented as item dictionary.
19 |
20 | `RemoteIterableDataset` supports two ways to manipulate items before returning them to the caller
21 | - Provide an `item_transform` that takes a dictionary and returns transformed elements
22 | - Inherit from `RemoteIterableDataset` and override `RemoteIterableDataset._item()` method.
23 |
24 | Note, at a dataset level you will usually operate with numpy arrays, because that's what Blender usually publishes. When wrapping the dataset in a PyTorch dataloader, however, PyTorch will try to automatically convert these to PyTorch tensors.
25 |
26 | Params
27 | ------
28 | addresses: list-like
29 | ZMQ addresses to connect to.
30 | max_items: integer
31 | Artificial length of this dataset. Also affects the
32 | maximum capacity of any recorder used to record messages.
33 | item_transform: callable
34 | Any transform to apply to received items. Each item is
35 | a dictionary whose content is defined by the Blender script
36 | sending the data via `btb.DataPublisher`.
37 | record_path_prefix: str, Path
38 | Path prefix to record to. When given, each DataLoader worker will
39 | create a recording file `{prefix}_{worker_idx:02d}.btr`.
40 | queue_size: integer
41 |         Receive queue size before the publisher gets stalled.
42 | timeoutms: integer
43 | Max wait time before raising an error.
44 | """
45 |
46 | def __init__(
47 | self,
48 | addresses,
49 | queue_size=10,
50 | timeoutms=DEFAULT_TIMEOUTMS,
51 | max_items=100000,
52 | item_transform=None,
53 | record_path_prefix=None,
54 | ):
55 | self.addresses = addresses
56 | self.queue_size = queue_size
57 | self.timeoutms = timeoutms
58 | self.max_items = max_items
59 | self.record_path_prefix = record_path_prefix
60 | self.item_transform = item_transform or _identity_item_transform
61 |
62 | def enable_recording(self, fname):
63 | """Enable recording to given prefix path `fname`.
64 |
65 | Needs to be set before receiving items from the dataset.
66 | """
67 | self.record_path_prefix = fname
68 |
69 | def stream_length(self, max_items):
70 | """Return artificial dataset length."""
71 | self.max_items = max_items
72 |
73 | def __iter__(self):
74 | """Return a dataset iterator."""
75 | return self._stream()
76 |
77 | def _stream(self):
78 | ctx = zmq.Context()
79 | socket = None
80 |
81 | try:
82 | socket = ctx.socket(zmq.PULL)
83 | socket.setsockopt(zmq.RCVHWM, self.queue_size)
84 | poller = zmq.Poller()
85 | poller.register(socket, zmq.POLLIN)
86 | for addr in self.addresses:
87 | socket.connect(addr)
88 |
89 | num_workers = 1
90 | worker_id = 0
91 | wi = utils.data.get_worker_info()
92 | if wi is not None:
93 | worker_id = wi.id
94 | num_workers = wi.num_workers
95 |
96 | with ExitStack() as es:
97 | rec = None
98 | if self.record_path_prefix is not None:
99 | rec = es.enter_context(
100 | FileRecorder(
101 | FileRecorder.filename(self.record_path_prefix, worker_id),
102 | self.max_items,
103 | )
104 | )
105 |
106 | for i in range(self.max_items // num_workers):
107 | socks = dict(poller.poll(self.timeoutms))
108 | assert socket in socks, "No response within timeout interval."
109 | if rec:
110 | data = socket.recv()
111 | rec.save(data, is_pickled=True)
112 | obj = pickle.loads(data)
113 | else:
114 | obj = socket.recv_pyobj()
115 | yield self._item(obj)
116 | del obj
117 |
118 | finally:
119 | if socket is not None:
120 | socket.close()
121 |
122 | def _item(self, item):
123 | """Transform the given item.
124 | Defaults to applying the `item_transform`.
125 | """
126 | return self.item_transform(item)
127 |
128 |
129 | class SingleFileDataset(utils.data.Dataset):
130 | """Replays from a particular recording file."""
131 |
132 | def __init__(self, path, item_transform=None):
133 | self.reader = FileReader(path)
134 | self.item_transform = item_transform or _identity_item_transform
135 |
136 | def __len__(self):
137 | return len(self.reader)
138 |
139 | def __getitem__(self, idx):
140 | return self._item(self.reader[idx])
141 |
142 | def _item(self, item):
143 | return self.item_transform(item)
144 |
145 |
146 | class FileDataset(utils.data.ConcatDataset):
147 | """Replays from multiple recordings matching a recording pattern.
148 |
149 | This dataset constructs one `SingleFileDataset` per file matching
150 | the specified prefix pattern `record_path_prefix`. All datasets
151 | are then concatenated to appear as a single larger dataset.
152 | """
153 |
154 | def __init__(self, record_path_prefix, item_transform=None):
155 | fnames = sorted(glob(f"{record_path_prefix}_*.btr"))
156 | assert (
157 | len(fnames) > 0
158 | ), f"Found no recording files with prefix {record_path_prefix}"
159 | ds = [SingleFileDataset(fname) for fname in fnames]
160 | super().__init__(ds)
161 |
162 | self.item_transform = item_transform or _identity_item_transform
163 |
164 | def __getitem__(self, idx):
165 | return self._item(super().__getitem__(idx))
166 |
167 | def _item(self, item):
168 | return self.item_transform(item)
169 |
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/duplex.py:
--------------------------------------------------------------------------------
1 | import zmq
2 | import os
3 | import sys
4 |
5 | from .constants import DEFAULT_TIMEOUTMS
6 |
7 |
8 | class DuplexChannel:
9 | """Provides generic bidirectional communication with a single PyTorch instance."""
10 |
11 | def __init__(self, address, btid=None, lingerms=0, timeoutms=DEFAULT_TIMEOUTMS):
12 | self.ctx = zmq.Context()
13 | self.sock = self.ctx.socket(zmq.PAIR)
14 | self.sock.setsockopt(zmq.LINGER, lingerms)
15 | self.sock.setsockopt(zmq.RCVHWM, 10)
16 | self.sock.setsockopt(zmq.SNDHWM, 10)
17 | self.sock.setsockopt(zmq.SNDTIMEO, timeoutms)
18 | self.sock.setsockopt(zmq.RCVTIMEO, timeoutms)
19 | self.sock.connect(address)
20 |
21 | self.poller = zmq.Poller()
22 | self.poller.register(self.sock, zmq.POLLIN)
23 | self.btid = btid
24 |
25 | def recv(self, timeoutms=None):
26 | """Return next message or None.
27 |
28 | Kwargs
29 | ------
30 | timeoutms: int
31 | Max time to spend waiting for messages. If None, blocks until
32 | at least one message is available.
33 |
34 | Returns
35 | -------
36 | msg: dict, None
37 | Message received or None.
38 | """
39 | socks = dict(self.poller.poll(timeoutms))
40 | if self.sock in socks:
41 | return self.sock.recv_pyobj()
42 | else:
43 | return None
44 |
45 | def send(self, **kwargs):
46 | """Send a message to remote Blender process.
47 |
48 | Automatically attaches the process identifier `btid` and
49 | a unique message id `btmid` to the dictionary.
50 |
51 | Params
52 | ------
53 | kwargs: dict
54 | Message to send.
55 |
56 | Returns
57 | -------
58 | messageid: integer
59 | Message id attached to dictionary
60 | """
61 | mid = int.from_bytes(os.urandom(4), sys.byteorder)
62 | data = {"btid": self.btid, "btmid": mid, **kwargs}
63 | self.sock.send_pyobj(data)
64 | return mid
65 |
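A minimal PyTorch-side sketch of the duplex channel. The address is assumed to belong to a launcher socket named, say, `'CTRL'` (an illustrative name) whose Blender counterpart created a matching `btb.DuplexChannel`.

```python
from blendtorch.btt.duplex import DuplexChannel

ch = DuplexChannel("tcp://localhost:11001")  # assumed address of the Blender counterpart
mid = ch.send(cmd="hello")                   # returns the message id attached as 'btmid'
msg = ch.recv(timeoutms=5000)                # reply dict or None on timeout
```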
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/env.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 | import zmq
3 |
4 | from .constants import DEFAULT_TIMEOUTMS
5 | from .launcher import BlenderLauncher
6 | from .env_rendering import create_renderer
7 | from . import colors
8 |
9 |
10 | class RemoteEnv:
11 | """Communicate with a remote Blender environment.
12 |
13 | This sets up a communication channel with a remote Blender environment.
14 | Its counterpart on Blender is usually a `btb.RemoteControlledAgent`.
15 |
16 | `RemoteEnv` already provides the usual `step()` and `reset()` methods
17 | that block the caller until the remote call returns. However, it does
18 | not manage launching the remote Environment. For this reason we provide
19 | `launch_env` below.
20 |
21 | To provide OpenAI gym compatible environments, one usually inherits
22 | from `btb.env.OpenAIRemoteEnv`.
23 |
24 | By default, the simulation time of the remote environment only advances
25 | when the agent issues a command (step, reset). However, one may configure
26 | the remote environment in real-time mode, in which case the simulation time
27 | advances independently of the agent's commands.
28 |
29 | Params
30 | ------
31 | address: str
32 | ZMQ endpoint to connect to.
33 | timeoutms: int
34 | Receive timeout before raising an error.
35 | """
36 |
37 | def __init__(self, address, timeoutms=DEFAULT_TIMEOUTMS):
38 | self.context = zmq.Context()
39 | self.socket = self.context.socket(zmq.REQ)
40 | self.socket.setsockopt(zmq.LINGER, 0)
41 | self.socket.setsockopt(zmq.SNDTIMEO, timeoutms * 10)
42 | self.socket.setsockopt(zmq.RCVTIMEO, timeoutms)
43 | self.socket.setsockopt(zmq.REQ_RELAXED, 1)
44 | self.socket.setsockopt(zmq.REQ_CORRELATE, 1)
45 | self.socket.connect(address)
46 | self.env_time = None
47 | self.rgb_array = None
48 | self.viewer = None
49 |
50 | def reset(self):
51 | """Reset the remote environment.
52 |
53 | Returns
54 | -------
55 | obs: object
56 | Initial observation
57 | info: dict
58 |             Additional information provided by the remote
59 | environment.
60 | """
61 | ddict = self._reqrep(cmd="reset")
62 | self.rgb_array = ddict.pop("rgb_array", None)
63 | return ddict.pop("obs"), ddict
64 |
65 | def step(self, action):
66 | """Advance the remote environment by providing an action.
67 |
68 | Params
69 | ------
70 | action: object
71 | Action to apply
72 |
73 | Returns
74 | -------
75 | obs: object
76 | New observation
77 | reward: float
78 | Received reward
79 | done: bool
80 | Whether or not the environment simulation finished
81 | info: dict
82 | Additional information provided by the environment.
83 | """
84 | ddict = self._reqrep(cmd="step", action=action)
85 | obs = ddict.pop("obs")
86 | r = ddict.pop("reward")
87 | done = ddict.pop("done")
88 | self.rgb_array = ddict.pop("rgb_array", None)
89 | return obs, r, done, ddict
90 |
91 | def render(self, mode="human", backend=None, gamma_coeff=2.2):
92 | """Render the current remote environment state.
93 |
94 | We consider Blender itself the visualization of the environment
95 | state. By calling this method a 2D render image of the environment
96 | will be shown, if the remote environment configured a suitable renderer.
97 |
98 | Params
99 | ------
100 | mode: str
101 | Either 'human' or 'rgb_array'
102 | backend: str, None
103 | Which backend to use to visualize the image. When None,
104 | automatically chosen by blendtorch.
105 | gamma_coeff: scalar
106 | Gamma correction coeff before visualizing image. Does not
107 | affect the returned rgb array when mode is `rgb_array` which
108 | remains in linear color space. Defaults to 2.2
109 | """
110 |
111 | if mode == "rgb_array" or self.rgb_array is None:
112 | return self.rgb_array
113 |
114 | if self.viewer is None:
115 | self.viewer = create_renderer(backend)
116 | self.viewer.imshow(colors.gamma(self.rgb_array, gamma_coeff))
117 |
118 | def _reqrep(self, **send_kwargs):
119 | """Convenience request-reply method."""
120 | try:
121 | ext = {**send_kwargs, "time": self.env_time}
122 | self.socket.send_pyobj(ext)
123 | except zmq.error.Again:
124 | raise ValueError("Failed to send to remote environment") from None
125 |
126 | try:
127 | ddict = self.socket.recv_pyobj()
128 | self.env_time = ddict["time"]
129 | return ddict
130 | except zmq.error.Again:
131 | raise ValueError("Failed to receive from remote environment") from None
132 |
133 | def close(self):
134 | """Close the environment."""
135 | if self.viewer:
136 | self.viewer.close()
137 | self.viewer = None
138 | if self.socket:
139 | self.socket.close()
140 | self.socket = None
141 |
142 |
143 | @contextmanager
144 | def launch_env(scene, script, background=False, **kwargs):
145 | """Launch a remote environment wrapped in a context manager.
146 |
147 | Params
148 | ------
149 | scene: path, str
150 | Blender scene file
151 | script: path, str
152 | Python script containing environment implementation.
153 | background: bool
154 | Whether or not this environment can run in Blender background mode.
155 | Defaults to False.
156 | kwargs: dict
157 | Any other arguments passed as command-line arguments
158 |         to the remote environment. By default, an entry is
159 |         converted to `--key str(value)`. Boolean values
160 |         are converted to switches `--key` or `--no-key`.
161 |         Note that underscores are converted to dashes as usual with
162 | command-line arguments and argparse.
163 |
164 | Yields
165 | ------
166 | env: `btt.RemoteEnv`
167 |         Remote environment to interact with.
168 | """
169 | env = None
170 | try:
171 | additional_args = []
172 | for k, v in kwargs.items():
173 | k = k.replace("_", "-")
174 | if isinstance(v, bool):
175 | if v:
176 | additional_args.append(f"--{k}")
177 | else:
178 | additional_args.append(f"--no-{k}")
179 | else:
180 | additional_args.extend([f"--{k}", str(v)])
181 |
182 | launcher_args = dict(
183 | scene=scene,
184 | script=script,
185 | num_instances=1,
186 | named_sockets=["GYM"],
187 | instance_args=[additional_args],
188 | background=background,
189 | )
190 | with BlenderLauncher(**launcher_args) as bl:
191 | env = RemoteEnv(bl.launch_info.addresses["GYM"][0])
192 | yield env
193 | finally:
194 | if env:
195 | env.close()
196 |
197 |
198 | try:
199 | import gym
200 | from contextlib import ExitStack
201 |
202 | class OpenAIRemoteEnv(gym.Env):
203 | """Base class for remote OpenAI gym compatible environments.
204 |
205 |         By inheriting from this class you can provide almost all of the
206 | code necessary to register a remote Blender environment to
207 | OpenAI gym.
208 |
209 | See the `examples/control/cartpole_gym` for details.
210 |
211 | Params
212 | ------
213 | version : str
214 | Version of this environment.
215 | """
216 |
217 | metadata = {"render.modes": ["rgb_array", "human"]}
218 |
219 | def __init__(self, version="0.0.1"):
220 | self.__version__ = version
221 | self._es = ExitStack()
222 | self._env = None
223 |
224 | def launch(self, scene, script, background=False, **kwargs):
225 | """Launch the remote environment.
226 |
227 | Params
228 | ------
229 | scene: path, str
230 | Blender scene file
231 | script: path, str
232 | Python script containing environment implementation.
233 | background: bool
234 | Whether or not this environment can run in Blender background mode.
235 | kwargs: dict
236 |             Any keyword arguments passed as command-line arguments
237 | to the remote environment. See `btt.env.launch_env` for
238 | details.
239 | """
240 | assert not self._env, "Environment already running."
241 | self._env = self._es.enter_context(
242 | launch_env(scene=scene, script=script, background=background, **kwargs)
243 | )
244 |
245 | def step(self, action):
246 | """Run one timestep of the environment's dynamics. When end of
247 | episode is reached, you are responsible for calling `reset()`
248 | to reset this environment's state.
249 |
250 | Accepts an action and returns a tuple (observation, reward, done, info).
251 | Note, this methods documentation is a 1:1 copy of OpenAI `gym.Env`.
252 |
253 | Params
254 | ------
255 | action: object
256 | An action provided by the agent
257 |
258 | Returns
259 | -------
260 | observation: object
261 | Agent's observation of the current environment
262 | reward: float
263 | Amount of reward returned after previous action
264 | done: bool
265 | Whether the episode has ended, in which case further step() calls will return undefined results
266 | info: (dict)
267 | Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
268 | """
269 | assert self._env, "Environment not running."
270 | obs, reward, done, info = self._env.step(action)
271 | return obs, reward, done, info
272 |
273 | def reset(self):
274 | """Resets the state of the environment and returns an initial observation.
275 |
276 | Note, this methods documentation is a 1:1 copy of OpenAI `gym.Env`.
277 |
278 | Returns
279 | -------
280 | observation: object
281 | The initial observation.
282 | """
283 | assert self._env, "Environment not running."
284 | obs, info = self._env.reset()
285 | return obs
286 |
287 | def seed(self, seed):
288 | """'Sets the seed for this env's random number generator(s)."""
289 | raise NotImplementedError()
290 |
291 | def render(self, mode="human"):
292 | """Renders the environment.
293 |
294 | Note, we consider Blender itself the main vehicle to view
295 | and manipulate the current environment state. Calling
296 | this method will usually render a specific camera view
297 | in Blender, transmit its image and visualize it. This will
298 | only work, if the remote environment supports such an operation.
299 | """
300 | assert self._env, "Environment not running."
301 | return self._env.render(mode=mode)
302 |
303 | @property
304 | def env_time(self):
305 | """Returns the remote environment time."""
306 | return self._env.env_time
307 |
308 | def close(self):
309 | """Close the environment."""
310 | if self._es:
311 | self._es.close()
312 | self._es = None
313 | self._env = None
314 |
315 | def __del__(self):
316 | self.close()
317 |
318 |
319 | except ImportError:
320 | pass
321 |
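A minimal sketch of driving a remote environment through the context manager. The scene and script paths are illustrative; see `examples/control` for a working setup.

```python
from blendtorch import btt

with btt.env.launch_env(scene="cartpole.blend", script="cartpole.blend.py") as env:
    obs, info = env.reset()
    obs, reward, done, info = env.step(action=0.0)
    env.render()  # shows an image if the remote environment configured a renderer
```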
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/env_rendering.py:
--------------------------------------------------------------------------------
1 | """Create and maintain image renderers used in `env.render()` to display images."""
2 |
3 | RENDER_BACKENDS = {}
4 | LOOKUP_ORDER = ["openai", "matplotlib"]
5 |
6 |
7 | def create_renderer(backend=None, **kwargs):
8 | """Create image display.
9 |
10 | Params
11 | ------
12 | backend: str, None
13 | The backend to use. When None, auto selects.
14 | kwargs: dict
15 | Additional keywords to be passed to initialization.
16 | """
17 | if backend is None:
18 | avail = [RENDER_BACKENDS[l] for l in LOOKUP_ORDER if l in RENDER_BACKENDS]
19 | assert len(avail) > 0, "No render backends available."
20 | kls = avail[0]
21 | else:
22 | assert backend in RENDER_BACKENDS, f"Render backend {backend} not found."
23 | kls = RENDER_BACKENDS[backend]
24 | return kls(**kwargs)
25 |
26 |
27 | ## MATPLOTLIB
28 | try:
29 | import matplotlib.pyplot as plt
30 |
31 | class MatplotlibRenderer:
32 | def __init__(self, **kwargs):
33 | self.fig, self.ax = plt.subplots(1, 1)
34 | self.img = None
35 |
36 | def imshow(self, rgb):
37 | if self.img is None:
38 | self.img = self.ax.imshow(rgb)
39 | plt.show(block=False)
40 | self.fig.canvas.draw()
41 | else:
42 | self.img.set_data(rgb)
43 | self.fig.canvas.draw_idle()
44 | self.fig.canvas.flush_events()
45 |
46 | def close(self):
47 | if self.fig is not None:
48 | plt.close(self.fig)
49 | self.fig = None
50 |
51 | def __del__(self):
52 | self.close()
53 |
54 | RENDER_BACKENDS["matplotlib"] = MatplotlibRenderer
55 | except ImportError as e:
56 | pass
57 |
58 | ## PYGLET/OpenAI based
59 | try:
60 | from gym.envs.classic_control import rendering
61 |
62 | class OpenAIGymRenderer(object):
63 | def __init__(self, **kwargs):
64 | self._viewer = rendering.SimpleImageViewer(**kwargs)
65 |
66 | def imshow(self, rgb):
67 | self._viewer.imshow(rgb)
68 |
69 | def close(self):
70 | if self._viewer:
71 | self._viewer.close()
72 | self._viewer = None
73 |
74 | def __del__(self):
75 | self.close()
76 |
77 | RENDER_BACKENDS["openai"] = OpenAIGymRenderer
78 | except ImportError as e:
79 | pass
80 |
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/file.py:
--------------------------------------------------------------------------------
1 | import io
2 | import pickle
3 | import logging
4 | import numpy as np
5 | from pathlib import Path
6 |
7 | _logger = logging.getLogger("blendtorch")
8 |
9 |
10 | class FileRecorder:
11 | """Provides raw message recording functionality.
12 |
13 | `FileRecorder` stores received pickled messages into a single file.
14 | This file consists of a header and a sequence of messages received.
15 | Upon closing, the header is updated to match the offsets of pickled
16 | messages in the file. The offsets allow `FileReader` to quickly
17 | read and unpickle a specific message stored in this file.
18 |
19 | This class is meant to be used as context manager.
20 |
21 | Params
22 | ------
23 | outpath: str
24 | File path to write to
25 | max_messages: int
26 | Only up to `max_messages` can be stored.
27 | """
28 |
29 | def __init__(
30 | self, outpath="blendtorch.mpkl", max_messages=100000, update_header=10000
31 | ):
32 | outpath = Path(outpath)
33 | outpath.parent.mkdir(parents=True, exist_ok=True)
34 | self.outpath = outpath
35 | self.capacity = max_messages
36 | self.update_header = update_header
37 | _logger.info(
38 | f"Recording configured for path {outpath}, max_messages {max_messages}."
39 | )
40 |
41 | def save(self, data, is_pickled=False):
42 | """Save new data if there is still capacity left.
43 |
44 | Params
45 | ------
46 | data : object
47 | Pickled bytes or unpickled message.
48 | is_pickled: bool
49 | Whether or not the input is already in pickled
50 |             representation.
51 | """
52 | if self.num_messages < self.capacity:
53 | offset = self.file.tell()
54 | self.offsets[self.num_messages] = offset
55 | self.num_messages += 1
56 | if not is_pickled:
57 | self.pickler.dump(data)
58 | else:
59 | self.file.write(data)
60 |
61 | if self.num_messages % self.update_header == 0:
62 | self._update_offsets()
63 |
64 | def __enter__(self):
65 | self.file = io.open(self.outpath, "wb", buffering=0)
66 | # We currently cannot use the highest protocol but use a version
67 | # compatible with Python 3.7 (Blender version).
68 | # TODO even if we set this to highest (-1) and only run the tests
69 | # we get an error when loading data from stream. We should ensure
70 | # that what we do here is actually OK, independent of the protocol.
71 | self.pickler = pickle.Pickler(self.file, protocol=3)
72 | self.offsets = np.full(self.capacity, -1, dtype=np.int64)
73 | self.num_messages = 0
74 | self.pickler.dump(self.offsets) # We fix this once we update headers.
75 | return self
76 |
77 | def __exit__(self, *args):
78 | self._update_offsets()
79 | self.file.close()
80 | self.file = None
81 |
82 | def _update_offsets(self):
83 | off = self.file.tell()
84 | self.file.seek(0)
85 | pickle.Pickler(self.file, protocol=3).dump(self.offsets)
86 | self.file.seek(off)
87 |
88 | @staticmethod
89 | def filename(prefix, worker_idx):
90 | """Return a unique filename for the given prefix and worker id."""
91 | return f"{prefix}_{worker_idx:02d}.btr"
92 |
93 |
94 | class FileReader:
95 | """Read items from file.
96 |
97 | Assumes file was written by `FileRecorder`.
98 |
99 | Params
100 | ------
101 | path: str
102 | File path to read.
103 | """
104 |
105 | def __init__(self, path):
106 | self.path = path
107 | self.offsets = FileReader.read_offsets(path)
108 | self._file = None
109 |
110 | def __len__(self):
111 | """Returns number of items stored."""
112 | return len(self.offsets)
113 |
114 | def __getitem__(self, idx):
115 | """Read message associated with index `idx`."""
116 | if self._file is None:
117 |             # Lazily creating the file object here to ensure PyTorch
118 |             # multiprocessing compatibility (i.e. num_workers > 0)
119 | self._create()
120 |
121 | self._file.seek(self.offsets[idx])
122 | return self._unpickler.load()
123 |
124 | def _create(self):
125 | self._file = io.open(self.path, "rb", buffering=0)
126 | self._unpickler = pickle.Unpickler(self._file)
127 |
128 | def close(self):
129 | """Close the reader."""
130 | if self._file is not None:
131 | self._file.close()
132 | self._file = None
133 |
134 | @staticmethod
135 | def read_offsets(fname):
136 | """Returns the offset header of file refered to by `fname`."""
137 | assert Path(fname).exists(), f"Cannot open {fname} for reading."
138 | with io.open(fname, "rb") as f:
139 | unpickler = pickle.Unpickler(f)
140 | offsets = unpickler.load()
141 | num_messages = len(offsets)
142 |
143 | m = np.where(offsets == -1)[0]
144 | if len(m) > 0:
145 | num_messages = m[0]
146 | return offsets[:num_messages]
147 |
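A small sketch of recording and replaying messages. The path is illustrative, and the payload is any pickle-able dictionary as used throughout blendtorch.

```python
from blendtorch.btt.file import FileRecorder, FileReader

with FileRecorder("tmp/demo_00.btr", max_messages=10) as rec:
    rec.save({"frameid": 0, "value": 42})

reader = FileReader("tmp/demo_00.btr")
print(len(reader), reader[0])  # 1 {'frameid': 0, 'value': 42}
```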
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/finder.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import re
4 | import shutil
5 | import logging
6 | import tempfile
7 | from pathlib import Path
8 |
9 | logger = logging.getLogger("blendtorch")
10 |
11 | script = r"""
12 | import zmq
13 |
14 | """
15 |
16 |
17 | def discover_blender(additional_blender_paths=None):
18 | """Return Blender info as dict with keys `path`, `major`, `minor`."""
19 |
20 | my_env = os.environ.copy()
21 | if additional_blender_paths is not None:
22 | my_env["PATH"] = additional_blender_paths + os.pathsep + my_env["PATH"]
23 |
24 | # Determine path
25 |
26 | bpath = shutil.which("blender", path=my_env["PATH"])
27 | if bpath is None:
28 | logger.warning("Could not find Blender.")
29 | return None
30 | else:
31 | logger.debug(f"Discovered Blender in {bpath}")
32 | # Using absolute instead of resolve to not follow symlinks (snap issue on linux)
33 | bpath = Path(bpath).absolute()
34 |
35 | p = subprocess.Popen(
36 | f'"{bpath}" --version',
37 | shell=True,
38 | stdout=subprocess.PIPE,
39 | stderr=subprocess.PIPE,
40 | env=my_env,
41 | )
42 |
43 | out, err = p.communicate()
44 | errcode = p.returncode
45 |
46 | # Determine version
47 |
48 | r = re.compile(r"Blender\s(\d+)\.(\d+)", re.IGNORECASE)
49 | g = re.search(r, str(out))
50 |
51 | version = (None, None)
52 | if errcode == 0 and g is not None:
53 | version = (int(g[1]), int(g[2]))
54 | else:
55 | logger.warning("Failed to parse Blender version.")
56 | return None
57 |
58 | # Check if a minimal Python script works
59 | with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
60 | fp.write(script)
61 |
62 | p = subprocess.Popen(
63 | f'"{bpath}" --background --python-use-system-env --python-exit-code 255 --python {fp.name}',
64 | shell=True,
65 | stdout=subprocess.PIPE,
66 | stderr=subprocess.PIPE,
67 | env=my_env,
68 | )
69 | out, err = p.communicate()
70 | errcode = p.returncode
71 | os.remove(fp.name)
72 |
73 | if errcode != 0:
74 | logger.warning(
75 | "Failed to run minimal Blender script; ensure Python requirements are installed."
76 | )
77 | return None
78 |
79 | return {"path": bpath, "major": version[0], "minor": version[1]}
80 |
81 |
82 | def _main():
83 | print(discover_blender())
84 |
85 |
86 | if __name__ == "__main__":
87 | _main()
88 |
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/launch_info.py:
--------------------------------------------------------------------------------
1 | import json
2 | from contextlib import ExitStack, nullcontext
3 |
4 |
5 | class LaunchInfo:
6 | """Holds information about running Blender instances.
7 |
8 | Attributes
9 | ----------
10 | addresses: dict
11 | Dictionary of spawned addresses grouped by socket name.
12 | commands: list
13 | List of command line arguments used to spawn Blender instances.
14 | processes: list
15 | List of running spawned processes. Only populated when
16 | locally launched through BlenderLauncher, otherwise None.
17 | """
18 |
19 | def __init__(self, addresses, commands, processes=None):
20 | self.addresses = addresses
21 | self.processes = processes
22 | self.commands = commands
23 |
24 | @staticmethod
25 | def save_json(file, launch_info):
26 | """Save launch information in JSON format.
27 |
28 | Useful if you want to reconnect to running instances from a different location.
29 | This will only serialize addresses and commands.
30 |
31 | Params
32 | ------
33 | file: file-like object, string, or pathlib.Path
34 | The file to save to.
35 | launch_info: LaunchInfo
36 | The launch information to save.
37 | """
38 | with ExitStack() as stack:
39 | if hasattr(file, "write"):
40 | fp = stack.enter_context(nullcontext(file))
41 | else:
42 | fp = stack.enter_context(open(file, "w"))
43 | json.dump(
44 | {"addresses": launch_info.addresses, "commands": launch_info.commands},
45 | fp,
46 | indent=4,
47 | )
48 |
49 | @staticmethod
50 | def load_json(file):
51 | """Load launch information from JSON format.
52 |
53 | Params
54 | ------
55 | file: file-like object, string, or pathlib.Path
56 | The file to read from.
57 |
58 | Returns
59 | -------
60 | launch_info: LaunchInfo
61 | Restored launch information
62 | """
63 | with ExitStack() as stack:
64 | if hasattr(file, "read"):
65 | fp = stack.enter_context(nullcontext(file))
66 | else:
67 | fp = stack.enter_context(open(file, "r"))
68 | data = json.load(fp)
69 | return LaunchInfo(data["addresses"], data["commands"])
70 |
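A small round-trip sketch of saving and restoring launch information, e.g. to reconnect to already running Blender instances from another process. The address and command below are illustrative placeholders.

```python
from blendtorch.btt.launch_info import LaunchInfo

info = LaunchInfo({"DATA": ["tcp://192.168.0.10:11000"]}, ["blender ..."])
LaunchInfo.save_json("launch_info.json", info)
restored = LaunchInfo.load_json("launch_info.json")
assert restored.addresses == info.addresses
```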
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/launcher.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import os
3 | import logging
4 | import numpy as np
5 | import psutil
6 | import signal
7 |
8 |
9 | from .finder import discover_blender
10 | from .launch_info import LaunchInfo
11 | from .utils import get_primary_ip
12 |
13 | logger = logging.getLogger("blendtorch")
14 |
15 |
16 | class BlenderLauncher:
17 | """Opens and closes Blender instances.
18 |
19 |     This class is meant to be used within a `with` block to ensure clean launch/shutdown of background processes.
20 |
21 | Params
22 | ------
23 | scene : str
24 | Scene file to be processed by Blender instances
25 | script: str
26 | Script file to be called by Blender
27 | num_instances: int (default=1)
28 | How many Blender instances to create
29 | named_sockets: list-like, optional
30 |         Descriptive names of sockets to be passed to launched instances
31 | via command-line arguments '-btsockets name=tcp://address:port ...'
32 | to Blender. They are also available via LaunchInfo to PyTorch.
33 | start_port : int (default=11000)
34 | Start of port range for publisher sockets
35 | bind_addr : str (default='127.0.0.1')
36 |         Address to bind publisher sockets to. If 'primaryip', binds to the primary
37 |         IP address (the one with a default route), or `127.0.0.1` if none is available.
38 | proto: string (default='tcp')
39 | Protocol to use.
40 | instance_args : array (default=None)
41 | Additional arguments per instance to be passed as command
42 | line arguments.
43 | blend_path: str, optional
44 | Additional paths to look for Blender
45 | seed: integer, optional
46 |         Optional launch seed. Each instance is given seed + its instance index.
47 | background: bool
48 | Launch Blender in background mode. Note that certain
49 |         animation modes and rendering do require a UI and
50 | cannot run in background mode.
51 |
52 | Attributes
53 | ----------
54 | launch_info: LaunchInfo
55 | Process launch information, available after entering the
56 | context.
57 | """
58 |
59 | def __init__(
60 | self,
61 | scene,
62 | script,
63 | num_instances=1,
64 | named_sockets=None,
65 | start_port=11000,
66 | bind_addr="127.0.0.1",
67 | instance_args=None,
68 | proto="tcp",
69 | blend_path=None,
70 | seed=None,
71 | background=False,
72 | ):
73 | """Create BlenderLauncher"""
74 | self.num_instances = num_instances
75 | self.start_port = start_port
76 | self.bind_addr = bind_addr
77 | self.proto = proto
78 | self.scene = scene
79 | self.script = script
80 | self.blend_path = blend_path
81 | self.named_sockets = named_sockets
82 | if named_sockets is None:
83 | self.named_sockets = []
84 | self.seed = seed
85 | self.background = background
86 | self.instance_args = instance_args
87 | if instance_args is None:
88 | self.instance_args = [[] for _ in range(num_instances)]
89 | assert num_instances > 0
90 | assert len(self.instance_args) == num_instances
91 |
92 | self.blender_info = discover_blender(self.blend_path)
93 | if self.blender_info is None:
94 | logger.warning("Launching Blender failed;")
95 | raise ValueError("Blender not found or misconfigured.")
96 | else:
97 | logger.info(
98 | f'Blender found {self.blender_info["path"]} version {self.blender_info["major"]}.{self.blender_info["minor"]}'
99 | )
100 |
101 | self.launch_info = None
102 | self.processes = None
103 |
104 | def __enter__(self):
105 | """Launch processes"""
106 | assert self.launch_info is None, "Already launched."
107 |
108 | addresses = {}
109 | addrgen = self._address_generator(self.proto, self.bind_addr, self.start_port)
110 | for s in self.named_sockets:
111 | addresses[s] = [next(addrgen) for _ in range(self.num_instances)]
112 |
113 | seed = self.seed
114 | if seed is None:
115 | seed = np.random.randint(np.iinfo(np.int32).max - self.num_instances)
116 | seeds = [seed + i for i in range(self.num_instances)]
117 |
118 | instance_script_args = [[] for _ in range(self.num_instances)]
119 | for idx, iargs in enumerate(instance_script_args):
120 | iargs.extend(
121 | [
122 | "-btid",
123 | str(idx),
124 | "-btseed",
125 | str(seeds[idx]),
126 | "-btsockets",
127 | ]
128 | )
129 | iargs.extend([f"{k}={v[idx]}" for k, v in addresses.items()])
130 | iargs.extend(self.instance_args[idx])
131 |
132 | popen_kwargs = {}
133 | if os.name == "posix":
134 | popen_kwargs = {"preexec_fn": os.setsid}
135 | elif os.name == "nt":
136 | popen_kwargs = {"creationflags": subprocess.CREATE_NEW_PROCESS_GROUP}
137 |
138 | processes = []
139 | commands = []
140 | env = os.environ.copy()
141 | for idx, script_args in enumerate(instance_script_args):
142 | cmd = [f'{self.blender_info["path"]}']
143 | if self.scene is not None and len(str(self.scene)) > 0:
144 | cmd.append(f"{self.scene}")
145 | if self.background:
146 | cmd.append("--background")
147 | cmd.append("--python-use-system-env")
148 | cmd.append("--enable-autoexec")
149 | cmd.append("--python")
150 | cmd.append(f"{self.script}")
151 | cmd.append("--")
152 | cmd.extend(script_args)
153 |
154 | p = subprocess.Popen(
155 | cmd,
156 | shell=False,
157 | stdin=None,
158 | stdout=None,
159 | stderr=None,
160 | env=env,
161 | **popen_kwargs,
162 | )
163 |
164 | processes.append(p)
165 | commands.append(" ".join(cmd))
166 | logger.info(f"Started instance: {cmd}")
167 |
168 | self.launch_info = LaunchInfo(addresses, commands, processes=processes)
169 | return self
170 |
171 | def assert_alive(self):
172 | """Tests if all launched process are alive."""
173 | if self.launch_info is None:
174 | return
175 | codes = self._poll()
176 | assert all([c is None for c in codes]), f"Alive test failed. Exit codes {codes}"
177 |
178 | def wait(self):
179 | """Wait until all launched processes terminate."""
180 | [p.wait() for p in self.launch_info.processes]
181 |
182 | def __exit__(self, exc_type, exc_value, exc_traceback):
183 | """Terminate all processes."""
184 | all_closed = all(
185 | [
186 | self._kill_tree(p.pid, sig=signal.SIGTERM, timeout=5.0)
187 | for p in self.launch_info.processes
188 | ]
189 | )
190 | if not all_closed:
191 | all_closed = all(
192 | [
193 | self._kill_tree(p.pid, sig=signal.SIGKILL, timeout=5.0)
194 | for p in self.launch_info.processes
195 | ]
196 | )
197 | self.launch_info = None
198 | if not all_closed:
199 | logger.warning("Not all Blender instances closed")
200 | else:
201 | logger.info("Blender instances closed")
202 |
203 | def _address_generator(self, proto, bind_addr, start_port):
204 | """Convenience to generate addresses."""
205 | if bind_addr == "primaryip":
206 | bind_addr = get_primary_ip()
207 | nextport = start_port
208 | while True:
209 | addr = f"{proto}://{bind_addr}:{nextport}"
210 | nextport += 1
211 | yield addr
212 |
213 | def _poll(self):
214 | """Convenience to poll all processes exit codes."""
215 | return [p.poll() for p in self.launch_info.processes]
216 |
217 | def _kill_tree(
218 | self,
219 | pid,
220 | sig=signal.SIGTERM,
221 | include_parent=True,
222 | timeout=None,
223 | on_terminate=None,
224 | ) -> bool:
225 | """Kill a process tree.
226 |
227 | This method is required because some tools actually spawn Blender as a subprocess (e.g. snap). It
228 | ensures that the process opened and all of its subprocesses are killed.
229 | """
230 | parent = psutil.Process(pid)
231 | plist = parent.children(recursive=True)
232 | if include_parent:
233 | plist.append(parent)
234 |
235 | for p in plist:
236 | p.send_signal(sig)
237 |
238 | gone, alive = psutil.wait_procs(plist, timeout=timeout, callback=on_terminate)
239 |
240 | return len(gone) == len(plist)
241 |
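A minimal usage sketch of `BlenderLauncher`, mirroring the tests further below (scene and script paths are placeholders):

    from blendtorch import btt

    launch_args = dict(
        scene="",                     # .blend file, or "" for an empty scene
        script="my_script.blend.py",  # script executed inside each Blender instance
        num_instances=1,
        named_sockets=["DATA"],
        background=True,
    )
    with btt.BlenderLauncher(**launch_args) as bl:
        addr = bl.launch_info.addresses["DATA"]
        ds = btt.RemoteIterableDataset(addr, max_items=4)
        for item in ds:
            pass  # consume published data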
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/utils.py:
--------------------------------------------------------------------------------
1 | import socket
2 |
3 |
4 | def get_primary_ip():
5 | """Returns the primary IP address of this machine (the one with).
6 |
7 | See https://stackoverflow.com/a/28950776
8 | Returns the IP address with the default route attached or `127.0.0.1`.
9 | """
10 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
11 | try:
12 | # doesn't even have to be reachable
13 | s.connect(("10.255.255.255", 1))
14 | IP = s.getsockname()[0]
15 | except Exception:
16 | IP = "127.0.0.1"
17 | finally:
18 | s.close()
19 | return IP
20 |
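For illustration, this helper backs the `bind_addr='primaryip'` option of `BlenderLauncher`; calling it directly might look like this (the address shown is hypothetical):

    from blendtorch.btt.utils import get_primary_ip

    ip = get_primary_ip()  # e.g. '192.168.1.23', or '127.0.0.1' if no default route exists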
--------------------------------------------------------------------------------
/pkg_pytorch/blendtorch/btt/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.4.0"
2 |
--------------------------------------------------------------------------------
/pkg_pytorch/requirements.txt:
--------------------------------------------------------------------------------
1 | pyzmq>=18.1
2 | numpy>=1.18.2
3 | matplotlib>=3.1.1
4 | torch>=1.3.1
5 | psutil>=5.8.0
--------------------------------------------------------------------------------
/pkg_pytorch/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from pathlib import Path
3 |
4 | THISDIR = Path(__file__).parent
5 |
6 | with open(THISDIR / "requirements.txt") as f:
7 | required = f.read().splitlines()
8 |
9 | with open(THISDIR / ".." / "Readme.md", encoding="utf-8") as f:
10 | long_description = f.read()
11 |
12 | main_ns = {}
13 | with open(THISDIR / "blendtorch" / "btt" / "version.py") as ver_file:
14 | exec(ver_file.read(), main_ns)
15 |
16 | setup(
17 | name="blendtorch-btt",
18 | author="Christoph Heindl and Sebastian Zambal",
19 | description="PyTorch part of project blendtorch. See also blendtorch-btb.",
20 | url="https://github.com/cheind/pytorch-blender",
21 | license="MIT",
22 | long_description=long_description,
23 | long_description_content_type="text/markdown",
24 | version=main_ns["__version__"],
25 | packages=["blendtorch.btt", "blendtorch.btt.apps"],
26 | install_requires=required,
27 | zip_safe=False,
28 | entry_points={
29 | "console_scripts": ["blendtorch-launch=blendtorch.btt.apps.launch:main"],
30 | },
31 | )
32 |
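The `console_scripts` entry point exposes `blendtorch.btt.apps.launch:main` as `blendtorch-launch`. Judging from tests/test_launcher.py below, an invocation might look like the following, where `launchargs.json` holds `BlenderLauncher` keyword arguments and the resulting addresses are written to the `--out-launch-info` file (file names are placeholders):

    blendtorch-launch --out-launch-info launchinfo.json launchargs.json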
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | markers =
3 | background: marks the test as compatible with Blender --background flag.
4 | log_level=INFO
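Because `background` is registered as a marker above, a headless setup could restrict a run to tests flagged as compatible with Blender's --background mode, for example:

    pytest tests -m background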
--------------------------------------------------------------------------------
/requirements_dev.txt:
--------------------------------------------------------------------------------
1 | pytest>=3.9.3
2 | gym>=0.17.2
--------------------------------------------------------------------------------
/scripts/install_blender.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | NAME="blender-3.6.5-linux-x64"
4 | NAMETAR="${NAME}.tar.xz"
5 | CACHE="${HOME}/.blender-cache"
6 | TAR="${CACHE}/${NAMETAR}"
7 | URL="https://mirror.clarkson.edu/blender/release/Blender3.6/${NAMETAR}"
8 |
9 | echo "Installing Blender ${NAME}"
10 | mkdir -p $CACHE
11 | if [ ! -f $TAR ]; then
12 | wget -O $TAR $URL -q
13 | fi
14 | tar -xf $TAR -C $HOME
15 |
16 | echo "export PATH=${PATH}:\"${HOME}/${NAME}\"" > .envs
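The generated .envs file is presumably meant to be sourced afterwards so the extracted Blender binary ends up on PATH, e.g.:

    bash scripts/install_blender.sh && source .envs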
--------------------------------------------------------------------------------
/scripts/install_btb.py:
--------------------------------------------------------------------------------
1 | """Install Blender dependencies.
2 |
3 | Meant to be run ONCE via blender as follows
4 | `blender --background --python scripts/install_btb.py`
5 | """
6 |
7 | import bpy
8 | import sys
9 | import subprocess
10 | from pathlib import Path
11 |
12 | THISDIR = Path(__file__).parent
13 |
14 |
15 | def run(cmd):
16 | try:
17 | output = subprocess.check_output(cmd)
18 | print(output)
19 | except subprocess.CalledProcessError as e:
20 | print(e.output)
21 | sys.exit(1)
22 |
23 |
24 | def install(name, upgrade=True, user=True, editable=False):
25 | cmd = [sys.executable, "-m", "pip", "install"]
26 | if upgrade:
27 | cmd.append("--upgrade")
28 | if user:
29 | cmd.append("--user")
30 | if editable:
31 | cmd.append("-e")
32 | cmd.append(name)
33 | run(cmd)
34 |
35 |
36 | def bootstrap(user=True):
37 | cmd = [sys.executable, "-m", "ensurepip", "--upgrade"]
38 | if user:
39 | cmd.append("--user")
40 | run(cmd)
41 | cmd = [sys.executable, "-m", "pip", "install", "--upgrade", "pip"]
42 | if user:
43 | cmd.append("--user")
44 | run(cmd)
45 |
46 |
47 | def main():
48 | print("Installing Blender dependencies. This might take a while...")
49 | bootstrap(user=True)
50 | install(str(THISDIR / ".." / "pkg_blender"), editable=True, user=True)
51 |
52 |
53 | main()
54 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/tests/__init__.py
--------------------------------------------------------------------------------
/tests/blender/anim.blend.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | from blendtorch import btb
3 |
4 |
5 | def main():
6 | btargs, remainder = btb.parse_blendtorch_args()
7 |
8 | seq = []
9 |
10 | def pre_play(anim):
11 | seq.extend(["pre_play", anim.frameid])
12 |
13 | def pre_animation(anim):
14 | seq.extend(["pre_animation", anim.frameid])
15 |
16 | def pre_frame(anim):
17 | seq.extend(["pre_frame", anim.frameid])
18 |
19 | def post_frame(anim):
20 | seq.extend(["post_frame", anim.frameid])
21 |
22 | def post_animation(anim):
23 | seq.extend(["post_animation", anim.frameid])
24 |
25 | def post_play(anim, pub):
26 | seq.extend(["post_play", anim.frameid])
27 | pub.publish(seq=seq)
28 |
29 | # Data source: add linger so pending data still gets sent when Blender closes.
30 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid, lingerms=5000)
31 |
32 | anim = btb.AnimationController()
33 | anim.pre_play.add(pre_play, anim)
34 | anim.pre_animation.add(pre_animation, anim)
35 | anim.pre_frame.add(pre_frame, anim)
36 | anim.post_frame.add(post_frame, anim)
37 | anim.post_animation.add(post_animation, anim)
38 | anim.post_play.add(post_play, anim, pub)
39 | anim.play(frame_range=(1, 3), num_episodes=2, use_animation=not bpy.app.background)
40 |
41 |
42 | main()
43 |
--------------------------------------------------------------------------------
/tests/blender/cam.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/tests/blender/cam.blend
--------------------------------------------------------------------------------
/tests/blender/cam.blend.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | from blendtorch import btb
3 |
4 |
5 | def main():
6 | btargs, remainder = btb.parse_blendtorch_args()
7 |
8 | cube = bpy.data.objects["Cube"]
9 | ortho = btb.Camera(bpy.data.objects["CamOrtho"])
10 | proj = btb.Camera(bpy.data.objects["CamProj"])
11 |
12 | xyz = btb.utils.world_coordinates(cube)
13 |
14 | proj_ndc, proj_z = proj.world_to_ndc(xyz, return_depth=True)
15 | proj_pix = proj.ndc_to_pixel(proj_ndc, origin="upper-left")
16 |
17 | ortho_ndc, ortho_z = ortho.world_to_ndc(xyz, return_depth=True)
18 | ortho_pix = ortho.ndc_to_pixel(ortho_ndc, origin="upper-left")
19 |
20 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid, lingerms=5000)
21 | pub.publish(ortho_xy=ortho_pix, ortho_z=ortho_z, proj_xy=proj_pix, proj_z=proj_z)
22 |
23 |
24 | main()
25 |
--------------------------------------------------------------------------------
/tests/blender/compositor.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/tests/blender/compositor.blend
--------------------------------------------------------------------------------
/tests/blender/compositor.blend.py:
--------------------------------------------------------------------------------
1 | from blendtorch import btb
2 |
3 |
4 | def main():
5 | btargs, remainder = btb.parse_blendtorch_args()
6 |
7 | cam = btb.Camera()
8 | render = btb.CompositeRenderer(
9 | [
10 | btb.CompositeSelection("color", "File Output", "Color", "RGB"),
11 | btb.CompositeSelection("depth", "File Output", "Depth", "V"),
12 | ],
13 | btid=btargs.btid,
14 | camera=cam,
15 | )
16 | data = render.render()
17 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid, lingerms=5000)
18 | pub.publish(**data)
19 |
20 |
21 | main()
22 |
--------------------------------------------------------------------------------
/tests/blender/dataset.blend.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import numpy as np
3 | from blendtorch import btb
4 |
5 |
6 | def main():
7 | btargs, remainder = btb.parse_blendtorch_args()
8 |
9 | def post_frame(pub, anim):
10 | pub.publish(frameid=anim.frameid, img=np.zeros((64, 64), dtype=np.uint8))
11 |
12 | # Data source: add linger so pending data still gets sent when Blender closes.
13 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid, lingerms=5000)
14 |
15 | anim = btb.AnimationController()
16 | anim.post_frame.add(post_frame, pub, anim)
17 | anim.play(frame_range=(1, 3), num_episodes=-1, use_animation=not bpy.app.background)
18 |
19 |
20 | main()
21 |
--------------------------------------------------------------------------------
/tests/blender/dataset_robust.blend.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import numpy as np
3 | from blendtorch import btb
4 |
5 |
6 | def main():
7 | btargs, remainder = btb.parse_blendtorch_args()
8 |
9 | cnt = 0
10 |
11 | def post_frame(pub, anim):
12 | nonlocal cnt
13 | pub.publish(frameid=anim.frameid, img=np.zeros((64, 64), dtype=np.uint8))
14 | cnt += 1
15 | if btargs.btid > 0 and cnt > 200:
16 | raise ValueError()
17 |
18 | # Data source: add linger so pending data still gets sent when Blender closes.
19 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid, lingerms=5000)
20 |
21 | anim = btb.AnimationController()
22 | anim.post_frame.add(post_frame, pub, anim)
23 | anim.play(frame_range=(1, 3), num_episodes=-1, use_animation=not bpy.app.background)
24 |
25 |
26 | main()
27 |
--------------------------------------------------------------------------------
/tests/blender/duplex.blend.py:
--------------------------------------------------------------------------------
1 | from blendtorch import btb
2 |
3 |
4 | def main():
5 | # Note, need to linger a bit in order to wait for unsent messages to be transmitted
6 | # before exiting blender.
7 | btargs, remainder = btb.parse_blendtorch_args()
8 | duplex = btb.DuplexChannel(
9 | btargs.btsockets["CTRL"], btid=btargs.btid, lingerms=5000
10 | )
11 |
12 | msg = duplex.recv(timeoutms=5000)
13 | duplex.send(echo=msg)
14 | duplex.send(msg="end")
15 |
16 |
17 | main()
18 |
--------------------------------------------------------------------------------
/tests/blender/env.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cheind/pytorch-blender/a5b819f04ff5c7145b7164b48abd1a3fe69f0db4/tests/blender/env.blend
--------------------------------------------------------------------------------
/tests/blender/env.blend.py:
--------------------------------------------------------------------------------
1 | import bpy
2 |
3 | from blendtorch import btb
4 |
5 |
6 | class MyEnv(btb.env.BaseEnv):
7 | def __init__(self, agent, done_after=10):
8 | super().__init__(agent)
9 | self.cube = bpy.data.objects["Cube"]
10 | self.count = 0
11 | self.done_after = done_after
12 |
13 | def _env_reset(self):
14 | self.cube.rotation_euler[2] = 0.0
15 | self.count = 0
16 | return dict(obs=self.cube.rotation_euler[2], count=self.count, done=False)
17 |
18 | def _env_prepare_step(self, action):
19 | self.cube.rotation_euler[2] = action
20 |
21 | def _env_post_step(self):
22 | self.count += 1
23 | angle = self.cube.rotation_euler[2]
24 | return dict(
25 | obs=angle,
26 | reward=1.0 if abs(angle) > 0.5 else 0.0,
27 | done=self.events.frameid > self.done_after,
28 | count=self.count,
29 | )
30 |
31 |
32 | def main():
33 | args, remainder = btb.parse_blendtorch_args()
34 | import argparse
35 |
36 | parser = argparse.ArgumentParser()
37 | parser.add_argument("--done-after", default=10, type=int)
38 | envargs = parser.parse_args(remainder)
39 |
40 | agent = btb.env.RemoteControlledAgent(args.btsockets["GYM"])
41 | env = MyEnv(agent, done_after=envargs.done_after)
42 | if not bpy.app.background:
43 | env.attach_default_renderer(every_nth=1)
44 | env.run(frame_range=(1, 10000), use_animation=not bpy.app.background)
45 |
46 |
47 | main()
48 |
--------------------------------------------------------------------------------
/tests/blender/launcher.blend.py:
--------------------------------------------------------------------------------
1 | from blendtorch import btb
2 |
3 |
4 | def main():
5 | # Note, need to linger a bit in order to wait for unsent messages to be transmitted
6 | # before exiting blender.
7 | btargs, remainder = btb.parse_blendtorch_args()
8 | pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid, lingerms=10000)
9 | pub.publish(btargs=vars(btargs), remainder=remainder)
10 |
11 |
12 | main()
13 |
--------------------------------------------------------------------------------
/tests/test_animation.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pathlib import Path
3 | from blendtorch import btt
4 |
5 | BLENDDIR = Path(__file__).parent / "blender"
6 |
7 | EXPECTED = [
8 | "pre_play",
9 | 1,
10 | "pre_animation",
11 | 1,
12 | "pre_frame",
13 | 1,
14 | "post_frame",
15 | 1,
16 | "pre_frame",
17 | 2,
18 | "post_frame",
19 | 2,
20 | "pre_frame",
21 | 3,
22 | "post_frame",
23 | 3,
24 | "post_animation",
25 | 3,
26 | "pre_animation",
27 | 1,
28 | "pre_frame",
29 | 1,
30 | "post_frame",
31 | 1,
32 | "pre_frame",
33 | 2,
34 | "post_frame",
35 | 2,
36 | "pre_frame",
37 | 3,
38 | "post_frame",
39 | 3,
40 | "post_animation",
41 | 3,
42 | "post_play",
43 | 3,
44 | ]
45 |
46 |
47 | def _capture_anim_callback_sequence(background):
48 | launch_args = dict(
49 | scene="",
50 | script=BLENDDIR / "anim.blend.py",
51 | named_sockets=["DATA"],
52 | background=background,
53 | )
54 |
55 | with btt.BlenderLauncher(**launch_args) as bl:
56 | addr = bl.launch_info.addresses["DATA"]
57 | ds = btt.RemoteIterableDataset(addr, max_items=1, timeoutms=10000)
58 | item = next(iter(ds))
59 | assert item["seq"] == EXPECTED
63 |
64 |
65 | #@pytest.mark.background
66 | def test_anim_callback_sequence():
67 | _capture_anim_callback_sequence(background=True)
68 |
69 |
70 | def test_anim_callback_sequence_ui():
71 | _capture_anim_callback_sequence(background=False)
72 |
--------------------------------------------------------------------------------
/tests/test_camera.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pathlib import Path
3 | import numpy as np
4 | from numpy.testing import assert_allclose
5 | from blendtorch import btt
6 |
7 | BLENDDIR = Path(__file__).parent / "blender"
8 |
9 |
10 | @pytest.mark.background
11 | def test_projection():
12 | launch_args = dict(
13 | scene=BLENDDIR / "cam.blend",
14 | script=BLENDDIR / "cam.blend.py",
15 | num_instances=1,
16 | named_sockets=["DATA"],
17 | background=True,
18 | )
19 |
20 | ortho_xy_expected = np.array(
21 | [
22 | [480.0, 80],
23 | [480.0, 80],
24 | [480.0, 400],
25 | [480.0, 400],
26 | [160.0, 80],
27 | [160.0, 80],
28 | [160.0, 400],
29 | [160.0, 400],
30 | ]
31 | )
32 |
33 | proj_xy_expected = np.array(
34 | [
35 | [468.148, 91.851],
36 | [431.111, 128.888],
37 | [468.148, 388.148],
38 | [431.111, 351.111],
39 | [171.851, 91.851],
40 | [208.888, 128.888],
41 | [171.851, 388.148],
42 | [208.888, 351.111],
43 | ]
44 | )
45 |
46 | z_expected = np.array([6.0, 8, 6, 8, 6, 8, 6, 8])
47 |
48 | with btt.BlenderLauncher(**launch_args) as bl:
49 | addr = bl.launch_info.addresses["DATA"]
50 | ds = btt.RemoteIterableDataset(addr, max_items=2)
51 | item = next(iter(ds))
52 | assert_allclose(item["ortho_xy"], ortho_xy_expected, atol=1e-2)
53 | assert_allclose(item["ortho_z"], z_expected, atol=1e-2)
54 | assert_allclose(item["proj_xy"], proj_xy_expected, atol=1e-2)
55 | assert_allclose(item["proj_z"], z_expected, atol=1e-2)
56 |
--------------------------------------------------------------------------------
/tests/test_compositor.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pathlib import Path
3 | import numpy as np
4 | from numpy.testing import assert_allclose
5 | from blendtorch import btt
6 |
7 | BLENDDIR = Path(__file__).parent / "blender"
8 |
9 |
10 | # @pytest.mark.background
11 | # Seems to not run on travis
12 | def test_projection():
13 | launch_args = dict(
14 | scene=BLENDDIR / "compositor.blend",
15 | script=BLENDDIR / "compositor.blend.py",
16 | num_instances=1,
17 | named_sockets=["DATA"],
18 | background=True,
19 | )
20 |
21 | expected_color = np.full((200, 320, 3), (0, 1, 0), dtype=np.float32)
22 | expected_depth = np.full((200, 320, 1), 2.0, dtype=np.float32)
23 |
24 | with btt.BlenderLauncher(**launch_args) as bl:
25 | addr = bl.launch_info.addresses["DATA"]
26 | ds = btt.RemoteIterableDataset(addr, max_items=1)
27 | item = next(iter(ds))
28 | assert_allclose(item["color"], expected_color)
29 | assert_allclose(item["depth"], expected_depth)
30 |
--------------------------------------------------------------------------------
/tests/test_dataset.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pathlib import Path
3 | import numpy as np
4 | from torch.utils.data import DataLoader
5 |
6 | from blendtorch import btt
7 |
8 | BLENDDIR = Path(__file__).parent / "blender"
9 |
10 |
11 | #@pytest.mark.background
12 | def test_dataset():
13 | launch_args = dict(
14 | scene="",
15 | script=BLENDDIR / "dataset.blend.py",
16 | num_instances=1,
17 | named_sockets=["DATA"],
18 | background=True,
19 | )
20 |
21 | with btt.BlenderLauncher(**launch_args) as bl:
22 | addr = bl.launch_info.addresses["DATA"]
23 |
24 | # Note, https://github.com/pytorch/pytorch/issues/44108
25 | ds = btt.RemoteIterableDataset(addr, max_items=16)
26 | dl = DataLoader(ds, batch_size=4, num_workers=4, drop_last=False, shuffle=False)
27 |
28 | count = 0
29 | for item in dl:
30 | assert item["img"].shape == (4, 64, 64)
31 | assert item["frameid"].shape == (4,)
32 | count += 1
33 |
34 | assert count == 4
35 |
36 |
37 | #@pytest.mark.background
38 | def test_dataset_robustness():
39 | launch_args = dict(
40 | scene="",
41 | script=BLENDDIR / "dataset_robust.blend.py",
42 | num_instances=2,
43 | named_sockets=["DATA"],
44 | background=True,
45 | )
46 |
47 | with btt.BlenderLauncher(**launch_args) as bl:
48 | addr = bl.launch_info.addresses["DATA"]
49 |
50 | # Note, https://github.com/pytorch/pytorch/issues/44108
51 | ds = btt.RemoteIterableDataset(addr, max_items=5000)
52 | dl = DataLoader(ds, batch_size=4, num_workers=0, drop_last=False, shuffle=False)
53 |
54 | ids = []
55 | for item in dl:
56 | assert item["img"].shape == (4, 64, 64)
57 | assert item["frameid"].shape == (4,)
58 | ids.extend(item["btid"].tolist())
59 | if len(np.unique(ids)) == 2:
60 | break
61 | assert len(np.unique(ids)) == 2
62 |
--------------------------------------------------------------------------------
/tests/test_duplex.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pathlib import Path
3 |
4 | from blendtorch import btt
5 |
6 | BLENDDIR = Path(__file__).parent / "blender"
7 |
8 |
9 | #@pytest.mark.background
10 | def test_duplex():
11 | launch_args = dict(
12 | scene="",
13 | script=BLENDDIR / "duplex.blend.py",
14 | num_instances=2,
15 | named_sockets=["CTRL"],
16 | background=True,
17 | )
18 |
19 | with btt.BlenderLauncher(**launch_args) as bl:
20 |
21 | addresses = bl.launch_info.addresses["CTRL"]
22 | duplex = [btt.DuplexChannel(addr, lingerms=5000) for addr in addresses]
23 | mids = [d.send(msg=f"hello {i}") for i, d in enumerate(duplex)]
24 |
25 | def rcv_twice(d):
26 | return [
27 | d.recv(timeoutms=5000),
28 | d.recv(timeoutms=5000),
29 | ]
30 |
31 | msgs = [rcv_twice(d) for d in duplex]
32 |
33 | assert len(msgs) == 2
34 | assert len(msgs[0]) == 2
35 |
36 | assert msgs[0][0]["echo"]["msg"] == "hello 0"
37 | assert msgs[0][0]["echo"]["btid"] is None
38 | assert msgs[0][0]["echo"]["btmid"] == mids[0]
39 | assert msgs[0][0]["btid"] == 0
40 | assert msgs[0][1]["msg"] == "end"
41 | assert msgs[0][1]["btid"] == 0
42 |
43 | assert msgs[1][0]["echo"]["msg"] == "hello 1"
44 | assert msgs[1][0]["echo"]["btid"] is None
45 | assert msgs[1][0]["echo"]["btmid"] == mids[1]
46 | assert msgs[1][0]["btid"] == 1
47 | assert msgs[1][1]["msg"] == "end"
48 | assert msgs[1][1]["btid"] == 1
49 |
--------------------------------------------------------------------------------
/tests/test_env.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pathlib import Path
3 | from blendtorch import btt
4 |
5 | BLENDDIR = Path(__file__).parent / "blender"
6 |
7 |
8 | class MyEnv(btt.env.OpenAIRemoteEnv):
9 | def __init__(self, background=True, **kwargs):
10 | super().__init__(version="1.0.0")
11 | self.launch(
12 | scene=BLENDDIR / "env.blend",
13 | script=BLENDDIR / "env.blend.py",
14 | background=background,
15 | **kwargs
16 | )
17 | # For Blender 2.9, if we pass scene='', the tests below fail since
18 | # _env_post_step() is not called. It's currently unclear why this happens.
19 |
20 |
21 | def _run_remote_env(background):
22 | env = MyEnv(background=background)
23 |
24 | obs = env.reset()
25 | assert obs == 0.0
26 | obs, reward, done, info = env.step(0.1)
27 | assert obs == pytest.approx(0.1)
28 | assert reward == 0.0
29 | assert not done
30 | assert info["count"] == 2 # 1 is already set by reset()
31 | obs, reward, done, info = env.step(0.6)
32 | assert obs == pytest.approx(0.6)
33 | assert reward == 1.0
34 | assert not done
35 | assert info["count"] == 3
36 | for _ in range(8):
37 | obs, reward, done, info = env.step(0.6)
38 | assert done
39 |
40 | obs = env.reset()
41 | assert obs == 0.0
42 | obs, reward, done, info = env.step(0.1)
43 | assert obs == pytest.approx(0.1)
44 | assert reward == 0.0
45 | assert not done
46 | assert info["count"] == 2
47 |
48 | env.close()
49 |
50 |
51 | #@pytest.mark.background
52 | def test_remote_env():
53 | _run_remote_env(background=True)
54 |
55 |
56 | def test_remote_env_ui():
57 | _run_remote_env(background=False)
58 |
--------------------------------------------------------------------------------
/tests/test_file.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from blendtorch.btt.file import FileRecorder, FileReader
3 |
4 |
5 | @pytest.mark.background
6 | def test_file_recorder_reader(tmp_path):
7 | with FileRecorder(outpath=tmp_path / "record.mpkl", max_messages=10) as rec:
8 | for i in range(7):
9 | rec.save({"value": i}, is_pickled=False)
10 |
11 | r = FileReader(tmp_path / "record.mpkl")
12 | assert len(r) == 7
13 | for i in range(7):
14 | assert r[i]["value"] == i
15 |
16 |
17 | @pytest.mark.background
18 | def test_file_recorder_reader_exception(tmp_path):
19 | try:
20 | with FileRecorder(
21 | outpath=tmp_path / "record.mpkl", max_messages=10, update_header=1
22 | ) as rec:
23 | rec.save({"value": 0}, is_pickled=False)
24 | rec.save({"value": 1}, is_pickled=False)
25 | rec.save({"value": 2}, is_pickled=False)
26 | raise ValueError("err")
27 | except ValueError:
28 | pass
29 |
30 | r = FileReader(tmp_path / "record.mpkl")
31 | assert len(r) == 3
32 | for i in range(3):
33 | assert r[i]["value"] == i
34 |
--------------------------------------------------------------------------------
/tests/test_launcher.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import multiprocessing as mp
3 | from pathlib import Path
4 | import json
5 | import copy
6 |
7 | from blendtorch import btt
8 |
9 | BLENDDIR = Path(__file__).parent / "blender"
10 | LAUNCH_ARGS = dict(
11 | scene="",
12 | script=str(BLENDDIR / "launcher.blend.py"),
13 | num_instances=2,
14 | named_sockets=["DATA", "GYM"],
15 | background=True,
16 | instance_args=[["--x", "3"], ["--x", "4"]],
17 | seed=10,
18 | )
19 |
20 |
21 | def _validate_result(items):
22 | assert len(items) == 2
23 | first, second = 0, 1
24 | if items[0]["btid"] == 1:
25 | first, second = second, first
26 |
27 | assert items[first]["btargs"]["btid"] == 0
28 | assert items[second]["btargs"]["btid"] == 1
29 | assert items[first]["btargs"]["btseed"] == 10
30 | assert items[second]["btargs"]["btseed"] == 11
31 | assert items[first]["btargs"]["btsockets"]["DATA"].startswith("tcp://")
32 | assert items[first]["btargs"]["btsockets"]["GYM"].startswith("tcp://")
33 | assert items[second]["btargs"]["btsockets"]["DATA"].startswith("tcp://")
34 | assert items[second]["btargs"]["btsockets"]["GYM"].startswith("tcp://")
35 | assert items[first]["remainder"] == ["--x", "3"]
36 | assert items[second]["remainder"] == ["--x", "4"]
37 |
38 |
39 | #@pytest.mark.background
40 | def test_launcher():
41 | with btt.BlenderLauncher(**LAUNCH_ARGS) as bl:
42 | addr = bl.launch_info.addresses["DATA"]
43 | ds = btt.RemoteIterableDataset(addr, max_items=2)
44 | items = [item for item in ds]
45 | _validate_result(items)
46 |
47 |
48 | def _launch(q, tmp_path):
49 | with btt.BlenderLauncher(**LAUNCH_ARGS) as bl:
50 | path = Path(tmp_path / "addresses.json")
51 | btt.LaunchInfo.save_json(path, bl.launch_info)
52 | q.put(path)
53 | bl.wait()
54 |
55 |
56 | #@pytest.mark.background
57 | def test_launcher_connected_remote(tmp_path):
58 | # Simulates BlenderLauncher called from a separate process and
59 | # shows how one can connect to already launched instances through
60 | # serialization of addresses.
61 | q = mp.Queue()
62 | p = mp.Process(target=_launch, args=(q, tmp_path))
63 | p.start()
64 | path = q.get()
65 | launch_info = btt.LaunchInfo.load_json(path)
66 | ds = btt.RemoteIterableDataset(launch_info.addresses["DATA"], max_items=2)
67 | items = [item for item in ds]
68 | _validate_result(items)
69 | p.join()
70 |
71 |
72 | def _launch_app(tmp_path, args):
73 | from blendtorch.btt.apps import launch
74 |
75 | with open(tmp_path / "launchargs.json", "w") as fp:
76 | fp.write(json.dumps(args, indent=4))
77 | launch.main(
78 | [
79 | "--out-launch-info",
80 | str(tmp_path / "launchinfo.json"),
81 | str(tmp_path / "launchargs.json"),
82 | ]
83 | )
84 |
85 |
86 | #@pytest.mark.background
87 | def test_launcher_app(tmp_path):
88 |
89 | p = mp.Process(target=_launch_app, args=(tmp_path, LAUNCH_ARGS))
90 | p.start()
91 |
92 | import time
93 |
94 | path = tmp_path / "launchinfo.json"
95 | while not path.exists():
96 | time.sleep(1)
97 |
98 | launch_info = btt.LaunchInfo.load_json(path)
99 | ds = btt.RemoteIterableDataset(launch_info.addresses["DATA"], max_items=2)
100 | items = [item for item in ds]
101 | _validate_result(items)
102 |
103 | p.join()
104 |
105 |
106 | def test_launcher_app_primaryip(tmp_path):
107 |
108 | # Same with primary ip resolver
109 | args = copy.deepcopy(LAUNCH_ARGS)
110 | args["bind_addr"] = "primaryip"
111 | p = mp.Process(target=_launch_app, args=(tmp_path, args))
112 | p.start()
113 |
114 | import time
115 |
116 | path = tmp_path / "launchinfo.json"
117 | while not path.exists():
118 | time.sleep(1)
119 |
120 | launch_info = btt.LaunchInfo.load_json(path)
121 | print(launch_info.addresses)
122 | ds = btt.RemoteIterableDataset(launch_info.addresses["DATA"], max_items=2)
123 | items = [item for item in ds]
124 | _validate_result(items)
125 |
126 | p.join()
127 |
--------------------------------------------------------------------------------