├── torchgraph ├── __init__.py ├── compile.py ├── aot.py └── dispatch.py ├── setup.py ├── test └── test.py ├── README.md ├── LICENSE └── .gitignore /torchgraph/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from .aot import capture as aot_capture 4 | from .compile import capture as compile_capture 5 | from .dispatch import capture as dispatch_capture 6 | 7 | __all__ = ["aot_capture", "compile_capture", "dispatch_capture"] 8 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | 5 | setup( 6 | name="torchgraph", 7 | version="0.1", 8 | description="PyTorch graph capturing.", 9 | author="Fei Kong", 10 | author_email="alpha0422@gmail.com", 11 | url="https://github.com/alpha0422/torch-graph", 12 | packages=["torchgraph"], 13 | install_requires=[ 14 | "graphviz", 15 | "pydot", 16 | ], 17 | ) 18 | -------------------------------------------------------------------------------- /test/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from torchgraph import dispatch_capture, aot_capture, compile_capture 4 | 5 | import torch 6 | import torch.nn as nn 7 | model = nn.Sequential( 8 | nn.Conv2d(16, 32, 3), 9 | nn.BatchNorm2d(32), 10 | nn.SiLU(), 11 | ).cuda() 12 | x = torch.randn((2, 16, 8, 8), requires_grad=True, device="cuda") 13 | 14 | 15 | dispatch_capture(model, x) 16 | aot_capture(model, x) 17 | compile_capture(model, x) 18 | -------------------------------------------------------------------------------- /torchgraph/compile.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import torch 4 | import time 5 | from torch._functorch.partitioners import draw_graph 6 | 7 | 
__all__ = ["capture"] 8 | 9 | def my_compiler(fx_module: torch.fx.GraphModule, _): 10 | draw_graph(fx_module, f"compile.{time.time()}.svg") 11 | return fx_module.forward 12 | 13 | def capture(model, *inputs): 14 | compiled_model = torch.compile(model, backend=my_compiler) 15 | y = compiled_model(*inputs) 16 | y.sum().backward() 17 | 18 | if __name__ == '__main__': 19 | import torch.nn as nn 20 | model = nn.Sequential( 21 | nn.Conv2d(16, 32, 3), 22 | nn.BatchNorm2d(32), 23 | nn.SiLU(), 24 | ).cuda() 25 | x = torch.randn((2, 16, 8, 8), requires_grad=True, device="cuda") 26 | 27 | capture(model, x) 28 | 29 | -------------------------------------------------------------------------------- /torchgraph/aot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import torch 4 | import time 5 | from functorch.compile import aot_module, make_boxed_func 6 | from torch._functorch.partitioners import draw_graph 7 | 8 | __all__ = ["capture"] 9 | 10 | def my_compiler(fx_module: torch.fx.GraphModule, _): 11 | draw_graph(fx_module, f"aot.{time.time()}.svg") 12 | return make_boxed_func(fx_module.forward) 13 | 14 | def capture(model, *inputs): 15 | aot_model = aot_module(model, fw_compiler=my_compiler) 16 | y = aot_model(*inputs) 17 | y.sum().backward() 18 | 19 | if __name__ == '__main__': 20 | import torch.nn as nn 21 | model = nn.Sequential( 22 | nn.Conv2d(16, 32, 3), 23 | nn.BatchNorm2d(32), 24 | nn.SiLU(), 25 | ).cuda() 26 | x = torch.randn((2, 16, 8, 8), requires_grad=True, device="cuda") 27 | 28 | capture(model, x) 29 | 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # torch-graph 2 | 3 | Simple PyTorch graph capturing. 
4 | 5 | ## Instructions 6 | 7 | Please install `graphviz` first: 8 | 9 | ```console 10 | $ apt-get install graphviz 11 | ``` 12 | 13 | Clone and install this package: 14 | 15 | ```console 16 | $ git clone https://github.com/alpha0422/torch-graph.git 17 | $ cd torch-graph 18 | $ pip install . 19 | ``` 20 | 21 | Examples: 22 | 23 | ```python 24 | from torchgraph import dispatch_capture, aot_capture, compile_capture 25 | 26 | import torch 27 | import torch.nn as nn 28 | model = nn.Sequential( 29 | nn.Conv2d(16, 32, 3), 30 | nn.BatchNorm2d(32), 31 | nn.SiLU(), 32 | ).cuda() 33 | x = torch.randn((2, 16, 8, 8), requires_grad=True, device="cuda") 34 | 35 | # Capture joint forward and backward graph through dispatch 36 | dispatch_capture(model, x) 37 | 38 | # Capture separate forward and backward graphs through PyTorch AOTAutograd 39 | aot_capture(model, x) 40 | 41 | # Capture forward graph through PyTorch compile 42 | compile_capture(model, x) 43 | ``` 44 | 45 | You'll find the captured graphs in `.svg` format under current folder. 46 | 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Wil Kong 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /torchgraph/dispatch.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import time 4 | import torch 5 | import torch.utils._pytree as pytree 6 | from torch.utils._python_dispatch import TorchDispatchMode 7 | from torch.utils.weak import WeakIdKeyDictionary 8 | from graphviz import Digraph 9 | 10 | __all__ = ["capture"] 11 | 12 | class CaptureGraph(TorchDispatchMode): 13 | def __init__(self, fname="graph.dot"): 14 | self.fname = fname 15 | self._graph = Digraph(format="svg") 16 | self._tensors = WeakIdKeyDictionary() 17 | self._n_tensors = 0 18 | self._n_ops = 0 19 | 20 | def __torch_dispatch__(self, func, types, args=(), kwargs=None): 21 | out = func(*args, **kwargs) 22 | op = f"{func}_{self._n_ops}" 23 | self._n_ops += 1 24 | self._graph.node(op, str(func), fillcolor="green") 25 | self._add_to_graph((args, kwargs), op, is_in=True) 26 | self._add_to_graph(out, op, is_in=False) 27 | return out 28 | 29 | def _add_to_graph(self, args, op, is_in=True): 30 | flat_args, _ = pytree.tree_flatten(args) 31 | for t in flat_args: 32 | if not torch.is_tensor(t): 33 | continue 34 | if t not in self._tensors: 35 | tensor = f"tensor_{self._n_tensors}" 36 | self._graph.node(tensor, fillcolor="skyblue") 37 | self._tensors[t] = tensor 38 | self._n_tensors += 1 39 | else: 40 | tensor = self._tensors[t] 41 | if is_in: 42 | 
self._graph.edge(tensor, op) 43 | else: 44 | self._graph.edge(op, tensor) 45 | 46 | def __exit__(self, exc_type, exc_value, traceback): 47 | super().__exit__(exc_type, exc_value, traceback) 48 | self._graph.render(self.fname) 49 | 50 | def capture(model, *inputs): 51 | primals = [p for p in model.parameters() if p.requires_grad] 52 | primals.extend([p for p in inputs if torch.is_tensor(p) and p.requires_grad]) 53 | with CaptureGraph(f"dispatch.{time.time()}.dot"): 54 | loss = model(*inputs).sum() 55 | grads = torch.autograd.grad(loss, primals) 56 | 57 | if __name__ == '__main__': 58 | import torch.nn as nn 59 | model = nn.Sequential( 60 | nn.Conv2d(16, 32, 3), 61 | nn.BatchNorm2d(32), 62 | nn.SiLU(), 63 | ).cuda() 64 | x = torch.randn((2, 16, 8, 8), requires_grad=True, device="cuda") 65 | 66 | capture(model, x) 67 | 68 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | # Customized 163 | *.dot 164 | *.svg 165 | --------------------------------------------------------------------------------