├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── pytest.yml │ └── pythonpublish.yml ├── .gitignore ├── .readthedocs.yml ├── LICENSE ├── doc ├── Makefile └── source │ ├── apisrc │ └── madflow.rst │ ├── conf.py │ ├── index.rst │ ├── installation.rst │ ├── lhewriter.rst │ ├── phasespace.rst │ └── usage.rst ├── example ├── compare_mg5_hists.py ├── cuts_example.py └── madflow_and_graph.py ├── madgraph_plugin ├── PyOut_PythonFileWriter.py ├── PyOut_create_aloha.py ├── PyOut_exporter.py ├── PyOut_helas_call_writer.py ├── __init__.py └── template_files │ ├── leading_order.inc │ └── matrix_method_python.inc ├── python_package └── madflow │ ├── __init__.py │ ├── config.py │ ├── custom_op │ ├── __init__.py │ ├── aux_functions.py │ ├── classes.py │ ├── constants.py │ ├── generation.py │ ├── global_constants.py │ ├── parser.py │ ├── read.py │ ├── syntax.py │ ├── transpiler.py │ └── write_templates.py │ ├── custom_op_generator.py │ ├── lhe_writer.py │ ├── makefile_template.py │ ├── parameters.py │ ├── phasespace.py │ ├── scripts │ ├── __init__.py │ └── madflow_exec.py │ ├── tests │ ├── __init__.py │ ├── mockup_debug_me.py │ ├── test_integration.py │ └── test_ps.py │ ├── utilities.py │ └── wavefunctions_flow.py ├── readme.md └── setup.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Description 11 | 12 | Please, describe briefly what the issue is 13 | 14 | ### Code example 15 | 16 | If possible, write a minimum working example that reproduces the bug, 17 | e.g: 18 | 19 | ```python 20 | import madflow 21 | madflow.broken_function() 22 | ``` 23 | 24 | ### Additional information 25 | 26 | Does the problem occur in CPU or GPU? 27 | If GPU, how many? Which vendor? Which version of cuda or rocm are you using? 28 | 29 | e.g: 30 | 31 | ```bash 32 | nvcc --version 33 | ``` 34 | 35 | Please include the version of madflow and tensorflow that you are running. Running the following python script will produce useful information: 36 | 37 | ```python 38 | import tensorflow as tf 39 | import madflow 40 | 41 | print(f"Madflow version: {madflow.__version__}") 42 | print(f"Tensorflow: {tf.__version__}") 43 | print(f"tf-mkl: {tf.python.framework.test_util.IsMklEnabled()}") 44 | print(f"tf-cuda: {tf.test.is_built_with_cuda()}") 45 | print(f"GPU available: {tf.test.is_gpu_available()}") 46 | ``` 47 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### The problem 11 | 12 | Brief description of the problem you want to solve. 13 | 14 | ### Proposed solution 15 | 16 | Please share any possible solutions for the problem you are thinking of. 17 | 18 | ### Are you available/want to contribute? 
19 | 20 | Yes/No 21 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: pytest 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | strategy: 10 | max-parallel: 3 11 | matrix: 12 | python-version: [3.8] 13 | 14 | steps: 15 | - uses: actions/checkout@v2.3.4 16 | - name: Set up Python ${{ matrix.python-version }} 17 | uses: actions/setup-python@v2 18 | with: 19 | python-version: ${{ matrix.python-version }} 20 | - name: Install dependencies and package 21 | run: | 22 | python -m pip install --upgrade pip 23 | pip install . 24 | - name: Lint with pylint 25 | run: | 26 | pip install pylint 27 | # Error out only in actual errors 28 | # TODO some of the errors here are actual minor style problems that needs to be solved *eventually* 29 | # TODO: fix once pdfflow is updated 30 | pylint python_package/*/*.py -E -d E1123,E1120,E0401,E402,E0102 31 | pylint python_package/*/*.py --exit-zero 32 | - name: Test with pytest 33 | run: | 34 | pip install pytest pdfflow 35 | wget http://pcteserver.mi.infn.it/~nnpdf/nnpdf31/NNPDF31_nnlo_as_0118.tar.gz 36 | tar xvfz NNPDF31_nnlo_as_0118.tar.gz 37 | PDFDIR=${PWD} pytest 38 | -------------------------------------------------------------------------------- /.github/workflows/pythonpublish.yml: -------------------------------------------------------------------------------- 1 | name: Python publication 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | deploy: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v1 12 | - name: Set up Python 13 | uses: actions/setup-python@v1 14 | with: 15 | python-version: '3.x' 16 | - name: Install dependencies 17 | run: | 18 | python -m pip install --upgrade pip 19 | pip install setuptools wheel twine 20 | - name: Build and publish 21 | env: 22 | TWINE_USERNAME: ${{ secrets.TWINE_USER }} 23 | TWINE_PASSWORD: ${{ secrets.TWINE_PASS }} 24 | run: | 25 | python setup.py sdist bdist_wheel 26 | twine upload dist/* 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # Madgraph stuff 132 | MG5_debug 133 | py.py 134 | MG5_aMC_v* 135 | Events 136 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | sphinx: 4 | builder: html 5 | configuration: doc/source/conf.py 6 | 7 | python: 8 | version: 3.8 9 | install: 10 | - method: pip 11 | path: . 12 | extra_requirements: 13 | - docs 14 | system_packages: true 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | 22 | view: html 23 | $(BROWSER) build/html/index.html 24 | -------------------------------------------------------------------------------- /doc/source/apisrc/madflow.rst: -------------------------------------------------------------------------------- 1 | madflow package 2 | ================= 3 | 4 | Module contents 5 | --------------- 6 | 7 | .. automodule:: madflow 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | 12 | Submodules 13 | ---------- 14 | 15 | Phasespace module 16 | ----------------- 17 | 18 | .. automodule:: madflow.phasespace 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Utilities module 24 | ---------------- 25 | 26 | .. automodule:: madflow.utilities 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | Wavefunctions module 32 | -------------------- 33 | 34 | .. automodule:: madflow.wavefunctions_flow 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | Parameters module 40 | ----------------- 41 | 42 | .. automodule:: madflow.parameters 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | LHE writer module 48 | ----------------- 49 | 50 | .. automodule:: madflow.lhe_writer 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | -------------------------------------------------------------------------------- /doc/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
12 | # 13 | import os 14 | import sys 15 | from recommonmark.transform import AutoStructify 16 | 17 | sys.path.insert(0, os.path.abspath('..')) 18 | import madflow 19 | 20 | 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = 'madflow' 24 | copyright = '2021, madflow team' 25 | author = 'Carrazza, Stefano and Cruz-Martinez, Juan and Rossi, Marco and Zaro, Marco' 26 | 27 | # The full version, including alpha/beta/rc tags 28 | release = madflow.__version__ 29 | autodoc_mock_imports = ['tensorflow', 'vegasflow', 'pdfflow'] 30 | 31 | 32 | # -- General configuration --------------------------------------------------- 33 | # 34 | # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found 35 | master_doc = 'index' 36 | 37 | # Add any Sphinx extension module names here, as strings. They can be 38 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 39 | # ones. 40 | extensions = [ 41 | 'sphinx.ext.autodoc', 42 | 'sphinx.ext.doctest', 43 | 'sphinx.ext.coverage', 44 | 'sphinx.ext.napoleon', 45 | 'sphinx.ext.intersphinx', 46 | 'recommonmark', 47 | ] 48 | 49 | # Add any paths that contain templates here, relative to this directory. 50 | templates_path = ['_templates'] 51 | 52 | # Markdown configuration 53 | 54 | # The suffix(es) of source filenames. 55 | # You can specify multiple suffix as a list of string: 56 | # 57 | source_suffix = { 58 | '.rst': 'restructuredtext', 59 | '.txt': 'markdown', 60 | '.md': 'markdown', 61 | } 62 | 63 | autosectionlabel_prefix_document = True 64 | # Allow to embed rst syntax in markdown files. 65 | enable_eval_rst = True 66 | 67 | # List of patterns, relative to source directory, that match files and 68 | # directories to ignore when looking for source files. 69 | # This pattern also affects html_static_path and html_extra_path. 70 | exclude_patterns = [] 71 | 72 | 73 | # -- Options for HTML output ------------------------------------------------- 74 | 75 | # The theme to use for HTML and HTML Help pages. See the documentation for 76 | # a list of builtin themes. 77 | # 78 | html_theme = 'sphinx_rtd_theme' 79 | 80 | # Add any paths that contain custom static files (such as style sheets) here, 81 | # relative to this directory. They are copied after the builtin static files, 82 | # so a file named "default.css" will overwrite the builtin "default.css". 83 | html_static_path = [] #['_static'] 84 | 85 | 86 | # -- Intersphinx ------------------------------------------------------------- 87 | 88 | intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} 89 | 90 | # -- Doctest ------------------------------------------------------------------ 91 | # 92 | 93 | doctest_path = [os.path.abspath('../examples')] 94 | 95 | # -- Autodoc ------------------------------------------------------------------ 96 | # 97 | autodoc_member_order = 'bysource' 98 | 99 | # Adapted this from 100 | # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/docs/conf.py 101 | # app setup hook 102 | def setup(app): 103 | app.add_config_value('recommonmark_config', { 104 | 'enable_eval_rst': True, 105 | }, True) 106 | app.add_transform(AutoStructify) 107 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. title:: 2 | madflow's documentation! 
3 | 4 | ================================================================= 5 | MadFlow: parton-level Monte Carlo simulations directly to your GPU 6 | ================================================================= 7 | 8 | .. 9 | image:: https://zenodo.org/badge/DOI/10.5281/zenodo.XX.svg 10 | :target: https://doi.org/10.5281/zenodo.XX 11 | 12 | .. image:: https://img.shields.io/badge/arXiv-physics.comp--ph%2F%20%20%20%202105.10529-%23B31B1B 13 | :target: https://arxiv.org/abs/2105.10529 14 | 15 | Overview 16 | -------- 17 | 18 | MadFlow is a framework for Monte Carlo simulation of particle physics processes designed to take full advantage of hardware accelerators. 19 | Processes can be generated using `MadGraph5_aMC@NLO `_ and are then output in vectorized (or tensorized) 20 | form by the ``madflow``-provided plugin. 21 | 22 | The vectorized output is compiled using the `TensorFlow `_ library (hence, tensorized) 23 | and then integrated using the `VegasFlow `_ library. 24 | The PDF interpolation is provided by `PDFFlow `_. 25 | All tools are capable of running on hardware with different acceleration capabilities, such as multi-threaded CPU, single-GPU and multi-GPU setups. 26 | 27 | 28 | Open Source 29 | ----------- 30 | The ``madflow`` package is open source and available at https://github.com/N3PDF/madflow 31 | 32 | Motivation and design 33 | ===================== 34 | 35 | Madflow is developed by the Particle Physics group of the University of Milan. 36 | Theoretical calculations in particle physics are incredibly time-consuming operations, 37 | sometimes taking months on big clusters all around the world. 38 | 39 | These expensive calculations are driven by the high dimensional phase spaces and 40 | the complexity of the integrands, which can be composed of dozens or hundreds of diagrams. 41 | Furthermore, most of these calculations are built upon very dated code and methodologies 42 | not suitable for the newest hardware. 43 | These problems create a huge technical debt which is very difficult to overcome 44 | by newcomers to the field. 45 | 46 | With Madflow we aim to close this gap between theoretical and high performance computing 47 | by providing a framework that is maintainable, extensible and modern, 48 | while at the same time not sacrificing the complexity of fixed-order computations. 49 | 50 | Even though there might be situations in which the huge complexity of many-particle calculations 51 | cannot be efficiently and automatically mapped to hardware accelerators, we expect ``madflow`` will 52 | help quickstart the efforts for a new age of GPU-capable Monte Carlo simulators. 53 | 54 | .. 55 | How to cite ``madflow``? 56 | ========================= 57 | 58 | When using ``madflow`` in your research, please cite the following publications: 59 | 60 | 61 | Bibtex: 62 | 63 | .. code-block:: latex 64 | 65 | @article{Carrazza:2020rdn, 66 | author = "Carrazza, Stefano and Cruz-Martinez, Juan M.", 67 | title = "{VegasFlow: accelerating Monte Carlo simulation across multiple hardware platforms}", 68 | eprint = "2002.12921", 69 | archivePrefix = "arXiv", 70 | primaryClass = "physics.comp-ph", 71 | reportNumber = "TIF-UNIMI-2020-8", 72 | doi = "10.1016/j.cpc.2020.107376", 73 | journal = "Comput. Phys.
Commun.", 74 | volume = "254", 75 | pages = "107376", 76 | year = "2020" 77 | } 78 | 79 | 80 | @software{vegasflow_package, 81 | author = {Juan Cruz-Martinez and 82 | Stefano Carrazza}, 83 | title = {N3PDF/vegasflow: vegasflow v1.0}, 84 | month = feb, 85 | year = 2020, 86 | publisher = {Zenodo}, 87 | version = {v1.0}, 88 | doi = {10.5281/zenodo.3691926}, 89 | url = {https://doi.org/10.5281/zenodo.3691926} 90 | } 91 | 92 | 93 | 94 | FAQ 95 | === 96 | 97 | Why the name ``MadFlow``? 98 | --------------------------- 99 | 100 | It is a combination of the names `Madgraph` and `Tensorflow`. 101 | 102 | - **Madgraph**: https://cp3.irmp.ucl.ac.be/projects/madgraph/ 103 | 104 | - **TensorFlow**: the `tensorflow `_ is developed by Google and was made public in November of 2015. It is a perfect combination between performance and usability. With a focus on Deep Learning, TensorFlow provides an algebra library able to easily run operations in many different devices: CPUs, GPUs, TPUs with little input by the developer. 105 | 106 | 107 | 108 | Indices and tables 109 | ================== 110 | 111 | .. toctree:: 112 | :maxdepth: 1 113 | :caption: Overview: 114 | :hidden: 115 | 116 | Madflow 117 | Installation 118 | usage 119 | 120 | .. toctree:: 121 | :maxdepth: 1 122 | :caption: Components: 123 | :hidden: 124 | 125 | phasespace 126 | lhewriter 127 | Package specs 128 | 129 | 130 | * :ref:`genindex` 131 | * :ref:`modindex` 132 | * :ref:`search` 133 | -------------------------------------------------------------------------------- /doc/source/installation.rst: -------------------------------------------------------------------------------- 1 | .. _installation-label: 2 | 3 | Installation 4 | ============ 5 | 6 | Main Package 7 | ------------ 8 | 9 | The package can be installed with pip: 10 | 11 | .. code-block:: bash 12 | 13 | python3 -m pip install madflow 14 | 15 | 16 | If you prefer a manual installation can do: 17 | 18 | .. code-block:: bash 19 | 20 | git clone https://github.com/N3PDF/madflow.git 21 | cd madflow 22 | pip install . 23 | 24 | if you are planning to extend or develop code use instead: 25 | 26 | .. code-block:: bash 27 | 28 | pip install -e . 29 | 30 | 31 | .. _plugin-label: 32 | 33 | MG5_aMC\@NLO Plugin 34 | -------------------- 35 | 36 | A valid installation of MG5_aMC\@NLO (2.8+) is necessary in order to generate matrix elements. 37 | 38 | If you already have a valid installation, please add the following environment variable pointing to the right directory: ``MADGRAPH_PATH``. 39 | Below are the instructions for MG5_aMC\@NLO 3.1.0, for a more recent release please visit the MG5_aMC\@NLO `site `_. 40 | 41 | .. code-block:: bash 42 | 43 | wget https://launchpad.net/mg5amcnlo/3.0/3.1.x/+download/MG5_aMC_v3.1.0.tar.gz 44 | tar xfz MG5_aMC_v3.1.0.tar.gz 45 | export MADGRAPH_PATH=${PWD}/MG5_aMC_v3_1_0 46 | 47 | 48 | Once MG5_aMC\@NLO is installed, all that's left is to link the ``madflow`` plugin inside 49 | the MG5_aMC\@NLO folder. 50 | 51 | 52 | .. code-block:: bash 53 | 54 | madflow --autolink 55 | 56 | 57 | If you prefer to link the plugin manually, it is necessary to link the 58 | ``madgraph_plugin`` folder inside the ``PLUGIN`` directory of MG5_aMC\@NLO. 59 | For instance, if the environment variable ``$MADGRAPH_PATH`` is pointing to the MG5_aMC root: 60 | 61 | .. code-block:: bash 62 | 63 | ln -s ${PWD}/madgraph_plugin ${MADGRAPH_PATH}/PLUGIN/pyout 64 | 65 | .. 
66 | Package distributions 67 | --------------------- 68 | 69 | It is also possible to install the package from repositories such as `conda-forge `_ or the `Arch User Repository `_. Note that in these cases MG5_aMC\@NLO is installed automatically. 70 | 71 | .. code-block:: bash 72 | 73 | conda install madflow -c conda-forge 74 | yay -S madflow 75 | 76 | -------------------------------------------------------------------------------- /doc/source/lhewriter.rst: -------------------------------------------------------------------------------- 1 | .. _lhewriter-label: 2 | 3 | Event output 4 | ============ 5 | 6 | A common workflow for particle physics Monte Carlo simulators is the generation 7 | of event files (in the LHE format) to be analyzed later on. 8 | Although the user may use the leading order :ref:`template ` to 9 | write events in their own favourite way, ``madflow`` also provides an interface to 10 | MG5_aMC\@NLO's own event writer through the :py:class:`LheWriter ` class. 11 | 12 | In order to avoid hanging the calculation because of disc i/o, the events are dumped 13 | asynchronously from the rest of the calculation. 14 | As a consequence, ``LheWriter`` instances should always be used within a context manager 15 | in order to avoid unpleasant situations in which the program exits before the full 16 | set of events has been dumped to disk. 17 | 18 | .. code-block:: python 19 | 20 | from madflow.lhe_writer import LheWriter 21 | from madflow.config import DTYPE 22 | from tensorflow import py_function 23 | 24 | with LheWriter("process_path", "process_name") as lhe_writer: 25 | 26 | def cross_section(xrand, weight=None, **kwargs): 27 | ... 28 | ps = phasespace(...) 29 | result = matrix.smatrix(ps, ...) 30 | py_function(func=lhe_writer.lhe_parser, inp=[ps, result*weight], Tout=DTYPE) 31 | return result 32 | 33 | 34 | Note the usage of the TensorFlow function ``py_function``. 35 | The usage of this function allows us to make calls to functions which do not 36 | necessarily have a GPU kernel. 37 | Take into consideration, however, that the usage of such functions will trigger a copy 38 | event from the hardware accelerator device to the CPU. 39 | -------------------------------------------------------------------------------- /doc/source/phasespace.rst: -------------------------------------------------------------------------------- 1 | .. _phasespace-label: 2 | 3 | Phase-Space 4 | =========== 5 | 6 | Matrix Element files generated by the ``madflow`` :ref:`script ` 7 | need an input phase-space in order to provide results. 8 | 9 | .. code-block:: bash 10 | 11 | madflow --madgraph_process "g g > t t~" -o gg_to_ttb --dry_run 12 | 13 | 14 | The previous line will generate the relevant python files inside the ``gg_to_ttb`` directory, 15 | among them ``matrix_1_gg_ttx`` which includes the matrix element ``Matrix_1_gg_ttx``. 16 | 17 | The matrix element accepts phase space points with shape ``(n_events, n_particles, 4)`` and will 18 | return as many weights as events are passed to it. 19 | 20 | ..
code-block:: python 21 | 22 | import numpy as np 23 | from matrix_1_gg_ttx import Matrix_1_gg_ttx 24 | 25 | # Instantiate the matrix element 26 | matrix = Matrix_1_gg_ttx() 27 | # Create a valid (fake) phase space point 28 | p_raw = np.array([ [3500, 0, 0, 3500], [3500, 0, 0, -3500] ]*2) 29 | p = p_raw.reshape(1, 4, 4) 30 | # Call the matrix element with the right masses and couplings 31 | me_wgt = matrix.smatrix(p, 173.0, 1.5, [-1.21 + 0j], [-1.21j]) 32 | 33 | 34 | Phase-Space generator 35 | ---------------------- 36 | 37 | For convenience, ``madflow`` offers a :py:class:`PhaseSpaceGenerator ` class 38 | which can generate phase space points using different algorithms (for now only RAMBO). 39 | 40 | In order to instantiate a phase-space object, the number of particles, the center of mass energy and the 41 | masses of the final state particles must be given. 42 | Furthermore, it is possible to ask for the phase-space to be provided in the center of mass frame 43 | (``com_output=True``) or in the laboratory frame (``com_output=False``). 44 | 45 | The phase-space object also provides methods to apply cuts to the final state particles. 46 | 47 | .. code-block:: python 48 | 49 | import numpy as np 50 | from madflow.phasespace import PhaseSpaceGenerator 51 | 52 | # Generate an instance of the phase space 53 | phasespace = PhaseSpaceGenerator(4, 7e3, [173.0, 173.0], com_output=False) 54 | 55 | # Register a cut for particle 3 (0-index) of pt > 10 GeV and pt < 520 GeV 56 | phasespace.register_cut("pt", particle=3, min_val=10, max_val=520) 57 | 58 | # Generate ten phase space points 59 | xrand = np.random.rand(10, (4-2)*4+2) 60 | all_ps, wts, x1, x2, idx = phasespace(xrand) 61 | 62 | The return quantities are as follows: 63 | 64 | - ``all_ps``: tensor of shape ``(nevents, nparticles, 4)``, phase space points 65 | - ``wts``: tensor of shape ``(nevents,)``, weight of each event 66 | - ``x1`` and ``x2``: parton fractions of the incoming momenta 67 | - ``idx``: indices of the valid phase space points 68 | 69 | Note that the number of output phase-space events provided might not be equal to the number of phase-space points requested, 70 | as not all events might pass the cuts. 71 | The ``idx`` variable contains the corresponding indices of the events that passed all cuts. 72 | 73 | 74 | Provided Algorithms 75 | ------------------- 76 | 77 | Different algorithms can be selected using the keyword ``algorithm``: 78 | 79 | 80 | Ramboflow 81 | ^^^^^^^^^ 82 | 83 | Vectorized form of the well-known `RAMBO `_ algorithm. 84 | It is a plain phase-space that does not take into account the topology of the processes being integrated; 85 | however, it can provide valid phase-space points for any number of particles, which makes it suitable for debugging 86 | and development. 87 | -------------------------------------------------------------------------------- /doc/source/usage.rst: -------------------------------------------------------------------------------- 1 | .. _usage-label: 2 | 3 | Usage 4 | ===== 5 | 6 | Madflow automatic script 7 | ------------------------ 8 | 9 | With the installation of madflow, a script is provided to automatically generate and integrate 10 | leading order cross sections. 11 | After installation you can launch the script with the ``madflow`` command. 12 | 13 | Leading Order integration 14 | ^^^^^^^^^^^^^^^^^^^^^^^^^ 15 | 16 | The ``madflow`` script can integrate any leading order process using 17 | the same syntax as MG5_aMC\@NLO: 18 | 19 | ..
code-block:: bash 20 | 21 | madflow --madgraph_process "p p > t t~ g" --pt_cut 60 --pdf NNPDF31_nnlo_as_0118 -m 2 22 | 23 | 24 | The script will use MG5_aMC\@NLO and the ``madflow``-provided :ref:`plugin ` 25 | to generate the relevant matrix element and a vectorized form of ``RAMBO`` to 26 | integrate it. 27 | 28 | 29 | Technical limitations 30 | ^^^^^^^^^^^^^^^^^^^^^ 31 | 32 | While, bugs permitting, ``madflow`` should be completely functional, its development 33 | is still ongoing. 34 | We tried to use conservative default values to manage the memory 35 | of hardware accelerators; however, our own testing devices are limited. 36 | 37 | The ``madflow`` script exposes the ``--events_limit`` flag, which limits the maximum 38 | number of events that can be sent to the accelerator. 39 | Increasing the number of events run in the accelerator at once will decrease the latency 40 | and accordingly will increase the performance of the calculation. 41 | On the flip side, increasing the number of events will increase the amount of on-device 42 | memory required to perform the calculation 43 | (which the host computer also needs in order to prepare said calculation), 44 | and unpleasant situations where the calculation goes Out of Memory (OOM) can occur. 45 | If that's the case, please decrease the ``--events_limit`` flag. 46 | 47 | 48 | 49 | .. _lotemplate-label: 50 | 51 | Leading Order template 52 | ---------------------- 53 | 54 | The goal of the ``madflow`` script is not to be completely general but to serve as a quick way 55 | of getting results and debugging. 56 | For more complex and customized cross section calculations it is recommended to build your own 57 | integration script. 58 | 59 | In order to simplify the process, a leading order template is provided by the ``madflow`` 60 | script. 61 | To generate the relevant matrix element files and leading order script without performing the integration 62 | you can use the ``--dry_run`` option. 63 | 64 | .. code-block:: bash 65 | 66 | madflow --madgraph_process "p p > Z" --output pp2z --dry_run -m 1 67 | 68 | 69 | The previous command will output to the ``pp2z`` folder all the necessary files to perform the integration 70 | alongside a template for cross section calculations: ``leading_order.py``. 71 | This template serves as a guide to start building your own fixed order calculation. 72 | 73 | 74 | Scope 75 | ^^^^^ 76 | 77 | With the leading order template we aim to kick-start more complex fixed order calculations. 78 | The template is commented in a pedagogical step-by-step way and should suffice to run the simpler calculations 79 | while making clear where changes are needed for more complex situations. 80 | 81 | The main areas to address are: 82 | 83 | - Phase-space and cuts: 84 | 85 | In the template the phase space is a vectorized form of ``rambo`` which we have dubbed 86 | ``ramboflow``. 87 | As the seasoned phenomenologist knows, the right choice of phase-space generator can mean 88 | the difference between a convergent integration and crazy and unreasonable results. 89 | Unfortunately, building a fully general phase-space sampling algorithm is (as the very same 90 | seasoned phenomenologist surely knows) a very much non-trivial subject. 91 | For the time being ``ramboflow`` is the only phase-space generator provided by ``madflow`` 92 | and thus more complicated calculations will need to build their own, following the interface sketched below.
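  As a rough guide, the following is a minimal sketch of the interface such a custom generator should expose so that it mirrors the documented output of :py:class:`PhaseSpaceGenerator ` (momenta, weights, parton fractions and the indices of the points that survive the cuts). The function name ``custom_phasespace``, the ``pt_min`` parameter, the particular cut and the assumed ``(E, px, py, pz)`` component layout are illustrative choices, not part of the ``madflow`` API.

  .. code-block:: python

      import tensorflow as tf
      from madflow.phasespace import ramboflow

      def custom_phasespace(xrand, nparticles, sqrts, masses, pt_min=60.0):
          """Toy drop-in phase space: RAMBO sampling followed by a simple pt cut."""
          # Sample a flat phase space with the vectorized RAMBO provided by madflow
          all_ps, wts, x1, x2 = ramboflow(xrand, nparticles, sqrts, masses)
          # Illustrative cut: pt of the first outgoing particle (index 2),
          # assuming an (E, px, py, pz) momentum layout
          pt = tf.sqrt(all_ps[:, 2, 1] ** 2 + all_ps[:, 2, 2] ** 2)
          idx = tf.where(pt > pt_min)
          # Keep only the events passing the cut, mirroring PhaseSpaceGenerator's output
          return (
              tf.gather_nd(all_ps, idx),
              tf.gather_nd(wts, idx),
              tf.gather_nd(x1, idx),
              tf.gather_nd(x2, idx),
              idx,
          )

  A generator following this signature should then be usable in place of the default phase space inside the generated ``leading_order.py`` template.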
93 | 94 | The cuts on the phase-space, while trivial on their own, must be applied carefully 95 | when building software targeting hardware accelerators. 96 | The number-1 enemy of GPUs is branching, and cuts to the phase-space will mean 97 | exactly that. 98 | Therefore cuts (and equivalently any kind of multi-channeling algorithm) 99 | should be applied in such a way that the number of events that are computed at each 100 | single go is maximized. 101 | 102 | 103 | - Matrix element evaluation: 104 | 105 | While one of the advantages of ``madflow`` is to use the capabilities of MG5_aMC in order 106 | to automatically generate matrix elements which can be evaluated on hardware accelerators, 107 | no hardware-specific optimization has been applied to the evaluation strategy, 108 | which remains that of `ALOHA `_, 109 | which is based on a raw evaluation of Feynman diagrams. 110 | 111 | While this approach ensures universality, it also means the number of diagrams 112 | can grow in a factorial manner, soon becoming intractable. 113 | Processes with many particles in the final state will surely benefit from other 114 | strategies. 115 | In the future we aim to provide interfaces to other matrix-element providers 116 | (also vectorized when possible) in order to address these shortcomings. 117 | -------------------------------------------------------------------------------- /example/compare_mg5_hists.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Script for the generation of plots based on Madflow/Madgraph 4 | LHE output 5 | """ 6 | import os, argparse 7 | from time import time as tm 8 | import numpy as np 9 | from pathlib import Path 10 | import matplotlib.pyplot as plt 11 | import matplotlib.gridspec as gridspec 12 | 13 | from madflow.lhe_writer import EventFileFlow, FourMomentumFlow 14 | 15 | 16 | def top_hists(lhe, nbins=50, printings=False): 17 | """ 18 | 19 | Computes pT and pseudorapidity histograms from an LHE event file. 20 | 21 | Parameters 22 | ---------- 23 | lhe: EventFileFlow, LHE events file 24 | nbins: int, number of histogram bins 25 | 26 | Returns 27 | ------- 28 | tuple, pT histogram (weights and bin edges) 29 | tuple, pseudorapidity histogram (weights and bin edges) 30 | """ 31 | pt_bins = np.linspace(0, 300, nbins + 1) 32 | eta_bins = np.linspace(-4, 4, nbins + 1) 33 | pts = [] 34 | etas = [] 35 | wgts = [] 36 | nb_kept = 0 37 | for event in lhe: 38 | etaabs = 0 39 | etafinal = 0 40 | for particle in event: 41 | if particle.status == 1 and particle.pid == 6: 42 | p = FourMomentumFlow(particle) 43 | eta = p.pseudorapidity 44 | pt = p.pt 45 | if abs(eta) > etaabs: 46 | etafinal = eta 47 | etaabs = abs(eta) 48 | if etaabs < 4: 49 | nb_kept += 1 50 | etas.append(etafinal) 51 | wgts.append(event.wgt) 52 | pts.append(pt) 53 | wgts = np.array(wgts) 54 | pt_hist = np.histogram(pts, bins=pt_bins, weights=wgts / nb_kept) 55 | eta_hist = np.histogram(etas, bins=eta_bins, weights=wgts / nb_kept) 56 | 57 | return pt_hist, eta_hist 58 | 59 | 60 | def plot_hist(hist_flow, hist_mg5, xlabel, fname): 61 | """ 62 | Plots madflow vs mg5 histograms.
63 | 64 | Parameters 65 | ---------- 66 | hist_flow: list, madflow histogram weights and bin edges 67 | hist_mg5: list, mg5 histogram weights and bin edges 68 | xlabel: str, label of x axis 69 | fname: Path, plot file name 70 | """ 71 | fig = plt.figure() 72 | gs = fig.add_gridspec(nrows=5, ncols=1, wspace=0.05) 73 | ax = fig.add_subplot(gs[:-1]) 74 | ax.title.set_text("g g > t t~") 75 | 76 | h_flow, bins_flow = hist_flow 77 | ax.step(bins_flow[:-1], h_flow, where="post", label="madflow", lw=0.75) 78 | h_mg5, bins_mg5 = hist_mg5 79 | ax.step(bins_mg5[:-1], h_mg5, where="post", label="mg5_aMC", lw=0.75) 80 | ax.tick_params( 81 | axis="x", 82 | which="both", 83 | direction="in", 84 | bottom=True, 85 | labelbottom=False, 86 | top=True, 87 | labeltop=False, 88 | ) 89 | ax.tick_params( 90 | axis="y", 91 | which="both", 92 | direction="in", 93 | left=True, 94 | labelleft=True, 95 | right=True, 96 | labelright=False, 97 | ) 98 | ax.legend() 99 | 100 | ax = fig.add_subplot(gs[-1]) 101 | ax.set_ylabel("Ratio") 102 | ax.step(bins_flow[:-1], (h_flow - h_mg5) / h_mg5, where="post", lw=0.75) 103 | ax.plot([bins_flow[0], bins_flow[-2]], [0, 0], lw=0.8, color="black", linestyle="dashed") 104 | ax.set_xlabel(xlabel, loc="right") 105 | ax.set_ylim([-1, 1]) 106 | ax.tick_params( 107 | axis="x", 108 | which="both", 109 | direction="in", 110 | bottom=True, 111 | labelbottom=True, 112 | top=True, 113 | labeltop=False, 114 | ) 115 | ax.tick_params( 116 | axis="y", 117 | which="both", 118 | direction="in", 119 | left=True, 120 | labelleft=True, 121 | right=True, 122 | labelright=False, 123 | ) 124 | 125 | print(f"Saved histogram at {fname}") 126 | plt.savefig(fname.as_posix(), bbox_inches="tight", dpi=300) 127 | plt.close() 128 | 129 | 130 | def main(): 131 | """ 132 | Example script to compare madflow-mg5 histograms on some observables. 
133 | """ 134 | arger = argparse.ArgumentParser(main.__doc__) 135 | arger.add_argument( 136 | "--madflow", help="Path to folder where madflow unweighted events are", type=Path 137 | ) 138 | arger.add_argument("--mg5", help="Path to the mg5_aMC output folder", type=Path) 139 | arger.add_argument("--nbins", help="Number of bins in the histogram", type=int, default=30) 140 | args = arger.parse_args() 141 | unw_filename = "unweighted_events.lhe.gz" 142 | path_flow = args.madflow / unw_filename 143 | path_mg5 = args.mg5 / "Events/run_01" / unw_filename 144 | if not path_flow.exists(): 145 | raise FileNotFoundError(f"LHE file for madflow not found at: {path_flow}") 146 | if not path_mg5.exists(): 147 | raise FileNotFoundError(f"LHE file for madgraph not found at: {path_mg5}") 148 | 149 | lhe_flow = EventFileFlow(path_flow) 150 | lhe_mg5 = EventFileFlow(path_mg5) 151 | 152 | print(f"Filling MadFlow histograms with {len(lhe_flow)} events") 153 | pt_flow, eta_flow = top_hists(lhe_flow, args.nbins, printings=True) 154 | 155 | print(f"Filling mg5_aMC histograms with {len(lhe_mg5)} events") 156 | pt_mg5, eta_mg5 = top_hists(lhe_mg5, args.nbins) 157 | 158 | lhe_folder = path_flow.parent 159 | plot_hist(pt_flow, pt_mg5, "top pT [MeV]", lhe_folder.joinpath("top_pt.png")) 160 | plot_hist( 161 | eta_flow, eta_mg5, "top \N{GREEK SMALL LETTER ETA}", lhe_folder.joinpath("top_eta.png") 162 | ) 163 | 164 | 165 | if __name__ == "__main__": 166 | start = tm() 167 | main() 168 | print(f"Program done in {tm()-start} s") 169 | -------------------------------------------------------------------------------- /example/cuts_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Example script that goes from the generation of a matrix element 5 | to the integration with the corresponding cuts 6 | 7 | The matrix element run by default is: g g > t t~ 8 | 9 | ``` 10 | ~$ ./cuts_example.py --madgraph_process "g g > g g" 11 | ``` 12 | 13 | It is possible to apply some mock cuts (pt > 60) with the option `-c` 14 | It is also possible to use a variable coupling with muF = muR = pt of the top with the option `-g` 15 | """ 16 | import re 17 | import sys 18 | import time 19 | import importlib 20 | import argparse 21 | import tempfile 22 | import subprocess as sp 23 | from pathlib import Path 24 | import numpy as np 25 | 26 | from vegasflow import VegasFlow 27 | from pdfflow import mkPDF, float_me, int_me, run_eager 28 | 29 | from madflow.config import get_madgraph_path, DTYPE 30 | from madflow.phasespace import ramboflow, PhaseSpaceGenerator 31 | from madflow.lhe_writer import LheWriter 32 | import tensorflow as tf 33 | 34 | # Create some temporary directories and files 35 | # (won't be removed on output so they can be inspected) 36 | out_path = Path(tempfile.mkdtemp(prefix="mad")) 37 | script_path = Path(tempfile.mktemp(prefix="mad_script")) 38 | DEFAULT_PDF = "NNPDF31_nnlo_as_0118" 39 | 40 | # Note that if another process is run, the imports below 41 | # must be changed accordingly, it can be made into options later on 42 | re_name = re.compile(r"\w{3,}") 43 | _flav_dict = {"g": 21, "d": 1, "u": 2, "s": 3, "c": 4, "b": 5, "t": 6} 44 | 45 | 46 | def _read_flav(flav_str): 47 | particle = _flav_dict.get(flav_str[0]) 48 | if particle is None: 49 | raise ValueError( 50 | f"Could not understand the incoming flavour: {flav_str} " 51 | "You can skip this error by using --no_pdf" 52 | ) 53 | if flav_str[-1] == "~": 54 | particle = -particle 55 | return 
particle 56 | 57 | 58 | def _import_module_from_path(path, module_name): 59 | spec = importlib.util.spec_from_file_location(module_name, path) 60 | module = importlib.util.module_from_spec(spec) 61 | spec.loader.exec_module(module) 62 | return module 63 | 64 | 65 | if __name__ == "__main__": 66 | arger = argparse.ArgumentParser(__doc__) 67 | arger.add_argument("-v", "--verbose", help="Print extra info", action="store_true") 68 | arger.add_argument("-p", "--pdf", help="PDF set", type=str, default=DEFAULT_PDF) 69 | arger.add_argument( 70 | "--no_pdf", help="Don't use a PDF for the initial state", action="store_true" 71 | ) 72 | arger.add_argument("-c", "--enable_cuts", help="Enable the cuts", action="store_true") 73 | arger.add_argument( 74 | "--madgraph_process", 75 | help="Set the madgraph process to be run", 76 | type=str, 77 | default="g g > t t~", 78 | ) 79 | arger.add_argument( 80 | "-m", "--massive_particles", help="Number of massive particles", type=int, default=2 81 | ) 82 | arger.add_argument("-g", "--variable_g", help="Use variable g_s", action="store_true") 83 | arger.add_argument( 84 | "--run_madgraph", help="Whether to run madgraph as well", action="store_true" 85 | ) 86 | arger.add_argument( 87 | "--pt_cut", help="Minimum pt for the outgoint particles", type=float, default=60 88 | ) 89 | arger.add_argument("--histograms", help="Generate LHE files/histograms", action="store_true") 90 | 91 | args = arger.parse_args() 92 | 93 | # Naive reading of the process to understand the flavours that are initiating the process 94 | if not args.no_pdf: 95 | initiation = args.madgraph_process.strip().split(" ") 96 | flav_1 = _read_flav(initiation[0]) 97 | flav_2 = _read_flav(initiation[1]) 98 | if args.verbose: 99 | print(f" > PDF {args.pdf} will be called with flavours idxs: {flav_1} and {flav_2}") 100 | 101 | # Prepare the madgraph script 102 | madgraph_script = f"""generate {args.madgraph_process} 103 | output pyout {out_path}""" 104 | 105 | # Run the process in madgraph and create the tensorized output 106 | script_path.write_text(madgraph_script) 107 | mg5_path = get_madgraph_path() 108 | mg5_exe = mg5_path / "bin/mg5_aMC" 109 | if not mg5_exe.exists(): 110 | raise RuntimeError(f"Could not find madgraph executable at {mg5_exe}") 111 | if args.verbose: 112 | print(f" > Running madgraph script at {script_path}") 113 | output = None 114 | else: 115 | output = sp.DEVNULL 116 | sp.run([mg5_exe, "-f", script_path], stdout=output, check=True) 117 | if args.verbose: 118 | print(f" > Madgraph output can be found at {out_path}") 119 | 120 | # Import the matrix file from the output folder as a module 121 | sys.path.insert(0, out_path.as_posix()) 122 | matrix_file = next(out_path.glob("matrix_*.py")).name 123 | matrix_name = re_name.findall(matrix_file)[0] 124 | matrix_module = _import_module_from_path(out_path / matrix_file, matrix_name) 125 | # Import specifically the matrix element 126 | matrix_element = getattr(matrix_module, matrix_name.capitalize()) 127 | 128 | # Read the parameters of the model 129 | model_sm = mg5_path / "models/sm" 130 | model = matrix_module.import_ufo.import_model(model_sm.as_posix()) 131 | model_params = matrix_module.get_model_param( 132 | model, (out_path / "Cards/param_card.dat").as_posix() 133 | ) 134 | 135 | # Instantiate the matrix element 136 | matrix = matrix_element() 137 | 138 | # Set up the parameters of the process 139 | nparticles = int(matrix.nexternal) 140 | ndim = (nparticles - 2) * 4 + 2 141 | sqrts = 13e3 142 | massive_particles = args.massive_particles 
143 | non_massive = nparticles - massive_particles - 2 144 | # Assume that the massive particles go first 145 | # and _if_ the number of masses is below the number of massive particle 146 | # assume they are all the same mass (usually the top anyway) 147 | param_masses = model_params.get_masses() 148 | if len(param_masses) < massive_particles: 149 | param_masses *= massive_particles 150 | 151 | masses = param_masses + [0.0] * non_massive 152 | 153 | if not args.variable_g: 154 | q2 = float_me(91.46 ** 2) 155 | model_params.freeze_alpha_s(0.118) 156 | 157 | if args.verbose: 158 | test_events = 5 159 | xrand = tf.random.uniform(shape=(test_events, ndim), dtype=tf.float64) 160 | ps, wgt, x1, x2 = ramboflow(xrand, nparticles, sqrts, masses) 161 | if args.variable_g: 162 | alpha_s = float_me([0.118] * test_events) 163 | wgts = matrix.smatrix(ps, *model_params.evaluate(alpha_s)) 164 | print(f"Weights: \n{wgts.numpy()}") 165 | 166 | pdf = mkPDF(args.pdf + "/0") 167 | 168 | # Create the pase space and register the cuts 169 | phasespace = PhaseSpaceGenerator(nparticles, sqrts, masses, com_output=False) 170 | if args.enable_cuts: 171 | if args.verbose: 172 | print(f"Masses: {masses}") 173 | for i in range(2, nparticles): 174 | if args.verbose: 175 | print(f"Appling cut of pt > {args.pt_cut} to particle {i}") 176 | phasespace.register_cut("pt", particle=i, min_val=args.pt_cut) 177 | 178 | def luminosity(x1, x2, q2array): 179 | """Returns f(x1)*f(x2) for the given flavours""" 180 | hadron_1 = pdf.xfxQ2(int_me([flav_1]), x1, q2array) 181 | hadron_2 = pdf.xfxQ2(int_me([flav_2]), x2, q2array) 182 | return (hadron_1 * hadron_2) / x1 / x2 183 | 184 | def generate_integrand(lhewriter=None): 185 | """Generate a cross section with (or without) a LHE parser""" 186 | 187 | def cross_section(xrand, weight=1.0, **kwargs): 188 | """Compute the cross section""" 189 | # Generate the phase space point 190 | all_ps, wts, x1, x2, idx = phasespace(xrand) 191 | 192 | # Compute the value of muF==muR if needed 193 | if args.variable_g: 194 | full_mt = tf.reduce_sum(phasespace.mt(all_ps[:, 2:nparticles, :]), axis=-1) 195 | q2array = (full_mt / 2.0) ** 2 196 | alpha_s = pdf.alphasQ2(q2array) 197 | else: 198 | q2array = tf.ones_like(x1) * q2 199 | alpha_s = None 200 | 201 | # Get the luminosity per event 202 | pdf_result = luminosity(x1, x2, q2array) 203 | 204 | # Compute the cross section 205 | smatrix = matrix.smatrix(all_ps, *model_params.evaluate(alpha_s)) 206 | ret = smatrix * pdf_result * wts 207 | 208 | if lhewriter is not None: 209 | # Fill up the LH grid 210 | if args.enable_cuts: 211 | weight = tf.gather(weight, idx)[:, 0] 212 | tf.py_function(func=lhewriter.lhe_parser, inp=[all_ps, ret * weight], Tout=DTYPE) 213 | 214 | if args.enable_cuts: 215 | ret = tf.scatter_nd(idx, ret, shape=xrand.shape[0:1]) 216 | 217 | return ret 218 | 219 | return cross_section 220 | 221 | flow_start = time.time() 222 | vegas = VegasFlow(ndim, int(5e4)) 223 | integrand = generate_integrand() 224 | vegas.compile(integrand) 225 | vegas.run_integration(6) 226 | 227 | vegas.events_per_run = int(1e6) 228 | vegas.freeze_grid() 229 | 230 | if args.histograms: 231 | proc_name = args.madgraph_process.replace(" ", "_").replace(">", "to").replace("~", "b") 232 | with LheWriter(Path("."), proc_name, False, 0) as lhe_writer: 233 | integrand = generate_integrand(lhe_writer) 234 | vegas.compile(integrand) 235 | res, err = vegas.run_integration(10) 236 | flow_final = time.time() 237 | lhe_writer.store_result((res, err)) 238 | proc_folder = 
Path(f"Events/{proc_name}") 239 | print(f"Written LHE file to {proc_folder}") 240 | else: 241 | res, err = vegas.run_integration(10) 242 | flow_final = time.time() 243 | 244 | if args.run_madgraph: 245 | # Prepare the madgraph_script 246 | if args.variable_g: 247 | scale = "set run_card dynamical_scale_choice 3" 248 | else: 249 | qsqrt = np.sqrt(q2) 250 | scale = f"""set run_card fixed_ren_scale true 251 | set run_card fixed_fac_scale true 252 | set run_card scale {qsqrt} 253 | set run_card dsqrt_q2fact1 {qsqrt} 254 | set run_card dsqrt_q2fact2 {qsqrt} 255 | """ 256 | 257 | if args.enable_cuts: 258 | outgoing_particles = args.madgraph_process.rsplit(" ", nparticles - 2)[1:] 259 | dict_cuts = {_flav_dict[i[0]]: args.pt_cut for i in outgoing_particles} 260 | # All pt must be above PT_CUT 261 | cuts = f"set run_card pt_min_pdg {dict_cuts}" 262 | else: 263 | cuts = "" 264 | 265 | madgraph_script = f"""generate {args.madgraph_process} 266 | output {out_path} 267 | launch 268 | set run_card nevents 300000 269 | set run_card systematics none 270 | set run_card pdlabel lhapdf 271 | set run_card lhaid 303600 272 | {scale} 273 | {cuts} 274 | """ 275 | mad_start = time.time() 276 | 277 | script_path = Path(tempfile.mktemp(prefix="mad_script")) 278 | script_path.write_text(madgraph_script) 279 | sp.run([mg5_exe, "-f", script_path], check=True) 280 | 281 | mad_final = time.time() 282 | 283 | print(f"\nFinal madflow result: {res.numpy():.6} +- {err:.4}") 284 | 285 | print(f"> Madgraph took: {mad_final-mad_start:.4}s to run") 286 | 287 | if args.pdf != DEFAULT_PDF: 288 | print(f"Note that Madgraph runs with {DEFAULT_PDF} while you chose {args.pdf}") 289 | print(f"> Madflow took: {flow_final-flow_start:.4}s") 290 | 291 | if args.run_madgraph and args.histograms: 292 | generate_histograms = [ 293 | "./compare_mg5_hists.py", 294 | "--madflow", 295 | proc_folder.as_posix(), 296 | "--mg5", 297 | out_path.as_posix(), 298 | ] 299 | sp.run(generate_histograms, check=True) 300 | -------------------------------------------------------------------------------- /example/madflow_and_graph.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Run madgraph and madflow side to side 4 | 5 | First it runs madflow with whatever arguments and then 6 | uses said arguments to generate the appropriate madgraph call 7 | 8 | To ensure you get all available output, do 9 | export MADFLOW_LOG_LEVEL=3 10 | """ 11 | from time import time 12 | from pathlib import Path 13 | import tempfile 14 | import subprocess as sp 15 | from madflow.scripts.madflow_exec import madflow_main, logger 16 | from madflow.config import get_madgraph_exe 17 | 18 | _flav_dict = {"g": 21, "d": 1, "u": 2, "s": 3, "c": 4, "b": 5, "t": 6} 19 | DEFAULT_PDF = "NNPDF31_nnlo_as_0118" 20 | 21 | out_path = Path(tempfile.mkdtemp(prefix="mad")) 22 | script_path = Path(tempfile.mktemp(prefix="mad_script")) 23 | 24 | if __name__ == "__main__": 25 | logger.info("Running madflow") 26 | start_madflow = time() 27 | args, final_res, events_folder = madflow_main() 28 | end_madflow = time() 29 | 30 | logger.info("Running madgraph") 31 | 32 | # Prepare the madgraph script 33 | if args.fixed_scale is None: 34 | scale = "set run_card dynamical_scale_choice 3" 35 | else: 36 | qsqrt = args.fixed_scale 37 | scale = f"""set run_card fixed_ren_scale true 38 | set run_card fixed_fac_scale true 39 | set run_card scale {qsqrt} 40 | set run_card dsqrt_q2fact1 {qsqrt} 41 | set run_card dsqrt_q2fact2 {qsqrt} 42 | """ 43 | 44 
| if args.pt_cut is None: 45 | cuts = "" 46 | else: 47 | nparticles = len(args.madgraph_process.strip().split()) - 1 48 | outgoing_particles = args.madgraph_process.rsplit(" ", nparticles - 2)[1:] 49 | dict_cuts = {_flav_dict[i[0]]: args.pt_cut for i in outgoing_particles} 50 | # All pt must be above PT_CUT 51 | cuts = f"set run_card pt_min_pdg {dict_cuts}" 52 | if args.pdf != DEFAULT_PDF: 53 | logger.warning("Madgraph will run with pdf=%s instead of %s", DEFAULT_PDF, args.pdf) 54 | 55 | madgraph_script = f"""generate {args.madgraph_process} 56 | output {out_path} 57 | launch 58 | set run_card nevents 10000 59 | set run_card systematics none 60 | set run_card pdlabel lhapdf 61 | set run_card lhaid 303600 62 | {scale} 63 | {cuts} 64 | """ 65 | script_path.write_text(madgraph_script) 66 | madgraph_run = f"{get_madgraph_exe()} -f {script_path}" 67 | 68 | logger.info("Running madgraph script %s", script_path) 69 | process = sp.run(madgraph_run, shell=True) 70 | logger.info("Madgraph output can be found at %s", out_path) 71 | logger.info("Madflow result %.4f +- %.4f fb", *final_res) 72 | 73 | logger.info("Madgraph took: %.4fs", time() - end_madflow) 74 | logger.info("Madflow took: %.4fs", end_madflow - start_madflow) 75 | 76 | if args.histograms: 77 | hist_script = Path(__file__).parent / "compare_mg5_hists.py" 78 | generate_histograms = [ 79 | hist_script.as_posix(), 80 | "--madflow", 81 | events_folder.as_posix(), 82 | "--mg5", 83 | out_path.as_posix(), 84 | ] 85 | sp.run(generate_histograms, check=True) 86 | -------------------------------------------------------------------------------- /madgraph_plugin/PyOut_PythonFileWriter.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # # 3 | # Source file of the Matrix Elements exports for # 4 | # the PyOut MG5aMC plugin. # 5 | # # 6 | ##################################################### 7 | 8 | import madgraph.iolibs.file_writers as file_writers 9 | 10 | 11 | class PyOutPythonWriter(file_writers.FileWriter): 12 | 13 | def write_comments(self, text): 14 | text = '#%s\n' % text.replace('\n','\n#') 15 | self.write(text) 16 | 17 | def write_line(self, line): 18 | """Write a line with proper indent and splitting of long lines 19 | for the language in question.""" 20 | return [line + '\n'] 21 | 22 | ##def writelines(self, line): 23 | ## self.writelines(line) 24 | -------------------------------------------------------------------------------- /madgraph_plugin/PyOut_create_aloha.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # # 3 | # Source file of the Matrix Elements exports for # 4 | # the PyOut MG5aMC plugin. # 5 | # Defines some classes which inherit from # 6 | # those inside create aloha, but use the module # 7 | # PyOut_PythonFileWriter.py for output 8 | # # 9 | ##################################################### 10 | 11 | import madgraph.iolibs.file_writers as file_writers 12 | import madgraph.various.misc as misc 13 | import aloha 14 | import aloha.create_aloha as create_aloha 15 | import aloha.aloha_writers as aloha_writers 16 | from . 
import PyOut_PythonFileWriter as PythonFileWriter 17 | 18 | import cmath 19 | import os 20 | import re 21 | from numbers import Number 22 | from collections import defaultdict 23 | from fractions import Fraction 24 | # fast way to deal with string 25 | from six import StringIO 26 | # Look at http://www.skymind.com/~ocrow/python_string/ 27 | # For knowing how to deal with long strings efficiently. 28 | import itertools 29 | 30 | 31 | 32 | class ALOHAWriterForTensorFlow(aloha_writers.ALOHAWriterForPython): 33 | """a new class, similar to the Python writer, but capable 34 | of generating TensorFlow-compatible functions 35 | """ 36 | 37 | #extension = '.py' 38 | #writer = file_writers.PythonWriter 39 | 40 | ci_definition = 'cI = complex_tf(0,1)\n' 41 | realoperator = 'tf.math.real()' 42 | imagoperator = 'tf.math.imag()' 43 | 44 | # use complex_me everywhere 45 | type2def = {} 46 | type2def['int'] = 'complex_me' 47 | type2def['double'] = 'complex_me' 48 | type2def['complex'] = 'complex_me' 49 | 50 | #@staticmethod 51 | def change_number_format(self, number): 52 | """Formating the number 53 | MZ: similar to the CPP function 54 | """ 55 | 56 | if isinstance(number, complex): 57 | if number.imag: 58 | if number.real: 59 | out = '(%s + %s*cI)' % (self.change_number_format(number.real), \ 60 | self.change_number_format(number.imag)) 61 | else: 62 | if number.imag == 1: 63 | out = 'cI' 64 | elif number.imag == -1: 65 | out = '-cI' 66 | else: 67 | out = '%s * cI' % self.change_number_format(number.imag) 68 | else: 69 | out = '%s' % (self.change_number_format(number.real)) 70 | else: 71 | tmp = Fraction(str(number)) 72 | tmp = tmp.limit_denominator(100) 73 | if not abs(tmp - number) / abs(tmp + number) < 1e-8: 74 | out = '%.9f' % (number) 75 | else: 76 | out = '%s./%s.' % (tmp.numerator, tmp.denominator) 77 | return out 78 | 79 | 80 | def change_var_format(self, name): 81 | """Formatting the variable name to Python format 82 | start to count at zero. 
83 | No neeed to define the variable in python -> no need to keep track of 84 | the various variable 85 | """ 86 | 87 | if '_' not in name: 88 | self.declaration.add((name.type, name)) 89 | else: 90 | self.declaration.add(('', name.split('_',1)[0])) 91 | name = re.sub('(?P\w*)_(?P\d+)$', self.shift_indices , name) 92 | 93 | return name 94 | 95 | 96 | def get_fct_format(self, fct): 97 | """Put the function in the correct format""" 98 | if not hasattr(self, 'fct_format'): 99 | one = self.change_number_format(1) 100 | self.fct_format = {'csc' : '{0}/tf.math.cos(%s)'.format(one), 101 | 'sec': '{0}/tf.math.sin(%s)'.format(one), 102 | 'acsc': 'tf.math.asin({0}/(%s))'.format(one), 103 | 'asec': 'tf.math.acos({0}/(%s))'.format(one), 104 | 're': ' tf.math.real(%s)', 105 | 'im': 'tf.match.imac(%s)', 106 | 'cmath.sqrt': 'tf.math.sqrt(%s)', 107 | 'sqrt': 'tf.math.sqrt(%s)', 108 | 'pow': 'tf.math.pow(%s, %s)', 109 | 'complexconjugate': 'tf.math.conj(%s)', 110 | '/' : '{0}/%s'.format(one), 111 | 'abs': 'tf.math.abs(%s)' 112 | } 113 | 114 | if fct in self.fct_format: 115 | return self.fct_format[fct] 116 | elif hasattr(cmath, fct): 117 | self.declaration.add(('fct', fct)) 118 | print ('MZ, this case should be changed for tensorflow', fct) 119 | return 'cmath.{0}(%s)'.format(fct) 120 | else: 121 | raise Exception("Unable to handle function name %s (no special rule defined and not in cmath)" % fct) 122 | 123 | 124 | def define_expression(self): 125 | """ Identical to the mother class function, but replace 1j with cI 126 | (strange errors were obtained when calling the mother class function 127 | """ 128 | out = StringIO() 129 | 130 | if self.routine.contracted: 131 | keys = list( self.routine.contracted.keys()) 132 | keys.sort() 133 | 134 | for name in keys: 135 | obj = self.routine.contracted[name] 136 | out.write(' %s = %s\n' % (name, self.write_obj(obj))) 137 | 138 | def sort_fct(a, b): 139 | if len(a) < len(b): 140 | return -1 141 | elif len(a) > len(b): 142 | return 1 143 | elif a < b: 144 | return -1 145 | else: 146 | return +1 147 | 148 | keys = list(self.routine.fct.keys()) 149 | keys.sort(key=misc.cmp_to_key(sort_fct)) 150 | for name in keys: 151 | fct, objs = self.routine.fct[name] 152 | format = ' %s = %s\n' % (name, self.get_fct_format(fct)) 153 | try: 154 | text = format % ','.join([self.write_obj(obj) for obj in objs]) 155 | except TypeError: 156 | text = format % tuple([self.write_obj(obj) for obj in objs]) 157 | finally: 158 | out.write(text) 159 | 160 | numerator = self.routine.expr 161 | if not 'Coup(1)' in self.routine.infostr: 162 | coup_name = 'COUP' 163 | else: 164 | coup_name = '%s' % self.change_number_format(1) 165 | 166 | if not self.offshell: 167 | if coup_name == 'COUP': 168 | out.write(' vertex = COUP*%s\n' % self.write_obj(numerator.get_rep([0]))) 169 | else: 170 | out.write(' vertex = %s\n' % self.write_obj(numerator.get_rep([0]))) 171 | else: 172 | OffShellParticle = '%s%d' % (self.particles[self.offshell-1],\ 173 | self.offshell) 174 | 175 | if not 'L' in self.tag: 176 | coeff = 'denom' 177 | if not aloha.complex_mass: 178 | if self.routine.denominator: 179 | out.write(' denom = %(COUP)s/(%(denom)s)\n' % {'COUP': coup_name,\ 180 | 'denom':self.write_obj(self.routine.denominator)}) 181 | else: 182 | out.write(' denom = %(coup)s/(P%(i)s[0]**2-P%(i)s[1]**2-P%(i)s[2]**2-P%(i)s[3]**2 - M%(i)s * (M%(i)s -cI* W%(i)s))\n' % 183 | {'i': self.outgoing,'coup':coup_name}) 184 | else: 185 | if self.routine.denominator: 186 | raise Exception('modify denominator are not compatible with 
complex mass scheme') 187 | 188 | out.write(' denom = %(coup)s/(P%(i)s[0]**2-P%(i)s[1]**2-P%(i)s[2]**2-P%(i)s[3]**2 - M%(i)s**2)\n' % 189 | {'i': self.outgoing,'coup':coup_name}) 190 | else: 191 | coeff = 'COUP' 192 | 193 | for ind in numerator.listindices(): 194 | out.write(' %s[%d]= %s*%s\n' % (self.outname, 195 | self.pass_to_HELAS(ind), coeff, 196 | self.write_obj(numerator.get_rep(ind)))) 197 | return out.getvalue() 198 | 199 | 200 | def get_foot_txt(self): 201 | if not self.offshell: 202 | return ' return vertex\n\n' 203 | else: 204 | return ' return tf.stack(%s, axis=0)\n\n' % (self.outname) 205 | 206 | 207 | 208 | def get_header_txt(self, name=None, couplings=None, **opt): 209 | if name is None: 210 | name = self.name 211 | 212 | out = StringIO() 213 | 214 | out.write('from madflow.config import DTYPE, DTYPEINT, complex_tf, complex_me, DTYPECOMPLEX\n') 215 | out.write('import tensorflow as tf\n\n') 216 | 217 | arguments = self.define_argument_list(couplings) 218 | 219 | arguments_names = [arg[1] for arg in arguments] 220 | 221 | # the signature 222 | shape_dict = {'list_complex' : '[None,None]', 223 | 'complex' : '[None]', 224 | 'double' : '[]'} 225 | type_dict = {'list_complex' : 'DTYPECOMPLEX', 226 | 'complex' : 'DTYPECOMPLEX', 227 | 'double' : 'DTYPE'} 228 | 229 | out.write('%(name)s_signature = [\n') 230 | 231 | for arg in arguments: 232 | fmt = arg[0] 233 | out.write('tf.TensorSpec(shape=%(shape)s, dtype=%(type)s),\n' % 234 | {'shape': shape_dict[fmt], 'type': type_dict[fmt]}) 235 | out.write(']\n\n') 236 | 237 | out.write('@tf.function(input_signature=%(name)s_signature)\n') 238 | out.write('def %(name)s(%(args)s):\n' ) 239 | 240 | return out.getvalue() % {'name': name, 'args': ','.join(arguments_names)} 241 | 242 | 243 | def get_momenta_txt(self): 244 | """Define the Header of the fortran file. 
This include 245 | - momentum conservation 246 | - definition of the impulsion""" 247 | 248 | out = StringIO() 249 | 250 | # Define all the required momenta 251 | p = [] # a list for keeping track how to write the momentum 252 | 253 | signs = self.get_momentum_conservation_sign() 254 | 255 | for i,type in enumerate(self.particles): 256 | if self.declaration.is_used('OM%s' % (i+1)): 257 | out.write(" OM{0} = complex_tf(0, 0)\n if (M{0}): OM{0}=complex_tf(1,0)/M{0}**2\n".format( (i+1) )) 258 | if i+1 == self.outgoing: 259 | out_type = type 260 | out_size = self.type_to_size[type] 261 | continue 262 | elif self.offshell: 263 | p.append('{0}{1}{2}[%(i)s]'.format(signs[i],type,i+1)) 264 | 265 | if self.declaration.is_used('P%s' % (i+1)): 266 | self.get_one_momenta_def(i+1, out) 267 | 268 | # define the resulting momenta 269 | if self.offshell: 270 | type = self.particles[self.outgoing-1] 271 | out.write(' %s%s = [complex_tf(0,0)] * %s\n' % (type, self.outgoing, out_size)) 272 | if aloha.loop_mode: 273 | size_p = 4 274 | else: 275 | size_p = 2 276 | for i in range(size_p): 277 | dict_energy = {'i':i} 278 | rhs = ''.join(p) % dict_energy 279 | # remove trailing '+' 280 | if rhs.startswith('+'): 281 | rhs = rhs[1:] 282 | 283 | out.write(' %s%s[%s] = %s\n' % (type,self.outgoing,i,rhs)) 284 | 285 | self.get_one_momenta_def(self.outgoing, out) 286 | 287 | 288 | # Returning result 289 | return out.getvalue() 290 | 291 | 292 | def get_one_momenta_def(self, i, strfile): 293 | """return the string defining the momentum""" 294 | 295 | type = self.particles[i-1] 296 | 297 | main = ' P%d = complex_tf(tf.stack([' % i 298 | if aloha.loop_mode: 299 | template ='%(sign)s%(type)s%(i)d[%(nb)d]' 300 | else: 301 | template ='%(sign)stf.math%(operator)s(%(type)s%(i)d[%(nb2)d])' 302 | 303 | nb2 = 0 304 | strfile.write(main) 305 | data = [] 306 | for j in range(4): 307 | if not aloha.loop_mode: 308 | nb = j 309 | if j == 0: 310 | assert not aloha.mp_precision 311 | operator = '.real' # not suppose to pass here in mp 312 | elif j == 1: 313 | nb2 += 1 314 | elif j == 2: 315 | assert not aloha.mp_precision 316 | operator = '.imag' # not suppose to pass here in mp 317 | elif j ==3: 318 | nb2 -= 1 319 | else: 320 | operator ='' 321 | nb = j 322 | nb2 = j 323 | data.append(template % {'j':j,'type': type, 'i': i, 324 | 'nb': nb, 'nb2': nb2, 'operator':operator, 325 | 'sign': self.get_P_sign(i)}) 326 | 327 | strfile.write(', '.join(data)) 328 | strfile.write('], axis=0), 0.)\n') 329 | 330 | 331 | def get_declaration_txt(self, add_i=True): 332 | """ Prototype for how to write the declaration of variable 333 | Include the symmetry line (entry FFV_2) 334 | """ 335 | 336 | out = StringIO() 337 | argument_var = [name for type,name in self.call_arg] 338 | # define the complex number CI = 0+1j 339 | if add_i: 340 | out.write(' ' + self.ci_definition) 341 | 342 | for type, name in self.declaration.tolist(): 343 | # skip P, V, etc... only Coup, masses, CI, 344 | if type.startswith('list'): continue 345 | if type == '': continue 346 | if name.startswith('TMP'): continue 347 | out.write(' %s = %s(%s)\n' % (name, self.type2def[type], name)) 348 | 349 | return out.getvalue() 350 | 351 | 352 | 353 | def write_obj_Add(self, obj, prefactor=True): 354 | """Turns addvariable into a string. 
Avoids trailing '+'""" 355 | 356 | data = defaultdict(list) 357 | number = [] 358 | [data[p.prefactor].append(p) if hasattr(p, 'prefactor') else number.append(p) 359 | for p in obj] 360 | 361 | file_str = StringIO() 362 | if prefactor and obj.prefactor != 1: 363 | formatted = self.change_number_format(obj.prefactor) 364 | if formatted.startswith(('+','-')): 365 | file_str.write('(%s)' % formatted) 366 | else: 367 | file_str.write(formatted) 368 | file_str.write('*(') 369 | else: 370 | file_str.write('(') 371 | first=True 372 | for value, obj_list in data.items(): 373 | add= '+' 374 | if value not in [-1,1]: 375 | nb_str = self.change_number_format(value) 376 | if nb_str[0] in ['+','-']: 377 | file_str.write(nb_str) 378 | else: 379 | # remove trailing '+' 380 | if not first: 381 | file_str.write('+') 382 | file_str.write(nb_str) 383 | file_str.write('*(') 384 | elif value == -1: 385 | add = '-' 386 | file_str.write('-') 387 | elif not first: 388 | file_str.write('+') 389 | else: 390 | file_str.write('') 391 | first = False 392 | file_str.write(add.join([self.write_obj(obj, prefactor=False) 393 | for obj in obj_list])) 394 | if value not in [1,-1]: 395 | file_str.write(')') 396 | if number: 397 | total = sum(number) 398 | file_str.write('+ %s' % self.change_number_format(total)) 399 | 400 | file_str.write(')') 401 | return file_str.getvalue() 402 | 403 | 404 | 405 | class PyOutAbstractRoutine(create_aloha.AbstractRoutine): 406 | """Same as AbstractRoutine, except for the write 407 | function which forces the usage of a 408 | PyOut_PythonFileWriter.py 409 | 410 | Also includes a copy constructor 411 | """ 412 | 413 | def __init__(self, *args): 414 | """copy constructor if only a AbstractRoutine is passed. Otherwise calls 415 | the mother class 416 | """ 417 | attrs_to_copy = [ 418 | 'spins', 419 | 'expr', 420 | 'denominator', 421 | 'name', 422 | 'outgoing', 423 | 'infostr', 424 | 'symmetries', 425 | 'combined', 426 | 'fct', 427 | 'tag', 428 | 'contracted'] 429 | if len(args) == 1 and type(args[0])==create_aloha.AbstractRoutine: 430 | for attr in attrs_to_copy: 431 | setattr(self, attr, getattr(args[0], attr)) 432 | else: 433 | super(PyOutAbstractRoutine, self).__init__(args) 434 | 435 | 436 | 437 | def write(self, output_dir, language='Fortran', mode='self', combine=True,**opt): 438 | """ write the content of the object. Same function as in aloha/create_aloha 439 | except for the first line 440 | """ 441 | # initialise the writer with an empty dirpath, so that text is returned but 442 | # not written to disk (will be done later) 443 | writer = ALOHAWriterForTensorFlow(self, dirpath = '') 444 | text = writer.write(mode=mode, **opt) 445 | if combine: 446 | for grouped in self.combined: 447 | if isinstance(text, tuple): 448 | text = tuple([old.__add__(new) for old, new in zip(text, 449 | writer.write_combined(grouped, mode=mode+'no_include', **opt))]) 450 | else: 451 | text += writer.write_combined(grouped, mode=mode+'no_include', **opt) 452 | if aloha.mp_precision and 'MP' not in self.tag: 453 | self.tag.append('MP') 454 | text += self.write(output_dir, language, mode, **opt) 455 | return text 456 | -------------------------------------------------------------------------------- /madgraph_plugin/PyOut_helas_call_writer.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # # 3 | # Source file of the Helas writer for # 4 | # the PyOut MG5aMC plugin. 
# 5 | # # 6 | ##################################################### 7 | 8 | import madgraph.iolibs.helas_call_writers as helas_call_writers 9 | import madgraph.core.helas_objects as helas_objects 10 | import aloha 11 | import aloha.aloha_writers as aloha_writers 12 | 13 | class PyOutUFOHelasCallWriter(helas_call_writers.PythonUFOHelasCallWriter): 14 | """a modified version of the PythonUFOHelasCallWriter, which behaves properly with 15 | tensorflow 16 | """ 17 | 18 | 19 | def generate_helas_call(self, argument, gauge_check=False): 20 | """Routine for automatic generation of Python Helas calls 21 | according to just the spin structure of the interaction. 22 | """ 23 | 24 | if not isinstance(argument, helas_objects.HelasWavefunction) and \ 25 | not isinstance(argument, helas_objects.HelasAmplitude): 26 | raise self.PhysicsObjectError("get_helas_call must be called with wavefunction or amplitude") 27 | 28 | call_function = None 29 | 30 | if isinstance(argument, helas_objects.HelasAmplitude) and \ 31 | argument.get('interaction_id') == 0: 32 | call = "#" 33 | call_function = lambda amp: call 34 | self.add_amplitude(argument.get_call_key(), call_function) 35 | return 36 | 37 | if isinstance(argument, helas_objects.HelasWavefunction) and \ 38 | not argument.get('mothers'): 39 | # String is just IXXXXX, OXXXXX, VXXXXX or SXXXXX 40 | call = "w%d = " 41 | 42 | call = call + helas_call_writers.HelasCallWriter.mother_dict[\ 43 | argument.get_spin_state_number()].lower() 44 | # Fill out with X up to 6 positions 45 | call = call + 'x' * (12 - len(call)) 46 | call = call + "(all_ps[:,%d]," 47 | if argument.get('spin') != 1: 48 | # For non-scalars, need mass and helicity 49 | if gauge_check and argument.get('spin') == 3 and \ 50 | argument.get('mass') == 'ZERO': 51 | call = call + "%s, 4," 52 | else: 53 | call = call + "%s,hel[%d]," 54 | call = call + "float_me(%+d))" 55 | if argument.get('spin') == 1: 56 | call_function = lambda wf: call % \ 57 | (wf.get('me_id')-1, 58 | wf.get('number_external')-1, 59 | # For boson, need initial/final here 60 | (-1)**(wf.get('state') == 'initial')) 61 | elif argument.is_boson(): 62 | if not gauge_check or argument.get('mass') != 'ZERO': 63 | call_function = lambda wf: call % \ 64 | (wf.get('me_id')-1, 65 | wf.get('number_external')-1, 66 | wf.get('mass'), 67 | wf.get('number_external')-1, 68 | # For boson, need initial/final here 69 | (-1)**(wf.get('state') == 'initial')) 70 | else: 71 | call_function = lambda wf: call % \ 72 | (wf.get('me_id')-1, 73 | wf.get('number_external')-1, 74 | 'ZERO', 75 | # For boson, need initial/final here 76 | (-1)**(wf.get('state') == 'initial')) 77 | else: 78 | call_function = lambda wf: call % \ 79 | (wf.get('me_id')-1, 80 | wf.get('number_external')-1, 81 | wf.get('mass'), 82 | wf.get('number_external')-1, 83 | # For fermions, need particle/antiparticle 84 | -(-1)**wf.get_with_flow('is_part')) 85 | else: 86 | # String is LOR1_0, LOR1_2 etc. 
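            # Illustration (added annotation; the routine and coupling names below are
            # hypothetical examples, the real ones are supplied by ALOHA and the UFO model):
            # once formatted, the generated helas call looks roughly like
            #     w4 = VVV1P0_1(w0,w1,GC_10,ZERO,ZERO)   # internal wavefunction: trailing mass,width pair
            #     amp0 = FFV1_0(w2,w3,w4,GC_11)          # amplitude: couplings only, no mass/width
            # which is the '%(out)s= %(routine_name)s(%(wf)s%(coup)s%(mass)s)' template
            # assembled just below, with 'out' set to w<N> or amp<N> respectively.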
87 | 88 | if isinstance(argument, helas_objects.HelasWavefunction): 89 | outgoing = argument.find_outgoing_number() 90 | else: 91 | outgoing = 0 92 | 93 | # Check if we need to append a charge conjugation flag 94 | l = [str(l) for l in argument.get('lorentz')] 95 | flag = [] 96 | if argument.needs_hermitian_conjugate(): 97 | flag = ['C%d' % i for i in argument.get_conjugate_index()] 98 | 99 | 100 | # Creating line formatting: 101 | call = '%(out)s= %(routine_name)s(%(wf)s%(coup)s%(mass)s)' 102 | # compute wf 103 | arg = {'routine_name': aloha_writers.combine_name(\ 104 | '%s' % l[0], l[1:], outgoing, flag, True), 105 | 'wf': ("w%%(%d)d," * len(argument.get('mothers'))) % \ 106 | tuple(range(len(argument.get('mothers')))), 107 | 'coup': ("%%(coup%d)s," * len(argument.get('coupling'))) % \ 108 | tuple(range(len(argument.get('coupling')))) 109 | } 110 | 111 | if isinstance(argument, helas_objects.HelasWavefunction): 112 | arg['out'] = 'w%(out)d' 113 | if aloha.complex_mass: 114 | arg['mass'] = "%(CM)s" 115 | else: 116 | arg['mass'] = "%(M)s,%(W)s" 117 | else: 118 | arg['coup'] = arg['coup'][:-1] #removing the last coma 119 | arg['out'] = 'amp%(out)d' 120 | arg['mass'] = '' 121 | 122 | call = call % arg 123 | # Now we have a line correctly formatted 124 | call_function = lambda wf: call % wf.get_helas_call_dict(index=0) 125 | 126 | routine_name = aloha_writers.combine_name( 127 | '%s' % l[0], l[1:], outgoing, flag) 128 | 129 | # Add the constructed function to wavefunction or amplitude dictionary 130 | if isinstance(argument, helas_objects.HelasWavefunction): 131 | if not gauge_check: 132 | self.add_wavefunction(argument.get_call_key(), call_function) 133 | else: 134 | self.add_amplitude(argument.get_call_key(), call_function) 135 | 136 | return call_function 137 | -------------------------------------------------------------------------------- /madgraph_plugin/__init__.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # # 3 | # Source file of the MadFlow plugin # 4 | # Use only with consent of its authors. # 5 | # # 6 | # authors: S.Carrazza, J.Cruz-Martinez, # 7 | # M.Rossi, M.Zaro # 8 | # # 9 | # # 10 | ##################################################### 11 | 12 | import os 13 | import sys 14 | root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0] 15 | sys.path.insert(0, root_path) 16 | 17 | from . import PyOut_exporter 18 | ##import Resummation.resummation_exporters as resummation_exporters 19 | 20 | # Three types of functionality are allowed in a plugin 21 | # 1. new output mode 22 | # 2. new cluster support 23 | # 3. new interface 24 | 25 | # 1. Define new output mode 26 | # example: new_output = {'myformat': MYCLASS} 27 | # madgraph will then allow the command "output myformat PATH" 28 | # MYCLASS should inherated of the class madgraph.iolibs.export_v4.VirtualExporter 29 | new_output = {'pyout': PyOut_exporter.PyOutExporter} 30 | 31 | # 2. Define new way to handle the cluster. 32 | # example new_cluster = {'mycluster': MYCLUSTERCLASS} 33 | # allow "set cluster_type mycluster" in madgraph 34 | # MYCLUSTERCLASS should inherated from madgraph.various.cluster.Cluster 35 | new_cluster = {} 36 | 37 | # 3. 
Define a new interface (allow to add/modify MG5 command) 38 | # This can be activated via ./bin/mg5_aMC --mode=PLUGINNAME 39 | ## Put None if no dedicated command are required 40 | new_interface = None 41 | 42 | 43 | ########################## CONTROL VARIABLE #################################### 44 | __author__ = 'Marco Zaro' 45 | __email__ = 'marco.zaro@gmail.com' 46 | __version__ = (0,1,0) 47 | minimal_mg5amcnlo_version = (2,5,0) 48 | maximal_mg5amcnlo_version = (1000,1000,1000) 49 | latest_validated_version = (2,5,0) 50 | -------------------------------------------------------------------------------- /madgraph_plugin/template_files/leading_order.inc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | %(info_lines)s 4 | 5 | Process (at Leading Order accuracy) : 6 | %(history)s 7 | 8 | """ 9 | 10 | # The following settings are the default for this template 11 | PDF_NAME = "NNPDF31_nnlo_as_0118" 12 | SQRTS = 13000 # TeV 13 | GLOBAL_MODEL_PATH = "models/sm" # Where the models are within the madgraph folder 14 | PARAM_CARD = "Cards/param_card.dat" 15 | EVENTS = int(1e4) 16 | ITERATIONS = 3 17 | 18 | import sys 19 | import itertools 20 | from pathlib import Path 21 | 22 | # Madflow imports 23 | from madflow.config import get_madgraph_path, int_me, run_eager, DTYPE 24 | from madflow.phasespace import PhaseSpaceGenerator 25 | from madflow.lhe_writer import LheWriter 26 | from madflow.utilities import generate_initial_states 27 | 28 | # External utilities 29 | from pdfflow import mkPDF 30 | from vegasflow import VegasFlow 31 | import tensorflow as tf 32 | 33 | # Get import_ufo from madgraph 34 | import models.import_ufo as import_ufo 35 | 36 | # Import the matrix element output from Madgraph 37 | %(matrix_element_imports)s 38 | 39 | # Read the UFO model from the madgraph folder 40 | model_sm = get_madgraph_path() / GLOBAL_MODEL_PATH 41 | if not model_sm.exists(): 42 | print(f"No model sm found at {model_sm}") 43 | sys.exit(0) 44 | global_model = import_ufo.import_model(model_sm.as_posix()) 45 | 46 | # List all Matrix Elements relevant for this process 47 | tree_level = {%(tree_level_keys)s} 48 | 49 | # and the masses of the final states 50 | particle_masses = {%(masses)s} 51 | 52 | # Loop over all the matrix element classes and models 53 | # and save them in a list, this is what will be integrated later on 54 | matrices = [] 55 | models = [] 56 | for matrix, get_model in tree_level.values(): 57 | model = get_model(global_model, PARAM_CARD) 58 | matrices.append(matrix()) 59 | models.append(model) 60 | nparticles = int(matrices[0].nexternal) 61 | 62 | # Assumption for the template: all final state masses are equal 63 | for m in particle_masses.values(): 64 | final_state_masses = m[2:] 65 | masses = [models[0].parse_parameter(i) for i in final_state_masses] 66 | break 67 | 68 | 69 | # Instantiate the PDF 70 | pdf = mkPDF(f"{PDF_NAME}/0") 71 | # Read up all initial states that can appear in these matrices 72 | initial_flavours = generate_initial_states(matrices) 73 | # The complicated syntax before does the following: 74 | # 1) We get the list of all flavours that might enter the cross section 75 | # calculation from hadron 1 and hadron 2 76 | # 2) Then, for each matrix element, the relevant flavours are only a subset 77 | # of them, so we'll get them by their index in the `hadron_1`, `hadron_2` lists 78 | flavours_hadron_1, flavours_hadron_2 = zip(*initial_flavours) 79 | # These are passed to pdfflow 80 | hadron_1 = 
list(set(itertools.chain(*flavours_hadron_1))) 81 | hadron_2 = list(set(itertools.chain(*flavours_hadron_2))) 82 | # The gathers are a list of the indices corresponding to the relevant flavours 83 | gather_1 = [] 84 | gather_2 = [] 85 | for p1, p2 in initial_flavours: 86 | gather_1.append([hadron_1.index(i) for i in p1]) 87 | gather_2.append([hadron_2.index(i) for i in p2]) 88 | 89 | # Instantiate the phase space and apply some cuts 90 | # The phase space takes as input: 91 | # (number of particles, center of mass energy, mass of the final state particles, 92 | # flag to return the result in the lab frame or com frame) 93 | phasespace = PhaseSpaceGenerator(nparticles, SQRTS, masses, com_output=False) 94 | if nparticles > 3: 95 | cuts_active = True 96 | for i in range(2, nparticles): 97 | # Apply a PT cut of 30 GeV to all outgoing particles 98 | phasespace.register_cut("pt", particle=i, min_val=30.0) 99 | else: 100 | cuts_active = False 101 | 102 | 103 | # Create the functions that will be actually part of the integrand 104 | def luminosity_function(x1, x2, factorization_scale): 105 | """ 106 | Create the luminosity function L(x1, x2, muF) 107 | Note: for this template it is assumed that the muF is shared for both partons 108 | but it is not required. 109 | """ 110 | raw_proton_1 = pdf.xfxQ2(int_me(hadron_1), x1, factorization_scale) 111 | raw_proton_2 = pdf.xfxQ2(int_me(hadron_2), x2, factorization_scale) 112 | # Ensure they have the right shape, just in case! 113 | proton_1 = tf.reshape(raw_proton_1, (-1, len(hadron_1))) 114 | proton_2 = tf.reshape(raw_proton_2, (-1, len(hadron_2))) 115 | return proton_1, proton_2 116 | 117 | 118 | def cross_section_generation(lhewriter=None): 119 | """ 120 | Function that generates a cross section to be integrated with 121 | or without a LHE event output 122 | """ 123 | 124 | def cross_section(xrand, n_dim=None, weight=1.0): 125 | """Compute the cross section at LO for (process)""" 126 | 127 | # Generate the phase space point 128 | # all_ps: full phase space for each event 129 | # wts: weight for each event 130 | # x1, x2: parton fraction of the initial state partons 131 | # idx: index of the points which passed the cuts 132 | # In order to save memory and computational time, points which did not pass the 133 | # cuts are not returned 134 | all_ps, wts, x1, x2, idx = phasespace(xrand) 135 | 136 | # Compute the renormalization and factorization scale muF = muR = sum(mt)/2 137 | full_mt = tf.reduce_sum(phasespace.mt(all_ps[:, 2:nparticles, :]), axis=-1) 138 | q2array = (full_mt / 2.0) ** 2 139 | 140 | # Compute alpha_s at renormalization scale 141 | alpha_s = pdf.alphasQ2(q2array) 142 | 143 | # Compute the PDF at the factorization scale 144 | proton_1, proton_2 = luminosity_function(x1, x2, q2array) 145 | 146 | # Compute the weight for each matrix element 147 | ret = 0.0 148 | for i, (matrix, model) in enumerate(zip(matrices, models)): 149 | # Compute the weight of the matrix element 150 | smatrix = matrix.smatrix(all_ps, *model.evaluate(alpha_s)) 151 | # Obtain the relevant PDFs for hadrons 1 and 2 152 | p1 = tf.gather(proton_1, gather_1[i], axis=1) 153 | p2 = tf.gather(proton_2, gather_2[i], axis=1) 154 | # Sum all input channels together for now 155 | luminosity = tf.reduce_sum(p1 * p2, axis=1) / x1 / x2 156 | ret += luminosity * smatrix 157 | 158 | # Final cross section 159 | ret *= wts 160 | 161 | if lhewriter is not None: 162 | # Fill up the LHE grid 163 | weight = tf.gather(weight, idx)[:, 0] 164 | tf.py_function(func=lhewriter.lhe_parser, 
inp=[all_ps, ret * weight], Tout=DTYPE) 165 | 166 | # Use scatter to return a vector with 0s wherever the result has been cut away 167 | if cuts_active: 168 | out_shape = tf.shape(xrand)[0:1] 169 | ret = tf.scatter_nd(idx, ret, shape=out_shape) 170 | 171 | return ret 172 | 173 | return cross_section 174 | 175 | 176 | ## Monte Carlo integration 177 | # As a first step, we'll do a warmup step so no events will be written 178 | xs = cross_section_generation(None) 179 | # Do a warmup of the integrator grid 180 | ndim = (nparticles - 2) * 4 + 2 181 | vegas = VegasFlow(ndim, EVENTS) 182 | # Generate a cross section with no LHE writer for the warmup 183 | vegas.compile(xs) 184 | vegas.run_integration(ITERATIONS) 185 | vegas.freeze_grid() 186 | 187 | # Now output an event file if this is a 2->2 188 | if nparticles == 4: 189 | proc_name = Path("test") 190 | with LheWriter(Path("."), proc_name, False, 0) as lhe_writer: 191 | xs_events = cross_section_generation(lhe_writer) 192 | vegas.compile(xs_events) 193 | res, err = vegas.run_integration(ITERATIONS) 194 | lhe_writer.store_result((res, err)) 195 | proc_folder = Path(f"Events/{proc_name}") 196 | print(f"Written LHE file to {proc_folder}") 197 | -------------------------------------------------------------------------------- /madgraph_plugin/template_files/matrix_method_python.inc: -------------------------------------------------------------------------------- 1 | 2 | from madflow.config import ( 3 | int_me, 4 | float_me, 5 | DTYPE, 6 | DTYPEINT, 7 | run_eager, 8 | complex_tf, 9 | complex_me 10 | ) 11 | from madflow.wavefunctions_flow import oxxxxx, ixxxxx, vxxxxx, sxxxxx 12 | from madflow.parameters import Model 13 | 14 | import os 15 | import sys 16 | import numpy as np 17 | 18 | import tensorflow as tf 19 | import collections 20 | 21 | ModelParamTupleConst = collections.namedtuple("constants", [%(paramnames_const)s]) 22 | ModelParamTupleFunc = collections.namedtuple("functions", [%(paramnames_func)s]) 23 | 24 | root_path = '%(root_path)s' 25 | sys.path.insert(0, root_path) 26 | sys.path.insert(0, os.path.join(root_path, 'madgraph')) 27 | sys.path.insert(0, os.path.join(root_path, 'aloha', 'template_files')) 28 | 29 | import models.import_ufo as import_ufo 30 | import models.check_param_card as param_card_reader 31 | 32 | # import the ALOHA routines 33 | from aloha_%(process_string)s import * 34 | 35 | 36 | def get_model_param(model, param_card_path): 37 | param_card = param_card_reader.ParamCard(param_card_path) 38 | %(model_parameters)s 39 | constants = ModelParamTupleConst(%(paramtuple_const)s) 40 | functions = ModelParamTupleFunc(%(paramtuple_func)s) 41 | return Model(constants, functions) 42 | 43 | 44 | 45 | smatrix_signature = [ 46 | tf.TensorSpec(shape=[None,%(nexternal)d,4], dtype=DTYPE), 47 | %(paramsignature_const)s 48 | %(paramsignature_func)s 49 | ] 50 | 51 | 52 | matrix_signature = [ 53 | tf.TensorSpec(shape=[None,%(nexternal)d,4], dtype=DTYPE), 54 | tf.TensorSpec(shape=[%(nexternal)d], dtype=DTYPE), 55 | %(paramsignature_const)s 56 | %(paramsignature_func)s 57 | ] 58 | 59 | 60 | class Matrix_%(process_string)s(object): 61 | nexternal = float_me(%(nexternal)d) 62 | ndiags = float_me(%(ndiags)d) 63 | ncomb = float_me(%(ncomb)d) 64 | initial_states = [%(initial_states)s] 65 | mirror_initial_states = %(mirror)s 66 | %(helicity_lines)s 67 | %(den_factor_line)s 68 | 69 | def __init__(self): 70 | """define the object""" 71 | self.clean() 72 | 73 | def clean(self): 74 | pass 75 | ##self.jamp = [] 76 | 77 | def __str__(self): 78 | return 
"%(process_string)s" 79 | 80 | @tf.function(input_signature=smatrix_signature) 81 | def smatrix(self,all_ps,%(params)s): 82 | # 83 | %(info_lines)s 84 | # 85 | # MadGraph5_aMC@NLO StandAlone Version 86 | # 87 | # Returns amplitude squared summed/avg over colors 88 | # and helicities 89 | # for the point in phase space P(0:3,NEXTERNAL) 90 | # 91 | %(process_lines)s 92 | # 93 | # Clean additional output 94 | # 95 | ###self.clean() 96 | # ---------- 97 | # BEGIN CODE 98 | # ---------- 99 | nevts = tf.shape(all_ps, out_type=DTYPEINT)[0] 100 | ans = tf.zeros(nevts, dtype=DTYPE) 101 | for hel in self.helicities: 102 | ans += self.matrix(all_ps,hel,%(params)s) 103 | 104 | return ans/self.denominator 105 | 106 | @tf.function(input_signature=matrix_signature) 107 | def matrix(self,all_ps,hel,%(params)s): 108 | # 109 | %(info_lines)s 110 | # 111 | # Returns amplitude squared summed/avg over colors 112 | # for the point with external lines W(0:6,NEXTERNAL) 113 | # 114 | %(process_lines)s 115 | # 116 | # 117 | # Process parameters 118 | # 119 | ngraphs = %(ngraphs)d 120 | nwavefuncs = %(nwavefuncs)d 121 | ncolor = %(ncolor)d 122 | ZERO = float_me(0.) 123 | # 124 | # Color matrix 125 | # 126 | %(color_matrix_lines)s 127 | # 128 | # Model parameters 129 | # 130 | # ---------- 131 | # Begin code 132 | # ---------- 133 | %(helas_calls)s 134 | 135 | %(jamp_lines)s 136 | 137 | ret = tf.einsum("ie, ij, je -> e", jamp, cf, tf.math.conj(jamp)/tf.reshape(denom, (ncolor, 1))) 138 | return tf.math.real(ret) 139 | 140 | 141 | if __name__ == "__main__": 142 | import sys, pathlib 143 | import numpy as np 144 | 145 | # Get the name of the matrix in this file 146 | matrix_name = pathlib.Path(sys.argv[0]).stem.capitalize() 147 | matrix_class = globals()[matrix_name] 148 | 149 | # Instantiate the matrix 150 | matrix = matrix_class() 151 | 152 | # Read up the model 153 | model_sm = pathlib.Path(root_path) / "models/sm" 154 | if not model_sm.exists(): 155 | print(f"No model sm found at {model_sm}, test cannot continue") 156 | sys.exit(0) 157 | model = import_ufo.import_model(model_sm.as_posix()) 158 | model_params = get_model_param(model, 'Cards/param_card.dat') 159 | 160 | # Define th phase space 161 | # The structure asked by the matrix elements is 162 | # (nevents, ndimensions, nparticles) 163 | # the 4 dimensions of the 4-momentum is expected as 164 | # (E, px, py, pz) 165 | ndim = 4 166 | npar = int(matrix.nexternal) 167 | nevents = 2 168 | max_momentum = 7e3 169 | 170 | par_ax = 1 171 | dim_ax = 2 172 | 173 | # Now generate random outgoing particles in a com frame (last_p carries whatever momentum is needed to sum 0 ) 174 | shape = [nevents, 0, 0] 175 | shape[par_ax] = npar - 3 176 | shape[dim_ax] = ndim - 1 177 | partial_out_p = tf.random.uniform(shape, minval=-max_momentum, maxval=max_momentum, dtype=DTYPE) 178 | last_p = -tf.reduce_sum(partial_out_p, keepdims=True, axis=par_ax) 179 | out_p = tf.concat([partial_out_p, last_p], axis=par_ax) 180 | 181 | if "mdl_MT" in dir(model_params): 182 | # TODO fill in the mass according to the particles 183 | out_m = tf.reshape((npar - 2) * [model_params.mdl_MT], (1, -1, 1)) 184 | else: 185 | out_m = 0.0 186 | out_e = tf.sqrt(tf.reduce_sum(out_p ** 2, keepdims=True, axis=dim_ax) + out_m ** 2) 187 | outgoing_4m = tf.concat([out_e, out_p], axis=dim_ax) 188 | 189 | # Put all incoming momenta in the z axis (TODO: for now assume massless input) 190 | ea = tf.reduce_sum(out_e, axis=par_ax, keepdims=True) / 2 191 | zeros = tf.zeros_like(ea) 192 | inc_p1 = tf.concat([ea, zeros, zeros, 
ea], axis=dim_ax) 193 | inc_p2 = tf.concat([ea, zeros, zeros, -ea], axis=dim_ax) 194 | 195 | all_ps = tf.concat([inc_p1, inc_p2, outgoing_4m], axis=par_ax) 196 | 197 | model_params.freeze_alpha_s(0.118) 198 | wgt_set = matrix.smatrix(all_ps, *model_params.evaluate(None)) 199 | 200 | print("All good!") 201 | for i, (p, wgt) in enumerate(zip(all_ps, wgt_set)): 202 | print(f"\n#{i} ME value: {wgt.numpy():.3e} for P set:\n{p.numpy()}") 203 | -------------------------------------------------------------------------------- /python_package/madflow/__init__.py: -------------------------------------------------------------------------------- 1 | """ madflow """ 2 | __version__ = "0.9" 3 | -------------------------------------------------------------------------------- /python_package/madflow/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Default settings for madflow 3 | 4 | This program is to be installed alongside VegasFlow and 5 | PDFFlow so we'll take the main configuration from there. 6 | The environment variables for log levels and float/int types 7 | are propagated to any other programs used by madflow 8 | """ 9 | import os 10 | import logging 11 | from distutils.spawn import find_executable 12 | import subprocess as sp 13 | from pathlib import Path 14 | 15 | # Read the madfflow environment variables 16 | _log_level_idx = os.environ.get("MADFLOW_LOG_LEVEL") 17 | _float_env = os.environ.get("MADFLOW_FLOAT", "64") 18 | _int_env = os.environ.get("MADFLOW_INT", "32") 19 | 20 | # Ensure that both vegasflow and pdfflow are consistent 21 | # with the corresponding madflow choice 22 | # For float/int the consistency is enforced, logs can be chosen 23 | # differently for each program 24 | 25 | if _log_level_idx is None: 26 | _log_level_idx = "2" 27 | else: 28 | os.environ.setdefault("PDFFLOW_LOG_LEVEL", _log_level_idx) 29 | os.environ.setdefault("VEGASFLOW_LOG_LEVEL", _log_level_idx) 30 | 31 | os.environ["VEGASFLOW_FLOAT"] = _float_env 32 | os.environ["PDFFLOW_FLOAT"] = _float_env 33 | os.environ["VEGASFLOW_INT"] = _int_env 34 | os.environ["PDFFLOW_INT"] = _int_env 35 | 36 | # Now import all functions and variables directly from one of the other programs 37 | from pdfflow.configflow import ( 38 | run_eager, 39 | DTYPE, 40 | DTYPEINT, 41 | int_me, 42 | float_me, 43 | fzero, 44 | fone, 45 | ione, 46 | izero, 47 | ) 48 | 49 | # Configure logging 50 | # Log levels 51 | LOG_DICT = {"0": logging.ERROR, "1": logging.WARNING, "2": logging.INFO, "3": logging.DEBUG} 52 | _log_level = LOG_DICT[_log_level_idx] 53 | logger = logging.getLogger(__name__.split(".")[0]) 54 | logger.setLevel(_log_level) 55 | 56 | # Create and format the log handler 57 | _console_handler = logging.StreamHandler() 58 | _console_handler.setLevel(_log_level) 59 | _console_format = logging.Formatter("[%(levelname)s] () %(message)s") 60 | _console_handler.setFormatter(_console_format) 61 | logger.addHandler(_console_handler) 62 | 63 | import tensorflow as tf 64 | 65 | 66 | def complex_tf(real, imag): 67 | """Builds a tf.complex tensor from real and imaginary parts""" 68 | # python objects are stored with 32-bits, so cast first with float_me 69 | real = float_me(real) 70 | imag = float_me(imag) 71 | return tf.complex(real, imag) 72 | 73 | 74 | DTYPECOMPLEX = complex_tf(1.0, 1.0).dtype 75 | 76 | 77 | def complex_me(cmp): 78 | """Cast the input to complex type""" 79 | return tf.cast(cmp, dtype=DTYPECOMPLEX) 80 | 81 | 82 | def get_madgraph_path(madpath=None): 83 | """Return the path to the 
madgrapt root""" 84 | if madpath is None: 85 | madpath = os.environ.get("MADGRAPH_PATH", "mg5amcnlo") 86 | madgraph_path = Path(madpath) 87 | if not madgraph_path.exists(): 88 | raise ValueError( 89 | f"""{madgraph_path} does not exist. 90 | Are you sure Madgraph is installed? https://madflow.readthedocs.io/en/latest/installation.html#mg5-amc-nlo-plugin 91 | MadFlow needs a valid path for Madgraph, can be given as env. variable MADGRAPH_PATH""" 92 | ) 93 | # If the path exists, check whether the madgraph executable is there 94 | _ = get_madgraph_exe(madgraph_path) 95 | return madgraph_path 96 | 97 | 98 | def get_madgraph_exe(madpath=None): 99 | """Return the path to the madgraph executable""" 100 | if madpath is None: 101 | madpath = get_madgraph_path(madpath) 102 | mg5_exe = madpath / "bin/mg5_aMC" 103 | if not mg5_exe.exists(): 104 | raise ValueError( 105 | f"""Madgraph executable could not be found at {mg5_exe}, 106 | are you sure Madgraph is installed? https://madflow.readthedocs.io/en/latest/installation.html#mg5-amc-nlo-plugin""" 107 | ) 108 | return mg5_exe 109 | 110 | 111 | def _parse_amd_info(info): 112 | """Parse the information returned by 113 | rocm-smi to find out the amount of free memory in MB 114 | """ 115 | for line in info.split("\n"): 116 | if line.startswith("GPU") and "Used" not in line: 117 | total_b = line.strip().rsplit(" ", 1)[-1] 118 | return int(total_b) / 1024 / 1024 119 | 120 | 121 | def guess_events_limit(nparticles): 122 | """Given a number of particles, reads GPU memory to guess 123 | what should be the event limit. 124 | Use the smallest available GPU as the limit (but print warning in that case) 125 | """ 126 | gpu_physical_devices = tf.config.list_physical_devices("GPU") 127 | memories = [] 128 | for gpu in gpu_physical_devices: 129 | gpu_idx = gpu.name.rsplit(":", 1)[-1] 130 | # Nvidia and AMD GPU split 131 | if find_executable("nvidia-smi"): 132 | gpuinfo_command = ( 133 | f"nvidia-smi --id={gpu_idx} --query-gpu=memory.total --format=csv,noheader,nounits" 134 | ) 135 | parse = lambda x: int(x) 136 | elif find_executable("rocm-smi"): 137 | gpuinfo_command = f"rocm-smi -d {gpu_idx} --showmeminfo VRAM" 138 | parse = _parse_amd_info 139 | else: 140 | logger.error("No rocm-smi or nvidia-smi command found, GPU memory cannot be guessed") 141 | continue 142 | 143 | try: 144 | out = parse( 145 | sp.run( 146 | gpuinfo_command, check=True, shell=True, capture_output=True, text=True 147 | ).stdout 148 | ) 149 | memories.append(out) 150 | except sp.CalledProcessError: 151 | logger.error("Could not read the memory of GPU %d", gpu_idx) 152 | except ValueError: 153 | logger.error("Could not read the memory of GPU %d", gpu_idx) 154 | 155 | if not memories: 156 | return None 157 | 158 | if len(set(memories)) == 1: 159 | memory = memories[0] 160 | else: 161 | memory = min(memories) 162 | logger.warning( 163 | "Using the memory of GPU#%d: %d MiB to limit the events per device", 164 | memories.index(memory), 165 | memory, 166 | ) 167 | 168 | # NOTE: this is based on heuristics in some of the available cards 169 | if memory < 13000: 170 | events_limit = int(1e5) 171 | else: 172 | events_limit = int(5e5) 173 | 174 | if nparticles > 5: 175 | events_limit //= 10 176 | return events_limit 177 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/N3PDF/madflow/46afee693fa32b651a0d41373eae3912f59f8ecb/python_package/madflow/custom_op/__init__.py -------------------------------------------------------------------------------- /python_package/madflow/custom_op/aux_functions.py: -------------------------------------------------------------------------------- 1 | """Sign functions used by the Custom Operator and functions used by the transpiler""" 2 | 3 | import re 4 | 5 | import madflow.custom_op.global_constants as op_gc 6 | import madflow.custom_op.classes as op_cl 7 | 8 | 9 | def generate_auxiliary_functions(function_list_): 10 | """generates sign functions 11 | 12 | function_list_: list of functions used by the Custom Operator (updated)""" 13 | aux_args = [] 14 | aux_arg = op_cl.Argument("x", op_gc.DOUBLE_TYPE, 0, False, []) 15 | aux_args.append(aux_arg) 16 | aux_arg = op_cl.Argument("y", op_gc.DOUBLE_TYPE, 0, False, []) 17 | aux_args.append(aux_arg) 18 | aux_scope = ["int sign = 0;", "y >= 0 ? sign = 1 : sign = -1;", "return x * sign;"] 19 | aux_scope_args = [op_cl.Argument("sign", "int", 0, False, [])] 20 | aux_function = op_cl.Function( 21 | op_gc.DOUBLE_TYPE, "sign", aux_args, aux_scope, aux_scope_args, "" 22 | ) 23 | function_list_.append(aux_function) 24 | 25 | aux_scope = ["return sign(x, y);"] 26 | aux_scope_args = [] 27 | aux_function = op_cl.Function( 28 | op_gc.DOUBLE_TYPE, "signvec", aux_args, aux_scope, aux_scope_args, "" 29 | ) 30 | function_list_.append(aux_function) 31 | 32 | 33 | # Support functions 34 | 35 | 36 | def clean_spaces(a): 37 | """remove spaces or endlines within a string""" 38 | return a.translate({ord(c): None for c in "\n "}) 39 | 40 | 41 | def clean_index(a): 42 | """return the index of an array element""" 43 | return a.split("[")[0] 44 | 45 | 46 | def clean_pointer(var_type): 47 | """remove * from a variable type""" 48 | var_type = re.sub("[&*]*", "", var_type) 49 | return var_type 50 | 51 | 52 | def count_brackets(line, brackets_count): 53 | """remove the count of brackets () in a string""" 54 | for letter in line: 55 | brackets_count = count_brackets_letter(letter, brackets_count) 56 | return brackets_count 57 | 58 | 59 | def convert_grammar(old_value): 60 | """converts the grammar from Python to C++""" 61 | value = re.sub("tf.reshape", "", old_value) 62 | value = re.sub("\[:,[ :]*(\d+)\]", "[\g<1>]", value) 63 | value = re.sub("float_me\(([a-zA-Z0-9[\]+\-*/. ]*)\)", "\g<1>", value) 64 | value = re.sub("int_me", "(int)", value) 65 | value = re.sub("([a-zA-Z_0-9[\]]+) *\*\* *2", "\g<1> * \g<1>", value) 66 | value = re.sub("([a-zA-Z_0-9[\]]+) \*\* (\d+)", "pow(\g<1>, \g<2>)", value) 67 | value = re.sub("\( *([a-zA-Z_0-9[\]+\-*/ ]+)\) *// *2", "(int)(\g<1>) / 2", value) 68 | value = re.sub("tf.ones_like\([a-zA-Z_0-9[\]{}+\-*/=, \n]*\) *\**", "", value) 69 | value = re.sub("tfmath\.", "", value) 70 | value = re.sub("minimum", "MINIMUM", value) # hhh 71 | value = re.sub("maximum", "MAXIMUM", value) # hhh 72 | value = re.sub("tf.math.real\(([a-zA-Z0-9_()[\] +\-*/]*)\)", "\g<1>.real()", value) 73 | value = re.sub("tf.math.imag\(([a-zA-Z0-9_()[\] +\-*/]*)\)", "\g<1>.imag()", value) 74 | value = re.sub("tf.math.conj", "COMPLEX_CONJUGATE", value) # hhh 75 | value = re.sub( 76 | "tf.stack\([ \n]*\[([a-zA-Z_0-9()[\]+\-*/,. ]*)] *, *axis=[0-9 \n]*\)", "{\g<1>}", value 77 | ) 78 | value = re.sub("tf.stack\([ \n]*\[([a-zA-Z_0-9()[\]+\-*/,. ]*)][ \n]*\)", "{\g<1>}", value) 79 | value = re.sub("tf.stack\([ \n]*([a-zA-Z_0-9()[\]+\-*/,. 
]*) *, *axis=[0-9 \n]*\)", "", value) 80 | value = re.sub("\(*tf.stack\([ \n]*([a-zA-Z_0-9()[\]+\-*/,. ]*) *, *\[[0-9, \n]*]\)", "", value) 81 | value = re.sub("complex_tf", "T", value) 82 | value = re.sub("complex_me", "T", value) 83 | value = re.sub("complex\(", "T(", value) 84 | value = re.sub("\( *\(([a-zA-Z_0-9()[\]{}+\-*/ \n]*)\) *\)", "(\g<1>)", value) 85 | return value 86 | 87 | 88 | def convert_type(t): 89 | """converts TensorFlow types into C++ types""" 90 | t = clean_spaces(t) 91 | 92 | result = "" 93 | d = { 94 | "DTYPE": op_gc.DOUBLE_TYPE, 95 | "DTYPEINT": "int", 96 | "DTYPECOMPLEX": "T", 97 | } 98 | result = d.get(t, t) 99 | return result 100 | 101 | 102 | def change_array_into_variable(line): 103 | """specific to denom 104 | chenges denom from denom[i] into denom""" 105 | match = re.search("denom", line) 106 | if match != None: 107 | line = re.sub("\[\]", "", line) 108 | line = re.sub("{([+\-0-9]+).*;", "\g<1>", line) 109 | return line 110 | else: 111 | line = re.sub(";", "", line) 112 | return line 113 | 114 | 115 | def count_brackets_letter(letter, bracket_count): 116 | if letter == "(": 117 | bracket_count += 1 118 | elif letter == ")": 119 | bracket_count -= 1 120 | return bracket_count 121 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/classes.py: -------------------------------------------------------------------------------- 1 | """Classes used by the transpiler""" 2 | 3 | from dataclasses import dataclass 4 | 5 | 6 | @dataclass 7 | class Argument: 8 | """variable 9 | type: void, int, double, ... 10 | name: variable_name 11 | size: size of the array 12 | 0 => int x; 13 | 6 => int x[6]; 14 | tensor: true if the variable is a TensorFlow tensor 15 | slice: if tensor == true, length of a single tensor slice""" 16 | 17 | name: str 18 | type: str 19 | size: int 20 | tensor: bool 21 | slice: list # [str] 22 | 23 | 24 | @dataclass 25 | class Function: 26 | """function 27 | type: void, int, double, ... 28 | name: function_name 29 | args: function arguments (list of argument objects) 30 | scope: function scope (list of strings, one for each line) 31 | scope_args: variables defined in the scope (list of argument objects) 32 | template: i.e. template """ 33 | 34 | type: str 35 | name: str 36 | args: list # [Argument] 37 | scope: list # [str] 38 | scope_args: list # [str] 39 | template: str 40 | argn: int = 0 41 | 42 | def __post_init__(self): 43 | self.argn = len(self.args) 44 | 45 | 46 | @dataclass 47 | class CustomOperator: 48 | """Custom Operator 49 | name: op_name 50 | scope: op scope (list of strings, one for each line) 51 | functor_name: name of the functor (called by MatrixOp)""" 52 | 53 | name: str 54 | scope: list # [str] 55 | functor_name: str 56 | 57 | 58 | @dataclass 59 | class Signature: 60 | """signature 61 | name: signature_name 62 | type: complex, int, double, ... 
63 | size: tensor shape 64 | tensor: true if the variable is a TensorFlow tensor 65 | slice: if tensor == true, length of a single tensor slice""" 66 | 67 | name: str 68 | type: str 69 | size: str 70 | tensor: bool 71 | slice: list # [str] 72 | 73 | 74 | @dataclass 75 | class SignatureVariable: 76 | """tf.function signature 77 | name: signature_name 78 | signature_list: list of Signature objects 79 | signature_name_list: list of strings containing signature.name""" 80 | 81 | name: str 82 | signature_list: list # [Signature] 83 | signature_name_list: list # [str] 84 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/constants.py: -------------------------------------------------------------------------------- 1 | """Jinja templates for the generation of custom operators""" 2 | 3 | import madflow.custom_op.global_constants as op_gc 4 | 5 | # Templates ---------------------- 6 | 7 | library_template = ( 8 | "{% for lib in variableName %}\ 9 | " 10 | "#include <{{ lib }}>\n\ 11 | " 12 | "{% endfor %}" 13 | ) 14 | 15 | header_template = ( 16 | "{% for head in variableName %}\ 17 | " 18 | '#include "{{ head }}"\n\ 19 | ' 20 | "{% endfor %}" 21 | ) 22 | 23 | constant_variable_template = ( 24 | "{% for var in constantVariable %}\ 25 | " 26 | "{{ dev }}{{ var}};\n\ 27 | " 28 | "{% endfor %}" 29 | ) 30 | 31 | defined_constant_template = ( 32 | "{% for var in variableName %}\ 33 | " 34 | "#define {{ var }}\n\ 35 | " 36 | "{% endfor %}" 37 | ) 38 | 39 | function_definition_template = "\ 40 | {{ func.template }}\n\ 41 | {{ dev }}{{ func.type }} {{ func.name }} (\ 42 | {% for i in range(func.argn - 1) %}\ 43 | {{ func.args[i].type }} {{ func.args[i].name }}, \ 44 | {% endfor %}\ 45 | {{ func.args[func.argn - 1].type }} {{ func.args[func.argn - 1].name }}\ 46 | );" 47 | 48 | function_template = "\ 49 | {{ func.template }}\n\ 50 | {{ dev }}{{ func.type }} {{ func.name }} (\ 51 | {% for i in range(func.argn - 1) %}\ 52 | {{ func.args[i].type }} {{ func.args[i].name }}, \ 53 | {% endfor %}\ 54 | {{ func.args[func.argn - 1].type }} {{ func.args[func.argn - 1].name }}\ 55 | ) {\n\ 56 | {% for scope in func.scope %}\ 57 | {{ scope }}\n\ 58 | {% endfor %}\ 59 | }" 60 | 61 | header_file_template = ( 62 | '#ifndef MATRIX_H_\n\ 63 | #define MATRIX_H_\n\ 64 | \n\ 65 | #include \n\ 66 | #include \n\ 67 | \n\ 68 | #include "tensorflow/core/framework/op.h"\n\ 69 | #include "tensorflow/core/framework/op_kernel.h"\n\ 70 | using namespace tensorflow;\n\ 71 | \n\ 72 | template \n\ 73 | struct MatrixFunctor {\n\ 74 | void operator()(const Device& d, \ 75 | {% for i in range(func.argn - 1) %}\ 76 | {{ func.args[i].type }} {{ func.args[i].name }}, \ 77 | {% endfor %}\ 78 | {{ func.args[func.argn - 1].type }} {{ func.args[func.argn - 1].name }}\ 79 | );\n\ 80 | };\n\ 81 | \n\ 82 | #if GOOGLE_CUDA\n\ 83 | // Partially specialize functor for GpuDevice.\n\ 84 | template \n\ 85 | struct MatrixFunctor {\n\ 86 | void operator()(const Eigen::GpuDevice& d, \ 87 | {% for i in range(func.argn - 1) %}\ 88 | {{ func.args[i].type }} {{ func.args[i].name }}, \ 89 | {% endfor %}\ 90 | {{ func.args[func.argn - 1].type }} {{ func.args[func.argn - 1].name }}\ 91 | );\n\ 92 | };\n\ 93 | #endif\n\ 94 | \n\ 95 | \n\ 96 | #define COMPLEX_TYPE ' 97 | + op_gc.COMPLEX_TYPE 98 | + "\n\ 99 | \n\ 100 | #endif" 101 | ) 102 | 103 | cpu_op_template = '\ 104 | REGISTER_OP("Matrix{{ process }}")\n\ 105 | .Attr("T: numbertype")\n\ 106 | {% for i in range(func.argn - 3) %}\ 107 | .Input("{{ 
func.args[i].name|lower }}: {{ op_types[i] }}")\n\ 108 | {% endfor %}\ 109 | .Output("{{ func.args[func.argn - 3].name|lower }}: {{ op_types[func.argn - 3] }}")\n\ 110 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {\n\ 111 | c->set_output(0, c->MakeShape({c->Dim(c->input(0), 0)}));\n\ 112 | return Status::OK();\n\ 113 | });\n\ 114 | \ 115 | \ 116 | \ 117 | template \n\ 118 | struct {{ custom_op.functor_name }} {\n\ 119 | void operator()(const CPUDevice& d,\ 120 | {% for i in range(func.argn - 1) %}\ 121 | {{ func.args[i].type }} {{ func.args[i].name }}, \ 122 | {% endfor %}\ 123 | {{ func.args[func.argn - 1].type }} {{ func.args[func.argn - 1].name }}\ 124 | ) {\n\ 125 | {{ func.name }}({% for i in range(func.argn - 1) %}\ 126 | {{ func.args[i].name }}, \ 127 | {% endfor %}\ 128 | {{ func.args[func.argn - 1].name }}\ 129 | );\n\ 130 | }\n\ 131 | };\n\ 132 | \ 133 | \ 134 | \ 135 | template \n\ 136 | class {{ custom_op.name }} : public OpKernel {\n\ 137 | public:\n\ 138 | explicit {{ custom_op.name }}(OpKernelConstruction* context) : OpKernel(context) {}\n\ 139 | \n\ 140 | void Compute(OpKernelContext* context) override {\n\ 141 | {% for scope in custom_op.scope %}\ 142 | {{ scope }}\n\ 143 | {% endfor %}\ 144 | }\n\ 145 | };\n\ 146 | #define REGISTER_CPU(T)\\\n\ 147 | REGISTER_KERNEL_BUILDER(\\\n\ 148 | Name("Matrix{{ process }}").Device(DEVICE_CPU).TypeConstraint("T"),\\\n\ 149 | MatrixOp);\n\ 150 | REGISTER_CPU(COMPLEX_TYPE);\n\ 151 | \n\ 152 | // Register the GPU kernels.\n\ 153 | #ifdef GOOGLE_CUDA\n\ 154 | #define REGISTER_GPU(T)\\\n\ 155 | /* Declare explicit instantiations in kernel_example.cu.cc. */\\\n\ 156 | extern template class MatrixFunctor;\\\n\ 157 | REGISTER_KERNEL_BUILDER(\\\n\ 158 | Name("Matrix{{ process }}").Device(DEVICE_GPU).TypeConstraint("T"),\\\n\ 159 | MatrixOp);\n\ 160 | REGISTER_GPU(COMPLEX_TYPE);\n\ 161 | #endif\n\ 162 | ' 163 | 164 | gpu_op_template = "\ 165 | template \n\ 166 | void MatrixFunctor::operator()(\n\ 167 | const GPUDevice& d,\ 168 | {% for i in range(func.argn - 1) %}\ 169 | {{ func.args[i].type }} {{ func.args[i].name }}, \ 170 | {% endfor %}\ 171 | {{ func.args[func.argn - 1].type }} {{ func.args[func.argn - 1].name }}\ 172 | ) {\n\ 173 | // Launch the cuda kernel.\n\ 174 | //\n\ 175 | // See core/util/gpu_kernel_helper.h for example of computing\n\ 176 | // block count and thread_per_block count.\n\ 177 | \n\ 178 | int eventsPerBlock = 1;\n\ 179 | \n\ 180 | int blockSize = DEFAULT_BLOCK_SIZE;\n\ 181 | int numBlocks = (nevents + blockSize - 1) / (eventsPerBlock * blockSize);\n\ 182 | \n\ 183 | if (nevents < blockSize) {\n\ 184 | numBlocks = 1;\n\ 185 | blockSize = nevents;\n\ 186 | }\n\ 187 | \n\ 188 | \n\ 189 | {{ func.name }}<<>>({% for i in range(func.argn - 2) %}\ 190 | {{ func.args[i].name }}, \ 191 | {% endfor %}\ 192 | {{ func.args[func.argn - 2].name }}\ 193 | );\n\ 194 | \n\ 195 | }\n\ 196 | \n\ 197 | // Explicitly instantiate functors for the types of OpKernels registered.\n\ 198 | template struct MatrixFunctor;" 199 | 200 | 201 | # -------------------------------- 202 | 203 | gpu_arithmetic_operators = ( 204 | "__device__ COMPLEX_TYPE cconj(COMPLEX_TYPE a) {\n\ 205 | return COMPLEX_TYPE(a.real(), -a.imag());\n\ 206 | }\n\ 207 | \n\ 208 | __device__ COMPLEX_TYPE operator+(const COMPLEX_TYPE& a, const COMPLEX_TYPE& b) {\n\ 209 | return COMPLEX_TYPE(a.real() + b.real(), a.imag() + b.imag());\n\ 210 | }\n\ 211 | \n\ 212 | __device__ COMPLEX_TYPE operator-(const COMPLEX_TYPE& a, const COMPLEX_TYPE& b) {\n\ 213 | 
return COMPLEX_TYPE(a.real() - b.real(), a.imag() - b.imag());\n\ 214 | }\n\ 215 | \n\ 216 | __device__ COMPLEX_TYPE operator*(const COMPLEX_TYPE& a, const COMPLEX_TYPE& b) {\n\ 217 | return COMPLEX_TYPE(a.real() * b.real() - a.imag() * b.imag(), a.imag() * b.real() + a.real() * b.imag());\n\ 218 | }\n\ 219 | \n\ 220 | __device__ COMPLEX_TYPE operator/(const COMPLEX_TYPE& a, const COMPLEX_TYPE& b) {\n\ 221 | " 222 | + op_gc.DOUBLE_TYPE 223 | + " norm = b.real() * b.real() + b.imag() * b.imag();\n\ 224 | return COMPLEX_TYPE((a.real() * b.real() + a.imag() * b.imag())/norm, (a.imag() * b.real() - a.real() * b.imag())/norm);\n\ 225 | }\n\ 226 | \n\ 227 | __device__ COMPLEX_TYPE operator-(const COMPLEX_TYPE& a) {\n\ 228 | return COMPLEX_TYPE(-a.real(), -a.imag());\n\ 229 | }\n\ 230 | \n\ 231 | __device__ COMPLEX_TYPE operator*(const COMPLEX_TYPE& a, const " 232 | + op_gc.DOUBLE_TYPE 233 | + "& b) {\n\ 234 | return COMPLEX_TYPE(a.real() * b, a.imag() * b);\n\ 235 | }\n\ 236 | \n\ 237 | __device__ COMPLEX_TYPE operator*(const " 238 | + op_gc.DOUBLE_TYPE 239 | + "& a, const COMPLEX_TYPE& b) {\n\ 240 | return b * a;\n\ 241 | }\n\ 242 | \n\ 243 | __device__ COMPLEX_TYPE operator/(const COMPLEX_TYPE& a, const " 244 | + op_gc.DOUBLE_TYPE 245 | + "& b) {\n\ 246 | return COMPLEX_TYPE(a.real() / b, a.imag() / b);\n\ 247 | }\n" 248 | ) 249 | 250 | # -------------------------------- 251 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/generation.py: -------------------------------------------------------------------------------- 1 | """Optimization of the Custom Operator (parallelization / memory optimizations)""" 2 | 3 | import re 4 | 5 | import madflow.custom_op.aux_functions as op_af 6 | import madflow.custom_op.classes as op_cl 7 | import madflow.custom_op.global_constants as op_gc 8 | 9 | N_EVENTS = op_cl.Argument("nevents", "const int", 0, False, []) 10 | FOR_LOOP_STRING = "for (int it = 0; it < " + N_EVENTS.name + "; it += 1) {" 11 | 12 | 13 | def serialize_function(f): 14 | """Create a loop over the total number of events 15 | f: function object (updated)""" 16 | 17 | for_loop = False 18 | spacing = " " 19 | s = 0 20 | while s < len(f.scope): 21 | if for_loop == True: 22 | f.scope[s] = spacing + f.scope[s] 23 | elif op_af.clean_spaces(f.scope[s]).startswith("//Begin"): 24 | for_loop = True 25 | s += 1 26 | while op_af.clean_spaces(f.scope[s]).startswith("//") == True: 27 | s += 1 28 | 29 | f.scope.insert(s, FOR_LOOP_STRING) 30 | 31 | s += 1 32 | 33 | f.scope.insert(s, "}") 34 | s += 1 35 | 36 | f = prepare_custom_op(f, N_EVENTS) 37 | 38 | f.args.append(op_cl.Argument("context", "const OpKernelContext*", 0, False, [])) 39 | 40 | 41 | def parallelize_function(f, parallelization_type): 42 | """Parallelize the loop over the total number of events 43 | f: function object 44 | parallelization_type: OpenMP/ThreadPool/CUDA 45 | 46 | return: updated function object""" 47 | s = 0 48 | if parallelization_type == "OpenMP": 49 | while s < len(f.scope): 50 | if op_af.clean_spaces(f.scope[s]).startswith(op_af.clean_spaces(FOR_LOOP_STRING)): 51 | f.scope.insert(s, "#pragma omp parallel for") 52 | break 53 | s += 1 54 | elif parallelization_type == "ThreadPool": 55 | while s < len(f.scope): 56 | if op_af.clean_spaces(f.scope[s]).startswith(op_af.clean_spaces(FOR_LOOP_STRING)): 57 | f.scope.insert( 58 | s, 59 | "auto thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers;", 60 | ) 61 | s += 1 62 | f.scope.insert(s, "const int 
ncores = (int)thread_pool->NumThreads();") 63 | s += 1 64 | f.scope.insert(s, op_gc.INT64_TYPE + " nreps;") 65 | s += 1 66 | f.scope.insert(s, "if (ncores > 1) {") 67 | s += 1 68 | f.scope.insert( 69 | s, " nreps = (" + op_gc.INT64_TYPE + ")" + N_EVENTS.name + " / ncores;" 70 | ) 71 | s += 1 72 | f.scope.insert(s, "} else {") 73 | s += 1 74 | f.scope.insert(s, " nreps = 1;") 75 | s += 1 76 | f.scope.insert(s, "}") 77 | s += 1 78 | f.scope.insert( 79 | s, 80 | "const ThreadPool::SchedulingParams p(ThreadPool::SchedulingStrategy::kFixedBlockSize, absl::nullopt, nreps);", 81 | ) 82 | s += 1 83 | f.scope.insert( 84 | s, "auto DoWork = [&](" + op_gc.INT64_TYPE + " t, " + op_gc.INT64_TYPE + " w) {" 85 | ) 86 | s += 1 87 | del f.scope[s] 88 | f.scope.insert(s, "for (auto it = t; it < w; it += 1) {") 89 | break 90 | s += 1 91 | 92 | s = len(f.scope) 93 | f.scope.insert(s, "};") 94 | s += 1 95 | f.scope.insert(s, "thread_pool->ParallelFor(" + N_EVENTS.name + ", p, DoWork);") 96 | elif parallelization_type == "CUDA": 97 | while s < len(f.scope): 98 | if op_af.clean_spaces(f.scope[s]).startswith(op_af.clean_spaces(FOR_LOOP_STRING)): 99 | f.scope[s] = ( 100 | "for (int it = blockIdx.x * blockDim.x + threadIdx.x; it < " 101 | + f.args[-1].name 102 | + "; it += blockDim.x * gridDim.x) {" 103 | ) 104 | break 105 | s += 1 106 | 107 | return f 108 | 109 | 110 | def prepare_custom_op(f, nevents): 111 | """Few changes to the structure of the Op 112 | f: function object 113 | nevents: number of MC events 114 | 115 | return: updated function object""" 116 | 117 | # momenta, masses, widths and coupling constants are const 118 | # pass them by pointer 119 | for i in range(len(f.args) - 1): 120 | f.args[i].type = "const " + f.args[i].type 121 | if f.args[i].type.endswith("*") == False: 122 | f.args[i].type += "*" 123 | if f.args[i].tensor == True: 124 | # Tensors are arrays 125 | for j in range(len(f.scope)): 126 | f.scope[j] = re.sub( 127 | "([()[\]{} ,+\-*/]*)" + f.args[i].name + "([()[\]{} ,+\-*/]*)", 128 | "\g<1>" + f.args[i].name + "[it]" + "\g<2>", 129 | f.scope[j], 130 | ) 131 | else: 132 | # Non-Tensors are arrays with only one component 133 | for j in range(len(f.scope)): 134 | f.scope[j] = re.sub( 135 | "([()[\]{} ,+\-*/]*)" + f.args[i].name + "([()[\]{} ,+\-*/]*)", 136 | "\g<1>" + f.args[i].name + "[0]" + "\g<2>", 137 | f.scope[j], 138 | ) 139 | 140 | # The polarized Matrix Element is an array of double 141 | f.args[-1].type = op_gc.DOUBLE_TYPE + "*" 142 | 143 | for j in range(len(f.scope)): 144 | f.scope[j] = re.sub( 145 | "([()[\]{} ,+\-*/]*)" + f.args[-1].name + "([()[\]{} ,+\-*/]*)", 146 | "\g<1>" + f.args[-1].name + "[it]" + "\g<2>", 147 | f.scope[j], 148 | ) 149 | match = re.search("[()[\]{} ,+\-*/]*" + f.args[0].name + "\[", f.scope[j]) 150 | if match != None: 151 | number = int( 152 | re.sub( 153 | ".*[()[\]{} ,+\-*/]*" + f.args[0].name + "\[([0-9]*)\].*", "\g<1>", f.scope[j] 154 | ) 155 | ) 156 | f.scope[j] = re.sub( 157 | "([()[\]{} ,+\-*/]*)" + f.args[0].name + "\[([0-9]*)\]", 158 | "\g<1>" 159 | + f.args[0].name 160 | + "+(" 161 | + str(f.args[0].slice[-1]) 162 | + "*it + " 163 | + str(int(f.args[0].slice[-2]) * number) 164 | + ")", 165 | f.scope[j], 166 | ) 167 | 168 | # Add the number events as a function argument 169 | f.args.append(nevents) 170 | 171 | return f 172 | 173 | 174 | def define_custom_op(func): 175 | """Generates a custom_operator object 176 | func: Function object 177 | 178 | return: CustomOperator object""" 179 | s = [] 180 | 181 | input_tensors_number = len(func.args) - 3 
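    # At this point func.args is expected to end with [..., ret (output buffer), nevents, context]:
    # grab_function_return appended the "ret" argument and serialize_function later appended
    # "nevents" and "context", so everything before the last three entries is an input tensor
    # (hence the "- 3" above).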
182 | 183 | for i in range(input_tensors_number): 184 | s.append("const Tensor& " + func.args[i].name + "_tensor = context->input(" + str(i) + ");") 185 | s.append( 186 | "auto " 187 | + func.args[i].name 188 | + " = " 189 | + func.args[i].name 190 | + "_tensor.flat<" 191 | + re.sub("const ([^&*]*)[&*]*", "\g<1>", func.args[i].type) 192 | + ">().data();" 193 | ) 194 | s.append("") 195 | 196 | # func.args[-1] is context 197 | s.append( 198 | func.args[-2].type 199 | + " " 200 | + func.args[-2].name 201 | + " = " 202 | + func.args[0].name 203 | + "_tensor.shape().dim_size(0);" 204 | ) 205 | 206 | s.append("Tensor* output_tensor = NULL;") 207 | s.append( 208 | "OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({" 209 | + func.args[-2].name 210 | + "}), &output_tensor));" 211 | ) 212 | s.append( 213 | "auto " 214 | + func.args[-3].name 215 | + " = output_tensor->flat<" 216 | + re.sub("([^&*]*)[&*]*", "\g<1>", func.args[-3].type) 217 | + ">();" 218 | ) 219 | 220 | functor_name = re.sub("Op", "Functor", "MatrixOp") 221 | 222 | s.append("") 223 | line = functor_name + "()(context->eigen_device()" 224 | for i in range(input_tensors_number): 225 | line += ", " + func.args[i].name 226 | line += ", " + func.args[-3].name + ".data()" 227 | line += ", " + func.args[-2].name 228 | line += ", " + func.args[-1].name + ");" 229 | s.append(line) 230 | 231 | return op_cl.CustomOperator("MatrixOp", s, functor_name) 232 | 233 | 234 | def modify_matrix(infile, process_name, destination): 235 | """add the ability to execute the Op from MadFlow 236 | infile: complete path to matrix_1_xxxxx.py 237 | process_name: process name 238 | (read from matrix_1_xxxxx.py) 239 | destination: directory of the Custom Operator 240 | 241 | return: CustomOperator object""" 242 | f = open(infile, "r") 243 | line = f.readline() 244 | previous_line = "" 245 | new_matrix = "" 246 | matrix_source_code = "" 247 | matrix_source_code_array = [] 248 | skip_lines = False 249 | inside_matrix = False 250 | p = re.sub("_", "", process_name) 251 | while line != "": 252 | if skip_lines == True: 253 | if op_af.clean_spaces(line).startswith("return"): 254 | skip_lines = False 255 | else: 256 | # temp += line 257 | matrix_source_code_array.append(line) 258 | if op_af.clean_spaces(line).startswith( 259 | "defcusmatrix(" 260 | ): # I can re-run the script without creating duplicates of cusmatrix() 261 | skip_lines = True 262 | matrix_source_code_array.pop() 263 | matrix_source_code_array.pop() 264 | if op_af.clean_spaces(line).startswith("defsmatrix("): 265 | inside_matrix = True 266 | new_matrix += "\n" # add empty line 267 | new_matrix += ( 268 | previous_line # add @tf.function() with the same input signature as smatrix 269 | ) 270 | if inside_matrix == True: 271 | if op_af.clean_spaces(line).startswith("for"): 272 | space = line.split("for")[0] 273 | new_matrix += ( 274 | space 275 | + "matrixOp = tf.load_op_library('" 276 | + (destination / ("matrix_" + process_name + "_cu.so'")).as_posix() 277 | + ")\n" 278 | ) 279 | new_matrix += line 280 | if op_af.clean_spaces(line).startswith("return"): 281 | inside_matrix = False 282 | new_matrix = re.sub("smatrix\(", "cusmatrix(", new_matrix) 283 | new_matrix = re.sub("self\.matrix\(", "matrixOp.matrix" + p + "(", new_matrix) 284 | # temp += new_matrix 285 | matrix_source_code_array.append(new_matrix) 286 | # break 287 | if op_af.clean_spaces(line) != "": # not checking if it is inside a comment !!! 
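            # only fully empty lines are skipped here, so previous_line may also be
            # set from text that lives inside a docstring or comment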
288 | previous_line = line 289 | line = f.readline() 290 | 291 | for line in matrix_source_code_array: 292 | matrix_source_code += line 293 | 294 | return matrix_source_code 295 | 296 | 297 | def extract_constants(func, constants): 298 | """cf and denom are constant (event-independent) 299 | this function moves them to global scope 300 | func: Function object (updated) 301 | constants: list of constants (updated)""" 302 | 303 | count = 0 304 | for i in range(len(func.scope)): 305 | if func.scope[i].startswith("const double "): 306 | constants.append(op_af.change_array_into_variable(func.scope[i])) 307 | del func.scope[i] 308 | i -= 1 309 | count += 1 310 | if count == 2: 311 | break 312 | 313 | for i in range(len(func.scope)): 314 | match = re.search("denom", func.scope[len(func.scope) - i - 1]) 315 | if match != None: 316 | func.scope[len(func.scope) - i - 1] = re.sub( 317 | "denom\[[a-zA-Z0-9+\-*/_]\]", "denom", func.scope[len(func.scope) - i - 1] 318 | ) 319 | break 320 | 321 | 322 | def remove_real_ret(func): 323 | """In the Op the return variable is already declared as double, 324 | therefore .real() must be removed 325 | func: Function object (updated)""" 326 | 327 | for i in range(len(func.scope)): # This loop can be reversed 328 | if op_af.clean_spaces(func.scope[len(func.scope) - i - 1]).startswith(func.args[-3].name): 329 | func.scope[len(func.scope) - i - 1] = re.sub( 330 | ".real\(\)", "", func.scope[len(func.scope) - i - 1] 331 | ) 332 | break # Only one occurrence 333 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/global_constants.py: -------------------------------------------------------------------------------- 1 | """Global constants and libraries used by the Custom Operator""" 2 | 3 | # Types 4 | 5 | INT64_TYPE = "int64_t" 6 | DOUBLE_TYPE = "double" 7 | COMPLEX_TYPE = "complex128" 8 | 9 | # Parallelization 10 | 11 | CPU_PARALLELIZATION = "ThreadPool" 12 | GPU_PARALLELIZATION = "CUDA" 13 | 14 | # Libraries (#include ) 15 | # Used in Matrix_xxxx.h 16 | 17 | LIBRARIES = ["math.h", "unsupported/Eigen/CXX11/Tensor"] 18 | 19 | # Header files (#include "Header.h") 20 | 21 | HEADERS_ = [ 22 | "tensorflow/core/framework/op.h", 23 | "tensorflow/core/framework/op_kernel.h", 24 | "tensorflow/core/util/work_sharder.h", 25 | "tensorflow/core/framework/shape_inference.h", 26 | "tensorflow/cc/ops/array_ops.h", 27 | "tensorflow/cc/ops/math_ops.h", 28 | ] 29 | 30 | # Namespaces 31 | 32 | NAMESPACE = "tensorflow" 33 | 34 | # Constants 35 | 36 | DEFINED = [ 37 | "COMPLEX_CONJUGATE std::conj", 38 | "MAXIMUM std::max", 39 | "MINIMUM std::min", 40 | "CPUDevice Eigen::ThreadPoolDevice", 41 | "GPUDevice Eigen::GpuDevice", 42 | "DEFAULT_BLOCK_SIZE 32", 43 | ] 44 | GLOBAL_CONSTANTS = [ 45 | "const " + DOUBLE_TYPE + " SQH = 0.70710676908493", 46 | "const COMPLEX_TYPE CZERO = COMPLEX_TYPE(0.0, 0.0)", 47 | ] 48 | CPU_CONSTANTS = ["using thread::ThreadPool"] # Not used for the GPU Op 49 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/parser.py: -------------------------------------------------------------------------------- 1 | """Extraction of features from python code (function scope / signature)""" 2 | 3 | import re 4 | import copy 5 | 6 | import madflow.custom_op.aux_functions as op_af 7 | import madflow.custom_op.classes as op_cl 8 | import madflow.custom_op.transpiler as op_tp 9 | 10 | 11 | # Function parsing 12 | 13 | 14 | def 
parse_function_return(function_return, scope, scope_args, args): 15 | """Parse the return line 16 | function_return: a string containing the return line 17 | scope: list of strings containing the function scope 18 | scope_args: variables defined within function scope 19 | args: function arguments 20 | 21 | return: updated scope and scope variables""" 22 | 23 | function_return = re.sub("return", args[-1].name + " =", function_return) 24 | function_return = re.sub("#[^\n]*\n", "", function_return) 25 | inside_comment = False 26 | new_line, scope_args, scope, inside_comment = op_tp.parse_line( 27 | function_return, args, scope_args, scope, inside_comment 28 | ) 29 | 30 | scope.append(new_line) 31 | 32 | return scope, scope_args 33 | 34 | 35 | def parse_function_scope(function_scope, scope, scope_args, args): 36 | """Parse function scope 37 | function_scope: a string containing the return line 38 | scope: list of strings containing the function scope 39 | scope_args: variables defined within function scope 40 | args: function arguments 41 | 42 | return: updated scope and scope variables""" 43 | if len(function_scope) == 0: 44 | # empty scope 45 | return scope, scope_args 46 | # read the function scope 47 | 48 | i = 0 49 | inside_comment = False 50 | while i < len(function_scope): 51 | line = function_scope[i] # read i-th line 52 | brackets_count = 0 53 | brackets_count = op_af.count_brackets(line, brackets_count) 54 | # if there are more '(' than ')', read more lines 55 | # (but don't go beyond len(function_scope) 56 | while brackets_count > 0 and i < len(function_scope) - 1: 57 | i += 1 58 | l = function_scope[i] 59 | brackets_count = op_af.count_brackets(l, brackets_count) 60 | line += l # create a single line 61 | # parse (and transpile) the line 62 | # read also any variables defined in the scope 63 | # append those variables to scope_args 64 | new_line, scope_args, scope, inside_comment = op_tp.parse_line( 65 | line, args, scope_args, scope, inside_comment 66 | ) 67 | scope.append(new_line) # add the line to the function scope 68 | i += 1 69 | 70 | return scope, scope_args 71 | 72 | 73 | def get_signature(line): 74 | """Read the signature from text 75 | line: line of text containing the signature 76 | 77 | return: a Signature object""" 78 | type_ = line.split("dtype=")[1] 79 | type_ = type_.split(")")[0] 80 | type_ = op_af.convert_type(type_) 81 | is_tensor = False 82 | 83 | shape = line.split("shape=[")[1] 84 | shape = shape.split("]")[0] 85 | slice_ = [] 86 | 87 | if shape == "": 88 | shape = 0 89 | elif shape == "None": 90 | shape = 0 91 | is_tensor = True 92 | else: 93 | s = shape.split(",", 1)[-1] 94 | shape = op_af.clean_spaces(shape.split(",")[-1]) 95 | s = s.split(",") 96 | prod = 1 97 | for a in s: 98 | slice_.append(a) 99 | if a != "None": 100 | prod *= int(a) 101 | slice_.append(str(prod)) 102 | 103 | name = op_af.clean_spaces(line.split("=")[0]) 104 | 105 | return op_cl.Signature(name, type_, shape, is_tensor, slice_) 106 | 107 | 108 | def convert_signatures(signatures, signature_variables): 109 | """Read the signature from text and update signature_variables 110 | signatures: list of text Signature objects 111 | signature_variables: list of tf.function signatures""" 112 | for sv in signature_variables: 113 | for v in sv.signature_name_list: 114 | for s in signatures: 115 | if s.name == v: 116 | sv.signature_list.append(s) 117 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/read.py: 
-------------------------------------------------------------------------------- 1 | """Reads functions and signatures line by line""" 2 | 3 | import re 4 | 5 | import madflow.custom_op.aux_functions as op_af 6 | import madflow.custom_op.global_constants as op_gc 7 | import madflow.custom_op.classes as op_cl 8 | import madflow.custom_op.parser as op_pa 9 | 10 | 11 | def grab_function_name(line): 12 | """Read function name 13 | line: a line of text that defines the function. 14 | i.e.: def function_name(args): 15 | 16 | return: function name""" 17 | return line.split("(", 1)[0] 18 | 19 | 20 | def grab_function_arguments(line, f_name, signature_variables, signature_line): 21 | """Read function arguments 22 | line: a line of text that defines the function. 23 | i.e.: def function_name(args): 24 | f_name: function name 25 | signature_variables: list of Signature objects containing 26 | any previously defined signature 27 | signature_line: a line of text defining function signature. 28 | (@tf.function ...) 29 | 30 | return: a list of argument objects containing all function arguments""" 31 | line = line.split(")", 1)[0] 32 | line = line[len(f_name) + 1 :] 33 | # create a list of names of function arguments 34 | split_args = op_af.clean_spaces(line).split(",") 35 | 36 | # delete self if the function is a class method 37 | j = -1 38 | for i in range(len(split_args)): 39 | if split_args[i] == "self": 40 | j = i 41 | break 42 | if j != -1: 43 | del split_args[j] 44 | 45 | args = [] 46 | split_types = [] 47 | split_sizes = [] 48 | split_tensors = [] 49 | split_slices = [] 50 | sig_list = [] 51 | signature_line = signature_line.split("@tf.function(")[1] 52 | signature_line = signature_line.split(")")[0] 53 | signature_line = op_af.clean_spaces(signature_line).split("input_signature=")[1] 54 | if signature_line.startswith("["): 55 | s = op_pa.get_signature(signature_line) 56 | sig_list.append(s) 57 | else: 58 | for sv in signature_variables: 59 | if sv.name == signature_line: 60 | if len(sv.signature_list) == len(split_args): 61 | sig_list = sv.signature_list 62 | 63 | for a in sig_list: 64 | t = a.type 65 | if a.size != 0: 66 | t += "*" 67 | split_types.append(t) 68 | split_sizes.append(a.size) 69 | split_tensors.append(a.tensor) 70 | split_slices.append(a.slice) 71 | 72 | for i in range(len(split_args)): 73 | split_args[i] = op_af.clean_spaces(split_args[i]) 74 | args.append( 75 | op_cl.Argument( 76 | split_args[i], split_types[i], split_sizes[i], split_tensors[i], split_slices[i] 77 | ) 78 | ) 79 | 80 | return args 81 | 82 | 83 | def grab_function_return(line, f_name, args): 84 | """Read function return value and type 85 | line: a line of text containing the return line 86 | f_name: function name 87 | args: list of argument objects containing 88 | any previously defined variable. 89 | 90 | return: an updated list of argument objects containing 91 | the return variable""" 92 | 93 | # Currently all functions are void 94 | f_type = "void" 95 | # The return value is passed by pointer 96 | args.append(op_cl.Argument("ret", op_gc.DOUBLE_TYPE, -1, False, [])) 97 | return args, f_type 98 | 99 | 100 | def grab_function_scope(f, args): 101 | """Read function scope 102 | f: file stream 103 | args: list of argument objects containing 104 | function arguments. 
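    The function body is read until the first 'return' statement; the return
    expression itself is then collected until the next blank line.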
105 | 106 | return: an updated list of strings containing 107 | the function scope and scope variables""" 108 | 109 | line = f.readline() 110 | scope = [] 111 | scope_args = [] 112 | function_scope = [] 113 | function_return = "" 114 | 115 | match = re.search("^ *return[ ]+", line) 116 | while op_af.clean_spaces(line).startswith("return") == False: 117 | function_scope.append(line) 118 | match = re.search("^ *return[ ]+", line) 119 | line = f.readline() 120 | while op_af.clean_spaces(line) != "": 121 | function_return += line 122 | line = f.readline() 123 | 124 | args[-1].name = grab_return_variable_name(function_return) 125 | scope, scope_args = op_pa.parse_function_scope(function_scope, scope, scope_args, args) 126 | scope, scope_args = op_pa.parse_function_return(function_return, scope, scope_args, args) 127 | 128 | return scope, scope_args 129 | 130 | 131 | def grab_return_variable_name(function_return): 132 | """Read the name of the return variable, if defined 133 | otherwise, default to 'out_final' 134 | function_return: a line of text containing the return line 135 | 136 | return: string representing the name of the return variable""" 137 | ret_name = "out_final" 138 | function_return = op_af.clean_spaces(function_return)[len("return") :] 139 | st1 = "tf.stack(" 140 | st2 = "tf.transpose(" 141 | st3 = "tf.reshape(tf.stack(" 142 | if function_return.startswith(st1): 143 | ret_name = function_return[len(st1) :].split(")")[0].split(",")[0] 144 | elif function_return.startswith(st2): 145 | ret_name = function_return[len(st2) :].split(")")[0].split(",")[0] 146 | elif function_return.startswith(st3): 147 | ret_name = function_return[len(st3) :].split(")")[0].split(",")[0] 148 | return ret_name 149 | 150 | 151 | # Read from file 152 | 153 | 154 | def read_file_from_source(function_list, file_source, signatures, signature_variables): 155 | """Read a file, looking for functions 156 | function_list: list of function objects containing 157 | previously defined functions (updated) 158 | file_source: complete path to the file we need to read 159 | signatures: defined function signatures 160 | signature_variables: defined signature variables""" 161 | f = open(file_source, "r") 162 | line = f.readline() 163 | while line != "": 164 | if op_af.clean_spaces(line).startswith("@tf.function"): 165 | signature_line = line 166 | line = f.readline() 167 | l = line 168 | i = 0 169 | while l.endswith("):\n") == False: 170 | if i != 0: 171 | line += l[:-1] 172 | l = f.readline() 173 | i += 1 174 | line = re.sub(" *def ", "", line) # cut "def " from the line 175 | f_name = grab_function_name(line) 176 | 177 | already_defined = False 178 | for func in function_list: 179 | if f_name == func.name: 180 | already_defined = True 181 | break 182 | 183 | if already_defined == False: 184 | f_type = "void" 185 | args = [] 186 | scope = [] 187 | scope_args = [] 188 | args = grab_function_arguments(line, f_name, signature_variables, signature_line) 189 | args, f_type = grab_function_return(line, f_name, args) 190 | scope, scope_args = grab_function_scope(f, args) 191 | new_function = op_cl.Function( 192 | f_type, f_name, args, scope, scope_args, "template " 193 | ) 194 | function_list.append(new_function) 195 | 196 | line = f.readline() 197 | 198 | 199 | def extract_matrix_from_file(function_list, file_source, signatures, signature_variables): 200 | """Read the matrix element file, looking for the polarized 201 | matrix element function 202 | function_list: list of function objects containing 203 | previously defined 
functions (updated) 204 | file_source: complete path to the file we need to read 205 | signatures: defined function signatures 206 | signature_variables: defined signature variables""" 207 | f = open(file_source, "r") 208 | line = f.readline() 209 | while line != "": 210 | if op_af.clean_spaces(line).startswith("@tf.function"): 211 | signature_line = line 212 | line = f.readline() 213 | l = line 214 | i = 0 215 | while l.endswith("):\n") == False: 216 | if i != 0: 217 | line += l[:-1] 218 | l = f.readline() 219 | i += 1 220 | line = re.sub(" *def ", "", line) # cut "def " from the line 221 | f_name = grab_function_name(line) 222 | 223 | already_defined = False 224 | for func in function_list: 225 | if f_name == func.name: 226 | already_defined = True 227 | break 228 | 229 | if already_defined == False and f_name == "matrix": 230 | f_type = "void" 231 | args = [] 232 | scope = [] 233 | scope_args = [] 234 | args = grab_function_arguments(line, f_name, signature_variables, signature_line) 235 | args, f_type = grab_function_return(line, f_name, args) 236 | scope, scope_args = grab_function_scope(f, args) 237 | new_function = op_cl.Function( 238 | f_type, f_name, args, scope, scope_args, "template " 239 | ) 240 | function_list.append(new_function) 241 | 242 | line = f.readline() 243 | 244 | 245 | def read_signatures(signatures, signature_variables, file_source): 246 | """Read signatures from file 247 | signatures: previously defined signatures (updated) 248 | signature_variables: previously defined signature variables (updated) 249 | file_source: complete path to the file we need to read""" 250 | f = open(file_source, "r") 251 | line = f.readline() 252 | while line != "": 253 | match = re.search("tf.TensorSpec", line) 254 | match2 = re.search("signature", line) 255 | if match != None and op_af.clean_spaces(line).startswith("@tf.function") == False: 256 | s = op_pa.get_signature(line) 257 | signatures.append(s) 258 | elif match2 != None and op_af.clean_spaces(line).startswith("@tf.function") == False: 259 | br_count = 0 260 | for letter in line: 261 | if letter == "[": 262 | br_count += 1 263 | elif letter == "]": 264 | br_count -= 1 265 | while br_count > 0: 266 | l = f.readline() 267 | for letter in l: 268 | if letter == "[": 269 | br_count += 1 270 | elif letter == "]": 271 | br_count -= 1 272 | line += l 273 | line = re.sub("(TensorSpec\([^)]*\) *),", "\g<1>?", line) 274 | name = op_af.clean_spaces(line.split("=")[0]) 275 | line = line.split(" = ")[1] 276 | var_list = line.split("+") 277 | if len(var_list) == 1: 278 | var_list = line.split("?") 279 | sig_list = [] 280 | s_list = [] 281 | for var in var_list: 282 | match = re.search("tf.TensorSpec", var) 283 | if match != None: 284 | var = re.sub(".*[\n]*.*(tf.TensorSpec\([^)]*\)).*", "\g<1>", var) 285 | s_list.append( 286 | op_cl.Signature( 287 | var, 288 | op_pa.get_signature(var).type, 289 | op_pa.get_signature(var).size, 290 | op_pa.get_signature(var).tensor, 291 | op_pa.get_signature(var).slice, 292 | ) 293 | ) 294 | match = re.search("\[[a-zA-Z0-9_]+] *\*", var) 295 | sig_name = op_af.clean_spaces(re.sub("\[([a-zA-Z0-9_]+)].*", "\g<1>", var)) 296 | times = 1 297 | if match != None: 298 | times = int( 299 | op_af.clean_spaces(re.sub("\[[a-zA-Z0-9_]+] *\* *(\d+)", "\g<1>", var)) 300 | ) 301 | for i in range(times): 302 | sig_list.append(sig_name) 303 | 304 | if len(s_list) > 0: 305 | s = op_cl.SignatureVariable(name, s_list, []) 306 | signature_variables.append(s) 307 | elif len(sig_list) > 0: 308 | s = op_cl.SignatureVariable(name, [], 
sig_list) 309 | signature_variables.append(s) 310 | line = f.readline() 311 | f.close() 312 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/syntax.py: -------------------------------------------------------------------------------- 1 | """Control of C++/CUDA syntax robustness""" 2 | 3 | import re 4 | 5 | import madflow.custom_op.aux_functions as op_af 6 | import madflow.custom_op.global_constants as op_gc 7 | import madflow.custom_op.classes as op_cl 8 | 9 | 10 | def check_variables(counter, function_list): 11 | """Check if all variables of i-th function are 12 | correctly defined 13 | counter: index of the i-th function (i) 14 | function_list: list of all function objects (updated)""" 15 | all_sizes_defined = True 16 | found = False 17 | i = 0 18 | 19 | # Check if all function arguments have a defined size 20 | for i in range(len(function_list[counter].args)): 21 | if (function_list[counter].args)[i].size == -1: 22 | all_sizes_defined = False 23 | break 24 | 25 | if all_sizes_defined == False: 26 | for j in range(len(function_list[counter].scope)): 27 | line = (function_list[counter].scope)[j] 28 | 29 | for k in range(len(function_list)): 30 | match = re.search( 31 | function_list[k].name + "\(.*" + (function_list[counter].args)[i].name, line 32 | ) 33 | if match != None: 34 | check_variables(k, function_list) 35 | if function_list[k].args[-1].size != -1: 36 | (function_list[counter].args)[i].size = function_list[k].args[-1].size 37 | (function_list[counter].args)[i].type = op_af.clean_pointer( 38 | function_list[k].args[-1].type 39 | ) 40 | if function_list[k].args[-1].size != 0: 41 | (function_list[counter].args)[i].type += "*" 42 | else: 43 | (function_list[counter].args)[i].type += "&" 44 | # found = False # to avoid counting multiple times 45 | 46 | i = 0 47 | all_sizes_defined = True 48 | for i in range(len(function_list[counter].scope_args)): 49 | if (function_list[counter].scope_args)[i].size == -1: 50 | all_sizes_defined = False 51 | break 52 | 53 | line_of_definition = -1 54 | variabe_type = "" 55 | new_size = -1 56 | 57 | if all_sizes_defined == False: 58 | for j in range(len(function_list[counter].scope)): 59 | line = (function_list[counter].scope)[j] 60 | match = re.search( 61 | (function_list[counter].scope_args)[i].type 62 | + "\** " 63 | + (function_list[counter].scope_args)[i].name 64 | + ";", 65 | line, 66 | ) 67 | if match != None: 68 | found = True 69 | line_of_definition = j 70 | 71 | if found == True: 72 | for k in range(len(function_list)): 73 | match = re.search( 74 | function_list[k].name 75 | + "\(.*" 76 | + (function_list[counter].scope_args)[i].name, 77 | line, 78 | ) 79 | if match != None: 80 | check_variables(k, function_list) 81 | new_size = function_list[k].args[-1].size 82 | (function_list[counter].scope_args)[i].size = new_size 83 | variabe_type = re.sub("[&\*]*", "", function_list[k].args[-1].type) 84 | (function_list[counter].scope_args)[i].type = variabe_type 85 | found = False # to avoid counting multiple times 86 | 87 | if variabe_type != "": 88 | (function_list[counter].scope)[line_of_definition] = re.sub( 89 | "^[a-zA-Z0-9_]* ", 90 | variabe_type + " ", 91 | (function_list[counter].scope)[line_of_definition], 92 | ) 93 | if new_size != 0: 94 | (function_list[counter].scope)[line_of_definition] = re.sub( 95 | ";", "[" + str(new_size) + "];", (function_list[counter].scope)[line_of_definition] 96 | ) 97 | 98 | 99 | def check_lines(counter, function_list): 100 | """Check if all lines 
of the i-th function have 101 | correct grammar and syntax 102 | counter: index of the i-th function (i) 103 | function_list: list of all function objects (updated) 104 | 105 | return: updated function_list""" 106 | it = 0 107 | while it < len(function_list[counter].scope): 108 | line = function_list[counter].scope[it] 109 | 110 | if function_list[counter].args[-1].size == -1: 111 | match = re.search("^[ +\-*/,()[\]{}]*T\(", line) 112 | if match == None: 113 | l = line.split(" = ") 114 | if function_list[counter].args[-1].name == l[0]: 115 | custom_type = "int" 116 | type_value = 0 117 | value = l[1] 118 | for v in function_list[counter].args + function_list[counter].scope_args: 119 | reassignment = re.search( 120 | "[()[\]{}+\-*/, \n]" + v.name + "[()[\]{}+\-*/, \n;]", value 121 | ) 122 | if reassignment == None: 123 | reassignment = re.search("^" + v.name + "[()[\]{}+\-*/, \n;]", value) 124 | if reassignment == None: 125 | reassignment = re.search("[()[\]{}+\-*/, \n]" + v.name + "$", value) 126 | if reassignment == None: 127 | reassignment = re.search("^" + v.name + "$", value) 128 | if reassignment != None: 129 | if v.type.startswith("T"): 130 | custom_type = "T" 131 | break 132 | elif v.type.startswith(op_gc.DOUBLE_TYPE): 133 | type_value += 1 134 | 135 | if custom_type != "T" and type_value > 0: 136 | custom_type = op_gc.DOUBLE_TYPE 137 | 138 | function_list[counter].args[-1].type = custom_type + "&" 139 | function_list[counter].args[-1].size = 0 140 | 141 | ls = line.split(" = ") 142 | if len(ls) > 1: 143 | if ls[1].startswith("T("): 144 | value = ls[1] 145 | for v in function_list[counter].args + function_list[counter].scope_args: 146 | reassignment = re.search( 147 | "[()[\]{}+\-*/, \n]" + v.name + "[()[\]{}+\-*/, \n;]", value 148 | ) 149 | if reassignment == None: 150 | reassignment = re.search("^" + v.name + "[()[\]{}+\-*/, \n;]", value) 151 | if reassignment == None: 152 | reassignment = re.search("[()[\]{}+\-*/, \n]" + v.name + "$", value) 153 | if reassignment == None: 154 | reassignment = re.search("^" + v.name + "$", value) 155 | if reassignment != None: 156 | if v.type.startswith("T"): 157 | match = re.search("T\( *" + v.name + "[0-9[\]]* *\)", value) 158 | if match != None: 159 | if ls[0].startswith(v.name): 160 | function_list[counter].scope[it] = "" 161 | break 162 | elif v.type.startswith(op_gc.DOUBLE_TYPE): 163 | match = re.search("T\( *" + v.name + "[0-9[\]]* *\)", value) 164 | if match != None: 165 | if ls[0].startswith(v.name): 166 | for it2 in range(it): 167 | function_list[counter].scope[it2] = re.sub( 168 | "([()[\]{}, +\-*/]*" + v.name + ")([()[\]{}, +\-*/;]*)", 169 | "\g<1>_\g<2>", 170 | function_list[counter].scope[it2], 171 | ) 172 | function_list[counter].scope[it] = ( 173 | "T " 174 | + ls[0] 175 | + " = " 176 | + re.sub( 177 | "([()[\]{}, +\-*/]*" 178 | + v.name 179 | + ")([()[\]{}, +\-*/;]*)\);", 180 | "\g<1>_\g<2>", 181 | ls[1], 182 | ) 183 | + ", 0);" 184 | ) 185 | function_list[counter].scope_args.append( 186 | op_cl.Argument(v.name, "T", 0, False, []) 187 | ) 188 | for it2 in range(len(function_list[counter].args)): 189 | if v.name == function_list[counter].args[it2].name: 190 | function_list[counter].args[it2].name += "_" 191 | for it2 in range(len(function_list[counter].scope_args)): 192 | if v.name == function_list[counter].scope_args[it2].name: 193 | function_list[counter].scope_args[it2].name += "_" 194 | break 195 | else: 196 | for f in function_list: 197 | match = re.search("^ *" + f.name + " *\(", ls[1]) 198 | if match != None: 199 | if f.type 
== "void": 200 | if ( 201 | ls[0].startswith("T") 202 | or ls[0].startswith(op_gc.DOUBLE_TYPE) 203 | or ls[0].startswith("int") 204 | ): 205 | for v in range(len(function_list[counter].scope_args)): 206 | if l[0].endswith( 207 | " " + function_list[counter].scope_args[v].name 208 | ): 209 | function_list[counter].scope_args[v].type = re.sub( 210 | "[&*]*", "", f.args[-1].type 211 | ) 212 | function_list[counter].scope_args[v].size = f.args[-1].size 213 | break 214 | if f.args[-1].size > 0: 215 | function_list[counter].scope.insert( 216 | it, 217 | function_list[counter].scope_args[v].type 218 | + " " 219 | + function_list[counter].scope_args[v].name 220 | + "[" 221 | + str(f.args[-1].size) 222 | + "];", 223 | ) 224 | else: 225 | function_list[counter].scope.insert( 226 | it, 227 | function_list[counter].scope_args[v].type 228 | + " " 229 | + function_list[counter].scope_args[v].name 230 | + ";", 231 | ) 232 | it += 1 233 | function_list[counter].scope[it] = re.sub( 234 | ".* +" + function_list[counter].scope_args[v].name + " *=", 235 | function_list[counter].scope_args[v].name + " =", 236 | function_list[counter].scope[it], 237 | ) 238 | function_list[counter].scope[it] = re.sub( 239 | "([a-zA-Z0-9_]*) *= *(.*)\) *;", 240 | "\g<2>, \g<1>);", 241 | function_list[counter].scope[it], 242 | ) 243 | 244 | match = re.search("tf.concat", line) 245 | if match != None: 246 | function_list[counter].scope.remove(line) 247 | line = re.sub("(.*)tf.concat\( *\[(.*) *] *, *axis.*", "\g<1>\g<2>", line) 248 | assigned = op_af.clean_spaces(line.split("=")[1]) 249 | assigned_variable = op_af.clean_spaces(line.split("=")[0]) 250 | var_list = assigned.split(",") 251 | var_length = [] 252 | conc_size = 0 253 | unknown = False 254 | type_value = 0 255 | conc_type = "int" 256 | for var in var_list: 257 | for i in range(len(function_list[counter].args)): 258 | if var == function_list[counter].args[i].name: 259 | c_size = 0 260 | if function_list[counter].args[i].size == 0: 261 | c_size += 1 262 | elif function_list[counter].args[i].size != -1: 263 | c_size += args[i].size 264 | else: 265 | c_size = 0 266 | unknown = True 267 | if function_list[counter].args[i].type.startswith("T"): 268 | conc_type = "T" 269 | elif function_list[counter].args[i].type.startswith(op_gc.DOUBLE_TYPE): 270 | type_value += 1 271 | conc_size += c_size 272 | var_length.append(c_size) 273 | break 274 | for i in range(len(function_list[counter].scope_args)): 275 | if var == function_list[counter].scope_args[i].name: 276 | c_size = 0 277 | if function_list[counter].scope_args[i].size == 0: 278 | c_size += 1 279 | elif function_list[counter].scope_args[i].size != -1: 280 | c_size += int(function_list[counter].scope_args[i].size) 281 | else: 282 | c_size = 0 283 | unknown = True 284 | if function_list[counter].scope_args[i].type.startswith("T"): 285 | conc_type = "T" 286 | elif ( 287 | function_list[counter].scope_args[i].type.startswith(op_gc.DOUBLE_TYPE) 288 | ): 289 | type_value += 1 290 | conc_size += c_size 291 | var_length.append(c_size) 292 | break 293 | 294 | if conc_type != "T": 295 | if type_value > 0: 296 | conc_type = op_gc.DOUBLE_TYPE 297 | 298 | if unknown == False: 299 | 300 | for i in range(len(function_list[counter].args)): 301 | if assigned_variable == function_list[counter].args[i].name: 302 | function_list[counter].args[i].type = conc_type 303 | if conc_size > 1: 304 | function_list[counter].args[i].size = conc_size 305 | function_list[counter].args[i].type += "*" 306 | elif function_list[counter].args[i].size == 1: 307 | 
function_list[counter].args[i].size = 0 308 | break 309 | for i in range(len(function_list[counter].scope_args)): 310 | if assigned_variable == function_list[counter].scope_args[i].name: 311 | function_list[counter].scope_args[i].type = conc_type 312 | if conc_size > 1: 313 | function_list[counter].scope_args[i].size = conc_size 314 | elif function_list[counter].scope_args[i].size == 1: 315 | function_list[counter].scope_args[i].size = 0 316 | break 317 | i = 0 318 | it2 = 0 319 | while i < conc_size: 320 | for j in range(len(var_list)): 321 | newline = "" 322 | if var_length[j] == 1: 323 | newline = assigned_variable + "[" + str(i) + "] = " + var_list[j] + ";" 324 | function_list[counter].scope.insert(it + it2, newline) 325 | it2 += 1 326 | else: 327 | function_list[counter].scope.insert( 328 | it + it2, 329 | "for (int it1 = 0; it1 < " + str(var_length[j]) + "; it1++) {", 330 | ) 331 | it2 += 1 332 | function_list[counter].scope.insert( 333 | it + it2, 334 | " " 335 | + assigned_variable 336 | + "[" 337 | + str(i) 338 | + " + it1] = " 339 | + var_list[j] 340 | + "[it1];", 341 | ) 342 | it2 += 1 343 | function_list[counter].scope.insert(it + it2, "}") 344 | it2 += 1 345 | i += int(var_length[j]) 346 | it += 1 347 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op/write_templates.py: -------------------------------------------------------------------------------- 1 | """Functions for writing Jinja templates to string and thus to file""" 2 | 3 | from jinja2 import Template 4 | import re 5 | 6 | import madflow.custom_op.constants as op_co 7 | import madflow.custom_op.classes as op_cl 8 | import madflow.custom_op.generation as op_gen 9 | import madflow.custom_op.global_constants as op_gc 10 | 11 | 12 | def template_with_string(template_string, variable): 13 | 14 | template = Template(template_string) 15 | string = template.render(variableName=variable) 16 | 17 | return string 18 | 19 | 20 | def write_libraries(temp, lib): 21 | """Writes libraries (#include <...>)""" 22 | temp += template_with_string(op_co.library_template, lib) 23 | return temp 24 | 25 | 26 | def write_headers(head): 27 | """Writes external header files (#include "...")""" 28 | return template_with_string(op_co.header_template, head) 29 | 30 | 31 | def write_namespaces(name): 32 | """Writes namespaces (using namespace ...;)""" 33 | return "using namespace " + name + ";\n" 34 | 35 | 36 | def write_constants(const_var, device): 37 | """Writes constants ((__device__) const ...;)""" 38 | 39 | dev = "" 40 | if device == "gpu": 41 | dev = "__device__ " 42 | 43 | template_string = op_co.constant_variable_template 44 | 45 | template = Template(template_string) 46 | return template.render(constantVariable=const_var, dev=dev) 47 | 48 | 49 | def write_defined(constants_, device): 50 | """Writes constants (#define ...)""" 51 | 52 | constants = [] # GPU constants are different from CPU constants 53 | for l in constants_: 54 | constants.append(l) 55 | if device == "gpu": 56 | for i in range(len(constants)): 57 | constants[i] = re.sub("std::", "", constants[i]) 58 | constants[i] = re.sub("conj", "cconj", constants[i]) 59 | 60 | return template_with_string(op_co.defined_constant_template, constants) 61 | 62 | 63 | def write_function_definition(func, device): 64 | """Writes function definitions 65 | (void function(..., ..., ...);)""" 66 | 67 | dev = "" 68 | if device == "gpu": 69 | if func.name == "matrix": 70 | dev = "__global__ " 71 | else: 72 | dev = "__device__ " 73 | 74 | 
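    # recompute argn here: serialize_function may have appended extra arguments
    # (e.g. nevents, context) after the Function object was first created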
func.argn = len(func.args) 75 | if func.args[0].type == op_gc.DOUBLE_TYPE + "*": 76 | func.args[0].type = "const " + func.args[0].type 77 | 78 | template_string = op_co.function_definition_template 79 | 80 | template = Template(template_string) 81 | return "\n" + template.render(func=func, dev=dev) 82 | 83 | 84 | def write_function(fun, device): 85 | """Writes function implementations 86 | (void function(..., ..., ...) { 87 | ... 88 | })""" 89 | 90 | dev = "" 91 | 92 | if fun.name == "matrix": 93 | func = op_cl.Function(fun.type, fun.name, fun.args, [], fun.scope_args, fun.template) 94 | for func_scope_line in fun.scope: 95 | func.scope.append(func_scope_line) 96 | if device == "cpu": 97 | func = op_gen.parallelize_function(func, op_gc.CPU_PARALLELIZATION) 98 | else: 99 | del func.args[-1] 100 | func = op_gen.parallelize_function(func, op_gc.GPU_PARALLELIZATION) 101 | dev = "__global__ " 102 | else: 103 | func = fun 104 | if device == "gpu": 105 | dev = "__device__ " 106 | 107 | func.argn = len(func.args) 108 | template_string = op_co.function_template 109 | 110 | template = Template(template_string) 111 | return "\n" + template.render(func=func, dev=dev) 112 | 113 | 114 | def write_header_file(custom_op, func): 115 | """Writes matrix_xxxxx.h""" 116 | # func2 = func 117 | op_types = [] 118 | for i in range(len(func.args)): 119 | t = re.sub("const (.*)", "\g<1>", func.args[i].type) 120 | t = re.sub("([^&*]*)[&*]*", "\g<1>", t) 121 | op_types.append(t) 122 | func.argn = len(func.args) 123 | 124 | template_string = op_co.header_file_template 125 | template = Template(template_string) 126 | return "\n" + template.render(custom_op=custom_op, func=func, op_types=op_types) 127 | 128 | 129 | def write_matrix_op(custom_op, func, device, process_name): 130 | # func2 = func 131 | op_types = [] 132 | for func_arg in func.args: 133 | t = re.sub("const (.*)", "\g<1>", func_arg.type) 134 | t = re.sub("([^&*]*)[&*]*", "\g<1>", t) 135 | op_types.append(t) 136 | func.argn = len(func.args) 137 | p = re.sub("_", "", process_name) 138 | 139 | if device == "cpu": 140 | template_string = op_co.cpu_op_template 141 | elif device == "gpu": 142 | template_string = op_co.gpu_op_template 143 | 144 | template = Template(template_string) 145 | return "\n" + template.render(custom_op=custom_op, func=func, op_types=op_types, process=p) 146 | 147 | 148 | def write_custom_op( 149 | headers, 150 | namespace, 151 | defined, 152 | constants, 153 | cpu_constants, 154 | function_list, 155 | custom_op_list, 156 | destination, 157 | process_name, 158 | device, 159 | ): 160 | """Writes the Custom Operator: 161 | - headers and libraries 162 | - namespaces 163 | - global constants 164 | - function definitions 165 | - function implementations 166 | - "wrapper function" called by matrix_1_xxxxx.py""" 167 | 168 | extension = "" 169 | custom_op_code = "" 170 | if device == "cpu": 171 | extension = ".cc" 172 | elif device == "gpu": 173 | extension = ".cu.cc" 174 | 175 | custom_op_code += ( 176 | "#ifdef GOOGLE_CUDA\n\ 177 | " 178 | "#define EIGEN_USE_GPU\n" 179 | ) 180 | else: 181 | return 182 | 183 | custom_op_code += write_headers(headers) 184 | custom_op_code += write_namespaces(namespace) 185 | custom_op_code += write_defined(defined, device) 186 | custom_op_code += write_constants(constants, device) 187 | 188 | if device == "cpu": # write 'using thread::ThreadPool' if using ThreadPool 189 | if op_gc.CPU_PARALLELIZATION == "ThreadPool": 190 | custom_op_code += write_constants(cpu_constants, device) 191 | 192 | for f in function_list: 
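        # prototypes for all functions are emitted before the implementations below,
        # so the generated functions can call each other regardless of their order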
193 | custom_op_code += write_function_definition(f, device) 194 | 195 | if device == "gpu": 196 | custom_op_code += "\n" 197 | custom_op_code += op_co.gpu_arithmetic_operators 198 | 199 | for f in function_list: 200 | custom_op_code += "\n" 201 | custom_op_code += write_function(f, device) 202 | 203 | if device == "gpu": 204 | function_list[-1].args.append( 205 | op_cl.Argument("context", "const OpKernelContext*", 0, False, []) 206 | ) 207 | 208 | for c in custom_op_list: 209 | custom_op_code += write_matrix_op(c, function_list[-1], device, process_name) 210 | 211 | if device == "gpu": 212 | custom_op_code = re.sub("([ ,+\-*/]+)sign([ (;]+)", "\g<1>signn\g<2>", custom_op_code) 213 | custom_op_code = re.sub("([ ,+\-*/]+)signvec([ (;]+)", "\g<1>signvecc\g<2>", custom_op_code) 214 | 215 | custom_op_code += "\n#endif\n" 216 | 217 | (destination / ("matrix_" + process_name + extension)).write_text(custom_op_code) 218 | -------------------------------------------------------------------------------- /python_package/madflow/custom_op_generator.py: -------------------------------------------------------------------------------- 1 | """Generation of the custom operator from existing python code""" 2 | 3 | import subprocess 4 | import re 5 | import copy 6 | 7 | import madflow.wavefunctions_flow 8 | import madflow.makefile_template as mf_tmp 9 | 10 | import madflow.custom_op.aux_functions as op_af 11 | import madflow.custom_op.generation as op_gen 12 | import madflow.custom_op.global_constants as op_gc 13 | import madflow.custom_op.write_templates as op_wt 14 | import madflow.custom_op.syntax as op_sy 15 | import madflow.custom_op.parser as op_pa 16 | import madflow.custom_op.read as op_re 17 | 18 | 19 | DEVICES = ["cpu", "gpu"] 20 | 21 | 22 | def translate(destination): 23 | """Translates Python code into a C++/CUDA Custom Operator 24 | destination: directory of madflow output""" 25 | 26 | FILE_SOURCES = [madflow.wavefunctions_flow.__file__] # path to wavefunctions_flow.py 27 | 28 | # Create the directory for the Op source code and create the makefile 29 | 30 | destination_gpu = destination / "gpu" 31 | destination_gpu.mkdir(parents=True, exist_ok=True) 32 | mf_tmp.write_makefile(destination) 33 | 34 | # Generate sign functions 35 | function_list_ = [] 36 | op_af.generate_auxiliary_functions(function_list_) 37 | 38 | # Read wavefunctions_flow.py 39 | for file_source in FILE_SOURCES: 40 | signatures = [] 41 | signature_variables = [] 42 | 43 | op_re.read_signatures(signatures, signature_variables, file_source) 44 | 45 | op_pa.convert_signatures(signatures, signature_variables) 46 | 47 | op_re.read_file_from_source(function_list_, file_source, signatures, signature_variables) 48 | 49 | for subprocess_file_name in destination.glob("matrix_1_*"): 50 | 51 | constants = copy.copy(op_gc.GLOBAL_CONSTANTS) # global_constants 52 | 53 | process_name = re.sub("matrix_1_", "", subprocess_file_name.stem) 54 | 55 | matrix_source = subprocess_file_name 56 | process_source = subprocess_file_name.parent / ( 57 | re.sub("matrix_1_", "aloha_1_", subprocess_file_name.stem) + subprocess_file_name.suffix 58 | ) 59 | function_list = copy.copy(function_list_) 60 | headers = copy.copy(op_gc.HEADERS_) 61 | headers.append("matrix_" + process_name + ".h") 62 | 63 | custom_op_list = [] 64 | 65 | op_re.read_signatures(signatures, signature_variables, process_source) 66 | 67 | op_pa.convert_signatures(signatures, signature_variables) 68 | 69 | op_re.read_file_from_source(function_list, process_source, signatures, 
signature_variables) 70 | 71 | matrix_name = subprocess_file_name.name 72 | 73 | op_re.read_signatures(signatures, signature_variables, matrix_source) 74 | op_pa.convert_signatures(signatures, signature_variables) 75 | 76 | op_re.extract_matrix_from_file( 77 | function_list, matrix_source, signatures, signature_variables 78 | ) 79 | 80 | for i in range(len(function_list)): 81 | op_sy.check_variables(i, function_list) 82 | 83 | for i in range(len(function_list)): 84 | op_sy.check_lines(i, function_list) 85 | 86 | op_gen.serialize_function(function_list[-1]) 87 | 88 | custom_op_list.append(op_gen.define_custom_op(function_list[-1])) 89 | 90 | op_gen.extract_constants(function_list[-1], constants) 91 | 92 | op_gen.remove_real_ret(function_list[-1]) 93 | 94 | # write the Op for both CPU and GPU 95 | for device in DEVICES: 96 | op_wt.write_custom_op( 97 | headers, 98 | op_gc.NAMESPACE, 99 | op_gc.DEFINED, 100 | constants, 101 | op_gc.CPU_CONSTANTS, 102 | function_list, 103 | custom_op_list, 104 | destination_gpu, 105 | process_name, 106 | device, 107 | ) 108 | 109 | # write matrix_xxxxx.h 110 | temp = "" 111 | for c in custom_op_list: 112 | temp += op_wt.write_header_file(c, function_list[-1]) 113 | (destination_gpu / ("matrix_" + process_name + ".h")).write_text(temp) 114 | 115 | # write matrix_1_xxxxx.py 116 | temp = "" 117 | temp = op_gen.modify_matrix(matrix_source, process_name, destination) 118 | (destination / matrix_name).write_text(temp) 119 | 120 | # -------------------------------------------------------------------------------------- 121 | 122 | 123 | def compile_op(destination): 124 | """Compiles the Custom Operator 125 | destination: directory of madflow output""" 126 | subprocess.run("make", cwd=destination, check=True) 127 | -------------------------------------------------------------------------------- /python_package/madflow/lhe_writer.py: -------------------------------------------------------------------------------- 1 | from madflow.config import get_madgraph_path 2 | import sys, os, six, gzip, copy 3 | from time import time as tm 4 | import math 5 | import numpy as np 6 | from pathlib import Path 7 | from multiprocessing.pool import ThreadPool as Pool 8 | 9 | from pdfflow.configflow import fzero 10 | 11 | import logging 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | ### go to the madgraph folder and load up anything that you need 16 | original_path = copy.copy(sys.path) 17 | 18 | mg5amcnlo_folder = get_madgraph_path() 19 | sys.path.insert(0, mg5amcnlo_folder.as_posix()) 20 | 21 | original_package = __package__ 22 | __package__ = "madgraph.various" 23 | from madgraph.various import lhe_parser 24 | 25 | sys.path = original_path 26 | __package__ = original_package 27 | 28 | ################################################ 29 | 30 | class EventFlow(lhe_parser.Event): 31 | """ 32 | Wrapper class for madgraph lhe_parser.Event class. EventFlow deals with 33 | holding the LHE info for the event. 34 | Subclass of list class: looping over self yields ParticleFlow objects 35 | contained in the event. 
36 | """ 37 | def __init__(self, info, *args, **kwargs): 38 | """ 39 | Parameters 40 | ---------- 41 | info: dict | lhe_parser.Event | list, event information to be stored 42 | """ 43 | super().__init__(*args, **kwargs) 44 | 45 | self.nexternal = info.get('nexternal') 46 | self.ievent = info.get('ievent') 47 | self.wgt = info.get('wgt') 48 | self.aqcd = info.get('aqcd') 49 | self.scale = info.get('scale') 50 | self.aqed = info.get('aqed') 51 | self.tag = info.get('tag') 52 | self.comment = info.get('comment') 53 | 54 | def add_particles(self, particles): 55 | """ 56 | Parameters 57 | ---------- 58 | particles: list, ParticleFlow objects list to extend the event with 59 | """ 60 | self.extend(particles) 61 | 62 | def as_bytes(self): 63 | """ Returns byte string event representation. """ 64 | return self.__str__().encode('utf-8') 65 | 66 | 67 | class ParticleFlow(lhe_parser.Particle): 68 | """ 69 | Wrapper class for madgraph lhe_parser.Particle class. Holds particle info. 70 | """ 71 | def __init__(self, info, *args, **kwargs): 72 | """ 73 | Parameters 74 | ---------- 75 | info: dict, particle information to be stored 76 | """ 77 | super().__init__(*args, **kwargs) 78 | 79 | self.pid = info.get('pid') 80 | self.status = info.get('status') 81 | self.mother1 = info.get('mother1') 82 | self.mother2 = info.get('mother2') 83 | self.color1 = info.get('color1') 84 | self.color2 = info.get('color2') 85 | self.px = info.get('px') 86 | self.py = info.get('py') 87 | self.pz = info.get('pz') 88 | self.E = info.get('E') 89 | self.mass = info.get('mass') 90 | self.vtim = info.get('vtim') 91 | self.helicity = info.get('helicity') 92 | 93 | 94 | class LheWriter: 95 | def __init__(self, folder, run='run_01', no_unweight=False, event_target=0): 96 | """ 97 | Utility class to write Les Houches Event (LHE) file info: writes LHE 98 | events to /Events//weighted_events.lhe.gz 99 | 100 | Parameters 101 | ---------- 102 | folder: Path, the madflow output folder 103 | run: str, the run name 104 | no_unweight: bool, wether to unweight or not events before objects goes 105 | out of scope 106 | event_target: int, number of requested unweighted events 107 | """ 108 | self.folder = folder 109 | self.run = run 110 | self.no_unweight = no_unweight 111 | self.event_target = event_target 112 | self.pool = Pool(processes=1) 113 | 114 | # create LHE file directory tree 115 | lhe_folder = self.folder.joinpath(f"Events/{self.run}") 116 | lhe_folder.mkdir(parents=True, exist_ok=True) 117 | self.lhe_path = lhe_folder.joinpath('weighted_events.lhe.gz') 118 | 119 | # create I/O stream 120 | self.stream = gzip.open(self.lhe_path, 'wb') 121 | 122 | 123 | def __enter__(self): 124 | self.dump_banner() 125 | return self 126 | 127 | 128 | def __exit__(self, exc_type, exc_value, exc_traceback): 129 | """ 130 | Send closing signal to asynchronous dumping pool. Triggers unweighting 131 | if self.no_unweight is False (default). 
132 | 133 | Note: this function should be called after having stored the cross 134 | section and statistical error values 135 | """ 136 | self.pool.close() 137 | self.pool.join() 138 | self.dump_exit() 139 | logger.debug(f"Saved LHE file at {self.lhe_path.as_posix()}") 140 | self.stream.close() 141 | if not self.no_unweight: 142 | logger.debug("Unweighting ...") 143 | start = tm() 144 | nb_keep, nb_wgt = self.do_unweighting(event_target=self.event_target) 145 | end = tm()-start 146 | log = "Unweighting stats: kept %d events out of %d (efficiency %.2g %%, time %.5f)" \ 147 | %(nb_keep, nb_wgt, nb_keep/nb_wgt*100, end) 148 | logger.info(log) 149 | 150 | 151 | def lhe_parser(self, all_ps, res): 152 | """ 153 | Takes care of storing and dumping LHE info from the integrator. 154 | To be passed as argument to generate the Vegasflow custom integrand. 155 | 156 | Parameters 157 | ---------- 158 | all_ps: tf.Tensor, phase space points of shape=(nevents,nexternal,ndims) 159 | res: tf.Tensor, weights of shape=(nevents,) 160 | """ 161 | _, nexternal, _ = all_ps.shape 162 | events_info = [{ 163 | 'nexternal': nexternal, 164 | 'ievent': 1, 165 | 'wgt': wgt, 166 | 'aqcd': 0.0, # alpha strong value, get this from vegasflow? 167 | 'scale': 0.0, # Q^2 scale for pdfs, get this from vegasflow? 168 | 'aqed': 0.0, # alpha EW value , get this from vegasflow? 169 | 'tag': '', 170 | 'comment': '' 171 | } for wgt in res.numpy()] 172 | 173 | index_to_pid = { 174 | 0: 2212, # p 175 | 1: 2212, # p 176 | 2: 6, # t 177 | 3: -6 # t~ 178 | } 179 | 180 | index_to_status = { 181 | 0: -1, # incoming particle 182 | 1: -1, # incoming particle 183 | 2: 1, # outgoing particle 184 | 3: 1, # outgoing particle 185 | } 186 | 187 | # we are missing the virtual particles 188 | particles_info = [ 189 | [{ 190 | 'pid': index_to_pid[i], 191 | 'status': index_to_status[i], 192 | 'mother1': 0, 193 | 'mother2': 0, 194 | 'color1': 0, 195 | 'color2': 0, 196 | 'E': ps[0], 197 | 'px': ps[1], 198 | 'py': ps[2], 199 | 'pz': ps[3], 200 | 'mass': np.sqrt(ps[0]**2 - ps[1]**2 - ps[2]**2 - ps[3]**2), # vectorize this? 201 | 'vtim': 0, 202 | 'helicity': 0, 203 | } for i, ps in enumerate(ps_external) 204 | ] for ps_external in all_ps.numpy()] 205 | 206 | self.dump(events_info, particles_info) 207 | 208 | return fzero 209 | 210 | 211 | def dump_banner(self, stream=None): 212 | """ 213 | Parameters 214 | ---------- 215 | stream: _io.TextIOWrapper, output file object, if None use default 216 | self.stream 217 | """ 218 | if stream: 219 | stream.write('\n'.encode('utf-8')) 220 | else: 221 | self.stream.write('\n'.encode('utf-8')) 222 | 223 | 224 | def dump_events(self, events_info, particles_info): 225 | """ 226 | Get the vectorized information stored in a dict. Loop over events and 227 | particles to dump into LHE file. 
228 | 229 | Parameters 230 | ---------- 231 | events_info: list, list of events dict info 232 | particles_info: list, list particles dict info 233 | """ 234 | for info, p_info in zip(events_info, particles_info): 235 | evt = EventFlow(info) 236 | 237 | particles = [ParticleFlow(pinfo, event=evt) for pinfo in p_info] 238 | evt.add_particles(particles) 239 | self.stream.write(evt.as_bytes()) 240 | 241 | 242 | def dump_exit(self, stream=None): 243 | """ 244 | Parameters 245 | ---------- 246 | stream: _io.TextIOWrapper, output file object, if None use default 247 | self.stream 248 | """ 249 | tag = '\n' 250 | if stream: 251 | stream.write(tag.encode('utf-8')) 252 | else: 253 | self.stream.write(tag.encode('utf-8')) 254 | 255 | 256 | def async_dump(self, events_info, particles_info): 257 | """ 258 | Dump info file in LHE format. 259 | 260 | Parameters 261 | ---------- 262 | events_info: list, dictionaries for events info 263 | particles_info: list, dictionaries for particles info 264 | """ 265 | self.dump_events(events_info, particles_info) 266 | 267 | 268 | def dump(self, *args): 269 | """ Dumps info asynchronously. """ 270 | self.pool.apply_async(self.async_dump, args) 271 | 272 | def dump_result(self, filename): 273 | """ 274 | Dump cross section and statistical error at filename. 275 | 276 | Parameters 277 | ---------- 278 | filename: Path, file to save cross section and error. 279 | """ 280 | xsec_err = np.array([self.__cross, self.__err]) 281 | np.savetxt(filename.as_posix(), xsec_err) 282 | 283 | @property 284 | def cross(self): 285 | """ Cross section. """ 286 | return self.__cross 287 | 288 | 289 | @cross.setter 290 | def cross(self, value): 291 | """ Cross section setter. """ 292 | self.__cross = value 293 | 294 | 295 | @property 296 | def err(self): 297 | """ Cross section's statistical error. """ 298 | return self.__err 299 | 300 | 301 | @err.setter 302 | def err(self, value): 303 | """ Error section setter""" 304 | self.__err = value 305 | 306 | 307 | def store_result(self, result): 308 | """ 309 | Stores integration result in numpy format. 310 | 311 | Parameters 312 | ---------- 313 | result: list, cross section and statistical error 314 | """ 315 | self.__cross = float(result[0]) 316 | self.__err = float(result[1]) 317 | 318 | 319 | def do_unweighting(self, event_target=0): 320 | """ 321 | Does unweighting. Removes the weighted LHE file. 
322 | 
323 |         Parameters
324 |         ----------
325 |         event_target: int, number of unweighted events requested
326 | 
327 |         Note: this function should be called after having stored the cross
328 |         section and statistical error values
329 |         """
330 |         # load weighted LHE file
331 |         lhe = EventFileFlow(self.lhe_path)
332 |         nb_wgt = len(lhe)
333 | 
334 |         # open a tmp stream for unweighted LHE file
335 |         tmp_path = self.lhe_path.with_name("tmp_unweighted_events.lhe.gz")
336 |         # unweight
337 |         nb_keep = lhe.unweight(tmp_path.as_posix(), event_target=event_target)
338 | 
339 |         # delete weighted LHE file
340 |         # self.lhe_path.unlink()
341 | 
342 |         # load tmp file
343 |         tmp_lhe = EventFileFlow(tmp_path)
344 | 
345 |         # open a stream for final unweighted LHE file
346 |         unwgt_path = tmp_path.with_name("unweighted_events.lhe.gz")
347 |         with gzip.open(unwgt_path, 'wb') as stream:
348 |             self.dump_banner(stream)
349 |             for event in tmp_lhe:
350 |                 event.wgt = self.__cross
351 |                 stream.write(event.as_bytes())
352 |             self.dump_exit(stream)
353 | 
354 |         # delete tmp file
355 |         tmp_path.unlink()
356 |         return nb_keep, nb_wgt
357 | 
358 | 
359 | class EventFileFlow(lhe_parser.EventFile):
360 |     """
361 |     Wrapper class for madgraph lhe_parser.EventFile class. Loads, modifies and
362 |     dumps the events contained in an LHE file.
363 |     """
364 |     def __init__(self, path, mode='r', *args, **kwargs):
365 |         """
366 |         Parameters
367 |         ----------
368 |         path: Path or str, path pointing to a valid LHE file (with either the
369 |               .lhe or the .lhe.gz extension)
370 |         mode: str, file opening mode
371 |         """
372 |         if isinstance(path, Path):
373 |             path = path.as_posix()
374 |         super().__init__(path, mode, *args, **kwargs)
375 | 
376 |     def __next__(self):
377 |         """
378 |         Replaces the parent class method, returning an EventFlow instead of a
379 |         plain lhe_parser.Event.
380 | 
381 |         Note: this won't work with event groups (if self.eventgroup is True).
382 |         """
383 |         event = super().__next__()
384 |         if isinstance(event, lhe_parser.Event):
385 |             event.__class__ = EventFlow
386 |         # EventFile.__len__ method loops over self and returns a list
387 |         # instead of an Event, but the returned object is not used then. In
388 |         # this case it's fine to return a non EventFlow object.
389 |         return event
390 | 
391 | 
392 | class FourMomentumFlow(lhe_parser.FourMomentum):
393 |     """
394 |     Wrapper class for madgraph lhe_parser.FourMomentum class. Stores (E,px,py,pz)
395 |     of a particle and allows access to its kinematical quantities.
396 |     """
397 |     def __init__(self, obj=0, px=0, py=0, pz=0, E=0):
398 |         """
399 |         Parameters
400 |         ----------
401 |         obj: FourMomentumFlow|ParticleFlow|list|tuple|str|six.text_type|float
402 |              object to copy momentum components from.
403 |              - If it is a FourMomentumFlow or ParticleFlow, this acts like
404 |                a copy constructor.
405 |              - If it is a list or tuple, momentum components should be
406 |                (E,px,py,pz) ordered.
407 |              - If it is a str or six.text_type, a space separated string with
408 |                (E,px,py,pz) ordered components.
409 |              - If it is a float, it supersedes the E argument
410 |         px: float, x momentum component
411 |         py: float, y momentum component
412 |         pz: float, z momentum component
413 |         E: float, particle energy
414 |         """
415 |         super().__init__(obj, px, py, pz, E)
416 | 
417 |     @property
418 |     def phi(self):
419 |         """ Returns the azimuthal angle.
""" 420 | phi = 0.0 if (self.pt == 0.0) else math.atan2(self.py, self.px) 421 | return phi % (2.0*np.pi) 422 | -------------------------------------------------------------------------------- /python_package/madflow/makefile_template.py: -------------------------------------------------------------------------------- 1 | """Makefile generation for compiling the Custom Operator""" 2 | 3 | MAKEFILE = "makefile" 4 | CPPCOMPILER = "g++" 5 | CPPVERSION = "c++14" 6 | CUDAPATH = "" 7 | # example: CUDAPATH = "/usr/local/cuda" 8 | 9 | 10 | def write_compilers(): 11 | """Adds C++ and CUDA compilers""" 12 | text = f"""CXX := {CPPCOMPILER} 13 | NVCC := $(shell which nvcc) 14 | 15 | """ 16 | return text 17 | 18 | 19 | def write_shell_name(): 20 | """Adds a line for the kernel name""" 21 | text = """UNAME_S := $(shell uname -s) 22 | 23 | """ 24 | return text 25 | 26 | 27 | def write_multithreading(): 28 | """Find the number of processors and use as many threads as possible 29 | if the number of processors isn't found, default to 1""" 30 | text = """ifeq ($(UNAME_S), Darwin) 31 | NPROCS = $(shell sysctl -n hw.ncpu) 32 | else 33 | NPROCS = $(shell grep -c 'processor' /proc/cpuinfo) 34 | endif 35 | ifeq ($(NPROCS),) 36 | NPROCS = 1 37 | endif 38 | MAKEFLAGS += -j$(NPROCS) 39 | 40 | """ 41 | return text 42 | 43 | 44 | def write_tf_generic_flags(): 45 | """Adds TensorFlow flags""" 46 | text = """TF_CFLAGS = $(shell python3 -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') 47 | TF_LFLAGS = $(shell python3 -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))') 48 | 49 | """ 50 | return text 51 | 52 | 53 | def write_tf_cuda_flags(): 54 | """Adds TansorFlow CUDA flags and the path to CUDA libraries. 55 | If the environment variable ${CUDA_PATH} isn't defined, use a default path""" 56 | # """ 57 | text = """CUDA_LFLAGS = -x cu -Xcompiler -fPIC 58 | CUDA_PATH := $(shell echo ${CUDA_PATH}) 59 | ifeq ($(CUDA_PATH),) 60 | 61 | """ 62 | """If the path for CUDA libraries isn't explicitly stated at the beginning of this file, find it from ${PATH}""" 63 | if CUDAPATH == "": 64 | text += 'CUDA_PATH = $(shell echo ${PATH} | sed -e "s&.*:\([^:]*cuda[^/]*\).*&\\1&g")\n' 65 | else: 66 | text += "CUDA_PATH = " + CUDAPATH + "\n" 67 | text += """endif 68 | 69 | """ 70 | return text 71 | 72 | 73 | def write_omp_flags(openmp_flag="-fopenmp"): 74 | """Adds flags for OpenMP parallelization""" 75 | text = f"""ifeq ($(UNAME_S), Darwin) 76 | OMP_CFLAGS = -Xpreprocessor {openmp_flag} -lomp 77 | else 78 | OMP_CFLAGS = {openmp_flag} 79 | endif 80 | 81 | """ 82 | return text 83 | 84 | 85 | def write_cflags(): 86 | """Adds C-Flags. 
C++ version is defined at the beginning of this file"""
87 |     text = f"""CFLAGS = ${{TF_CFLAGS}} ${{OMP_CFLAGS}} -fPIC -O2 -std={CPPVERSION}
88 | LDFLAGS = -shared ${{TF_LFLAGS}}
89 | 
90 | """
91 | 
92 |     text += write_cflags_cuda()
93 | 
94 |     return text
95 | 
96 | 
97 | def write_cflags_cuda():
98 |     """Adds C-Flags for CUDA (only if nvcc is found)"""
99 |     # fallback in case nvcc is not installed => use CXX
100 |     text = f"""ifeq ($(NVCC),)
101 | CFLAGS_CUDA = $(CFLAGS)
102 | CFLAGS_NVCC = ${{TF_CFLAGS}}
103 | LDFLAGS_CUDA = $(LDFLAGS)
104 | NVCC = $(CXX)
105 | else
106 | CFLAGS_CUDA = $(CFLAGS) -D GOOGLE_CUDA=1 -I$(CUDA_PATH)/include
107 | CFLAGS_NVCC = ${{TF_CFLAGS}} -O2 -std={CPPVERSION} -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC -DNDEBUG --expt-relaxed-constexpr
108 | LDFLAGS_CUDA = $(LDFLAGS) -L$(CUDA_PATH)/lib64 -lcudart
109 | endif
110 | 
111 | """
112 |     return text
113 | 
114 | 
115 | def write_target():
116 |     """Adds the name of the generated library (matrix_processName_cu.so)"""
117 |     text = """TARGETS = $(shell ls gpu/ | grep ".h" | sed 's/\.h/_cu.so/g')
118 | 
119 | """
120 |     return text
121 | 
122 | 
123 | def write_commands():
124 |     """Adds commands for compiling the source code"""
125 |     text = ""
126 |     text += write_generic_commands()
127 |     text += write_library_commands()
128 |     text += write_source_commands()
129 |     text += write_cleanup_commands()
130 |     return text
131 | 
132 | 
133 | def write_generic_commands():
134 |     """all compiles all target libraries (one for each subprocess, i.e.: qq~ -> X + gg -> X)"""
135 |     text = """all: $(TARGETS)
136 | 
137 | """
138 |     return text
139 | 
140 | 
141 | def write_library_commands():
142 |     """Compile each library from object files"""
143 |     text = """%_cu.so: gpu/%.cudao gpu/%.cu.cudao
144 | \t$(CXX) -o $@ $(CFLAGS_CUDA) $^ $(LDFLAGS_CUDA)
145 | 
146 | """
147 |     return text
148 | 
149 | 
150 | def write_source_commands():
151 |     """Generate object files"""
152 |     text = """%.o: %.cc
153 | \t$(CXX) -c $(CFLAGS) $^ -o $@
154 | 
155 | %.cu.cudao: %.cu.cc
156 | \t$(NVCC) -c $(CFLAGS_NVCC) $^ -o $@
157 | 
158 | %.cudao: %.cc
159 | \t$(CXX) -c $(CFLAGS_CUDA) $^ -o $@
160 | 
161 | """
162 |     return text
163 | 
164 | 
165 | def write_cleanup_commands():
166 |     """Adds command for cleanup"""
167 |     # remove generated libraries
168 |     # remove generated libraries and source code
169 |     text = """clean:
170 | \trm -f $(TARGETS) $(OBJECT_SRCS_CUDA)
171 | 
172 | clean_all:
173 | \trm -f $(TARGETS) $(OBJECT_SRCS_CUDA)
174 | \trm -f gpu/*
175 | 
176 | """
177 |     return text
178 | 
179 | 
180 | def write_makefile(destination):
181 |     """Writes the makefile for the Custom Operator to the madflow output folder (destination)"""
182 |     makefile_content = ""
183 | 
184 |     makefile_content += write_compilers()
185 |     makefile_content += write_shell_name()
186 |     makefile_content += write_multithreading()
187 |     makefile_content += write_tf_generic_flags()
188 |     makefile_content += write_tf_cuda_flags()
189 |     # makefile_content += write_omp_flags()
190 |     makefile_content += write_cflags()
191 |     makefile_content += write_target()
192 |     makefile_content += write_commands()
193 | 
194 |     # write the makefile
195 |     (destination / MAKEFILE).write_text(makefile_content)
196 | 
197 | 
198 | if __name__ == "__main__":
199 |     from pathlib import Path
200 | 
201 |     write_makefile(Path("."))
202 | 
--------------------------------------------------------------------------------
/python_package/madflow/parameters.py:
--------------------------------------------------------------------------------
1 | """
2 | Utilities and functions to deal with the parameters of the model
3 | """
4 | from .config import DTYPE, DTYPECOMPLEX, complex_me, float_me, run_eager
5
| import numpy as np 6 | import tensorflow as tf 7 | from itertools import chain 8 | 9 | GS_SIGNATURE = [tf.TensorSpec(shape=[None], dtype=DTYPECOMPLEX)] 10 | ALPHAS_SIGNATURE = [tf.TensorSpec(shape=[None], dtype=DTYPE)] 11 | 12 | 13 | @tf.function(input_signature=ALPHAS_SIGNATURE) 14 | def _alphas_to_gs(alpha_s): 15 | return complex_me(2.0 * tf.math.sqrt(np.pi * alpha_s)) 16 | 17 | 18 | class Model: 19 | """This class is instantiated with knowledge about 20 | all couplings and parameters in the process of interest 21 | and provides an interface to compute them in a per-phase space 22 | basis 23 | 24 | Parameters 25 | --------- 26 | constants: tuple(DTYPE) 27 | tuple with all constants of the model 28 | functions: tuple(functions) 29 | tuple with all parameters of the model which depend on g_s 30 | """ 31 | 32 | def __init__(self, constants, functions): 33 | self._tuple_constants = constants 34 | self._tuple_functions = functions 35 | self._constants = list(constants) 36 | self._to_evaluate = [tf.function(i, input_signature=GS_SIGNATURE) for i in functions] 37 | self._frozen = [] 38 | 39 | @property 40 | def frozen(self): 41 | """Whether the model is frozen for a given value of alpha_s or not""" 42 | return bool(self._frozen) 43 | 44 | def freeze_alpha_s(self, alpha_s): 45 | """The model can be frozen to a specific value 46 | of alpha_s such that all phase space points are evaluated at that value 47 | Parameters 48 | ---------- 49 | alpha_s: float 50 | """ 51 | if self.frozen: 52 | raise ValueError("The model is already frozen") 53 | self._frozen = self._evaluate(float_me([alpha_s])) 54 | 55 | def unfreeze(self): 56 | """Remove the frozen status""" 57 | self._frozen = [] 58 | 59 | @tf.function(input_signature=ALPHAS_SIGNATURE) 60 | def _evaluate(self, alpha_s): 61 | """Evaluate all couplings for the given values of alpha_s 62 | Parameters 63 | ---------- 64 | alpha_s: tensor of shape (None,) 65 | """ 66 | gs = _alphas_to_gs(alpha_s) 67 | results = [fun(gs) for fun in self._to_evaluate] 68 | if not results: 69 | return self._constants 70 | if not self._constants: 71 | return results 72 | return list(chain.from_iterable([self._constants, results])) 73 | 74 | def get_masses(self): 75 | """Get the masses that entered the model as constants""" 76 | masses = [] 77 | for key, val in self._tuple_constants._asdict().items(): 78 | if key.startswith("mdl_M"): 79 | masses.append(val) 80 | return masses 81 | 82 | def parse_parameter(self, parameter_name): 83 | """Parse a (constant) parameter given its string name""" 84 | if parameter_name == "ZERO": 85 | return 0.0 86 | 87 | if hasattr(self._tuple_constants, parameter_name): 88 | return getattr(self._tuple_constants, parameter_name) 89 | if hasattr(self._tuple_functions, parameter_name): 90 | return getattr(self._tuple_functions, parameter_name) 91 | raise AttributeError(f"The model class does not contain parameter {parameter_name}") 92 | 93 | def evaluate(self, alpha_s=None): 94 | """Evaluate alpha_s, if the model is frozen 95 | returns the frozen values""" 96 | if self.frozen: 97 | return self._frozen 98 | return self._evaluate(alpha_s) 99 | -------------------------------------------------------------------------------- /python_package/madflow/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/N3PDF/madflow/46afee693fa32b651a0d41373eae3912f59f8ecb/python_package/madflow/scripts/__init__.py 
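A note on the `Model` class defined in `python_package/madflow/parameters.py` above: couplings that depend on `g_s` are evaluated per phase-space point from `alpha_s`, while the model can also be frozen at a single `alpha_s` value so that every point reuses the same couplings. Below is a minimal usage sketch; the `Constants` namedtuple, the `gc_10`-style coupling function and the numerical values are illustrative placeholders, not the objects produced by the madgraph-generated model.

```python
# Hypothetical usage sketch of madflow.parameters.Model (illustrative names and values).
from collections import namedtuple

from madflow.config import float_me
from madflow.parameters import Model

# constants of the model (e.g. masses); only the layout matters for this sketch
Constants = namedtuple("Constants", ["mdl_MT", "mdl_WT"])
constants = Constants(float_me(173.0), float_me(1.5))


def gc_10(gs):
    """Illustrative g_s-dependent coupling: receives the complex g_s tensor."""
    return -gs


model = Model(constants, (gc_10,))

# evaluate the couplings for a batch of alpha_s values (one per phase-space point)
alpha_s = float_me([0.118, 0.130])
couplings = model.evaluate(alpha_s)

# or freeze the model once, so every phase-space point reuses the same couplings
model.freeze_alpha_s(0.118)
frozen_couplings = model.evaluate()
```

Freezing acts as a cache: `freeze_alpha_s` evaluates the couplings once and `evaluate` then returns the stored list, avoiding the recomputation when the run uses a fixed `alpha_s`.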
-------------------------------------------------------------------------------- /python_package/madflow/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/N3PDF/madflow/46afee693fa32b651a0d41373eae3912f59f8ecb/python_package/madflow/tests/__init__.py -------------------------------------------------------------------------------- /python_package/madflow/tests/test_integration.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test for the integration routine 3 | """ 4 | 5 | import os 6 | import numpy as np 7 | from madflow.utilities import one_matrix_integration 8 | from pdfflow import mkPDF 9 | 10 | # For github actions, check whether we find a PDF set directorty 11 | git_pdfs = os.environ.get("PDFDIR") 12 | 13 | def test_integration(): 14 | """ Regresion-style check to the integration routine using a predefined 15 | mockup matrix element. 16 | Checks that the result is within 3 sigmas of the true result 17 | """ 18 | from madflow.tests.mockup_debug_me import Matrix_1_gg_ttx, model_params 19 | matrix = Matrix_1_gg_ttx() 20 | pdf = mkPDF("NNPDF31_nnlo_as_0118/0", dirname=git_pdfs) 21 | res, error = one_matrix_integration(matrix, model_params, pdf=pdf, flavours=(0,), out_masses=[173.0, 173.0]) 22 | true_result = 103.4 23 | assert np.fabs(true_result - res) < 3*error 24 | 25 | if __name__=='__main__': 26 | from time import time as tm 27 | start = tm() 28 | test_integration() 29 | print(f"Program done in {tm()-start} s") -------------------------------------------------------------------------------- /python_package/madflow/tests/test_ps.py: -------------------------------------------------------------------------------- 1 | """ Tests phase space routines 2 | """ 3 | from madflow.config import DTYPE 4 | import madflow.phasespace as ps 5 | import numpy as np 6 | import tensorflow as tf 7 | 8 | 9 | def massless_volume(n, e): 10 | """Volume of massless phase space 11 | 1 / 2 / (4*pi)^(2n-3) * E^(2n-4) / G(n)G(n-1) 12 | """ 13 | gn = np.math.factorial(n - 1) 14 | gnm1 = np.math.factorial(n - 2) 15 | energy = pow(e, 2 * n - 4) / 2.0 16 | return energy / gn / gnm1 / pow(4 * np.pi, 2 * n - 3) 17 | 18 | 19 | def auto_test_rambo_massless(n, sqrts, n_events=3, tol=1e-6): 20 | """Check that a massless phase space point 21 | produces the right weight and has the right shape 22 | """ 23 | n_rand_dim = n * 4 24 | xrand = tf.random.uniform((n_events, n_rand_dim), dtype=DTYPE) 25 | all_p, w = ps.rambo(xrand, n, sqrts, masses=None) 26 | np.testing.assert_equal(all_p.shape, (n_events, n, 4)) 27 | vol = massless_volume(n, sqrts) 28 | np.testing.assert_allclose(vol, w, rtol=tol) 29 | 30 | 31 | def test_rambo(sqrts=7e3, max_n=8): 32 | """Check that rambo produces the right type of phase space""" 33 | for n in range(2, max_n): 34 | auto_test_rambo_massless(n, sqrts) 35 | 36 | # Check that it also accepts a variable input energy 37 | events = 13 38 | variable_sqrts = tf.random.uniform((13,), dtype=DTYPE) * sqrts 39 | auto_test_rambo_massless(n, variable_sqrts, n_events=events) 40 | 41 | 42 | def test_PhaseSpaceGenerator(sqrts=7e3, nparticles=5, nevents=10000): 43 | """Check that the phase space generator and cuts work 44 | This is explicitly testing: 45 | 46 | 1. The phase space is generating points 47 | 2. The phase space can compute the PT 48 | 3. The register_cut and clear_cuts methods of the phase space are doing something 49 | 4. 
The cuts done with numpy and done with ps_gen are equivalent 50 | """ 51 | ps_gen = ps.PhaseSpaceGenerator(nparticles, sqrts, algorithm="ramboflow") 52 | ps_gen.register_cut("pt", particle=3, min_val=60, max_val=300.0) 53 | dim = (nparticles - 2) * 4 + 2 54 | xrand = tf.random.uniform((nevents, dim), dtype=DTYPE) 55 | all_ps, w, x1, x2, idx = ps_gen(xrand) 56 | ps_gen.clear_cuts() 57 | full_ps, full_w, fx1, fx2, _ = ps_gen(xrand) 58 | # Check the original pt 59 | internal_pt = ps_gen.pt(full_ps[:, 3, :]) 60 | full_np = full_ps.numpy() 61 | numpy_pt = np.sqrt(full_np[:, 3, 1] ** 2 + full_np[:, 3, 2] ** 2) 62 | np.testing.assert_allclose(numpy_pt, internal_pt) 63 | # Check that after the cuts we get the right ones 64 | mask = np.all([numpy_pt > 60.0, numpy_pt < 300.0], axis=0) 65 | np.testing.assert_allclose(all_ps, full_np[mask]) 66 | 67 | 68 | def test_fourmomenta(sqrts=7e3, nparticles=4, nevents=100, masses=[50.0, 125.0]): 69 | """Generate a few phase space points and compute some quantities""" 70 | ps_gen = ps.PhaseSpaceGenerator(nparticles, sqrts, masses=masses, algorithm="ramboflow") 71 | dim = (nparticles - 2) * 4 + 2 72 | xrand = tf.random.uniform((nevents, dim), dtype=DTYPE) 73 | all_ps, w, x1, x2, idx = ps_gen(xrand) 74 | # The initial particles should have mass == 0.0 75 | np.testing.assert_allclose(ps._invariant_mass(all_ps[:, 0:1, :]), 0.0) 76 | # And the others whatever is given by the mass 77 | np.testing.assert_allclose(ps._invariant_mass(all_ps[:, 2:3, :]), masses[0] ** 2, atol=1e-4, rtol=1e-4) 78 | np.testing.assert_allclose(ps._invariant_mass(all_ps[:, 3:4, :]), masses[1] ** 2, atol=1e-4, rtol=1e-4) 79 | 80 | 81 | if __name__ == "__main__": 82 | from time import time as tm 83 | 84 | start = tm() 85 | test_fourmomenta() 86 | print(f"Program done in {tm()-start} s") 87 | -------------------------------------------------------------------------------- /python_package/madflow/utilities.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities and wrappers for quickstarting integration 3 | 4 | The main utility in this module is ``one_matrix_integration`` 5 | which is a wrapper to a full Monte Carlo integration of a given matrix 6 | element generated with madgraph. 
7 | 8 | For the example below the matrix element generated is 9 | ``g g > t t~`` 10 | 11 | >>> from madflow.utilities import one_matrix_integration 12 | >>> from pdfflow import mkPDF 13 | >>> pdf = mkPDF("NNPDF31_nnlo_as_0118/0") 14 | >>> one_matrix_integration(matrix, model_params, pdf=pdf, flavours=(0,), out_masses=[173.0, 173.0]) 15 | [INFO] > Final results: 103.439 +/- 0.1147 16 | """ 17 | from .config import int_me, float_me 18 | from .phasespace import ramboflow 19 | 20 | import numpy as np 21 | import tensorflow as tf 22 | from vegasflow import vegas_wrapper 23 | 24 | 25 | def generate_initial_states(matrices): 26 | """Reads a list of matrices and outputs a list of tuples of initial states 27 | each element in the list will be a tuple ([flavours hadron 1, flavours hadron 2]) 28 | for each matrix 29 | """ 30 | initial_flavours = [] 31 | for matrix in matrices: 32 | initials = matrix.initial_states 33 | flavs_1, flavs_2 = zip(*initials) 34 | if matrix.mirror_initial_states: 35 | m2, m1 = zip(*initials) 36 | flavs_1 += m1 37 | flavs_2 += m2 38 | initial_flavours.append((flavs_1, flavs_2)) 39 | return initial_flavours 40 | 41 | 42 | def _generate_luminosity(pdf, q): 43 | """Generates a luminosity function""" 44 | q2 = float_me(q ** 2) 45 | 46 | def luminosity_function(x1, x2, flavours): 47 | """Returns f(x1)*f(x2) for the given flavours""" 48 | q2array = tf.ones_like(x1) * q2 49 | hadron_1 = pdf.xfxQ2(flavours, x1, q2array) 50 | hadron_2 = pdf.xfxQ2(flavours, x2, q2array) 51 | return (hadron_1 * hadron_2) / x1 / x2 52 | 53 | return luminosity_function 54 | 55 | 56 | def one_matrix_integration( 57 | matrix, 58 | model_params, 59 | sqrts=7e3, 60 | n_events=int(1e5), 61 | n_iter=5, 62 | q=91.46, 63 | pdf=None, 64 | flavours=None, 65 | out_masses=None, 66 | ): 67 | """Receives a matrix element from Madgraph""" 68 | nparticles = int(matrix.nexternal) 69 | if pdf is None: 70 | 71 | def luminosity_function(x, *args): 72 | return tf.ones_like(x) 73 | 74 | else: 75 | luminosity_function = _generate_luminosity(pdf, q) 76 | 77 | # Prepare the matrix element 78 | def matrix_wgt(all_p): 79 | return matrix.smatrix(all_p, *model_params) 80 | 81 | # Prepare the integrand 82 | def cross_section(xrand, **kwargs): 83 | all_ps, wts, x1, x2 = ramboflow(xrand, nparticles, sqrts, masses=out_masses) 84 | smatrix = matrix_wgt(all_ps) 85 | pdf_result = luminosity_function(x1, x2, int_me(flavours)) 86 | return smatrix * pdf_result * wts 87 | 88 | ndim = (nparticles - 2) * 4 + 2 89 | tf.random.set_seed(4) 90 | return vegas_wrapper(cross_section, ndim, n_iter, n_events) 91 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Madflow 2 | 3 | [![Tests](https://github.com/N3PDF/madflow/actions/workflows/pytest.yml/badge.svg)](https://github.com/N3PDF/madflow/actions/workflows/pytest.yml) 4 | [![Documentation Status](https://readthedocs.org/projects/madflow/badge/?version=latest)](https://madflow.readthedocs.io/en/latest/?badge=latest) 5 | [![epjc](https://img.shields.io/badge/%20%20%20%20Eur.Phys.J.C-%2081%20(2021)%207%2C%20656-blue)](https://inspirehep.net/literature/1869616) 6 | 7 | 8 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4954375.svg)](https://doi.org/10.5281/zenodo.4954375) 9 | 10 | If you use this software please cite this [paper](https://inspirehep.net/literature/1869616) 11 | 12 | ```bibtex 13 | @article{madflow, 14 | author = "Carrazza, Stefano and Cruz-Martinez, Juan 
and Rossi, Marco and Zaro, Marco",
15 |     title = "{MadFlow: automating Monte Carlo simulation on GPU for particle physics processes}",
16 |     eprint = "2106.10279",
17 |     archivePrefix = "arXiv",
18 |     primaryClass = "physics.comp-ph",
19 |     reportNumber = "TIF-UNIMI-2021-9",
20 |     doi = "10.1140/epjc/s10052-021-09443-8",
21 |     journal = "Eur. Phys. J. C",
22 |     volume = "81",
23 |     number = "7",
24 |     pages = "656",
25 |     year = "2021"
26 | }
27 | 
28 | ```
29 | 
30 | ## Install `madflow`
31 | 
32 | #### From PyPI
33 | 
34 | ```
35 | pip install madflow
36 | ```
37 | 
38 | #### From the repository
39 | 
40 | ```bash
41 | git clone https://github.com/N3PDF/madflow.git
42 | cd madflow
43 | pip install .
44 | ```
45 | 
46 | ### External tools
47 | 
48 | `madflow` relies on a number of external tools.
49 | Some of them are just used for convenience and are optional, while others are necessary for the proper functioning of the program.
50 | 
51 | #### MG5_aMC
52 | 
53 | A valid installation of MG5_aMC (2.8+) is necessary in order to generate matrix elements.
54 | If you already have a valid installation, please set the following environment variable to point to the right directory: `MADGRAPH_PATH`.
55 | Below are the instructions for MG5_aMC 3.1.0; for a more recent release please visit the MG5_aMC@NLO [site](https://launchpad.net/mg5amcnlo).
56 | 
57 | ```bash
58 | wget https://launchpad.net/mg5amcnlo/3.0/3.1.x/+download/MG5_aMC_v3.1.0.tar.gz
59 | tar xfz MG5_aMC_v3.1.0.tar.gz
60 | export MADGRAPH_PATH=${PWD}/MG5_aMC_v3_1_0
61 | ```
62 | 
63 | #### PDF grids
64 | 
65 | While `LHAPDF` is not strictly necessary to use the `madflow` library or run any of the scripts,
66 | having access to the `lhapdf` python wrapper can be convenient in order to manage the different PDF sets.
67 | Please install the latest version from the LHAPDF [site](https://lhapdf.hepforge.org/).
68 | 
69 | Otherwise, if your installed version of `pdfflow` is equal to or greater than `1.2.2`,
70 | it includes the [lhapdf-management](https://github.com/scarlehoff/lhapdf_management) scripts suite and LHAPDF should not be needed.
71 | You can also manually install the [PDF sets](https://lhapdf.hepforge.org/pdfsets.html) in a suitable directory
72 | and ensure that either the `PDFFLOW_DATA_PATH` or `LHAPDF_DATA_PATH` environment variables are pointing to it.
73 | 
74 | You can check your installed version of `pdfflow` with: `python -c 'import pdfflow ; print(pdfflow.__version__);'`
75 | 
76 | ## Install plugin in MG5_aMC
77 | 
78 | In order to install the `madflow` plugin in MG5_aMC@NLO, it is necessary to link the `madgraph_plugin` folder inside the `PLUGIN` directory of MG5_aMC@NLO.
79 | For instance, if the environment variable `$MADGRAPH_PATH` is pointing to the MG5_aMC root and you are currently in the repository root, run:
80 | 
81 | ```bash
82 | ln -s ${PWD}/madgraph_plugin ${MADGRAPH_PATH}/PLUGIN/pyout
83 | ```
84 | 
85 | The link can be performed automagically with the `madflow --autolink` option.
86 | 
87 | ## Use `madflow`
88 | 
89 | For a more precise description of what `madflow` can do, please visit the online documentation.
90 | 
91 | For convenience a script is provided, which should have been installed alongside the library.
92 | Using this script it is possible to run any process at Leading Order, integrated with a `RAMBO`-like phase space.
93 | 94 | ```bash 95 | madflow --help 96 | ``` 97 | ```bash 98 | [-h] [-v] [-p PDF] [--no_pdf] [-c] [--madgraph_process MADGRAPH_PROCESS] [-m MASSIVE_PARTICLES] [-g] [--pt_cut PT_CUT] [--histograms] 99 | 100 | optional arguments: 101 | -h, --help show this help message and exit 102 | -v, --verbose Print extra info 103 | -p PDF, --pdf PDF PDF set 104 | --no_pdf Don't use a PDF for the initial state 105 | -c, --enable_cuts Enable the cuts 106 | --madgraph_process MADGRAPH_PROCESS 107 | Set the madgraph process to be run 108 | -m MASSIVE_PARTICLES, --massive_particles MASSIVE_PARTICLES 109 | Number of massive particles 110 | -g, --variable_g Use variable g_s 111 | --pt_cut PT_CUT Minimum pt for the outgoint particles 112 | --histograms Generate LHE files/histograms 113 | ``` 114 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Installation script for madflow (WIP) 3 | """ 4 | 5 | from pathlib import Path 6 | from sys import version_info 7 | import re 8 | import os 9 | from setuptools import setup, find_packages 10 | 11 | requirements = ["vegasflow", "pdfflow", "requests"] 12 | # soft-requirements due to vegasflow and pdfflow are: 13 | # tensorflow, joblib, numpy 14 | package_name = "madflow" 15 | package_root = "python_package" 16 | repository_root = Path(__file__).parent 17 | 18 | description = "Package for GPU fixed order calculations" 19 | long_description = (repository_root / "readme.md").read_text() 20 | 21 | 22 | def get_version(): 23 | """Gets the version from the package's __init__ file 24 | if there is some problem, let it happily fail""" 25 | version_file = repository_root / f"{package_root}/{package_name}/__init__.py" 26 | initfile_lines = version_file.open("rt").readlines() 27 | VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]" 28 | for line in initfile_lines: 29 | mo = re.search(VSRE, line, re.M) 30 | if mo: 31 | return mo.group(1) 32 | return "unknown" 33 | 34 | 35 | setup( 36 | name=package_name, 37 | version=get_version(), 38 | description=description, 39 | long_description=long_description, 40 | long_description_content_type="text/markdown", 41 | license="Apache 2", 42 | author="S. Carrazza, J. Cruz-Martinez, M. Rossi, M. Zaro", 43 | author_email="n3pdf@lairen.eu", 44 | url="https://github.com/N3PDF/madflow/", 45 | package_dir={"": package_root}, 46 | packages=find_packages(package_root), 47 | zip_safe=False, 48 | classifiers=[ 49 | "Operating System :: Unix", 50 | "Programming Language :: Python", 51 | "Programming Language :: Python :: 3", 52 | "Topic :: Scientific/Engineering", 53 | "Topic :: Scientific/Engineering :: Physics", 54 | ], 55 | python_requires=">=3.6", 56 | install_requires=requirements, 57 | extras_require={ 58 | 'docs' : [ 59 | 'sphinx_rtd_theme', 60 | 'recommonmark', 61 | 'sphinxcontrib-bibtex', 62 | ], 63 | 'amd' : ['tensorflow-rocm'] 64 | }, 65 | entry_points={ 66 | "console_scripts": [ 67 | "madflow = madflow.scripts.madflow_exec:main", 68 | ] 69 | }, 70 | ) 71 | --------------------------------------------------------------------------------
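To close the loop on the event output shown earlier: `madflow.lhe_writer.LheWriter` is meant to be used as a context manager around the integration. The banner is written on entry, events are dumped asynchronously through `lhe_parser`, and the stream is finalised (and optionally unweighted) on exit. The snippet below is a hypothetical driver sketch, not the code used by the `madflow` script: the output folder name and the placeholder tensors are made up, and a working MG5_aMC installation reachable through `MADGRAPH_PATH` is assumed, since `madflow.lhe_writer` imports madgraph's `lhe_parser` module.

```python
# Hypothetical driver sketch for madflow.lhe_writer.LheWriter.
from pathlib import Path

import tensorflow as tf

from madflow.lhe_writer import LheWriter

output_folder = Path("madflow_output")  # illustrative madflow output folder

with LheWriter(output_folder, run="run_01", no_unweight=True) as lhe:
    # In a real run this call lives inside the vegasflow integrand:
    # all_ps are the phase-space points, shape (nevents, nexternal, 4),
    # res are the corresponding weights, shape (nevents,).
    all_ps = tf.zeros((10, 4, 4), dtype=tf.float64)  # placeholder momenta
    res = tf.ones((10,), dtype=tf.float64)  # placeholder weights
    lhe.lhe_parser(all_ps, res)  # events are queued and written asynchronously

    # store the integration result before the context exits, so that the
    # unweighting step and dump_result() have a cross section to work with
    lhe.store_result((103.4, 0.1))
```

With `no_unweight=True` only the weighted `weighted_events.lhe.gz` file is produced; leaving the default `no_unweight=False` triggers the unweighting step on exit, which rewrites the kept events with the stored cross section as weight.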