├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── pytest.yml │ ├── pythonpublish.yml │ └── test_many_pdfs.yml ├── .gitignore ├── .pylintrc ├── .readthedocs.yml ├── LICENSE ├── README.md ├── benchmarks_and_tests ├── check_many_pdfs.py ├── compare_accuracy_alphas.py ├── compare_accuracy_lhapdf.py ├── compare_performance_lhapdf.py ├── nlo_integration │ ├── main_vfh.py │ ├── me.py │ ├── parameters.py │ ├── phase_space.py │ └── spinors.py └── singletop_lo.py ├── capi ├── CMakeLists.txt ├── README.md ├── examples │ ├── Makefile │ ├── example.c │ └── fortran │ │ ├── Makefile │ │ ├── example.f90 │ │ └── pdfflow_f_interface.c └── src │ ├── build.py │ ├── pdfflow.pc.in │ ├── pdfflow │ └── pdfflow.h │ └── wrapper.py ├── doc ├── Makefile └── source │ ├── apisrc │ └── pdfflow.rst │ ├── conf.py │ ├── how_to.rst │ ├── index.rst │ └── overview.rst ├── pyproject.toml ├── setup.cfg └── src └── pdfflow ├── __init__.py ├── alphas_functions.py ├── alphas_interpolations.py ├── alphas_region_interpolator.py ├── configflow.py ├── functions.py ├── interpolations.py ├── neighbour_knots.py ├── pflow.py ├── region_interpolator.py ├── subgrid.py └── tests ├── __init__.py ├── test_alphas.py ├── test_config.py ├── test_lhapdf.py └── test_pflow.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Description 11 | 12 | Please, describe briefly what the issue is 13 | 14 | ### Code example 15 | 16 | If possible, write a minimum working example that reproduces the bug, 17 | e.g: 18 | 19 | ```python 20 | import pdfflow 21 | pdfflow.broken_function() 22 | ``` 23 | 24 | ### Additional information 25 | 26 | Does the problem occur in CPU or GPU? 27 | If GPU, how many? Which version of Cuda do you have? 
28 | 29 | ```bash 30 | nvcc --version 31 | ``` 32 | 33 | Please include the version of pdfflow and tensorflow that you are running. Running the following python script will produce useful information: 34 | 35 | ```python 36 | import tensorflow as tf 37 | import pdfflow 38 | 39 | print(f"pdfflow: {pdfflow.__version__}") 40 | print(f"Tensorflow: {tf.__version__}") 41 | print(f"tf-mkl: {tf.python.framework.test_util.IsMklEnabled()}") 42 | print(f"tf-cuda: {tf.test.is_built_with_cuda()}") 43 | print(f"GPU available: {tf.test.is_gpu_available()}") 44 | ``` 45 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### The problem 11 | 12 | Brief description of the problem you want to solve. 13 | 14 | ### Proposed solution 15 | 16 | Please share any possible solutions for the problem you are thinking of. 17 | 18 | ### Are you available/want to contribute? 
19 | 20 | Yes/No 21 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: pytest 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | strategy: 10 | max-parallel: 3 11 | matrix: 12 | python-version: [3.9, "3.10"] 13 | 14 | steps: 15 | - uses: actions/checkout@v3 16 | - name: Setup Conda 17 | uses: conda-incubator/setup-miniconda@v2 18 | with: 19 | python-version: ${{ matrix.python-version }} 20 | auto-update-conda: true 21 | - name: Install dependencies and package 22 | shell: bash --login {0} 23 | run: | 24 | conda info 25 | python -m pip install --upgrade pip 26 | pip install .[tf-cpu] 27 | # Install LHAPDF 28 | conda install -y lhapdf -c conda-forge 29 | # Download and install a PDF set to ensure that the environment paths are working 30 | wget http://pcteserver.mi.infn.it/~nnpdf/nnpdf31/NNPDF31_nnlo_as_0118.tar.gz 31 | mkdir -p pdfsets 32 | tar xvfz NNPDF31_nnlo_as_0118.tar.gz 33 | mv NNPDF31_nnlo_as_0118 pdfsets/ 34 | - name: Test with pytest 35 | shell: bash --login {0} 36 | run: | 37 | # Download the PDF set 38 | export PDFFLOW_DATA_PATH="pdfsets" 39 | pip install pytest 40 | pytest 41 | -------------------------------------------------------------------------------- /.github/workflows/pythonpublish.yml: -------------------------------------------------------------------------------- 1 | name: Python publication 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | deploy: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v1 12 | - name: Set up Python 13 | uses: actions/setup-python@v1 14 | with: 15 | python-version: '3.x' 16 | - name: Install dependencies 17 | run: | 18 | pip install --upgrade pip build 19 | pip install twine 20 | - name: Build and publish 21 | env: 22 | TWINE_USERNAME: ${{ secrets.TWINE_USER }} 23 | TWINE_PASSWORD: ${{ secrets.TWINE_PASS 
}} 24 | run: | 25 | python -m build 26 | twine upload dist/* 27 | -------------------------------------------------------------------------------- /.github/workflows/test_many_pdfs.yml: -------------------------------------------------------------------------------- 1 | name: Check interpolation for many PDFs 2 | on: [push] 3 | 4 | jobs: 5 | test_of_pdfs: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v3 9 | - name: Setup Conda 10 | uses: conda-incubator/setup-miniconda@v2 11 | with: 12 | python-version: "3.11" 13 | auto-update-conda: true 14 | - name: Install dependencies, package and LHAPDF 15 | shell: bash --login {0} 16 | run: | 17 | conda install lhapdf -c conda-forge 18 | pip install .[tf] 19 | lhapdf-management update --init 20 | - name: Test a random assortment of 50 PDFs 21 | shell: bash -l {0} 22 | run: | 23 | export PDFFLOW_LOG_LEVEL=0 24 | python benchmarks_and_tests/check_many_pdfs.py --yes --verbose -n 50 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # Benchmark output files 132 | benchmarks/*.png 133 | 134 | # cffi stuff 135 | *.pc 136 | cpdfflow.c 137 | # example executables 138 | example 139 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code 6 | extension-pkg-whitelist=numpy,tensorflow 7 | 8 | # Specify a score threshold to be exceeded before program exits with error. 9 | fail-under=10 10 | 11 | # Add files or directories to the blacklist. They should be base names, not 12 | # paths. 13 | ignore=CVS 14 | 15 | # Add files or directories matching the regex patterns to the blacklist. The 16 | # regex matches against base names, not paths. 17 | ignore-patterns= 18 | 19 | # Python code to execute, usually for sys.path manipulation such as 20 | # pygtk.require(). 21 | #init-hook= 22 | 23 | # Use multiple processes to speed up Pylint. 24 | jobs=2 25 | 26 | # List of plugins (as comma separated values of python modules names) to load, 27 | # usually to register additional checkers. 28 | load-plugins= 29 | 30 | # Pickle collected data for later comparisons. 
31 | persistent=yes 32 | 33 | # When enabled, pylint would attempt to guess common misconfiguration and emit 34 | # user-friendly hints instead of false-positive error messages. 35 | suggestion-mode=yes 36 | 37 | # Allow loading of arbitrary C extensions. Extensions are imported into the 38 | # active Python interpreter and may run arbitrary code. 39 | unsafe-load-any-extension=no 40 | 41 | 42 | [MESSAGES CONTROL] 43 | 44 | # Only show warnings with the listed confidence levels. Leave empty to show 45 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED 46 | confidence= 47 | 48 | # Disable the message, report, category or checker with the given id(s). You 49 | # can either give multiple identifiers separated by comma (,) or put this 50 | # option multiple times (only on the command line, not in the configuration 51 | # file where it should appear only once).You can also use "--disable=all" to 52 | # disable everything first and then reenable specific checks. For example, if 53 | # you want to run only the similarities checker, you can use "--disable=all 54 | # --enable=similarities". If you want to run only the classes checker, but have 55 | # no Warning level messages displayed, use "--disable=all --enable=classes 56 | # --disable=W". 
57 | disable=print-statement, 58 | parameter-unpacking, 59 | unpacking-in-except, 60 | old-raise-syntax, 61 | backtick, 62 | long-suffix, 63 | old-ne-operator, 64 | old-octal-literal, 65 | import-star-module-level, 66 | non-ascii-bytes-literal, 67 | raw-checker-failed, 68 | bad-inline-option, 69 | locally-disabled, 70 | file-ignored, 71 | suppressed-message, 72 | useless-suppression, 73 | deprecated-pragma, 74 | use-symbolic-message-instead, 75 | apply-builtin, 76 | basestring-builtin, 77 | buffer-builtin, 78 | cmp-builtin, 79 | coerce-builtin, 80 | execfile-builtin, 81 | file-builtin, 82 | long-builtin, 83 | raw_input-builtin, 84 | reduce-builtin, 85 | standarderror-builtin, 86 | unicode-builtin, 87 | xrange-builtin, 88 | coerce-method, 89 | delslice-method, 90 | getslice-method, 91 | setslice-method, 92 | no-absolute-import, 93 | old-division, 94 | dict-iter-method, 95 | dict-view-method, 96 | next-method-called, 97 | metaclass-assignment, 98 | indexing-exception, 99 | raising-string, 100 | reload-builtin, 101 | oct-method, 102 | hex-method, 103 | nonzero-method, 104 | cmp-method, 105 | input-builtin, 106 | round-builtin, 107 | intern-builtin, 108 | unichr-builtin, 109 | map-builtin-not-iterating, 110 | zip-builtin-not-iterating, 111 | range-builtin-not-iterating, 112 | filter-builtin-not-iterating, 113 | using-cmp-argument, 114 | eq-without-hash, 115 | div-method, 116 | idiv-method, 117 | rdiv-method, 118 | exception-message-attribute, 119 | invalid-str-codec, 120 | sys-max-int, 121 | bad-python3-import, 122 | deprecated-string-function, 123 | deprecated-str-translate-call, 124 | invalid-name, 125 | too-few-public-methods, 126 | deprecated-itertools-function, 127 | deprecated-types-field, 128 | next-method-defined, 129 | dict-items-not-iterating, 130 | dict-keys-not-iterating, 131 | dict-values-not-iterating, 132 | deprecated-operator-function, 133 | deprecated-urllib-function, 134 | xreadlines-attribute, 135 | deprecated-sys-function, 136 | exception-escape, 
137 | comprehension-escape, 138 | E1123, # pylint is not able to deal with tensorflow 139 | E1120, # same as above 140 | C0330, # black indentation when breaking long lines is better 141 | 142 | 143 | 144 | # Enable the message, report, category or checker with the given id(s). You can 145 | # either give multiple identifier separated by comma (,) or put this option 146 | # multiple time (only on the command line, not in the configuration file where 147 | # it should appear only once). See also the "--disable" option for examples. 148 | enable=c-extension-no-member 149 | 150 | 151 | [REPORTS] 152 | 153 | # Python expression which should return a note less than 10 (10 is the highest 154 | # note). You have access to the variables errors warning, statement which 155 | # respectively contain the number of errors / warnings messages and the total 156 | # number of statements analyzed. This is used by the global evaluation report 157 | # (RP0004). 158 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 159 | 160 | # Template used to display messages. This is a python new-style format string 161 | # used to format the message information. See doc for all details 162 | #msg-template= 163 | 164 | # Set the output format. Available formats are text, parseable, colorized, json 165 | # and msvs (visual studio).You can also give a reporter class, eg 166 | # mypackage.mymodule.MyReporterClass. 167 | output-format=text 168 | 169 | # Tells whether to display a full report or only the messages 170 | reports=no 171 | 172 | # Activate the evaluation score. 173 | score=yes 174 | 175 | 176 | [REFACTORING] 177 | 178 | # Maximum number of nested blocks for function / method body 179 | max-nested-blocks=5 180 | 181 | # Complete name of functions that never returns. 
When checking for 182 | # inconsistent-return-statements if a never returning function is called then 183 | # it will be considered as an explicit return statement and no message will be 184 | # printed. 185 | never-returning-functions=sys.exit 186 | 187 | [MISCELLANEOUS] 188 | 189 | # List of note tags to take in consideration, separated by a comma. 190 | notes=FIXME,XXX,TODO 191 | 192 | 193 | [SIMILARITIES] 194 | 195 | # Ignore comments when computing similarities. 196 | ignore-comments=yes 197 | 198 | # Ignore docstrings when computing similarities. 199 | ignore-docstrings=yes 200 | 201 | # Ignore imports when computing similarities. 202 | ignore-imports=no 203 | 204 | # Minimum lines number of a similarity. 205 | min-similarity-lines=4 206 | 207 | [STRING] 208 | 209 | # This flag controls whether inconsistent-quotes generates a warning when the 210 | # character used as a quote delimiter is used inconsistently within a module. 211 | check-quote-consistency=no 212 | 213 | # This flag controls whether the implicit-str-concat should generate a warning 214 | # on implicit string concatenation in sequences defined over several lines. 215 | check-str-concat-over-line-jumps=no 216 | 217 | 218 | [TYPECHECK] 219 | 220 | # List of decorators that produce context managers, such as 221 | # contextlib.contextmanager. Add to this list to register other decorators that 222 | # produce valid context managers. 223 | contextmanager-decorators=contextlib.contextmanager 224 | 225 | # List of members which are set dynamically and missed by pylint inference 226 | # system, and so shouldn't trigger E1101 when accessed. Python regular 227 | # expressions are accepted. 228 | generated-members= 229 | 230 | # Tells whether missing members accessed in mixin class should be ignored. A 231 | # mixin class is detected if its name ends with "mixin" (case insensitive). 
232 | ignore-mixin-members=yes 233 | 234 | # Tells whether to warn about missing members when the owner of the attribute 235 | # is inferred to be None. 236 | ignore-none=yes 237 | 238 | # This flag controls whether pylint should warn about no-member and similar 239 | # checks whenever an opaque object is returned when inferring. The inference 240 | # can return multiple potential results while evaluating a Python object, but 241 | # some branches might not be evaluated, which results in partial inference. In 242 | # that case, it might be useful to still emit no-member and other checks for 243 | # the rest of the inferred objects. 244 | ignore-on-opaque-inference=yes 245 | 246 | # List of class names for which member attributes should not be checked (useful 247 | # for classes with dynamically set attributes). This supports the use of 248 | # qualified names. 249 | ignored-classes=optparse.Values,thread._local,_thread._local 250 | 251 | # List of module names for which member attributes should not be checked 252 | # (useful for modules/projects where namespaces are manipulated during runtime 253 | # and thus existing member attributes cannot be deduced by static analysis). It 254 | # supports qualified module names, as well as Unix pattern matching. 255 | ignored-modules=tensorflow 256 | 257 | # Show a hint with possible names when a member name was not found. The aspect 258 | # of finding the hint is based on edit distance. 259 | missing-member-hint=yes 260 | 261 | # The minimum edit distance a name should have in order to be considered a 262 | # similar match for a missing member name. 263 | missing-member-hint-distance=1 264 | 265 | # The total number of similar names that should be taken in consideration when 266 | # showing a hint for a missing member. 267 | missing-member-max-choices=1 268 | 269 | # List of decorators that change the signature of a decorated function. 
270 | signature-mutators= 271 | 272 | 273 | [BASIC] 274 | 275 | # Naming hint for argument names 276 | argument-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 277 | 278 | # Regular expression matching correct argument names 279 | argument-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 280 | 281 | # Naming hint for attribute names 282 | attr-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 283 | 284 | # Regular expression matching correct attribute names 285 | attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 286 | 287 | # Bad variable names which should always be refused, separated by a comma 288 | bad-names=foo,bar,baz,toto,tutu,tata 289 | 290 | # Naming hint for class attribute names 291 | class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 292 | 293 | # Regular expression matching correct class attribute names 294 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 295 | 296 | # Naming hint for class names 297 | class-name-hint=[A-Z_][a-zA-Z0-9]+$ 298 | 299 | # Regular expression matching correct class names 300 | class-rgx=[A-Z_][a-zA-Z0-9]+$ 301 | 302 | # Naming hint for constant names 303 | const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 304 | 305 | # Regular expression matching correct constant names 306 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 307 | 308 | # Minimum line length for functions/classes that require docstrings, shorter 309 | # ones are exempt. 
310 | docstring-min-length=-1 311 | 312 | # Naming hint for function names 313 | function-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 314 | 315 | # Regular expression matching correct function names 316 | function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 317 | 318 | # Good variable names which should always be accepted, separated by a comma 319 | good-names=i,j,k,ex,Run,_ 320 | 321 | # Include a hint for the correct naming format with invalid-name 322 | include-naming-hint=no 323 | 324 | # Naming hint for inline iteration names 325 | inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ 326 | 327 | # Regular expression matching correct inline iteration names 328 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ 329 | 330 | # Naming hint for method names 331 | method-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 332 | 333 | # Regular expression matching correct method names 334 | method-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 335 | 336 | # Naming hint for module names 337 | module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 338 | 339 | # Regular expression matching correct module names 340 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 341 | 342 | # Colon-delimited sets of names that determine each other's naming style when 343 | # the name regexes allow several styles. 344 | name-group= 345 | 346 | # Regular expression which should only match function or class names that do 347 | # not require a docstring. 348 | no-docstring-rgx=^_ 349 | 350 | # List of decorators that produce properties, such as abc.abstractproperty. Add 351 | # to this list to register other decorators that produce valid properties. 
352 | property-classes=abc.abstractproperty 353 | 354 | # Naming hint for variable names 355 | variable-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 356 | 357 | # Regular expression matching correct variable names 358 | variable-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 359 | 360 | 361 | [VARIABLES] 362 | 363 | # List of additional names supposed to be defined in builtins. Remember that 364 | # you should avoid to define new builtins when possible. 365 | additional-builtins= 366 | 367 | # Tells whether unused global variables should be treated as a violation. 368 | allow-global-unused-variables=yes 369 | 370 | # List of strings which can identify a callback function by name. A callback 371 | # name must start or end with one of those strings. 372 | callbacks=cb_,_cb 373 | 374 | # A regular expression matching the name of dummy variables (i.e. expectedly 375 | # not used). 376 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 377 | 378 | # Argument names that match this expression will be ignored. Default to name 379 | # with leading underscore 380 | ignored-argument-names=_.*|^ignored_|^unused_ 381 | 382 | # Tells whether we should check for unused import in __init__ files. 383 | init-import=no 384 | 385 | # List of qualified module names which can have objects that can redefine 386 | # builtins. 387 | redefining-builtins-modules=six.moves,future.builtins 388 | 389 | 390 | [FORMAT] 391 | 392 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 393 | expected-line-ending-format= 394 | 395 | # Regexp for a line that is allowed to be longer than the limit. 396 | ignore-long-lines=^\s*(# )??$ 397 | 398 | # Number of spaces of indent required inside a hanging or continued line. 399 | indent-after-paren=4 400 | 401 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 402 | # tab). 403 | indent-string=' ' 404 | 405 | # Maximum number of characters on a single line. 
406 | max-line-length=100 407 | 408 | # Maximum number of lines in a module 409 | max-module-lines=1000 410 | 411 | # List of optional constructs for which whitespace checking is disabled. `dict- 412 | # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. 413 | # `trailing-comma` allows a space between comma and closing bracket: (a, ). 414 | # `empty-line` allows space-only lines. 415 | no-space-check=trailing-comma,dict-separator 416 | 417 | # Allow the body of a class to be on the same line as the declaration if body 418 | # contains single statement. 419 | single-line-class-stmt=no 420 | 421 | # Allow the body of an if to be on the same line as the test if there is no 422 | # else. 423 | single-line-if-stmt=no 424 | 425 | 426 | [SPELLING] 427 | 428 | # Spelling dictionary name. Available dictionaries: none. To make it working 429 | # install python-enchant package. 430 | spelling-dict= 431 | 432 | # List of comma separated words that should not be checked. 433 | spelling-ignore-words= 434 | 435 | # A path to a file that contains private dictionary; one word per line. 436 | spelling-private-dict-file= 437 | 438 | # Tells whether to store unknown words to indicated private dictionary in 439 | # --spelling-private-dict-file option instead of raising a message. 440 | spelling-store-unknown-words=no 441 | 442 | 443 | 444 | 445 | 446 | [DESIGN] 447 | 448 | # Maximum number of arguments for function / method 449 | max-args=12 450 | 451 | # Maximum number of attributes for a class (see R0902). 452 | max-attributes=12 453 | 454 | # Maximum number of boolean expressions in a if statement 455 | max-bool-expr=5 456 | 457 | # Maximum number of branch for function / method body 458 | max-branches=12 459 | 460 | # Maximum number of locals for function / method body 461 | max-locals=30 462 | 463 | # Maximum number of parents for a class (see R0901). 464 | max-parents=7 465 | 466 | # Maximum number of public methods for a class (see R0904). 
467 | max-public-methods=20 468 | 469 | # Maximum number of return / yield for function / method body 470 | max-returns=6 471 | 472 | # Maximum number of statements in function / method body 473 | max-statements=50 474 | 475 | # Minimum number of public methods for a class (see R0903). 476 | min-public-methods=2 477 | 478 | 479 | [CLASSES] 480 | 481 | # List of method names used to declare (i.e. assign) instance attributes. 482 | defining-attr-methods=__init__,__new__,setUp 483 | 484 | # List of member names, which should be excluded from the protected access 485 | # warning. 486 | exclude-protected=_asdict,_fields,_replace,_source,_make 487 | 488 | # List of valid names for the first argument in a class method. 489 | valid-classmethod-first-arg=cls 490 | 491 | # List of valid names for the first argument in a metaclass class method. 492 | valid-metaclass-classmethod-first-arg=cls 493 | 494 | 495 | [IMPORTS] 496 | 497 | # List of modules that can be imported at any level, not just the top level 498 | # one. 499 | allow-any-import-level= 500 | 501 | # Allow wildcard imports from modules that define __all__. 502 | allow-wildcard-with-all=no 503 | 504 | # Analyse import fallback blocks. This can be used to support both Python 2 and 505 | # 3 compatible code, which means that the block might have code that exists 506 | # only in one or another interpreter, leading to false positives when analysed. 507 | analyse-fallback-blocks=no 508 | 509 | # Deprecated modules which should not be used, separated by a comma 510 | deprecated-modules=optparse,tkinter.tix 511 | 512 | # Create a graph of external dependencies in the given file (report RP0402 must 513 | # not be disabled) 514 | ext-import-graph= 515 | 516 | # Create a graph of every (i.e. 
internal and external) dependencies in the 517 | # given file (report RP0402 must not be disabled) 518 | import-graph= 519 | 520 | # Create a graph of internal dependencies in the given file (report RP0402 must 521 | # not be disabled) 522 | int-import-graph= 523 | 524 | # Force import order to recognize a module as part of the standard 525 | # compatibility libraries. 526 | known-standard-library= 527 | 528 | # Force import order to recognize a module as part of a third party library. 529 | known-third-party=enchant 530 | 531 | # Couples of modules and preferred modules, separated by a comma. 532 | preferred-modules= 533 | 534 | [EXCEPTIONS] 535 | 536 | # Exceptions that will emit a warning when being caught. Defaults to 537 | # "Exception" 538 | overgeneral-exceptions=Exception 539 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | sphinx: 4 | builder: html 5 | configuration: doc/source/conf.py 6 | 7 | python: 8 | version: 3.7 9 | install: 10 | - method: pip 11 | path: . 12 | extra_requirements: 13 | - docs 14 | system_packages: true 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![DOI](https://zenodo.org/badge/238731330.svg)](https://zenodo.org/badge/latestdoi/238731330) 2 | [![arxiv](https://img.shields.io/badge/arXiv-hep--ph%2F2009.06635-%23B31B1B.svg)](https://arxiv.org/abs/2009.06635) 3 | 4 | [![Documentation Status](https://readthedocs.org/projects/pdfflow/badge/?version=stable)](https://pdfflow.readthedocs.io/en/latest/?badge=stable) 5 | ![pytest](https://github.com/N3PDF/pdfflow/workflows/pytest/badge.svg) 6 | [![AUR](https://img.shields.io/aur/version/python-pdfflow)](https://aur.archlinux.org/packages/python-pdfflow) 7 | 8 | # PDFFlow 9 | 10 | PDFFlow is parton distribution function interpolation library written in Python and based on the [TensorFlow](https://www.tensorflow.org/) framework. It is developed with a focus on speed and efficiency, enabling researchers to perform very expensive calculation as quick and easy as possible. 
11 | 12 | The key feature of PDFFlow is the possibility to query PDF sets on GPU accelerators. 13 | 14 | ## Documentation 15 | 16 | The documentation for PDFFlow can be consulted in the readthedocs page: [pdfflow.readthedocs.io](https://pdfflow.readthedocs.io/en/latest). 17 | 18 | ## Installation 19 | 20 | The package can be installed with pip: 21 | 22 | ```bash 23 | python3 -m pip install pdfflow[MODE] 24 | ``` 25 | 26 | If you prefer a manual installation just `cd` in the cloned folder and use: 27 | 28 | ```bash 29 | pip install .[MODE] 30 | ``` 31 | 32 | or if you are planning to extend or develop code just install the package in 33 | editable mode: 34 | 35 | ```bash 36 | pip install -e .[MODE] 37 | ``` 38 | 39 | `PDFFlow` assumes that the user has already installed the most optimized version 40 | of TensorFlow for their platform. As such, by default, `pip` will not check it as 41 | a requirement. 42 | 43 | However, the user can also install it specifying a `MODE` option in the 44 | `pip` command. The list below summarizes the valid choices for the `MODE` flag: 45 | 46 | - `tf`: installs the `tensorflow` package 47 | - `tf-cpu`: installs the `tensorflow-cpu` package 48 | - `tf-gpu`: installs the `tensorflow-gpu` package 49 | - `tf-amd`: installs the `tensorflow-rocm` package 50 | 51 | **⚠ Note: Use the latest version of TensorFlow!** 52 | 53 | TensorFlow is updated frequently and a later version of TensorFlow will often 54 | offer better performance in both GPUs and CPUs. 55 | Although it can be made to work with earlier versions, `PDFFlow` is only 56 | supported for TensorFlow>2.1. 57 | 58 | ## PDF set management 59 | 60 | PDFFlow does not do management of PDF sets, which is left to LHAPDF and so a lhapdf installation is needed. 61 | A full lhapdf installation can be obtained by utilizing the `lhapdf_management` library. 
62 | 63 | ```bash 64 | python3 -m pip install lhapdf_management 65 | lhapdf_management install NNPDF31_nnlo_as_0118 66 | ``` 67 | 68 | ## Minimal Working Example 69 | 70 | Below a minimalistic example where `PDFFlow` is used to generate 10 values of the PDF 71 | for 2 members for three different flavours. 72 | 73 | ```python 74 | from pdfflow import mkPDFs 75 | import tensorflow as tf 76 | 77 | pdf = mkPDFs("NNPDF31_nnlo_as_0118", [0,2]) 78 | x = tf.random.uniform([10], dtype=tf.float64) 79 | q2 = tf.random.uniform([10], dtype=tf.float64)*20 + 10 80 | pid = tf.cast([-1,21,1], dtype=tf.int32) 81 | 82 | result = pdf.xfxQ2(pid, x, q2) 83 | ``` 84 | 85 | Note the usage of the `dtype` keyword in the TensorFlow calls. 86 | This is used to ensure that `float64` is being used all across the program. 87 | For convenience, we ship two functions, `int_me` and `float_me` which are simply 88 | wrappers to `tf.cast` with the right types. 89 | 90 | These wrappers can be used over TensorFlow types but also numpy values: 91 | 92 | ```python 93 | from pdfflow import mkPDFs, int_me, float_me 94 | import tensorflow as tf 95 | import numpy as np 96 | 97 | pdf = mkPDFs("NNPDF31_nnlo_as_0118", [0,2]) 98 | x = float_me(np.random.rand(10)) 99 | q2 = float_me(tf.random.uniform([10])*20 + 10) 100 | pid = int_me([-1,21,1]) 101 | 102 | result = pdf.xfxQ2(pid, x, q2) 103 | ``` 104 | 105 | ## Citation policy 106 | 107 | If you use the package please cite the following paper and zenodo references: 108 | 109 | - https://doi.org/10.5281/zenodo.3964190 110 | - https://arxiv.org/abs/2009.06635 111 | -------------------------------------------------------------------------------- /benchmarks_and_tests/check_many_pdfs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | This little script uses the lhapdf python interface to: 4 | 1) Get the latest list of PDF sets 5 | 2) Download a subset of them (or all) 6 | 3) Test with a random 
set of points that the LHPADF and pdfflow produce the same result 7 | 8 | Uses lhapdf-management to install the PDFs programatically :) 9 | """ 10 | 11 | import sys 12 | import tempfile 13 | from argparse import ArgumentParser 14 | from pathlib import Path 15 | 16 | import lhapdf 17 | import numpy as np 18 | import pdfflow 19 | from lhapdf_management import environment, pdf_list, pdf_update 20 | 21 | lhapdf.setVerbosity(0) 22 | 23 | 24 | def _compare_w_lhapdf(pdf, npoints=1000, tolerance=1e-6): 25 | """Compare a LHAPDF and pdfflow pdfs 26 | for an array of npoints""" 27 | # Now get a random member 28 | m = np.random.randint(len(loaded_pdf), dtype=int) 29 | 30 | pdfflow_pdf = pdfflow.mkPDF(f"{pdf}/{m}") 31 | lhapdf_pdf = lhapdf.mkPDF(f"{pdf}/{m}") 32 | 33 | # Get n points for x between 0 and 1 34 | xx = np.random.rand(npoints) 35 | # And n points for q between the min and the maximum seen by pdfflow 36 | qdelta = pdfflow_pdf.q2max - pdfflow_pdf.q2min 37 | qq = pdfflow_pdf.q2min + np.random.rand(npoints) * qdelta 38 | 39 | # Make sure the order is the same as in pdfflow 40 | flavors = pdf.info["Flavors"] 41 | lhapdf_results = lhapdf_pdf.xfxQ2(flavors, xx, qq) 42 | 43 | lres = np.array(lhapdf_results) 44 | pres = pdfflow_pdf.py_xfxQ2_allpid(xx, qq).numpy() 45 | 46 | # This is not still implemented as part of pdfflow, but need to be careful during the check 47 | if pdf.info.get("ForcePositive", 0) > 0: 48 | pres = np.maximum(pres, 1e-10) 49 | 50 | np.testing.assert_allclose(pres, lres, rtol=tolerance, atol=tolerance) 51 | 52 | 53 | if __name__ == "__main__": 54 | parser = ArgumentParser(description=__doc__) 55 | parser.add_argument("-d", "--dir", help="Directory where to download the sets", type=Path) 56 | parser.add_argument("-y", "--yes", help="Respond yes to every question", action="store_true") 57 | parser.add_argument("-v", "--verbose", help="Be verbose", action="store_true") 58 | parser.add_argument( 59 | "-a", 60 | "--all", 61 | help="Try really ALL sets, 
otherwise, do a random selection of N of them", 62 | action="store_true", 63 | ) 64 | parser.add_argument( 65 | "-n", 66 | "--npdfs", 67 | help="If all is not given, hoy many PDFs to actually test (default 50)", 68 | type=int, 69 | default=50, 70 | ) 71 | parser.add_argument( 72 | "-p", 73 | "--points", 74 | help="How many points in x/q to test per PDF set (default 1000)", 75 | type=int, 76 | default=1000, 77 | ) 78 | parser.add_argument( 79 | "-t", "--tolerance", help="Tolerance for the test (default 1e-6)", type=float, default=1e-6 80 | ) 81 | 82 | args = parser.parse_args() 83 | 84 | if args.dir is None: 85 | target_dir = Path(tempfile.mkdtemp()) 86 | else: 87 | target_dir = args.dir 88 | 89 | if not args.yes: 90 | print( 91 | f"""You are about to download a potentially large number of PDF sets to {target_dir.absolute()} 92 | This is likely to be heavy in both your storage and your bandwith.""" 93 | ) 94 | yn = input(" > Do you want to continue? [Y/N] ") 95 | if not yn.lower() in ("y", "yes", "ye", "si"): 96 | sys.exit(0) 97 | 98 | target_dir.mkdir(exist_ok=True) 99 | 100 | # Set the datapath 101 | environment.datapath = target_dir 102 | lhapdf.setPaths([target_dir.as_posix()]) 103 | 104 | # Get the latest PDF list 105 | pdf_update() 106 | 107 | # And now list them all 108 | list_of_pdfs = pdf_list() 109 | 110 | # if not --all, take a mask of N PDFs 111 | if not args.all: 112 | if args.npdfs > len(list_of_pdfs): 113 | raise ValueError( 114 | f"The value of N ({args.npdfs}) cannot be greater than the number of PDFs available ({len(list_of_pdfs)}), use --all if you just want to test all of them" 115 | ) 116 | list_of_pdfs = np.random.choice(list_of_pdfs, size=args.npdfs, replace=False) 117 | 118 | # And time to install! 119 | failed_pdfs = [] 120 | for pdf in list_of_pdfs: 121 | if args.verbose: 122 | print(f"Testing {pdf}... 
", end="") 123 | try: 124 | pdf.install() 125 | # Try loading the PDF 126 | loaded_pdf = pdf.load() 127 | _compare_w_lhapdf(loaded_pdf, npoints=args.points, tolerance=args.tolerance) 128 | except KeyError as e: 129 | # If there's a key error on the PDF either the .info file is malformed (then not our problem) 130 | # or the PDF is using analytical running for alpha_s, so PDFFlow cannot use it 131 | pass 132 | except Exception as e: 133 | # We are not going to care that much _how_ the failure happened 134 | if args.verbose: 135 | print(f"{pdf} failed!") 136 | failed_pdfs.append((pdf, e)) 137 | 138 | if failed_pdfs: 139 | print("\nThe failed pdfs are: ") 140 | for pdf, error in failed_pdfs: 141 | print(f"{pdf} with {error}") 142 | raise Exception("Some PDFs failed!") 143 | else: 144 | print("\nNo PDF failed the test!") 145 | -------------------------------------------------------------------------------- /benchmarks_and_tests/compare_accuracy_alphas.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark script for LHAPDF comparison. 
3 | """ 4 | from pdfflow.configflow import float_me 5 | import lhapdf 6 | import argparse 7 | import subprocess as sp 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | import matplotlib as mpl 11 | import tensorflow as tf 12 | import time 13 | import pdfflow.pflow as pdf 14 | from compare_accuracy_lhapdf import set_ticks 15 | 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument("--pdfname", "-p", default="NNPDF31_nlo_as_0118/0", 18 | type=str, help='The PDF set name/replica number.') 19 | DIRNAME = sp.run(['lhapdf-config','--datadir'], stdout=sp.PIPE, 20 | universal_newlines=True).stdout.strip('\n') + '/' 21 | EPS = np.finfo(float).eps 22 | 23 | def compare_alphas(pdfname, ax): 24 | """ 25 | Computes the alphas difference pdfflow vs lhapdf and returns 26 | axes for plots 27 | Parameters: 28 | pdfname: string 29 | ax: matplotlib.axes.Axes object 30 | Return: 31 | matplotlib.axes.Axes object 32 | """ 33 | p = pdf.mkPDF(pdfname, DIRNAME) 34 | name = '\_'.join(pdfname.split('_')) 35 | 36 | s = time.time() 37 | p.alphas_trace() 38 | print("".join([f"\nPDFflow alphas, pdf set:{pdfname}", 39 | f"\n\tBuilding graph time: {time.time()-s}\n"])) 40 | 41 | if p is None: 42 | p = pdf.mkPDF(pdfname, DIRNAME) 43 | l_pdf = lhapdf.mkPDF(pdfname) 44 | name = '\_'.join(pdfname.split('_')) 45 | 46 | q = np.logspace(-3, 9, 10000, dtype=float) 47 | 48 | s_time = time.time() 49 | vl = np.array([l_pdf.alphasQ(iq) for iq in q]) 50 | l_time = time.time() 51 | vp = p.py_alphasQ(q) 52 | p_time = time.time() 53 | 54 | ax.plot(q, np.abs(vp-vl)/(np.abs(vl)+EPS)) 55 | ax.hlines(1e-3, plt.xlim()[0], plt.xlim()[1], 56 | linestyles='dotted', color='red') 57 | ax.set_xscale('log') 58 | ax.set_yscale('log') 59 | ax.set_xlim([1e-3,1e9]) 60 | ax.set_ylim([EPS, 0.01]) 61 | 62 | ax = set_ticks(ax, -3, 9, 13, 'x', 4) 63 | ax.tick_params(axis='x', which='both', direction='in', 64 | bottom=True, labelbottom=True, 65 | top=True, labeltop=False) 66 | 67 | ax = set_ticks(ax, -15, -3, 
16, 'y') 68 | 69 | ax.title.set_text(r'%s, $\alpha_s(Q)$' % name) 70 | ax.set_xlabel(r'$Q$', fontsize=17) 71 | 72 | print("\nDry run time comparison for pdf %s:"%pdfname) 73 | print("{:>10}:{:>15.8f}".format("lhapdf", l_time - s_time)) 74 | print("{:>10}:{:>15.8f}".format("pdfflow", p_time - l_time)) 75 | 76 | return ax 77 | 78 | 79 | def main(pdfname): 80 | """Testing PDFflow vs LHAPDF performance.""" 81 | mpl.rcParams['text.usetex'] = True 82 | 83 | mpl.rcParams['savefig.format'] = 'pdf' 84 | mpl.rcParams['figure.figsize'] = [11,5.5] 85 | mpl.rcParams['axes.titlesize'] = 20 86 | mpl.rcParams['ytick.labelsize'] = 17 87 | mpl.rcParams['xtick.labelsize'] = 17 88 | 89 | fig = plt.figure() 90 | gs = fig.add_gridspec(nrows=1, ncols=2, wspace=0.1) 91 | 92 | ax = fig.add_subplot(gs[0]) 93 | ax = compare_alphas(pdfname, ax) 94 | ax.tick_params(axis='y', which='both', direction='in', 95 | left=True, labelleft=True, 96 | right=True, labelright=False) 97 | ax.set_ylabel(r'$\displaystyle{r_{\alpha_s}(Q)}$', 98 | fontsize=20) 99 | 100 | ax = fig.add_subplot(gs[1]) 101 | pdfname = 'MMHT2014nlo68cl/0' 102 | ax = compare_alphas(pdfname, ax) 103 | ax.tick_params(axis='y', which='both', direction='in', 104 | left=True, labelleft=False, 105 | right=True, labelright=False) 106 | 107 | plt.savefig('diff_alphas.pdf', bbox_inches='tight',dpi=250) 108 | plt.close() 109 | 110 | 111 | if __name__ == "__main__": 112 | args = vars(parser.parse_args()) 113 | start=time.time() 114 | main(**args) 115 | print("Total time: ", time.time()-start) 116 | 117 | -------------------------------------------------------------------------------- /benchmarks_and_tests/compare_accuracy_lhapdf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark script for LHAPDF comparison. 
3 | """ 4 | from pdfflow.configflow import float_me 5 | import lhapdf 6 | import argparse 7 | import subprocess as sp 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | import matplotlib as mpl 11 | import tensorflow as tf 12 | import time 13 | from math import floor, log10 14 | 15 | 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument("--pdfname", "-p", default="NNPDF31_nlo_as_0118/0", 18 | type=str, help='The PDF set name/replica number.') 19 | parser.add_argument("--pid", default=21, type=int, help='The flavour PID.') 20 | parser.add_argument("--no_tex", action="store_false", 21 | help="Don't render pyplot with tex") 22 | DIRNAME = sp.run(['lhapdf-config','--datadir'], stdout=sp.PIPE, 23 | universal_newlines=True).stdout.strip('\n') + '/' 24 | EPS = np.finfo(float).eps 25 | 26 | def sci_notation(num, decimal_digits=1, precision=None, exponent=None): 27 | """ 28 | Returns a string representation of the scientific 29 | notation of the given number formatted for use with 30 | LaTeX, with specified number of significant 31 | decimal digits and precision (number of decimal digits 32 | to show). The exponent to be used can also be specified 33 | explicitly. 
34 | """ 35 | if num == 0: 36 | return r'0' 37 | if exponent is None: 38 | exponent = int(floor(log10(abs(num)))) 39 | if precision is None: 40 | precision = decimal_digits 41 | coeff = round(num / float(10**exponent), decimal_digits) 42 | return r'%s'%format(coeff, f'.{decimal_digits}f') + r"\times 10^{%d}"%exponent 43 | 44 | def set_ticks(ax, start, end, numticks, axis, nskip=2): 45 | """ 46 | Set both major and minor axes ticks in the logarithmical scale 47 | Parameters: 48 | ax: matplotlib.axes.Axes object 49 | start: int, leftmost tick 50 | end: int, rightmost tick 51 | numticks 52 | axis: 1 y axis, 0 x axis 53 | nskip: int, major ticks to leave without label 54 | """ 55 | 56 | ticks = list(np.logspace(start,end,end-start+1)) 57 | labels = [r'$10^{%d}$'%start] 58 | for i in [i for i in range(start+2,end+1,nskip)]: 59 | labels.extend(['' for i in range(nskip-1)]+[r'$10^{%d}$'%i]) 60 | locmin = mpl.ticker.LogLocator(base=10.0, 61 | subs=[i/10 for i in range(1,10)], 62 | numticks=numticks) 63 | if axis == 'x': 64 | ax.xaxis.set_major_locator(mpl.ticker.FixedLocator(ticks)) 65 | ax.xaxis.set_major_formatter(mpl.ticker.FixedFormatter(labels)) 66 | ax.xaxis.set_minor_locator(locmin) 67 | ax.xaxis.set_minor_formatter(mpl.ticker.NullFormatter()) 68 | if axis == 'y': 69 | ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(ticks)) 70 | ax.yaxis.set_major_formatter(mpl.ticker.FixedFormatter(labels)) 71 | ax.yaxis.set_minor_locator(locmin) 72 | ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter()) 73 | return ax 74 | 75 | 76 | def main(pdfname, pid, no_tex=True): 77 | """Testing PDFflow vs LHAPDF performance.""" 78 | mpl.rcParams['text.usetex'] = no_tex 79 | mpl.rcParams['savefig.format'] = 'pdf' 80 | mpl.rcParams['figure.figsize'] = [11,5.5] 81 | mpl.rcParams['axes.titlesize'] = 20 82 | mpl.rcParams['ytick.labelsize'] = 17 83 | mpl.rcParams['xtick.labelsize'] = 17 84 | mpl.rcParams['legend.fontsize'] = 14 85 | 86 | import pdfflow.pflow as pdf 87 | 88 | p = 
pdf.mkPDF(pdfname, DIRNAME) 89 | l_pdf = lhapdf.mkPDF(pdfname) 90 | name = '\_'.join(pdfname.split('_')) 91 | 92 | s = time.time() 93 | p.trace() 94 | print("\nPDFflow\n\tBuilding graph time: %f\n"%(time.time()-s)) 95 | 96 | fig = plt.figure() 97 | gs = fig.add_gridspec(nrows=1, ncols=2, wspace=0.05) 98 | ax = fig.add_subplot(gs[0]) 99 | x = np.logspace(-12,0,10000, dtype=float) 100 | q2 = np.array([1.65,1.7,4.92,1e2,1e3,1e4,1e5,1e6,2e6], dtype=float)**2 101 | for iq2 in q2: 102 | vl = np.array([l_pdf.xfxQ2(pid, ix, iq2) for ix in x]) 103 | vp = p.py_xfxQ2(pid, float_me(x), float_me([iq2]*len(x))) 104 | 105 | 106 | ax.plot(x, np.abs(vp-vl)/(np.abs(vl)+EPS), 107 | label=r'$Q=%s$' % sci_notation(iq2**0.5,2)) 108 | 109 | ax.hlines(1e-3, plt.xlim()[0], plt.xlim()[1], linestyles='dotted', color='red') 110 | ax.set_xscale('log') 111 | ax.set_yscale('log') 112 | ax.set_xlim([1e-12,1.]) 113 | ax.set_ylim([EPS, .01]) 114 | 115 | ax = set_ticks(ax, -12, 0, 13, 'x', 4) 116 | ax.tick_params(axis='x', which='both', direction='in', 117 | bottom=True, labelbottom=True, 118 | top=True, labeltop=False) 119 | 120 | ax = set_ticks(ax, -15, -3, 16, 'y') 121 | ax.tick_params(axis='y', which='both', direction='in', 122 | left=True, labelleft=True, 123 | right=True, labelright=False) 124 | 125 | ax.set_title(r'%s, flav = %d' % (name, pid)) 126 | ylabel = r'$\displaystyle{r_{i}(x,Q)}$' if no_tex else '$r_{i}$(x,Q)' 127 | ax.set_ylabel(ylabel, fontsize=20) 128 | ax.set_xlabel(r'$x$', fontsize=17) 129 | ax.legend(frameon=False, ncol=2, 130 | loc='upper right', bbox_to_anchor=(1.02,0.9)) 131 | 132 | x = np.array([1e-10,1e-9,1.1e-9,5e-7,1e-6,1e-4,1e-2,0.5,0.99], dtype=float) 133 | q2 = np.logspace(1, 7, 10000, dtype=float)**2 134 | ax = fig.add_subplot(gs[1]) 135 | for ix in x: 136 | s_time = time.time() 137 | vl = np.array([l_pdf.xfxQ2(pid, ix, iq2) for iq2 in q2]) 138 | l_time = time.time() 139 | vp = p.py_xfxQ2(pid, float_me([ix]*len(q2)), float_me(q2)) 140 | p_time = time.time() 141 | 142 
| ax.plot(q2**0.5, np.abs(vp-vl)/(np.abs(vl)+EPS), 143 | label=r'$x=%s$' % sci_notation(ix,1)) 144 | 145 | ax.hlines(1e-3, plt.xlim()[0], plt.xlim()[1], linestyles='dotted', color='red') 146 | ax.set_xscale('log') 147 | ax.set_yscale('log') 148 | ax.set_xlim([1,1e7]) 149 | ax.set_ylim([EPS, .01]) 150 | 151 | ax = set_ticks(ax, 1, 7, 9, 'x') 152 | ax.tick_params(axis='x', which='both', direction='in', 153 | top=True, labeltop=False, 154 | bottom=True, labelbottom=True) 155 | 156 | ax = set_ticks(ax, -15, -3, 16, 'y') 157 | ax.tick_params(axis='y', which='both', direction='in', 158 | right=True, labelright=False, 159 | left=True, labelleft=False) 160 | 161 | ax.set_title(r'%s, flav = %d' % (name, pid)) 162 | ax.set_xlabel(r'$Q$', fontsize=17) 163 | ax.legend(frameon=False, ncol=2, 164 | loc='upper right', bbox_to_anchor=(1.02,0.9)) 165 | plt.savefig('diff_%s_flav%d.pdf' % (pdfname.replace('/','-'), pid), 166 | bbox_inches='tight', dpi=250) 167 | plt.close() 168 | 169 | print("\nDry run time comparison:") 170 | print("{:>10}:{:>15.8f}".format("lhapdf", l_time - s_time)) 171 | print("{:>10}:{:>15.8f}".format("pdfflow", p_time - l_time)) 172 | 173 | 174 | if __name__ == "__main__": 175 | args = vars(parser.parse_args()) 176 | if args['pid'] == 0: 177 | args['pid'] = 21 178 | start=time.time() 179 | main(**args) 180 | print("Total time: ", time.time()-start) 181 | 182 | -------------------------------------------------------------------------------- /benchmarks_and_tests/compare_performance_lhapdf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark script for LHAPDF comparison. 
3 | """ 4 | import lhapdf 5 | import pdfflow.pflow as pdf 6 | import argparse 7 | import subprocess as sp 8 | import numpy as np 9 | import tensorflow as tf 10 | from time import time 11 | import tqdm 12 | 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument("--pdfname", "-p", default="NNPDF31_nlo_as_0118/0", type=str, 15 | help="The PDF set name/replica number.") 16 | parser.add_argument("--n-draws", default=100000, type=int, 17 | help="Number of trials.") 18 | parser.add_argument("--pid", default=21, type=int, 19 | help="The flavour PID.") 20 | parser.add_argument("--no_lhapdf", action="store_false", 21 | help="Don't run lhapdf, only pdfflow") 22 | parser.add_argument("-t", "--tensorboard", action="store_true", 23 | help="Enable tensorboard profile logging") 24 | parser.add_argument("--dev0", default=None, type=str, 25 | help="First pdfflow running device") 26 | parser.add_argument("--dev1", default=None, type=str, 27 | help="Second pdfflow running device") 28 | parser.add_argument("--label0", default=None, type=str, 29 | help=" ".join(["Legend label of first pdfflow running device,", 30 | "defaults to tf device auto selection"])) 31 | parser.add_argument("--label1", default=None, type=str, 32 | help=" ".join(["Legend label of second pdfflow running device,", 33 | "defaults to tf device auto selection"])) 34 | parser.add_argument("--no_tex", action="store_false", 35 | help="Don't render pyplot with tex") 36 | DIRNAME = (sp.run(["lhapdf-config", "--datadir"], stdout=sp.PIPE, 37 | universal_newlines=True).stdout.strip("\n") + "/") 38 | 39 | 40 | def test_pdfflow(p, a_x, a_q2): 41 | start = time() 42 | p.py_xfxQ2_allpid(a_x, a_q2) 43 | return time() - start 44 | 45 | 46 | def test_lhapdf(l_pdf, a_x, a_q2): 47 | start = time() 48 | f_lha = [] 49 | for i in range(a_x.shape[0]): 50 | l_pdf.xfxQ2(a_x[i], a_q2[i]) 51 | return time() - start 52 | 53 | 54 | def accumulate_times(pdfname, dev0, dev1, no_lhapdf): 55 | """ 56 | Computes performance times 57 | 
Parameters: 58 | dev0: str, device name over which run pdfflow 59 | dev1: str, device name over which run pdfflow 60 | """ 61 | with tf.device(dev0): 62 | p0 = pdf.mkPDF(pdfname, DIRNAME) 63 | p0.trace() 64 | 65 | with tf.device(dev1): 66 | p1 = pdf.mkPDF(pdfname, DIRNAME) 67 | p1.trace() 68 | 69 | if no_lhapdf: 70 | l_pdf = lhapdf.mkPDF(pdfname) 71 | else: 72 | l_pdf = None 73 | 74 | xmin = np.exp(p0.grids[0][0].log_xmin) 75 | xmax = np.exp(p0.grids[0][0].log_xmax) 76 | q2min = np.sqrt(np.exp(p0.grids[0][0].log_q2min)) 77 | q2max = np.sqrt(np.exp(p0.grids[0][-1].log_q2max)) 78 | 79 | t_pdf0 = [] 80 | t_pdf1 = [] 81 | t_lha = [] 82 | 83 | n = np.linspace(1e5,1e6,20) 84 | for j in range(10): 85 | t0 = [] 86 | t1 = [] 87 | t2 = [] 88 | for i in tqdm.tqdm(n): 89 | a_x = np.random.uniform(xmin, xmax,[int(i),]) 90 | a_q2 = np.exp(np.random.uniform(np.log(q2min), 91 | np.log(q2max),[int(i),])) 92 | with tf.device(dev0): 93 | t_ = test_pdfflow(p0, a_x, a_q2) 94 | t0 += [t_] 95 | 96 | with tf.device(dev1): 97 | t_ = test_pdfflow(p1, a_x, a_q2) 98 | t1 += [t_] 99 | 100 | t_ = test_lhapdf(l_pdf, a_x, a_q2) if no_lhapdf else [] 101 | t2 += [t_] 102 | 103 | t_pdf0 += [t0] 104 | t_pdf1 += [t1] 105 | t_lha += [t2] 106 | 107 | t_pdf0 = np.stack(t_pdf0) 108 | t_pdf1 = np.stack(t_pdf1) 109 | t_lha = np.stack(t_lha) 110 | 111 | return n, t_pdf0, t_pdf1, t_lha 112 | 113 | 114 | def main(pdfname=None, n_draws=10, pid=21, no_lhapdf=False, 115 | tensorboard=False, dev0=None, dev1=None, 116 | label0=None, label1=None, no_tex=True): 117 | """Testing PDFflow vs LHAPDF performance.""" 118 | if tensorboard: 119 | tf.profiler.experimental.start('logdir') 120 | 121 | #check legend labels 122 | if label0 is None: 123 | label0 = dev0 124 | 125 | if label1 is None: 126 | label1 = dev1 127 | 128 | n, t_pdf0, t_pdf1, t_lha = accumulate_times(pdfname, dev0, dev1, no_lhapdf) 129 | 130 | if tensorboard: 131 | tf.profiler.experimental.stop('logdir') 132 | 133 | import matplotlib.pyplot as plt 134 | 
import matplotlib as mpl 135 | mpl.rcParams['text.usetex'] = no_tex 136 | mpl.rcParams['savefig.format'] = 'pdf' 137 | mpl.rcParams['figure.figsize'] = [7,8] 138 | mpl.rcParams['axes.titlesize'] = 20 139 | mpl.rcParams['ytick.labelsize'] = 17 140 | mpl.rcParams['xtick.labelsize'] = 17 141 | mpl.rcParams['legend.fontsize'] = 18 142 | 143 | avg_l = t_lha.mean(0) 144 | avg_p0 = t_pdf0.mean(0) 145 | avg_p1 = t_pdf1.mean(0) 146 | std_l = t_lha.std(0) 147 | std_p0 = t_pdf0.std(0) 148 | std_p1 = t_pdf1.std(0) 149 | 150 | std_ratio0 = np.sqrt((std_l/avg_p0)**2 + (avg_l*std_p0/(avg_p0)**2)**2) 151 | std_ratio1 = np.sqrt((std_l/avg_p1)**2 + (avg_l*std_p1/(avg_p1)**2)**2) 152 | 153 | k = len(t_pdf0)**0.5 154 | 155 | fig = plt.figure() 156 | gs = fig.add_gridspec(nrows=3, ncols=1, hspace=0.1) 157 | 158 | ax = fig.add_subplot(gs[:-1,:]) 159 | ax.errorbar(n,avg_p0,yerr=std_p0/k,label=r'\texttt{PDFFlow}: %s'%label0, 160 | linestyle='--', color='b', marker='^') 161 | ax.errorbar(n,avg_p1,yerr=std_p1/k,label=r'\texttt{PDFFlow}: %s'%label1, 162 | linestyle='--', color='#ff7f0e', marker='s') 163 | ax.errorbar(n,avg_l,yerr=std_l/k,label=r'LHAPDF (CPU)', 164 | linestyle='--', color='g', marker='o') 165 | ax.title.set_text('%s - LHAPDF performances'%PDFFLOW) 166 | ax.set_ylabel(r'$t [s]$', fontsize=20) 167 | ticks = list(np.linspace(1e5,1e6,10)) 168 | labels = [r'%d'%i for i in range(1,11)] 169 | ax.xaxis.set_major_locator(mpl.ticker.FixedLocator(ticks)) 170 | ax.xaxis.set_major_formatter(mpl.ticker.FixedFormatter(labels)) 171 | ax.tick_params(axis='x', direction='in', 172 | bottom=True, labelbottom=False, 173 | top=True, labeltop=False) 174 | ax.tick_params(axis='y', direction='in', 175 | left=True, labelleft=True, 176 | right=True, labelright=False) 177 | ax.legend(frameon=False) 178 | 179 | ax = fig.add_subplot(gs[-1,:]) 180 | ax.errorbar(n, (avg_l/avg_p0),yerr=std_ratio0/k, label=r'%s'%label0, 181 | linestyle='--', color='b', marker='^') 182 | ax.errorbar(n, 
(avg_l/avg_p1),yerr=std_ratio1/k, label=r'%s'%label1, 183 | linestyle='--', color='#ff7f0e', marker='s') 184 | xlabel = r'$[\times 10^{5}]$' if no_tex else '$x10^{5}$' 185 | ax.set_xlabel(''.join([r'Number of $(x,Q)$ points drawn', xlabel]), 186 | fontsize=18) 187 | ax.set_ylabel(r'Ratio to LHAPDF', 188 | fontsize=18) 189 | ax.set_yscale('log') 190 | ticks = list(np.linspace(1e5,1e6,10)) 191 | labels = [r'%d'%i for i in range(1,11)] 192 | ax.xaxis.set_major_locator(mpl.ticker.FixedLocator(ticks)) 193 | ax.xaxis.set_major_formatter(mpl.ticker.FixedFormatter(labels)) 194 | ax.tick_params(axis='x', direction='in', 195 | bottom=True, labelbottom=True, 196 | top=True, labeltop=False) 197 | ax.tick_params(axis='y', direction='in', 198 | left=True, labelleft=True, 199 | right=True, labelright=False) 200 | 201 | plt.savefig('time.pdf', bbox_inches='tight', dpi=200) 202 | plt.close() 203 | 204 | 205 | if __name__ == "__main__": 206 | args = vars(parser.parse_args()) 207 | start = time() 208 | main(**args) 209 | print(time() - start) 210 | -------------------------------------------------------------------------------- /benchmarks_and_tests/nlo_integration/main_vfh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Implementation of the Vector Boson Fusion Higgs production 4 | using the hep-flow suite: pdfflow and vegasflow 5 | """ 6 | from argparse import ArgumentParser 7 | import subprocess as sp 8 | 9 | from pdfflow.pflow import mkPDF 10 | from pdfflow.configflow import float_me, fone, fzero, DTYPE 11 | from pdfflow.functions import _condition_to_idx 12 | from vegasflow.vflow import VegasFlow 13 | 14 | import tensorflow as tf 15 | 16 | from parameters import * 17 | import phase_space 18 | import spinors 19 | import me 20 | 21 | pdfset = "NNPDF31_nnlo_as_0118/0" 22 | # Instantiate the PDF 23 | DIRNAME = ( 24 | sp.run(["lhapdf-config", "--datadir"], stdout=sp.PIPE, 
universal_newlines=True).stdout.strip( 25 | "\n" 26 | ) 27 | + "/" 28 | ) 29 | pdf = mkPDF(pdfset, DIRNAME) 30 | 31 | ##### PDF calculation 32 | @tf.function(input_signature=[TFLOAT1, TFLOAT1, TFLOAT1]) 33 | def luminosity(x1, x2, q2array): 34 | """ Returns f(x1)*f(x2) """ 35 | # q2array = muR2 * tf.ones_like(x1) 36 | utype = pdf.xfxQ2([2, 4], x1, q2array) 37 | dtype = pdf.xfxQ2([1, 3], x2, q2array) 38 | lumi = tf.reduce_sum(utype * dtype, axis=-1) 39 | return lumi / x1 / x2 40 | 41 | 42 | ### Main functions 43 | @tf.function 44 | def vfh_production_leading_order(xarr, **kwargs): 45 | """ Wrapper for LO VFH calculation 46 | """ 47 | # Compute the phase space point 48 | pa, pb, p1, p2, x1, x2, wgt = phase_space.psgen_2to3(xarr) 49 | # Apply cuts 50 | stripe, idx, max_pt2 = phase_space.pt_cut_2of2(p1, p2) 51 | pa = tf.boolean_mask(pa, stripe, axis=1) 52 | pb = tf.boolean_mask(pb, stripe, axis=1) 53 | p1 = tf.boolean_mask(p1, stripe, axis=1) 54 | p2 = tf.boolean_mask(p2, stripe, axis=1) 55 | wgt = tf.boolean_mask(wgt, stripe, axis=0) 56 | x1 = tf.boolean_mask(x1, stripe, axis=0) 57 | x2 = tf.boolean_mask(x2, stripe, axis=0) 58 | max_pt2 = tf.boolean_mask(max_pt2, stripe, axis=0) 59 | 60 | # Compute luminosity 61 | lumi = luminosity(x1, x2, max_pt2) 62 | 63 | me_lo = me.qq_h_lo(pa, pb, p1, p2) 64 | res = lumi * me_lo * wgt 65 | final_result = res * flux / x1 / x2 66 | return tf.scatter_nd(idx, final_result, shape=xarr.shape[0:1]) 67 | 68 | 69 | @tf.function 70 | def vfh_production_real(xarr, **kwargs): 71 | """ Wrapper for R VFH calculation 72 | """ 73 | # Compute the phase space point 74 | pa, pb, p1, p2, p3, x1, x2, wgt = phase_space.psgen_2to4(xarr) 75 | 76 | # Apply cuts 77 | stripe, idx, max_pt2 = phase_space.pt_cut_3of3(pa, pb, p1, p2, p3) 78 | 79 | pa = tf.boolean_mask(pa, stripe, axis=1) 80 | pb = tf.boolean_mask(pb, stripe, axis=1) 81 | p1 = tf.boolean_mask(p1, stripe, axis=1) 82 | p2 = tf.boolean_mask(p2, stripe, axis=1) 83 | p3 = tf.boolean_mask(p3, stripe, 
axis=1) 84 | wgt = tf.boolean_mask(wgt, stripe, axis=0) 85 | x1 = tf.boolean_mask(x1, stripe, axis=0) 86 | x2 = tf.boolean_mask(x2, stripe, axis=0) 87 | max_pt2 = tf.boolean_mask(max_pt2, stripe, axis=0) 88 | if phase_space.UNIT_PHASE: 89 | return tf.scatter_nd(idx, wgt, shape=xarr.shape[0:1]) 90 | 91 | # Compute luminosity 92 | lumi = luminosity(x1, x2, max_pt2) 93 | 94 | me_r = me.qq_h_r(pa, pb, p1, p2, p3) 95 | res = lumi * me_r * wgt 96 | # res = lumi * wgt 97 | final_result = res * flux / x1 / x2 98 | return tf.scatter_nd(idx, final_result, shape=xarr.shape[0:1]) 99 | 100 | 101 | @tf.function 102 | def vfh_production_nlo(xarr, **kwargs): 103 | """ Wrapper for R VFH calculation at NLO (2 jets) 104 | """ 105 | # Compute the phase space point 106 | pa, pb, p1, p2, p3, x1, x2, wgt = phase_space.psgen_2to4(xarr) 107 | 108 | # Apply cuts 109 | stripe, idx, max_pt2 = phase_space.pt_cut_2of3(pa, pb, p1, p2, p3) 110 | # stripe, idx, max_pt2 = phase_space.pt_cut_3of3(pa, pb, p1, p2, p3) 111 | 112 | pa = tf.boolean_mask(pa, stripe, axis=1) 113 | pb = tf.boolean_mask(pb, stripe, axis=1) 114 | p1 = tf.boolean_mask(p1, stripe, axis=1) 115 | p2 = tf.boolean_mask(p2, stripe, axis=1) 116 | p3 = tf.boolean_mask(p3, stripe, axis=1) 117 | wgt = tf.boolean_mask(wgt, stripe, axis=0) 118 | x1 = tf.boolean_mask(x1, stripe, axis=0) 119 | x2 = tf.boolean_mask(x2, stripe, axis=0) 120 | max_pt2 = tf.boolean_mask(max_pt2, stripe, axis=0) 121 | 122 | me_r = me.qq_h_r(pa, pb, p1, p2, p3) 123 | 124 | # Compute luminosity 125 | lumi = luminosity(x1, x2, max_pt2) 126 | phys_me = lumi * me_r * wgt / x1 / x2 127 | phys_res = tf.scatter_nd(idx, phys_me, shape=xarr.shape[0:1]) 128 | 129 | if SUBTRACT: 130 | # Now we need the subtraction terms for leg 1 and leg 2 131 | # leg 1, p3 is radiated from pa-p1 132 | npa, np1 = phase_space.map_3to2(pa, p1, p3) 133 | # apply cuts on this sbt 134 | stripe_1, x, max_pt2 = phase_space.pt_cut_2of2(np1, p2) 135 | 136 | wgt_1 = tf.where(stripe_1, wgt, fzero) 137 
| 138 | # Compute the dipole 139 | dip_1 = me.antenna_qgq(pa, p3, p1) 140 | # Reduced ME 141 | red_1 = me.partial_lo(npa, pb, np1, p2) 142 | 143 | # Compute luminosity of the subtraction 144 | lumi_1 = luminosity(x1, x2, max_pt2) 145 | sub_1 = lumi_1 * dip_1 * red_1 * me.factor_re * wgt_1 / x1 / x2 146 | 147 | # Add this to the result 148 | phys_res -= tf.scatter_nd(idx, sub_1, shape=xarr.shape[0:1]) 149 | 150 | # leg 2, p3 is radiated from pb-p2 151 | npb, np2 = phase_space.map_3to2(pb, p2, p3) 152 | # apply cuts on this sbt 153 | stripe_2, x, max_pt2 = phase_space.pt_cut_2of2(p1, np2) 154 | 155 | wgt_2 = tf.where(stripe_2, wgt, fzero) 156 | 157 | # Compute the dipole 158 | dip_2 = me.antenna_qgq(pb, p3, p2) 159 | # Reduced ME 160 | red_2 = me.partial_lo(pa, npb, p1, np2) 161 | 162 | # Compute luminosity of the subtraction 163 | lumi_2 = luminosity(x1, x2, max_pt2) 164 | sub_2 = lumi_2 * dip_2 * red_2 * me.factor_re * wgt_2 / x1 / x2 165 | # Add this to the result 166 | phys_res -= tf.scatter_nd(idx, sub_2, shape=xarr.shape[0:1]) 167 | 168 | final_result = phys_res * flux 169 | return final_result 170 | 171 | 172 | if __name__ == "__main__": 173 | parser = ArgumentParser() 174 | parser.add_argument("-l", "--level", default="LO", help="Integration level") 175 | parser.add_argument("-n", "--nevents", type=int, default=int(1e6), help="Number of events") 176 | parser.add_argument("-i", "--iterations", type=int, default=5, help="Number of iterations") 177 | parser.add_argument( 178 | "-e", 179 | "--events_limit", 180 | type=int, 181 | default=int(5e5), 182 | help="Max events to be sent to an accelerator device at once", 183 | ) 184 | args = parser.parse_args() 185 | 186 | ncalls = args.nevents 187 | niter = args.iterations 188 | print(f"ncalls={ncalls:2.1e}, niter={niter}, device_limit={args.events_limit:2.1e}") 189 | 190 | if args.level == "LO": 191 | print("Running Leading Order") 192 | ndim = 6 193 | integrand = vfh_production_leading_order 194 | elif args.level == 
"R": 195 | print("Running Real Tree level") 196 | ndim = 9 197 | integrand = vfh_production_real 198 | elif args.level == "NLO": 199 | print("Running Real NLO") 200 | ndim = 9 201 | integrand = vfh_production_nlo 202 | 203 | # First prepare the grid 204 | integrator = VegasFlow(ndim, ncalls, events_limit=args.events_limit) 205 | integrator.compile(integrand) 206 | integrator.run_integration(niter) 207 | 208 | # Now freeze and integrate 209 | print(" > Freezing the grid") 210 | integrator.events_per_run = args.events_limit 211 | integrator.freeze_grid() 212 | integrator.run_integration(niter) 213 | -------------------------------------------------------------------------------- /benchmarks_and_tests/nlo_integration/me.py: -------------------------------------------------------------------------------- 1 | """ 2 | Matrix Element functions for VFH calculation 3 | """ 4 | 5 | import numpy as np 6 | from pdfflow.configflow import float_me 7 | import tensorflow as tf 8 | from parameters import TFLOAT4, stw, mw, gw, TFLOAT1 9 | from phase_space import psgen_2to3, psgen_2to4 10 | from spinors import dot_product, zA, zB, sprod 11 | 12 | 13 | @tf.function(input_signature=[TFLOAT1]) 14 | def propagator_w(s): 15 | """ Compute the propagator of the W boson: 16 | (s - w)^2 + (m*Gamma_w)^2 17 | """ 18 | t1 = tf.square(s - tf.square(mw)) 19 | t2 = tf.square(mw * gw) 20 | return t1 + t2 21 | 22 | 23 | @tf.function(input_signature=4 * [TFLOAT4]) 24 | def partial_lo(pa, pb, p1, p2): 25 | """ Computes the LO q Q -> Q q H (WW->H) """ 26 | # Compute the propagators 27 | sa1 = -2.0 * dot_product(pa, p1) 28 | sb2 = -2.0 * dot_product(pb, p2) 29 | 30 | prop = propagator_w(sa1) * propagator_w(sb2) 31 | coup = tf.square(mw / tf.pow(stw, 1.5)) 32 | rmcom = coup / prop 33 | 34 | # Compute the amplitude 35 | # W-boson, so only Left-Left 36 | amplitude = zA(pa, pb) * zB(p1, p2) 37 | amp2 = tf.math.real(amplitude * tf.math.conj(amplitude)) 38 | 39 | me_res = 2.0 * amp2 * rmcom 40 | return me_res 
41 | 42 | 43 | # Leading Order matrix element 44 | factor_lo = float_me(1.0702411577062499e-4) # there is no alpha_s, alpha_ew computed at Mz val 45 | 46 | 47 | @tf.function(input_signature=4 * [TFLOAT4]) 48 | def qq_h_lo(pa, pb, p1, p2): 49 | """ Computes the LO q Q -> Q q H (WW->H) """ 50 | return factor_lo * partial_lo(pa, pb, p1, p2) 51 | 52 | 53 | @tf.function(input_signature=[TFLOAT4] * 5) 54 | def partial_qq_h_qQg(pa, pb, p1, p2, p3): 55 | """ Gluon radiated from leg pa-p1 """ 56 | pa13 = pa - (p1 + p3) 57 | sa13 = dot_product(pa13, pa13) 58 | sb2 = -2.0 * dot_product(pb, p2) 59 | prop = propagator_w(sa13) * propagator_w(sb2) 60 | coup = tf.square(mw / tf.pow(stw, 1.5)) 61 | rmcom = coup / prop 62 | 63 | # compute the amplitude 64 | zUp = zB(pa, p1, cross=True) * zA(p2, p1) + zB(pa, p3, cross=True) * zA(p2, p3) 65 | zFp = zB(pb, pa) # (zB(p1,p3)*zB(pa,p3,cross=True)) 66 | zAp = zFp * zUp 67 | 68 | zUm = zA(pa, p1, cross=True) * zB(pa, pb) + zB(pb, p3, cross=True) * zA(p1, p3) 69 | zFm = zA(p1, p2) # (zA(p1,p3)*zA(pa,p3,cross=True)) 70 | zAm = zFm * zUm 71 | 72 | s13 = 2.0 * dot_product(p1, p3) 73 | sa3 = 2.0 * dot_product(pa, p3) 74 | 75 | zamp2 = zAp * tf.math.conj(zAp) + zAm * tf.math.conj(zAm) 76 | amp = 2.0 * tf.math.real(zamp2) / s13 / sa3 77 | 78 | return amp * rmcom 79 | 80 | 81 | factor_re = float_me(4.0397470069216974e-004) 82 | 83 | @tf.function(input_signature=[TFLOAT4] * 5) 84 | def qq_h_r(pa, pb, p1, p2, p3): 85 | """ Computes q Q -> Q q g H (WW -> H) 86 | Q = p1 87 | q = p2 88 | g = p3 89 | """ 90 | r1 = partial_qq_h_qQg(pa, pb, p1, p2, p3) 91 | r2 = partial_qq_h_qQg(pb, pa, p2, p1, p3) 92 | return (r1 + r2) * factor_re 93 | 94 | 95 | @tf.function(input_signature=[TFLOAT4] * 3) 96 | def antenna_qgq(p1, p2, p3): 97 | """ Dipole for a q-g-q where p1 is an initial particle 98 | and p2 is the gluon """ 99 | s12 = sprod(p1, p2) 100 | s13 = sprod(p1, p3) 101 | s23 = sprod(p2, p3) 102 | s123 = s12 + s13 + s23 103 | 104 | FullAnt = s12 / s23 + s23 / 
s12 + 2.0 * s13 * s123 / s12 / s23 105 | return FullAnt / s123 106 | 107 | 108 | if __name__ == "__main__": 109 | nevents = 10 110 | print("Generate a tree level matrix element") 111 | random_lo = np.random.rand(nevents, 9) 112 | pa, pb, p1, p2, _, _, _ = psgen_2to3(random_lo) 113 | tree_level_res = qq_h_lo(pa, pb, p1, p2) 114 | random_lo = np.random.rand(nevents, 12) 115 | pa, pb, p1, p2, p3, _, _, _ = psgen_2to4(random_lo) 116 | real_level_res = qq_h_r(pa, pb, p1, p2, p3) 117 | -------------------------------------------------------------------------------- /benchmarks_and_tests/nlo_integration/parameters.py: -------------------------------------------------------------------------------- 1 | from pdfflow.configflow import float_me, DTYPE 2 | import tensorflow as tf 3 | 4 | # Settings 5 | TFLOAT1 = tf.TensorSpec(shape=[None], dtype=DTYPE) 6 | TFLOAT4 = tf.TensorSpec(shape=[4, None], dtype=DTYPE) 7 | TECH_CUT = 1e-8 # change this for debugging or to avoid going to too divergent zones 8 | 9 | # Physical parameters 10 | higgs_mass = float_me(125.0) 11 | mw = float_me(80.379) 12 | gw = float_me(2.085) 13 | stw = float_me(0.22264585341299603) 14 | muR2 = float_me(pow(higgs_mass, 2)) 15 | 16 | # Cuts 17 | pt2_cut = float_me(30 ** 2) 18 | rdistance = float_me(0.3 ** 2) 19 | deltaycut = float_me(4.5) 20 | m2jj_cut = float_me(600 ** 2) 21 | 22 | # Collision parameters 23 | s_in = float_me(pow(13 * 1000, 2)) 24 | # Flux factor 25 | fbGeV2 = float_me(389379365600) 26 | flux = fbGeV2 / 2.0 / s_in 27 | 28 | # Compute shat_min taking into account the higgs mass and the cuts 29 | # only pt cuts, only two jets are required to have pt > pt_cut 30 | shat_min = ( 31 | tf.square(higgs_mass) + 2.0 * pt2_cut + 4.0 * higgs_mass * tf.sqrt(pt2_cut) + 4.0 * TECH_CUT 32 | ) 33 | 34 | ### Debug parameters 35 | # Set the subtraction term on or off, useful to check that 36 | # indeed the R ME does not converge 37 | SUBTRACT = False 38 | 39 | # Uncomment when testing R ME at tree level for a 
more faithful smin 40 | # shat_min = tf.square(higgs_mass) + 6.0*pt2_cut + 6.0*higgs_mass*tf.sqrt(pt2_cut) 41 | -------------------------------------------------------------------------------- /benchmarks_and_tests/nlo_integration/phase_space.py: -------------------------------------------------------------------------------- 1 | """ 2 | Phase Space calcuation for VFH integration 3 | 4 | The wrapper interface functions are 5 | 6 | psgen_2to3 7 | psgen_2to4 8 | 9 | The convention of the 4 momenta is such that 10 | p[0] = E 11 | p[1:4] = px, py, pz 12 | """ 13 | from pdfflow.configflow import float_me, fone, fzero 14 | from pdfflow.functions import _condition_to_idx 15 | import numpy as np 16 | import tensorflow as tf 17 | from parameters import ( 18 | TFLOAT4, 19 | TECH_CUT, 20 | shat_min, 21 | s_in, 22 | higgs_mass, 23 | pt2_cut, 24 | rdistance, 25 | deltaycut, 26 | m2jj_cut, 27 | ) 28 | import spinors 29 | 30 | # Control flags 31 | UNIT_PHASE = False 32 | 33 | # Constants 34 | tfpi = float_me(3.1415926535897932385) 35 | costhmax = fone 36 | costhmin = float_me(-1.0) * fone 37 | phimin = fzero 38 | phimax = float_me(2.0) * tfpi 39 | 40 | # Jet separation 41 | @tf.function 42 | def rapidity_dif(p1, p2): 43 | """ Computes the rapidity difference between p1 and p2 44 | y = 1/2*log(E+z / E-z) 45 | """ 46 | num = (p1[0, :] + p1[3, :]) * (p2[0, :] - p2[3, :]) 47 | den = (p1[0, :] - p1[3, :]) * (p2[0, :] + p2[3, :]) 48 | return float_me(0.5) * tf.math.log(num / den) 49 | 50 | 51 | @tf.function 52 | def azimuth_dif(p1, p2): 53 | """ Compute the difference in the azimuthal angle between p1 and p2 54 | theta = atan(y/x) 55 | """ 56 | theta_1 = tf.math.atan2(p1[2, :], p1[1, :]) # + float_me(np.pi) 57 | theta_2 = tf.math.atan2(p2[2, :], p2[1, :]) # + float_me(np.pi) 58 | res = tf.abs(theta_1 - theta_2) 59 | res = tf.where(res > tfpi, phimax - res, res) 60 | return res 61 | 62 | 63 | @tf.function 64 | def jet_separation(pg, pj, pgt2, pjt2): 65 | """ Compute the jet separation 
in rapidity 66 | using anti-kt where pg is the target jet 67 | """ 68 | ydif = tf.square(rapidity_dif(pg, pj)) 69 | adif = tf.square(azimuth_dif(pg, pj)) 70 | delta_ij2 = ydif + adif 71 | kmin = fone # /tf.reduce_max([pgt2, pjt2], axis=0) 72 | res = kmin * delta_ij2 / rdistance 73 | return res 74 | 75 | 76 | @tf.function 77 | def pt2(fourp): 78 | """ Returns px^2 + py^2 """ 79 | return tf.square(fourp[1, :]) + tf.square(fourp[2, :]) 80 | 81 | 82 | @tf.function 83 | def pt2many(allpt): 84 | return tf.square(allpt[:, 1, :]) + tf.square(allpt[:, 2, :]) 85 | 86 | 87 | @tf.function 88 | def deltay_cut(p1, p2): 89 | deltay = tf.abs(rapidity_dif(p1, p2)) 90 | return deltay > deltaycut 91 | 92 | 93 | @tf.function 94 | def mjj_cut(p1, p2): 95 | val = abs_dot(p1, p2) * 2.0 96 | return val > m2jj_cut 97 | 98 | 99 | @tf.function 100 | def rcone_cut(p1, p2, p3=None): 101 | """ Check that the jets 1, 2 (and 3) are separated at least sqrt(rdistance) using the Cambridge-Aachen algorithm 102 | """ 103 | pt2_1 = pt2(p1) 104 | pt2_2 = pt2(p2) 105 | if p3 is not None: 106 | pt2_3 = pt2(p3) 107 | pt2_1, pt2_2, pt2_3 = 3 * [None] 108 | r12 = jet_separation(p1, p2, pt2_1, pt2_2) 109 | if p3 is not None: 110 | r13 = jet_separation(p1, p3, pt2_1, pt2_3) 111 | r23 = jet_separation(p2, p3, pt2_2, pt2_3) 112 | dij = tf.reduce_min([r12, r13, r23], axis=0) 113 | dib = fone # tf.reduce_max([pt2_1, pt2_2, pt2_3], axis=0) 114 | else: 115 | dij = r12 116 | dib = fone # tf.reduce_max([pt2_1, pt2_2], axis=0) 117 | return dij > dib 118 | 119 | 120 | @tf.function 121 | def pt_cut_2of2(p1, p2): 122 | """ Ensures that both p1 and p2 pass the pt_cut 123 | returns a boolean mask and the list of true indices 124 | """ 125 | pt21 = pt2(p1) 126 | pt22 = pt2(p2) 127 | p1pass = pt21 > pt2_cut 128 | p2pass = pt22 > pt2_cut 129 | deltaypass = deltay_cut(p1, p2) 130 | mjjpass = mjj_cut(p1, p2) 131 | jetpass = tf.reduce_all([p1pass, p2pass, deltaypass], axis=0) 132 | stripe, idx = _condition_to_idx(jetpass, 
mjjpass) 133 | return stripe, idx, tf.reduce_max([pt21, pt22], axis=0) 134 | 135 | 136 | @tf.function 137 | def abs_dot(a, b): 138 | return tf.abs(spinors.dot_product(a, b)) 139 | 140 | 141 | @tf.function 142 | def invariant_cut(pa, pb, p1, p2, p3): 143 | """ Ensures that all invariants are above the technical cut 144 | in order to avoid instabilities """ 145 | shat_cut = abs_dot(pa, pb) * TECH_CUT / 2.0 146 | sa1 = abs_dot(pa, p1) > shat_cut 147 | sa2 = abs_dot(pa, p2) > shat_cut 148 | sa3 = abs_dot(pa, p3) > shat_cut 149 | sb1 = abs_dot(pb, p1) > shat_cut 150 | sb2 = abs_dot(pb, p2) > shat_cut 151 | sb3 = abs_dot(pb, p3) > shat_cut 152 | s12 = abs_dot(p1, p2) > shat_cut 153 | s13 = abs_dot(p1, p3) > shat_cut 154 | s23 = abs_dot(p2, p3) > shat_cut 155 | return tf.reduce_all([sa1, sa2, sa3, sb1, sb2, sb3, s12, s13, s23], 0) 156 | 157 | 158 | @tf.function 159 | def pt_cut_2of3(pa, pb, p1, p2, p3): 160 | """ Ensures that at least two of the three jets 161 | pass the pt cut 162 | """ 163 | # Put all pt on a stack 164 | all_p = tf.stack([p1, p2, p3]) 165 | # First check that all 3 pass the pt cut 166 | all_pt2 = pt2many(all_p) 167 | # Now select the two hardest jets and apply the rest of the cuts on those two 168 | pts, idx = tf.math.top_k(tf.transpose(all_pt2), k=2, sorted=True) 169 | ptpass = pts[:, 1] > pt2_cut 170 | pjsT = tf.gather(tf.transpose(all_p, perm=[2, 0, 1]), idx, batch_dims=1) 171 | pjs = tf.transpose(pjsT, perm=[1, 2, 0]) 172 | 173 | # Check that the two hardest jets pass the rapidity delta cut 174 | ypass = deltay_cut(pjs[0], pjs[1]) 175 | # and the mjj cut 176 | mjjpass = mjj_cut(pjs[0], pjs[1]) 177 | 178 | # Check that all two jets pass the rdistance check 179 | rpass = rcone_cut(pjs[0], pjs[1]) 180 | 181 | # Ensure that all invariants are above some threshold 182 | tech_cut_pass = invariant_cut(pa, pb, p1, p2, p3) 183 | jetpass = tf.reduce_all([rpass, tech_cut_pass, ypass, mjjpass], axis=0) 184 | stripe, idx = _condition_to_idx(ptpass, jetpass) 185 
| return stripe, idx, pts[:, 0] 186 | 187 | 188 | @tf.function 189 | def pt_cut_3of3(pa, pb, p1, p2, p3): 190 | """ Ensures that both p1 and p2 pass the pt_cut 191 | returns a boolean mask and the list of true indices 192 | """ 193 | # Put all pt on a stack 194 | all_p = tf.stack([p1, p2, p3]) 195 | # First check that all 3 pass the pt cut 196 | all_pt2 = pt2many(all_p) 197 | ptpass = tf.reduce_all(all_pt2 > pt2_cut, axis=0) 198 | # Now select the two hardest jets and apply the rest of the cuts on those two 199 | pts, idx = tf.math.top_k(tf.transpose(all_pt2), k=2, sorted=False) 200 | pjsT = tf.gather(tf.transpose(all_p, perm=[2, 0, 1]), idx, batch_dims=1) 201 | pjs = tf.transpose(pjsT, perm=[1, 2, 0]) 202 | 203 | # Check that the two hardest jets pass the rapidity delta cut 204 | ypass = deltay_cut(pjs[0], pjs[1]) 205 | # and the mjj cut 206 | mjjpass = mjj_cut(pjs[0], pjs[1]) 207 | 208 | # Check that all jets pass the rdistance check 209 | rpass = rcone_cut(p1, p2, p3) 210 | 211 | # Ensure that all invariants are above some threshold 212 | tech_cut_pass = invariant_cut(pa, pb, p1, p2, p3) 213 | jetpass = tf.reduce_all([rpass, tech_cut_pass, ypass, mjjpass], axis=0) 214 | stripe, idx = _condition_to_idx(ptpass, jetpass) 215 | return stripe, idx, pts[:, 0] 216 | 217 | 218 | # Utility functions 219 | @tf.function 220 | def dlambda(a, b, c): 221 | """ Computes dlamba(a,b,c) """ 222 | return a * a + b * b + c * c - 2.0 * (a * b + a * c + b * c) 223 | 224 | 225 | @tf.function 226 | def pick_within(r, valmin, valmax): 227 | """ Get a random value between valmin and valmax 228 | as given by the random number r (batch_size, 1) 229 | the outputs are val (batch_size, 1) and jac (batch_size, 1) 230 | 231 | Linear sampling 232 | 233 | Parameters 234 | ---------- 235 | r: random val 236 | valmin: minimum value 237 | valmax: maximum value 238 | Returns 239 | ------- 240 | val: chosen random value 241 | jac: jacobian of the transformation 242 | """ 243 | delta_val = valmax - 
valmin 244 | val = valmin + r * delta_val 245 | return val, delta_val 246 | 247 | 248 | @tf.function 249 | def log_pick(r, valmin, valmax): 250 | """ Get a random value between valmin and valmax 251 | as given by the random number r (batch_size, 1) 252 | the outputs are val (batch_size, 1) and jac (batch_size, 1) 253 | Logarithmic sampling 254 | 255 | Parameters 256 | ---------- 257 | r: random val 258 | valmin: minimum value 259 | valmax: maximum value 260 | Returns 261 | ------- 262 | val: chosen random value 263 | jac: jacobian of the transformation 264 | """ 265 | ratio_val = valmax / valmin 266 | val = valmin * tf.pow(ratio_val, r) 267 | jac = val * tf.math.log(ratio_val) 268 | return val, jac 269 | 270 | 271 | ############################################################## 272 | 273 | 274 | @tf.function 275 | def get_x1x2(xarr): 276 | """Receives two random numbers and return the 277 | value of the invariant mass of the center of mass 278 | as well as the jacobian of the x1,x2 -> tau-y transformation 279 | and the values of x1 and x2. 
280 | 281 | The xarr array is of shape (batch_size, 2) 282 | """ 283 | taumin = shat_min / s_in 284 | taumax = fone 285 | # Get tau logarithmically 286 | tau, wgt = log_pick(xarr[:, 0], taumin, taumax) 287 | x1 = tf.pow(tau, xarr[:, 1]) 288 | x2 = tau / x1 289 | wgt *= -1.0 * tf.math.log(tau) 290 | shat = x1 * x2 * s_in 291 | return shat, wgt, x1, x2 292 | 293 | 294 | @tf.function 295 | def sample_linear_all(x, nfspartons=2): 296 | """ Receives an array of random numbers and samples the 297 | invariant masses of the particles as well as the angles 298 | of the particles 299 | 300 | Uses 3*nfspartons + 2 random numbers (from index 0 to 3*nfspartons + 1) 301 | 302 | Parameters 303 | ---------- 304 | x: tensor (nevents,) 305 | Input random numbers from the integration routien 306 | nfspartons: int 307 | Number of partons in the final state 308 | 309 | Returns 310 | ------- 311 | x1: tensor (nevents,) 312 | Momentum fraction of incoming parton 1 313 | x2: tensor (nevents,) 314 | Momentum fraction of incoming parton 1 315 | shat: tensor (nevents,) 316 | Incoming invariant mass 317 | shiggs: tensor (nevents,) 318 | Invariant mass of the higgs boson 319 | fspartons: list( list( (nevents, nevents, nevents) ) ) 320 | For each of the detachments returns the invariant mass 321 | and the angles of the decaya 322 | i.e., for a 2->3 + H phase space it will return 2 items: 323 | item 1: [s123, costh1,23, phi1,23] 324 | item 2: [s23, costh2,3, phi2,3] 325 | """ 326 | # Sample shat and the incoming momentum fractions 327 | shat, wgt, x1, x2 = get_x1x2(x[:, 0:2]) 328 | if UNIT_PHASE: 329 | shat = s_in * tf.ones_like(shat) 330 | wgt = tf.ones_like(wgt) 331 | x1 = tf.ones_like(x1) 332 | x2 = tf.ones_like(x2) 333 | 334 | smin = TECH_CUT 335 | smax = shat 336 | 337 | fspartons = [] 338 | # Detach the massive boson 339 | shiggs = tf.square(higgs_mass) 340 | wgt *= tfpi * (16.0 * tfpi) 341 | # And the angles of its decay 342 | # (which for now are not to be used, but they 343 | # do 
affect the weight) 344 | # this consumes random numbers 2, 3, 4 345 | wgt *= costhmax - costhmin 346 | wgt *= phimax - phimin 347 | wgt *= fone / (2.0 * tfpi * 32.0 * tf.square(tfpi)) 348 | # the remaining mass in the new smax 349 | roots = tf.sqrt(shat) 350 | smax = tf.square(roots - higgs_mass) 351 | # Now loop over the final state partons 352 | for i in range(1, nfspartons): 353 | j = i * 3 - 1 354 | prev_smax = smax 355 | wgt = tf.where(smin + TECH_CUT > prev_smax, fzero, wgt) 356 | smax, jac = pick_within(x[:, j], smin, prev_smax) 357 | wgt *= jac 358 | cos12, jac = pick_within(x[:, j + 1], costhmin, costhmax) 359 | wgt *= jac 360 | phi12, jac = pick_within(x[:, j + 2], phimin, phimax) 361 | wgt *= jac 362 | wgt *= fone / (2.0 * tfpi) 363 | fspartons.append((smax, cos12, phi12)) 364 | wgt *= fone / (32.0 * tf.square(tfpi)) 365 | if i > 1: 366 | wgt *= (prev_smax - smax) / prev_smax 367 | return x1, x2, shat, shiggs, fspartons, wgt 368 | 369 | 370 | @tf.function 371 | def pcommon2to2(r, shat, s1, s2): 372 | """ Generates a 2 to 2 phase space in the c.o.m. 
of pa+pb 373 | 374 | pa + pb ----> p1 + p2 375 | 376 | Where: 377 | (pa + pb)^2 = shat 378 | p1^2 = s1 379 | p2^2 = s2 380 | 381 | Parameters 382 | ---------- 383 | r: tensor (nevents) 384 | random number to generate a scattering angle 385 | shat: tensor(nevents) 386 | invariant mass of the incoming system 387 | s1: tensor(nevents) 388 | invariant mass of the outgoing particle 1 389 | s2: tensor(nevents) 390 | invariant mass of the outgoing particle 2 391 | 392 | Returns 393 | ------- 394 | pa: tensor (4, nevents) 395 | incoming 4-momenta of parton a 396 | pb: tensor (4, nevents) 397 | incoming 4-momenta of parton b 398 | p1: tensor (4, nevents) 399 | outgoing 4-momenta of parton 1 400 | p2: tensor (4, nevents) 401 | outgoing 4-momenta of parton 2 402 | wgt: tensor (nevents,) 403 | weight of the generated phase space point 404 | """ 405 | roots = tf.sqrt(shat) 406 | Eab = roots / 2.0 407 | pin = Eab 408 | E1 = (shat + s1 - s2) / 2.0 / roots 409 | E2 = (shat + s2 - s1) / 2.0 / roots 410 | pout = tf.sqrt(dlambda(shat, s1, s2)) / 2.0 / roots 411 | # Pick cosine p1-beam 412 | ta1min = s1 - 2.0 * Eab * E1 - 2.0 * pin * pout 413 | ta1max = s1 - 2.0 * Eab * E1 + 2.0 * pin * pout 414 | ta1, wgt = pick_within(r, -ta1max, -ta1min) 415 | costh = (-ta1 - s1 + 2.0 * Eab * E1) / (2.0 * pin * pout) 416 | # Check that the cosine is not greater than 1 at this point 417 | # nor less than -1 418 | sinth = tf.sqrt(fone - tf.square(costh)) 419 | wgt *= fone / (16.0 * tfpi * tfpi * shat) 420 | 421 | # Since there are rotational symmetry around the beam axis 422 | # we can set the phi angle to 0.0 423 | # cosphi = 1.0 424 | # sinphi = 0.0 425 | wgt *= 2.0 * tfpi 426 | 427 | # Now generate all the momenta 428 | zeros = tf.zeros_like(r) 429 | pa = tf.stack([Eab, zeros, zeros, pin]) 430 | pb = tf.stack([Eab, zeros, zeros, -pin]) 431 | 432 | px = pout * sinth # cosphi = 1.0 433 | py = zeros # sinphi = 0.0 434 | pz = pout * costh 435 | 436 | p1 = tf.stack([E1, -1.0 * px, -1.0 * py, -1.0 * 
pz]) 437 | p2 = tf.stack([E2, px, py, pz]) 438 | 439 | return pa, pb, p1, p2, wgt 440 | 441 | 442 | @tf.function 443 | def pcommon1to2(sin, pin, s1, s2, costh, phi): 444 | """ Generates a 1 -> 2 phase space in the c.o.m. of 1 445 | 446 | p_in -> p_1 + p_2 447 | 448 | Parameters 449 | ---------- 450 | sin: tensor(nevents,) 451 | ivariant mass of particle in 452 | pin: tensor(4, nevents,) 453 | 4-momenta of particle in 454 | s1: tensor(4, nevents,) 455 | invariant mass of particle 1 456 | s2: tensor(4, nevents,) 457 | invariant mass of particle 2 458 | costh: tensor(nevent,) 459 | theta angle of the 1->2 decay 460 | phi: tensor(nevent,) 461 | phi angle of the 1->2 decay 462 | 463 | Returns 464 | ------ 465 | p1: tensor(4, nevents) 466 | 4-momenta of particle 1 467 | p2: tensor(4, nevents) 468 | 4-momenta of particle 2 469 | """ 470 | sinth = tf.sqrt(fone - tf.square(costh)) 471 | cosphi = tf.cos(phi) 472 | sinphi = tf.sin(phi) 473 | 474 | roots = tf.sqrt(sin) 475 | E1 = (sin + s1 - s2) / 2.0 / roots 476 | E2 = (sin + s2 - s1) / 2.0 / roots 477 | roots1 = tf.sqrt(s1) 478 | pp = tf.sqrt((E1 - roots1) * (E1 + roots1)) 479 | 480 | px = pp * sinth * cosphi 481 | py = pp * sinth * sinphi 482 | pz = pp * costh 483 | 484 | p1 = tf.stack([E1, px, py, pz]) 485 | p2 = tf.stack([E2, -px, -py, -pz]) 486 | 487 | # Now boost both p1 and p2 back to the lab frame 488 | # Construct the boosting matrix 489 | gamma = pin[0, :] / roots 490 | vx = -pin[1, :] / pin[0, :] 491 | vy = -pin[2, :] / pin[0, :] 492 | vz = -pin[3, :] / pin[0, :] 493 | v2 = vx * vx + vy * vy + vz * vz 494 | 495 | omgdv = (gamma - fone) / v2 496 | bmatE = tf.stack([gamma, -gamma * vx, -gamma * vy, -gamma * vz]) 497 | bmatx = tf.stack([-gamma * vx, omgdv * vx * vx + fone, omgdv * vx * vy, omgdv * vx * vz]) 498 | bmaty = tf.stack([-gamma * vy, omgdv * vy * vx, omgdv * vy * vy + fone, omgdv * vy * vz]) 499 | bmatz = tf.stack([-gamma * vz, omgdv * vz * vx, omgdv * vz * vy, omgdv * vz * vz + fone]) 500 | bmat = 
tf.stack([bmatE, bmatx, bmaty, bmatz]) 501 | 502 | # Now unboost 503 | bmatt = tf.transpose(bmat) 504 | p1t = tf.transpose(p1) 505 | p2t = tf.transpose(p2) 506 | up1t = tf.keras.backend.batch_dot(p1t, bmatt) 507 | up2t = tf.keras.backend.batch_dot(p2t, bmatt) 508 | 509 | return tf.transpose(up1t), tf.transpose(up2t) 510 | 511 | 512 | ##### PS wrapper functions 513 | @tf.function 514 | def psgen_2to3(xarr): # tree level phase space 515 | """ Generates a 2 -> H + 2j phase space 516 | 517 | a + b -> H + 1 + 2 518 | 519 | where 1 and 2 are massless and H is the Higgs boson. 520 | 521 | Uses 9 random numbers 522 | """ 523 | x1, x2, shat, sh, fspartons, wgt = sample_linear_all(xarr[:, 1:], nfspartons=2) 524 | s12, cos12, phi12 = fspartons[0] 525 | pa, pb, _, p12, jac = pcommon2to2(xarr[:, 0], shat, sh, s12) 526 | p1, p2 = pcommon1to2(s12, p12, fzero, fzero, cos12, phi12) 527 | wgt *= jac 528 | return pa, pb, p1, p2, x1, x2, wgt 529 | 530 | 531 | def psgen_2to4(xarr): # Real radiation phase space 532 | x1, x2, shat, sh, fspartons, wgt = sample_linear_all(xarr[:, 1:], nfspartons=3) 533 | s123, cos123, phi123 = fspartons[0] 534 | pa, pb, _, p123, jac = pcommon2to2(xarr[:, 0], shat, sh, s123) 535 | s23, cos23, phi23 = fspartons[1] 536 | p1, p23 = pcommon1to2(s123, p123, fzero, s23, cos123, phi123) 537 | p2, p3 = pcommon1to2(s23, p23, fzero, fzero, cos23, phi23) 538 | wgt *= jac 539 | return pa, pb, p1, p2, p3, x1, x2, wgt 540 | 541 | 542 | ##### Mappings 543 | @tf.function(input_signature=[TFLOAT4] * 3) 544 | def map_3to2(pa, p1, p3): 545 | """ Maps a 2 -> 3 ps into a 2 -> 2 ps 546 | where particle 3 goes unresolved between a and 1 547 | """ 548 | omx2 = spinors.dot_product(p1, p3) / (spinors.dot_product(pa, p1) + spinors.dot_product(pa, p3)) 549 | xx2 = 1 - omx2 550 | newpa = xx2 * pa 551 | newp1 = p1 + p3 - omx2 * pa 552 | return newpa, newp1 553 | 554 | 555 | if __name__ == "__main__": 556 | nevents = 10 557 | print("Generate a tree level phase space point") 558 | 
random_lo = np.random.rand(nevents, 9) 559 | momentum_set_lo = psgen_2to3(random_lo) 560 | print("Generate a real level phase space point") 561 | random_r = np.random.rand(nevents, 12) 562 | momentum_set_r = psgen_2to4(random_r) 563 | print("Map the momentum set from p5 to p4") 564 | pa = momentum_set_r[0] 565 | p1 = momentum_set_r[2] 566 | p3 = momentum_set_r[4] 567 | npa, np1 = map_3to2(pa, p1, p3 * fzero) 568 | np.testing.assert_allclose(pa, npa) 569 | np.testing.assert_allclose(p1, np1) 570 | -------------------------------------------------------------------------------- /benchmarks_and_tests/nlo_integration/spinors.py: -------------------------------------------------------------------------------- 1 | """ 2 | Spinor calculations 3 | """ 4 | 5 | import numpy as np 6 | from pdfflow.configflow import fone, fzero 7 | import tensorflow as tf 8 | from parameters import TFLOAT4, s_in 9 | 10 | zi = tf.complex(fzero, fone) 11 | 12 | 13 | @tf.function(input_signature=[TFLOAT4]) 14 | def calc_zt2(pa): 15 | """ Transverse momentum squared along the y axis: 16 | returns px^2 + pz^2 17 | """ 18 | bb = tf.square(pa[1, :]) + tf.square(pa[3, :]) 19 | return bb 20 | 21 | 22 | @tf.function(input_signature=[TFLOAT4]) 23 | def calc_ap(pa): 24 | """ compute py + E """ 25 | at2 = calc_zt2(pa) 26 | ap = pa[2, :] + pa[0, :] 27 | conditional_p = at2 / (pa[0, :] - pa[2, :]) 28 | ap = tf.where(ap < pa[0, :] / 2.0, conditional_p, ap) 29 | return tf.complex(ap, fzero) 30 | 31 | 32 | def zA(pa, pb, cross=False): # cross == when only one of (pa,pb) is initial-state 33 | """ spinor """ 34 | ap = calc_ap(pa) 35 | bp = calc_ap(pb) 36 | ra = tf.complex(pa[1, :], -pa[3, :]) * bp 37 | rb = tf.complex(pb[1, :], -pb[3, :]) * ap 38 | zval = zi * (ra - rb) / tf.sqrt(ap * bp) 39 | if not cross: 40 | return zval 41 | return zval * zi 42 | 43 | 44 | def zB(pa, pb, cross=False): 45 | """ [ab] spinor """ 46 | return tf.math.conj(zA(pa, pb, cross=cross)) 47 | 48 | 49 | 
@tf.function(input_signature=[TFLOAT4] * 2) 50 | def dot_product(par, pbr): 51 | pa = tf.transpose(par) 52 | pb = tf.transpose(pbr) 53 | ener = pa[:, 0] * pb[:, 0] 54 | mome = tf.keras.backend.batch_dot(pa[:, 1:4], pb[:, 1:4])[:, 0] 55 | return ener - mome 56 | 57 | 58 | @tf.function(input_signature=[TFLOAT4, TFLOAT4]) 59 | def zprod(pa, pb): 60 | return tf.math.real(zA(pa, pb) * zB(pa, pb)) 61 | 62 | 63 | @tf.function(input_signature=[TFLOAT4, TFLOAT4]) 64 | def sprod(pa, pb): 65 | pp = pa + pb 66 | return dot_product(pp, pp) 67 | 68 | 69 | if __name__ == "__main__": 70 | from phase_space import psgen_2to3, psgen_2to4 71 | 72 | nevents = 10 73 | for n in [2, 3]: 74 | if n == 2: 75 | print("Generate a tree level phase space point") 76 | random_r = np.random.rand(nevents, 12) 77 | pa, pb, p1, p2, p3, x1, x2, _ = psgen_2to4(random_r) 78 | elif n == 3: 79 | print("Generate a real level phase space point") 80 | random_lo = np.random.rand(nevents, 9) 81 | pa, pb, p1, p2, x1, x2, _ = psgen_2to3(random_lo) 82 | # Ensure that (pa+pb)^2 is shat in different ways 83 | shat = s_in * x1 * x2 84 | shat_sprod = sprod(pa, pb) 85 | shat_zprod = zprod(pa, pb) 86 | zA_ab = zA(pa, pb) 87 | zB_ab = zB(pa, pb) 88 | zhat = tf.math.real(zA_ab * zB_ab) 89 | print("Testing II") 90 | np.testing.assert_allclose(shat, shat_sprod) 91 | np.testing.assert_allclose(shat, shat_zprod) 92 | np.testing.assert_allclose(shat, zhat) 93 | # Check sprod and zprod do the same for several cases 94 | print("Testing IF") 95 | np.testing.assert_allclose(zprod(pa, p1), sprod(pa, p1)) 96 | np.testing.assert_allclose(zprod(pa, p2), sprod(pa, p2)) 97 | np.testing.assert_allclose(zprod(pb, p1), sprod(pb, p1)) 98 | np.testing.assert_allclose(zprod(pb, p2), sprod(pb, p2)) 99 | if n == 3: 100 | np.testing.assert_allclose(zprod(pa, p3), sprod(pa, p3)) 101 | np.testing.assert_allclose(zprod(pb, p3), sprod(pb, p3)) 102 | print("Testing FF") 103 | np.testing.assert_allclose(zprod(p1, p2), sprod(p1, p2)) 104 | if n == 3: 
105 | np.testing.assert_allclose(zprod(p1, p3), sprod(p1, p3)) 106 | np.testing.assert_allclose(zprod(p2, p3), sprod(p2, p3)) 107 | -------------------------------------------------------------------------------- /benchmarks_and_tests/singletop_lo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import time 4 | import numpy as np 5 | import subprocess as sp 6 | from pdfflow.configflow import DTYPEINT as pint 7 | import tensorflow as tf 8 | from vegasflow.vflow import vegas_wrapper 9 | from pdfflow.pflow import mkPDF 10 | from vegasflow.configflow import DTYPE, DTYPEINT, float_me 11 | from pdfflow.configflow import DTYPE as pfloat 12 | 13 | # MC integration setup 14 | dim = 3 15 | ncalls = np.int32(5e6) 16 | n_iter = 5 17 | pdfset = "NNPDF31_nlo_as_0118/0" 18 | epsilon = float_me(1e-7) 19 | 20 | # Physics setup 21 | # top mass 22 | mt = tf.constant(173.2, dtype=DTYPE) 23 | # center of mass energy 24 | sqrts = tf.constant(8000, dtype=DTYPE) 25 | # minimum allowed center of mass energy 26 | sqrtsmin = tf.constant(173.2, dtype=DTYPE) 27 | # W-boson mass 28 | mw = tf.constant(80.419, dtype=DTYPE) 29 | # gaw 30 | gaw = tf.constant(2.1054, dtype=DTYPE) 31 | # GF 32 | gf = tf.constant(1.16639e-5, dtype=DTYPE) 33 | # load pdf 34 | DIRNAME = sp.run(['lhapdf-config','--datadir'], stdout=sp.PIPE, universal_newlines=True).stdout.strip('\n') + '/' 35 | pdf = mkPDF(pdfset, DIRNAME) 36 | 37 | 38 | # nx = 100 39 | # examplex = np.random.rand(nx) 40 | # exampleq = np.linspace(5,nx, nx) 41 | # r = pdf.xfxQ2([5,4,5], examplex, exampleq) 42 | # import ipdb 43 | # ipdb.set_trace() 44 | 45 | 46 | # auxiliary variables 47 | colf_bt = tf.constant(9, dtype=DTYPE) 48 | mt2 = tf.square(mt) 49 | s = tf.square(sqrts) 50 | s2 = tf.square(s) 51 | smin = tf.square(sqrtsmin) 52 | bmax = tf.sqrt(1 - smin / s) 53 | conv = tf.constant(0.3893793e9, dtype=DTYPE) # GeV to pb conversion 54 | gaw2 = tf.square(gaw) 55 | mw2 = 
tf.square(mw) 56 | gw4 = tf.square(4 * np.sqrt(2) * mw2 * gf) 57 | 58 | 59 | @tf.function 60 | def get_x1x2(xarr): 61 | """Remapping [0,1] to tau-y""" 62 | # building shat 63 | b = bmax * xarr[:, 0] 64 | onemb2 = 1 - tf.square(b) 65 | shat = smin / onemb2 66 | tau = shat / s 67 | 68 | # building rapidity 69 | ymax = -0.5 * tf.math.log(tau) 70 | y = ymax * (2 * xarr[:, 1] - 1) 71 | 72 | # building jacobian 73 | jac = 2 * tau * b * bmax / onemb2 # tau 74 | jac *= 2 * ymax # y 75 | 76 | # building x1 and x2 77 | sqrttau = tf.sqrt(tau) 78 | expy = tf.exp(y) 79 | x1 = sqrttau * expy 80 | x2 = sqrttau / expy 81 | 82 | return shat, jac, x1, x2 83 | 84 | 85 | @tf.function 86 | def make_event(xarr): 87 | """Generate event kinematics""" 88 | shat, jac, x1, x2 = get_x1x2(xarr) 89 | 90 | ecmo2 = tf.sqrt(shat) / 2 91 | cc = ecmo2 * (1 - mt2 / shat) 92 | cos = 1 - 2 * xarr[:, 2] 93 | sinxi = cc * tf.sqrt(1 - cos * cos) 94 | cosxi = cc * cos 95 | zeros = tf.zeros_like(ecmo2, dtype=DTYPE) 96 | 97 | p0 = tf.stack([ecmo2, zeros, zeros, ecmo2]) 98 | p1 = tf.stack([ecmo2, zeros, zeros, -ecmo2]) 99 | p2 = tf.stack([cc, sinxi, zeros, cosxi]) 100 | p3 = tf.stack([tf.sqrt(cc * cc + mt2), -sinxi, zeros, -cosxi]) 101 | 102 | psw = (1 - mt2 / shat) / (8 * np.pi) # psw 103 | psw *= jac # jac for tau, y 104 | flux = 1 / (2 * shat) # flux 105 | 106 | return psw, flux, p0, p1, p2, p3, x1, x2 107 | 108 | 109 | @tf.function 110 | def dot(p1, p2): 111 | """Dot product 4-momenta""" 112 | e = p1[0] * p2[0] 113 | px = p1[1] * p2[1] 114 | py = p1[2] * p2[2] 115 | pz = p1[3] * p2[3] 116 | return e - px - py - pz 117 | 118 | 119 | @tf.function 120 | def u0(p, i): 121 | """Compute the dirac spinor u0""" 122 | 123 | zeros = tf.zeros_like(p[0], dtype=DTYPE) 124 | czeros = tf.complex(zeros, zeros) 125 | ones = tf.ones_like(p[0], dtype=DTYPE) 126 | 127 | # case 1) py == 0 128 | rz = p[3] / (p[0] + epsilon) 129 | theta1 = tf.where(rz > 0, zeros, rz) 130 | theta1 = tf.where(rz < 0, np.pi * ones, theta1) 131 | 
phi1 = zeros 132 | 133 | # case 2) py != 0 134 | rrr = rz 135 | rrr = tf.where(rz < -1, -ones, rz) 136 | rrr = tf.where(rz > 1, ones, rrr) 137 | theta2 = tf.acos(rrr) 138 | rx = p[1] / p[0] 139 | phi2 = zeros 140 | phi2 = tf.where(rx < 0, np.pi * ones, phi2) 141 | 142 | # combine 143 | theta = tf.where(p[1] == 0, theta1, theta2) 144 | phi = tf.where(p[1] == 0, phi1, phi2) 145 | 146 | prefact = tf.complex(float_me(np.sqrt(2)), zeros) * tf.sqrt(tf.complex(p[0], zeros)) 147 | if i == 1: 148 | a = tf.complex(tf.cos(theta / 2), zeros) 149 | b = tf.complex(tf.sin(theta / 2), zeros) 150 | u0_0 = prefact * a 151 | u0_1 = prefact * b * tf.complex(tf.cos(phi), tf.sin(phi)) 152 | u0_2 = czeros 153 | u0_3 = czeros 154 | else: 155 | a = tf.complex(tf.sin(theta / 2), zeros) 156 | b = tf.complex(tf.cos(theta / 2), zeros) 157 | u0_0 = czeros 158 | u0_1 = czeros 159 | u0_2 = prefact * a * tf.complex(tf.cos(phi), -tf.sin(phi)) 160 | u0_3 = -prefact * b 161 | 162 | return tf.stack([u0_0, u0_1, u0_2, u0_3]) 163 | 164 | 165 | @tf.function 166 | def ubar0(p, i): 167 | """Compute the dirac spinor ubar0""" 168 | 169 | zeros = tf.zeros_like(p[0], dtype=DTYPE) 170 | czeros = tf.complex(zeros, zeros) 171 | ones = tf.ones_like(p[0], dtype=DTYPE) 172 | 173 | # case 1) py == 0 174 | rz = p[3] / (p[0] + epsilon) 175 | theta1 = tf.where(rz > 0, zeros, rz) 176 | theta1 = tf.where(rz < 0, np.pi * ones, theta1) 177 | phi1 = zeros 178 | 179 | # case 2) py != 0 180 | rrr = rz 181 | rrr = tf.where(rz < -1, -ones, rrr) 182 | rrr = tf.where(rz > 1, ones, rrr) 183 | theta2 = tf.acos(rrr) 184 | rrr = p[1] / p[0] / tf.sin(theta2) 185 | rrr = tf.where(rrr < -1, -ones, rrr) 186 | rrr = tf.where(rrr > 1, ones, rrr) 187 | phi2 = tf.acos(rrr) 188 | ry = p[2] / p[0] 189 | phi2 = tf.where(ry < 0, -phi2, phi2) 190 | 191 | # combine 192 | theta = tf.where(p[1] == 0, theta1, theta2) 193 | phi = tf.where(p[1] == 0, phi1, phi2) 194 | 195 | prefact = tf.complex(float_me(np.sqrt(2)), zeros) * tf.sqrt(tf.complex(p[0], 
zeros)) 196 | if i == -1: 197 | a = tf.complex(tf.sin(theta / 2), zeros) 198 | b = tf.complex(tf.abs(tf.cos(theta / 2)), zeros) 199 | ubar0_0 = prefact * a * tf.complex(tf.cos(phi), tf.sin(phi)) 200 | ubar0_1 = -prefact * b 201 | ubar0_2 = czeros 202 | ubar0_3 = czeros 203 | else: 204 | a = tf.complex(tf.cos(theta / 2), zeros) 205 | b = tf.complex(tf.sin(theta / 2), zeros) 206 | ubar0_0 = czeros 207 | ubar0_1 = czeros 208 | ubar0_2 = prefact * a 209 | ubar0_3 = prefact * b * tf.complex(tf.cos(phi), -tf.sin(phi)) 210 | 211 | return tf.stack([ubar0_0, ubar0_1, ubar0_2, ubar0_3]) 212 | 213 | 214 | @tf.function 215 | def za(p1, p2): 216 | ket = u0(p2, 1) 217 | bra = ubar0(p1, -1) 218 | return tf.reduce_sum(bra * ket, axis=0) 219 | 220 | 221 | @tf.function 222 | def zb(p1, p2): 223 | ket = u0(p2, -1) 224 | bra = ubar0(p1, 1) 225 | return tf.reduce_sum(bra * ket, axis=0) 226 | 227 | 228 | @tf.function 229 | def sprod(p1, p2): 230 | a = za(p1, p2) 231 | b = zb(p2, p1) 232 | return tf.math.real(a * b) 233 | 234 | 235 | @tf.function 236 | def qqxtbx(p0, p1, p2, p3): 237 | """Evaluate 0 -> qqbarttbar""" 238 | pw2 = sprod(p0, p1) 239 | wprop = tf.square(pw2 - mw2) + mw2 * gaw2 240 | a = sprod(p0, p2) 241 | b = sprod(p0, p3) 242 | c = sprod(p2, p3) 243 | d = sprod(p3, p1) 244 | return tf.abs((a + mt2 * b / c) * d) * colf_bt / wprop * gw4 / 36 245 | 246 | 247 | @tf.function 248 | def evaluate_matrix_element_square(p0, p1, p2, p3): 249 | """Evaluate Matrix Element square""" 250 | 251 | # massless projection 252 | k = mt2 / dot(p3, p0) / 2 253 | p3 -= p0 * k 254 | 255 | # channels evaluation 256 | c1 = qqxtbx(p2, -p1, p3, -p0) # BBARQBARQT +2 -1 +3 -0 257 | c2 = qqxtbx(-p1, p2, p3, -p0) # BBARQQBART -1 +2 +3 -0 258 | 259 | return tf.stack([c1, c2]) 260 | 261 | 262 | @tf.function 263 | def build_luminosity(x1, x2): 264 | """Single-top t-channel luminosity""" 265 | q2s = tf.fill(tf.shape(x1), mt2) 266 | p5x1 = tf.cast(pdf.xfxQ2([5], x1, q2s), dtype=DTYPE) 267 | pNx2 = 
tf.cast(pdf.xfxQ2([2, 4, -1, -3], x2, q2s), dtype=DTYPE) 268 | prod = tf.transpose(tf.reshape(p5x1, (-1, 1))*pNx2) 269 | lumis = tf.math.segment_sum(prod, tf.constant([0,0,1,1])) 270 | return lumis / x1 / x2 271 | 272 | @tf.function 273 | def singletop(xarr, n_dim=None, **kwargs): 274 | """Single-top (t-channel) at LO""" 275 | psw, flux, p0, p1, p2, p3, x1, x2 = make_event(xarr) 276 | wgts = evaluate_matrix_element_square(p0, p1, p2, p3) 277 | lumis = build_luminosity(x1, x2) 278 | lumi_me2 = tf.reduce_sum(2 * lumis * wgts, axis=0) 279 | return lumi_me2 * psw * flux * conv 280 | 281 | 282 | if __name__ == "__main__": 283 | """Testing a basic integration""" 284 | print(f"VEGAS MC, ncalls={ncalls}:") 285 | start = time.time() 286 | r = vegas_wrapper(singletop, dim, n_iter, ncalls, compilable=True) 287 | end = time.time() 288 | print(f"time (s): {end-start}") 289 | 290 | try: 291 | from vegas import Integrator 292 | except ModuleNotFoundError: 293 | sys.exit(0) 294 | 295 | if len(sys.argv) > 1: 296 | print(" > Doing also the comparison with original Vegas ") 297 | 298 | def fun(xarr): 299 | x = xarr.reshape(1, -1) 300 | return singletop(x) 301 | 302 | print("Comparing with Lepage's Vegas") 303 | limits = dim * [[0.0, 1.0]] 304 | integrator = Integrator(limits) 305 | start = time.time() 306 | vr = integrator(fun, neval=ncalls, nitn=n_iter) 307 | end = time.time() 308 | print(vr.summary()) 309 | print(f"time (s): {end-start}") 310 | print(f"Per iteration (s): {(end-start)/n_iter}") 311 | -------------------------------------------------------------------------------- /capi/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required (VERSION 3.0.2...3.28.1) 2 | 3 | # Use rpaths for now, previously there were issues with osx 4 | SET(CMAKE_SKIP_BUILD_RPATH FALSE) 5 | 6 | # when building, don't use the install RPATH already 7 | # (but later on when installing) 8 | SET(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) 9 | 
10 | SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") 11 | 12 | project(libpdfflow) 13 | 14 | set(VERSION "\"0.1\"") 15 | 16 | find_package(Python3 COMPONENTS Interpreter Development) 17 | 18 | # running the cffi builder (only when the generated source is not already present) 19 | if (NOT EXISTS ${PROJECT_SOURCE_DIR}/src/cpdfflow.c) 20 | execute_process(COMMAND ${Python3_EXECUTABLE} ${PROJECT_SOURCE_DIR}/src/build.py WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/src) 21 | endif() 22 | 23 | include_directories(${Python3_INCLUDE_DIRS}) 24 | include_directories(src) 25 | add_library(pdfflow SHARED ${PROJECT_SOURCE_DIR}/src/cpdfflow.c) 26 | target_link_libraries(pdfflow ${Python3_LIBRARIES}) 27 | 28 | # pkg-config 29 | set(prefix ${CMAKE_INSTALL_PREFIX}) 30 | set(exec_prefix "${prefix}") 31 | set(includedir "${prefix}/include") 32 | set(extraincludirs "-I${Python3_INCLUDE_DIRS}") 33 | set(libdir "${prefix}/lib") 34 | set(pythonlibs "${Python3_LIBRARIES}") 35 | 36 | configure_file( 37 | "${PROJECT_SOURCE_DIR}/src/pdfflow.pc.in" 38 | "${PROJECT_SOURCE_DIR}/src/pdfflow.pc" 39 | ) 40 | 41 | install(FILES ${PROJECT_SOURCE_DIR}/src/pdfflow.pc DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/pkgconfig) 42 | install(DIRECTORY ${PROJECT_SOURCE_DIR}/src/pdfflow DESTINATION ${CMAKE_INSTALL_PREFIX}/include) 43 | install(TARGETS pdfflow LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib) 44 | -------------------------------------------------------------------------------- /capi/README.md: -------------------------------------------------------------------------------- 1 | PDFFlow C-API 2 | ============= 3 | 4 | This repository contains a C library to access PDFFlow from programming languages different from Python. 5 | 6 | ## Installation 7 | 8 | Make sure you have installed the `pdfflow` module in Python, then in order to install proceed with the usual cmake steps: 9 | ```bash 10 | cmake -S . 
-B build -DCMAKE_INSTALL_PREFIX= 11 | cmake --build build 12 | cmake --install build 13 | ``` 14 | 15 | ## Usage 16 | 17 | The compiler flags to include this library in your package can be 18 | retrieved with: 19 | ```bash 20 | pkg-config pdfflow --cflags 21 | pkg-config pdfflow --libs 22 | ``` 23 | 24 | If you installed to a non-standard location, you need to set up the `PKG_CONFIG_PATH` and `LD_LIBRARY_PATH`, e.g., for a `VIRTUAL_ENV`: 25 | ```bash 26 | export PKG_CONFIG_PATH=${VIRTUAL_ENV}/lib/pkgconfig/:${PKG_CONFIG_PATH}: 27 | export LD_LIBRARY_PATH=${VIRTUAL_ENV}/lib/:${LD_LIBRARY_PATH}: 28 | export DYLD_LIBRARY_PATH=${VIRTUAL_ENV}/lib:${DYLD_LIBRARY_PATH}: 29 | ``` 30 | 31 | 32 | Sample programs using this library are provided in the `capi/examples/` directory. 33 | -------------------------------------------------------------------------------- /capi/examples/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile example 2 | 3 | CFLAGS=`pkg-config pdfflow --cflags` 4 | LIBS=`pkg-config pdfflow --libs` 5 | 6 | all: example 7 | 8 | %: %.c 9 | $(CC) $(CFLAGS) -o $@ $< $(LIBS) 10 | 11 | clean: 12 | rm -rf example 13 | -------------------------------------------------------------------------------- /capi/examples/example.c: -------------------------------------------------------------------------------- 1 | // This file is part of PDFFlow 2 | #include 3 | #include "pdfflow/pdfflow.h" 4 | 5 | int main() { 6 | // load pdf 7 | mkpdf("NNPDF40_nnlo_as_01180/0", "/usr/share/lhapdf/LHAPDF/"); 8 | 9 | // test xfxq2 and alphasq2 10 | int pid[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}; 11 | double xs[] = {0.1}; 12 | double q2s[] = {1.65}; 13 | double *xf_vectorized = xfxq2(pid, 11, xs, 1, q2s, 1); 14 | for (int fl = 0; fl < 11; fl++) 15 | printf("flv=%d - xfx = %f\n", fl-5, xf_vectorized[fl]); 16 | 17 | double q2_vectorized[] = {1.65, 10.65}; 18 | double* as_vectorized = alphasq2(q2_vectorized, 2); 19 | 20 | 
printf("alphas(q2=%f) = %f\n", q2_vectorized[0], as_vectorized[0]); 21 | printf("alphas(q2=%f) = %f\n", q2_vectorized[1], as_vectorized[1]); 22 | 23 | return 0; 24 | } 25 | -------------------------------------------------------------------------------- /capi/examples/fortran/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile example 2 | 3 | CFLAGS=`pkg-config pdfflow --cflags` 4 | LIBS=`pkg-config pdfflow --libs` 5 | FC=gfortran 6 | 7 | all: example 8 | 9 | %: %.f90 10 | $(FC) $(CFLAGS) -o $@ pdfflow_f_interface.c $< $(LIBS) 11 | 12 | run: example 13 | ./example 14 | 15 | clean: 16 | rm -rf example 17 | -------------------------------------------------------------------------------- /capi/examples/fortran/example.f90: -------------------------------------------------------------------------------- 1 | ! This file is part of pdfflow 2 | program example 3 | 4 | use, intrinsic :: ISO_C_BINDING, only: C_ptr, c_char 5 | implicit none 6 | 7 | integer, parameter :: dp = kind(1.d0) 8 | 9 | integer :: pid(0:10), i 10 | real(dp) :: alpha_s, q2(0:1), x(0:1), xfx(0:11) 11 | real(dp) :: q2_vectorized(0:2), as_vectorized(0:2) 12 | 13 | character(kind=c_char, len=23) :: ss = "NNPDF40_nnlo_as_01180/0" 14 | character(kind=c_char, len=24) :: pp = "/usr/share/lhapdf/LHAPDF/" 15 | 16 | call mkPDF(ss, pp) 17 | 18 | do i = 0, 10 19 | pid(i) = i - 5 20 | enddo 21 | x(0) = 0.1 22 | q2(0) = 1.65 23 | 24 | call xfxq2(pid, 11, x, 1, q2, 1, xfx) 25 | 26 | do i = 0, 10 27 | write(*, fmt=200)"Flavour: ", i - 5, " value: ", xfx(i) 28 | enddo 29 | 30 | q2_vectorized(0) = 1.65 31 | q2_vectorized(1) = 10.65 32 | call alphasq2(q2_vectorized, 2, as_vectorized); 33 | 34 | write(*, fmt=100)"alphas(q2=",q2_vectorized(0),") = ", as_vectorized(0) 35 | write(*, fmt=100)"alphas(q2=",q2_vectorized(1),") = ", as_vectorized(1) 36 | 37 | 100 format (A, F10.7, A, F10.7) 38 | 200 format (" ", A, I0, A, F10.7) 39 | 40 | end program 41 | 
-------------------------------------------------------------------------------- /capi/examples/fortran/pdfflow_f_interface.c: -------------------------------------------------------------------------------- 1 | // This file is part of PDFFlow 2 | #include 3 | #include "pdfflow/pdfflow.h" 4 | 5 | // Some notes about the Fortran interface (to be moved to the docs) 6 | // 7 | // There are two caveats to take into account when interfacing with Fortran: 8 | // 1 - In fortran all arguments are passed by reference, therefore this file 9 | // does little more than dereference the arguments to pass it through the cffi interface 10 | // 2 - Fortran assumes all functions to have a `_` at the end. This is not true in C 11 | // it is possible to change this behaviour with the -fno-underscoring gcc flag, but since 12 | // it is not standard we prefer to provide an interface between the "cnaming" and the "fnaming" 13 | 14 | void mkpdf_(const char *fname, const char *dirname) { 15 | mkpdf(fname, dirname); 16 | } 17 | 18 | void alphasq2_(double *q2, const int *n, double *alphas) { 19 | double *as = alphasq2(q2, *n); 20 | for (int i = 0; i < *n; i++) 21 | alphas[i] = as[i]; 22 | } 23 | 24 | void xfxq2_(int *pid, const int *n, double *x, const int *m, double *q2, const int *o, double *f) { 25 | double *xf = xfxq2(pid, *n, x, *m, q2, *o); 26 | for (int i = 0; i < *n * *m * *o; i++) 27 | f[i] = xf[i]; 28 | } 29 | -------------------------------------------------------------------------------- /capi/src/build.py: -------------------------------------------------------------------------------- 1 | # This file is part of PDFFlow 2 | import cffi 3 | ffibuilder = cffi.FFI() 4 | 5 | with open('pdfflow/pdfflow.h') as f: 6 | ffibuilder.embedding_api(f.read()) 7 | 8 | ffibuilder.set_source('cpdfflow', r''' 9 | #include "pdfflow/pdfflow.h" 10 | ''', source_extension='.c') 11 | 12 | with open('wrapper.py') as f: 13 | ffibuilder.embedding_init_code(f.read()) 14 | 15 | 
ffibuilder.emit_c_code('cpdfflow.c') 16 | -------------------------------------------------------------------------------- /capi/src/pdfflow.pc.in: -------------------------------------------------------------------------------- 1 | prefix=@prefix@ 2 | exec_prefix=@exec_prefix@ 3 | includedir=@includedir@ 4 | extraincludirs=@extraincludirs@ 5 | libdir=@libdir@ 6 | pythonlibs=@pythonlibs@ 7 | 8 | Name: pdfflow 9 | Description: The PDFFlow C-API library 10 | Version: @VERSION@ 11 | Cflags: -I@includedir@ @extraincludirs@ 12 | Libs: -L@libdir@ -lpdfflow @pythonlibs@ 13 | 14 | -------------------------------------------------------------------------------- /capi/src/pdfflow/pdfflow.h: -------------------------------------------------------------------------------- 1 | /** 2 | * PDFFlow C API 3 | */ 4 | 5 | extern void mkpdf(const char *fname, const char * dirname); 6 | 7 | extern double *xfxq2(int *pid, int n, double *x, int m, double *q2, int o); 8 | 9 | extern double *alphasq2(double *q2, int n); 10 | -------------------------------------------------------------------------------- /capi/src/wrapper.py: -------------------------------------------------------------------------------- 1 | # This file is part of 2 | from cpdfflow import ffi 3 | from pdfflow import pflow 4 | import numpy as np 5 | 6 | pdf = None 7 | 8 | @ffi.def_extern() 9 | def mkpdf(fname, dirname): 10 | """Generate a PDF given a PDF name and a directory.""" 11 | pdfset = ffi.string(fname).decode('utf-8') 12 | path = ffi.string(dirname).decode('utf-8') 13 | global pdf 14 | pdf = pflow.mkPDF(pdfset, path) 15 | 16 | 17 | @ffi.def_extern() 18 | def xfxq2(pid, n, x, m, q2, o): 19 | """Returns the xfxQ2 value for arrays of PID, x and q2 values.""" 20 | global pdf 21 | pid_numpy = np.frombuffer(ffi.buffer(pid, n*ffi.sizeof('int')), dtype='int32') 22 | x_numpy = np.frombuffer(ffi.buffer(x, m*ffi.sizeof('double')), dtype='double') 23 | q2_numpy = np.frombuffer(ffi.buffer(q2, o*ffi.sizeof('double')), 
dtype='double') 24 | ret = pdf.xfxQ2(pid_numpy, x_numpy, q2_numpy).numpy() 25 | return ffi.cast("double*", ffi.from_buffer(ret)) 26 | 27 | 28 | @ffi.def_extern() 29 | def alphasq2(q2, n): 30 | """Returns the alpha strong coupling for an array of q2 values.""" 31 | global pdf 32 | q2_numpy = np.frombuffer(ffi.buffer(q2, n*ffi.sizeof('double')), dtype='double') 33 | ret = pdf.alphasQ2(q2_numpy).numpy() 34 | return ffi.cast("double*", ffi.from_buffer(ret)) 35 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | 22 | view: html 23 | $(BROWSER) build/html/index.html 24 | -------------------------------------------------------------------------------- /doc/source/apisrc/pdfflow.rst: -------------------------------------------------------------------------------- 1 | pdfflow package 2 | =============== 3 | 4 | Submodules 5 | ---------- 6 | 7 | pdfflow.pdfflow module 8 | ------------------------ 9 | 10 | .. automodule:: pdfflow.pflow 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | :noindex: 15 | 16 | pdfflow.subgrid module 17 | ---------------------- 18 | 19 | .. 
automodule:: pdfflow.subgrid 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | :noindex: 24 | -------------------------------------------------------------------------------- /doc/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | from recommonmark.transform import AutoStructify 16 | 17 | sys.path.insert(0, os.path.abspath('..')) 18 | import pdfflow 19 | 20 | 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = 'pdfflow' 24 | copyright = '2020, Stefano Carrazza, Juan Cruz-Martinez and Marco Rossi' 25 | author = 'Stefano Carrazza, Juan Cruz-Martinez and Marco Rossi' 26 | 27 | # The full version, including alpha/beta/rc tags 28 | release = pdfflow.__version__ 29 | 30 | # The full version, including alpha/beta/rc tags 31 | autodoc_mock_imports = ['tensorflow', 'tensorflow-probability'] 32 | 33 | 34 | # -- General configuration --------------------------------------------------- 35 | # 36 | # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found 37 | master_doc = 'index' 38 | 39 | # Add any Sphinx extension module names here, as strings. They can be 40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 | # ones. 
42 | extensions = [ 43 | 'sphinx.ext.autodoc', 44 | 'sphinx.ext.doctest', 45 | 'sphinx.ext.coverage', 46 | 'sphinx.ext.napoleon', 47 | 'sphinx.ext.intersphinx', 48 | 'recommonmark', 49 | ] 50 | 51 | # Add any paths that contain templates here, relative to this directory. 52 | templates_path = ['_templates'] 53 | 54 | # Markdown configuration 55 | 56 | # The suffix(es) of source filenames. 57 | # You can specify multiple suffix as a list of string: 58 | # 59 | source_suffix = { 60 | '.rst': 'restructuredtext', 61 | '.txt': 'markdown', 62 | '.md': 'markdown', 63 | } 64 | 65 | autosectionlabel_prefix_document = True 66 | # Allow to embed rst syntax in markdown files. 67 | enable_eval_rst = True 68 | 69 | # List of patterns, relative to source directory, that match files and 70 | # directories to ignore when looking for source files. 71 | # This pattern also affects html_static_path and html_extra_path. 72 | exclude_patterns = [] 73 | 74 | 75 | # -- Options for HTML output ------------------------------------------------- 76 | 77 | # The theme to use for HTML and HTML Help pages. See the documentation for 78 | # a list of builtin themes. 79 | # 80 | html_theme = 'sphinx_rtd_theme' 81 | 82 | # Add any paths that contain custom static files (such as style sheets) here, 83 | # relative to this directory. They are copied after the builtin static files, 84 | # so a file named "default.css" will overwrite the builtin "default.css". 
85 | html_static_path = ['_static'] 86 | 87 | 88 | # -- Intersphinx ------------------------------------------------------------- 89 | 90 | intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} 91 | 92 | # -- Doctest ------------------------------------------------------------------ 93 | # 94 | 95 | doctest_path = [os.path.abspath('../examples')] 96 | 97 | # -- Autodoc ------------------------------------------------------------------ 98 | # 99 | autodoc_member_order = 'bysource' 100 | 101 | # Adapted this from 102 | # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/docs/conf.py 103 | # app setup hook 104 | def setup(app): 105 | app.add_config_value('recommonmark_config', { 106 | 'enable_eval_rst': True, 107 | }, True) 108 | app.add_transform(AutoStructify) 109 | -------------------------------------------------------------------------------- /doc/source/how_to.rst: -------------------------------------------------------------------------------- 1 | .. _howto-label: 2 | 3 | ============= 4 | Advanced usage 5 | ============= 6 | 7 | .. contents:: 8 | :local: 9 | :depth: 1 10 | 11 | 12 | Logging 13 | ------- 14 | 15 | ``PDFFlow`` uses the internal logging capabilities of python by 16 | creating a new logger handled named ``pdfflow``. 17 | You can modify the behavior of the logger as with any sane python library with the following lines: 18 | 19 | .. code-block:: python 20 | 21 | import logging 22 | log_dict = { 23 | "0" : logging.ERROR, 24 | "1" : logging.WARNING, 25 | "2" : logging.INFO, 26 | "3" : logging.DEBUG 27 | } 28 | logger_pdfflow = logging.getLogger('pdfflow') 29 | logger_pdfflow.setLevel(log_dict["0"]) 30 | 31 | Where the log level can be any level defined in the ``log_dict`` dictionary. 
32 | 33 | Since ``PDFFlow`` is to be interfaced with non-python code it is also 34 | possible to control the behaviour through the environment variable ``PDFFLOW_LOG_LEVEL``, in that case any of the keys in ``log_dict`` can be used. For instance: 35 | 36 | .. code-block:: bash 37 | 38 | export PDFFLOW_LOG_LEVEL=1 39 | 40 | will suppress all logger information other than ``WARNING`` and ``ERROR``. 41 | 42 | 43 | Environment 44 | ----------- 45 | 46 | ``PDFFlow`` is based on ``TensorFlow`` and as such all environment variable that 47 | have an effect on ``TensorFlow``\`s behavior will also have an effect on ``PDFFlow``. 48 | 49 | Here we describe only some of what we found to be the most useful variables. 50 | For a complete description on the variables controlling the GPU-behavior of ``TensorFlow`` please refer to 51 | the `nvidia official documentation `_. 52 | 53 | - ``TF_CPP_MIN_LOG_LEVEL``: controls the ``TensorFlow`` logging level. It is set to 1 by default so that only errors are printed. 54 | - ``PDFFLOW_LOG_LEVEL``: controls the ``PDFFlow`` logging level. Set to 3 by default so that everything is printed. 55 | - ``CUDA_VISIBLE_DEVICES``: set the devices that are visible for ``TensorFlow``. If unset it will try to use all GPUs available. In order to force the code to run on CPU it needs to be set to ``""``. In a multi-GPU system you can choose, by index, the GPUs available for ``TensorFlow``, e.g. ``export CUDA_VISIBLE_DEVICES=0,1``. 56 | - ``PDFFLOW_FLOAT``: controls the ``PDFFlow`` float precision. Default is 64 for 64-bits. Accepts: 64, 32. 57 | - ``PDFFLOW_INT``: controls the ``PDFFlow`` integer precision. Default is 32 for 32-bits. Accepts: 64, 32. 58 | - ``PDFFLOW_DATA_PATH``: folder where to find the PDF sets, equivalent to ``LHAPDF_DATA_PATH`` 59 | 60 | 61 | Building the graph ahead of time 62 | -------------------------------- 63 | 64 | The very first call of PDFFlow compiles the ``tf.Graph``. 
TensorFlow compiles only functions that are called for the first time. The function ``PDF.trace()`` is intended to build all the necessary parts of the ``tf.Graph`` and prevent future retracings that could slow down the execution. 65 | 66 | The ``PDF`` graph can be generated by the following lines of code: 67 | 68 | .. code-block:: python 69 | 70 | from pdfflow import mkPDF 71 | 72 | pdf = mkPDF("NNPDF31_nnlo_as_0118") 73 | pdf.trace() 74 | pdf.alphas_trace() 75 | 76 | Note that the strong coupling interpolation requires calling 77 | its own ``PDF.alphas_trace()`` function instead. 78 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. title:: 2 | pdfflow's documentation! 3 | 4 | ==================================================== 5 | PDFflow: PDF interpolation for hardware accelerators 6 | ==================================================== 7 | 8 | .. image:: https://img.shields.io/badge/arXiv-hep--ph%2F2009.06635-%23B31B1B.svg 9 | :target: https://arxiv.org/abs/2009.06635 10 | 11 | .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3964190.svg 12 | :target: https://doi.org/10.5281/zenodo.3964190 13 | 14 | .. contents:: 15 | :local: 16 | :depth: 1 17 | 18 | PDFflow is a parton distribution function interpolation library written in Python and based on the `TensorFlow `_ framework. 19 | It is developed with a focus on speed and efficiency, enabling researchers to perform very expensive calculation as quick and easy as possible. 20 | 21 | 22 | How to obtain the code 23 | ====================== 24 | 25 | Open Source 26 | ----------- 27 | The ``pdfflow`` package is open source and available at https://github.com/N3PDF/pdfflow 28 | 29 | Installation 30 | ------------ 31 | The package can be installed with pip: 32 | 33 | .. 
code-block:: bash 34 | 35 | python3 -m pip install pdfflow 36 | 37 | If you prefer a manual installation just use: 38 | 39 | .. code-block:: bash 40 | 41 | git clone https://github.com/N3PDF/pdfflow 42 | cd pdfflow 43 | python3 setup.py install 44 | 45 | or if you are planning to extend or develop code just use: 46 | 47 | .. code-block:: bash 48 | 49 | python3 setup.py develop 50 | 51 | 52 | Motivation 53 | ========== 54 | 55 | PDFflow is developed within the `Particle Physics group `_ of the `University of Milan `_. 56 | Theoretical calculations in particle physics are incredibly time consuming operations, sometimes taking months in big clusters all around the world. 57 | 58 | These expensive calculations are driven by the high dimensional phase space that need to be integrated but also by a lack of expertise in new techniques on high performance computation. 59 | Indeed, while at the theoretical level these are some of the most complicated calculations performed by mankind; at the technical level most of these calculations are performed using very dated code and methodologies that are unable to make us of the available resources. 60 | 61 | With PDFflow we aim to fill this gap between theoretical calculations and technical performance by providing a framework which can automatically make the best of the machine in which it runs. 62 | To that end PDFflow is based on two technologies that together will enable a new age of research. 63 | 64 | 65 | 66 | How to cite ``pdfflow``? 67 | ========================= 68 | 69 | When using ``pdfflow`` in your research, please cite the following publications: 70 | 71 | 72 | 73 | .. image:: https://img.shields.io/badge/arXiv-hep--ph%2F2009.06635-%23B31B1B.svg 74 | :target: https://arxiv.org/abs/2009.06635 75 | 76 | .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3691926.svg 77 | :target: https://doi.org/10.5281/zenodo.3691926 78 | 79 | Bibtex: 80 | 81 | .. 
code-block:: latex 82 | 83 | @article{Carrazza:2020qwu, 84 | author = "Carrazza, Stefano and Cruz-Martinez, Juan M. and Rossi, Marco", 85 | title = "{PDFFlow: parton distribution functions on GPU}", 86 | eprint = "2009.06635", 87 | archivePrefix = "arXiv", 88 | primaryClass = "hep-ph", 89 | month = "9", 90 | year = "2020" 91 | } 92 | 93 | @software{pdfflow_package, 94 | author = {Juan Cruz-Martinez and 95 | Marco Rossi and 96 | Stefano Carrazza}, 97 | title = {N3PDF/pdfflow: PDFFlow 1.0}, 98 | month = sep, 99 | year = 2020, 100 | publisher = {Zenodo}, 101 | version = {v1.0}, 102 | doi = {10.5281/zenodo.3964190}, 103 | url = {https://doi.org/10.5281/zenodo.3964190} 104 | } 105 | 106 | 107 | FAQ 108 | === 109 | 110 | Why the name ``pdfflow``? 111 | --------------------------- 112 | 113 | It is a combination of the names `PDF` and `Tensorflow`. 114 | 115 | - **PDFs**: Parton Distribution Functions (or PDFs) are at the core of LHC phenomenology by providing a description of the parton content of the proton. 116 | 117 | - **TensorFlow**: the framework developed by Google and made public in November of 2015 is a perfect combination between performance and usability. With a focus on Deep Learning, TensorFlow provides an algebra library able to easily run operations in many different devices: CPUs, GPUs, TPUs with little input by the developer. Write your code once. 118 | 119 | 120 | .. toctree:: 121 | :maxdepth: 3 122 | :glob: 123 | :caption: Contents: 124 | 125 | PDFFlow 126 | overview 127 | how_to 128 | 129 | 130 | .. automodule:: pdfflow 131 | :members: 132 | :noindex: 133 | -------------------------------------------------------------------------------- /doc/source/overview.rst: -------------------------------------------------------------------------------- 1 | .. _overview-label: 2 | 3 | ======== 4 | Overview 5 | ======== 6 | 7 | .. 
contents:: 8 | :local: 9 | :depth: 1 10 | 11 | Installing a PDF set 12 | ==================== 13 | PDF sets can be installed in two ways 14 | 15 | 1. downloading directly the PDF sets, for instance from `LHAPDF PDF sets page `_ or `NNPDF `_ 16 | 2. exploiting the ``lhapdf`` scripts, through the following commands: 17 | 18 | .. code-block:: bash 19 | 20 | lhapdf list 21 | lhapdf install 22 | 23 | which downloads and install the sets directly to ``lhapdf-config --datadir``. 24 | 25 | 26 | Instantiating a PDF 27 | =================== 28 | 29 | mkPDF wrapper 30 | ^^^^^^^^^^^^^ 31 | 32 | PDFs can be instantiated in a similar manner to `LHAPDF `_ 33 | by calling the provided ``mkPDF`` and ``mkPDFs`` functions. 34 | 35 | If ``LHAPDF`` and the ``pdfset`` are installed in the system it is enough to call: 36 | 37 | .. code-block:: python 38 | 39 | from pdfflow import mkPDF 40 | pdf = mkPDF(f"{pdfset}/0") 41 | 42 | And ``pdfflow`` will try to obtain the PDF directory 43 | from ``LHAPDF``. If, instead, we have manually downloaded the PDF, we need to specify the folder 44 | in which the PDF folder can be found, for instance: 45 | 46 | .. code-block:: python 47 | 48 | from pdfflow import mkPDF 49 | pdf = mkPDF(f"{pdfset}/0", dirname="/usr/share/lhapdf/LHAPDF") 50 | 51 | To obtain the central member (0) of the ``pdfset``. 52 | It is often necessary to require several members of a set, for instance to compute 53 | pdf error. This can be achieved with the ``mkPDFs`` function, for instance, 54 | to obtain members (0,1,2) we can do: 55 | 56 | .. code-block:: python 57 | 58 | from pdfflow import mkPDFs 59 | pdf = mkPDFs(pdfset, [0, 1, 2]) 60 | 61 | Note that both ``mkPDF`` and ``mkPDFs`` accept the keyword argument ``dirname``. 62 | 63 | 64 | PDF class 65 | ^^^^^^^^^ 66 | 67 | The aforementioned functions are all wrappers around the low-level ``PDF`` class and provide an instance to the class. 68 | The class can also be instantiated directly with: 69 | 70 | .. 
code-block:: python 71 | 72 | from pdfflow.pflow import PDF 73 | pdf = PDF(dirname, pdfset, [0]) # obtain a PDF instance for member 0 74 | pdf = PDF(dirname, pdfset, [2, 5]) # obtain a PDF instance for members 2 and 5 75 | 76 | Note that in order to instantiate a PDF class it is always necessary to provide the source directory of the PDF sets. 77 | 78 | PDF interpolation 79 | ================= 80 | The PDF interpolation can be worked out calling the ``py_xfxQ2`` method with 81 | python or TensorFlow objects as arguments: 82 | 83 | Python interface 84 | ^^^^^^^^^^^^^^^^ 85 | 86 | When using python arguments as the input we provide the ``py_xfxQ2``. 87 | This function deals with the conversion of the input into TensorFlow variables. 88 | 89 | .. code-block:: python 90 | 91 | from pdfflow import mkPDFs 92 | 93 | pdf = mkPDFs(pdfset, [0,1,2]) 94 | x = [10**i for i in range(-6,-1)] 95 | q2 = [10**(2*i) for i in range(1,6)] 96 | pid = [-1,21,1] 97 | 98 | pdf.py_xfxQ2(pid, x, q2) 99 | 100 | 101 | TensorFlow interface 102 | ^^^^^^^^^^^^^^^^^^^^ 103 | 104 | Instead, if the arguments are already tensorflow objects, it is possible to call 105 | lower level ``tf.functions`` such as ``xfxQ2``: 106 | 107 | .. code-block:: python 108 | 109 | from pdfflow import mkPDFs, float_me, int_me 110 | 111 | pdf = mkPDFs(pdfset, [0,1,2]) 112 | x = float_me([10**i for i in range(-6,-1)]) 113 | q2 = float_me([10**(2*i) for i in range(1,6)]) 114 | pid = int_me([-1,21,1]) 115 | 116 | pdf.xfxQ2(pid, x, q2) 117 | 118 | .. note:: The ``float_me`` and ``int_me`` functions are wrappers around ``tf.cast`` which we provide with the aim of ensuring that integers are cast to 32-bit integers and float to 64-bit floats. 119 | 120 | If arguments had been ``tf.Tensor`` objects, the preferred way to call the interpolation would have been 121 | via the ``xfxQ2`` function. 
122 | To go through the computation of all the pids in the flavor scheme, use ``xfxQ2_allpid`` or the 123 | ``py_xfxQ2_allpid`` version instead. 124 | 125 | 126 | Strong coupling interpolation 127 | ============================= 128 | 129 | The strong coupling interpolation requires calling its own methods of the ``PDF`` object: 130 | 131 | 132 | .. code-block:: python 133 | 134 | from pdfflow import mkPDFs, float_me 135 | 136 | pdf = mkPDFs(pdfset, [0,1,2]) 137 | pdf.alphas_trace() 138 | 139 | q2 = [10**(2*i) for i in range(1,6)] 140 | pdf.py_alphasQ2(q2) 141 | pdf.alphasQ2(float_me(q2)) 142 | 143 | 144 | Akin to the PDF interpolation discussed above, we provide the user with ``py_alphasQ2`` for the Python interface and ``alphasQ2`` for ``TensorFlow`` for the strong coupling interpolation. 145 | 146 | In order to mimic the ``LHAPDF`` set of functions, we implement also the ``alphasQ`` and ``py_alphasQ`` ``PDF`` methods, by which the user is relieved of squaring the query array elements manually. 
147 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel" 5 | ] 6 | build-backend = "setuptools.build_meta" 7 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = pdfflow 3 | version = 1.2.1 4 | description = PDF interpolation with Tensorflow 5 | author = S.Carrazza, J.Cruz-Martinez, M.Rossi 6 | author_email = stefano.carrazza@cern.ch, juan.cruz@mi.infn.it, marco.rossi5@unimi.it 7 | url = https://github.com/N3PDF/pdfflow 8 | long_description = file: README.md 9 | long_description_content_type = text/markdown 10 | license_files = LICENSE 11 | classifiers = 12 | Operating System :: Unix 13 | Programming Language :: Python 14 | Programming Language :: Python :: 3 15 | Topic :: Scientific/Engineering 16 | Topic :: Scientific/Engineering :: Physics 17 | 18 | [options] 19 | package_dir= 20 | =src 21 | packages=find: 22 | zip_safe = False 23 | python_requires = >=3.6 24 | install_requires = 25 | numpy >= 1.21 26 | pyyaml 27 | lhapdf_management 28 | 29 | [options.packages.find] 30 | where = src 31 | 32 | [options.extras_require] 33 | capi = cffi 34 | docs = 35 | sphinx_rtd_theme 36 | recommonmark 37 | sphinxcontrib-bibtex 38 | examples = 39 | matplotlib 40 | vegasflow 41 | tf = tensorflow 42 | tf-cpu = tensorflow-cpu 43 | tf-gpu = tensorflow-gpu 44 | tf-amd = tensorflow-rocm 45 | -------------------------------------------------------------------------------- /src/pdfflow/__init__.py: -------------------------------------------------------------------------------- 1 | """PDF interpolation with tensorflow""" 2 | # Expose mkPDF and the int_me, float_me functions 3 | # that way the log system is imported from the very beginning 4 | from 
importlib.metadata import metadata 5 | from pdfflow.configflow import int_me, float_me, run_eager 6 | from pdfflow.pflow import mkPDF, mkPDFs 7 | 8 | PACKAGE = "pdfflow" 9 | 10 | __version__ = metadata(PACKAGE)["version"] 11 | -------------------------------------------------------------------------------- /src/pdfflow/alphas_functions.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the different alphas grids (first, last and inner) wrapper functions 3 | when compiled by tensorflow they all take alphas_GRID_FUNCTION_SIGNATURE as input 4 | which is defined in :py:module:`ppdfflow.alphas_subgrid` 5 | and will be compiled once they are linked to a specific subgrid. 6 | 7 | The function in this module apply different masks to the input to generate 8 | the different interpolation zones: 9 | 10 | (0) = log_q2min <= a_q2 <= log_q2max (log cubic interpolation) 11 | (1) = a_q2 > log_q2max (high q2, freezing the last grid value) 12 | (2) = a_q2 < log_q2max (low q2, logarithmic extrapolation) 13 | 14 | The input values defining the query are 15 | shape, a_q2 16 | while the rest of the input define the subgrid. 
def alphas_inner_subgrid(
    shape,
    a_q2,
    log_q2min,
    log_q2max,
    padded_q2,
    s_q2,
    actual_padded,
):
    """
    Inner (non-first and non-last) alphas subgrid interpolation
    Calls
        alphas_interpolate (basic interpolation) (0)

    Parameters
    ----------
        shape: tf.tensor of shape [None]
            final output shape to scatter points into
        a_q2: tf.tensor of shape [None]
            query of values of log(q2)

        For other parameters refer to subgrid.py:alphas_interpolate

    Returns
    ----------
        tf.tensor of shape `shape`
        alphas interpolated values for each query point
    """
    res = tf.zeros(shape, dtype=DTYPE)

    # --------------------------------------------------------------------
    # normal interpolation

    stripe, f_idx = _condition_to_idx(a_q2 >= log_q2min, a_q2 < log_q2max)
    # Bug fix: the previous guard ``tf.math.equal(f_idx, 0) is not None``
    # was always True (a tensor is never None); check for actual points
    # in this region, as done by the sibling functions in functions.py
    if tf.size(f_idx) != 0:
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = alphas_interpolate(in_q2, padded_q2, s_q2, actual_padded)
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)
    return res
def alphas_first_subgrid(
    shape,
    a_q2,
    log_q2min,
    log_q2max,
    padded_q2,
    s_q2,
    actual_padded,
):
    """
    First subgrid interpolation
    Calls
        alphas_interpolate (basic interpolation) (0)
    and performs a logarithmic extrapolation below the grid minimum (2)

    Parameters
    ----------
        shape: tf.tensor of shape [None]
            final output shape to scatter points into
        a_q2: tf.tensor of shape [None]
            query of values of log(q2)

        For other parameters refer to subgrid.py:alphas_interpolate

    Returns
    ----------
        tf.tensor of shape `shape`
        alphas interpolated values for each query point
    """
    res = tf.zeros(shape, dtype=DTYPE)

    # --------------------------------------------------------------------
    # normal interpolation

    stripe, f_idx = _condition_to_idx(a_q2 >= log_q2min, a_q2 < log_q2max)
    # Bug fix: the previous guard ``tf.math.equal(f_idx, 0) is not None``
    # was always True (a tensor is never None); check for actual points
    # in this region, as done by the sibling functions in functions.py
    if tf.size(f_idx) != 0:
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = alphas_interpolate(in_q2, padded_q2, s_q2, actual_padded)
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    # --------------------------------------------------------------------
    # lowq2

    stripe = a_q2 < log_q2min
    f_idx = int_me(tf.where(stripe))
    # Bug fix: same always-true guard replaced by a real emptiness check
    if tf.size(f_idx) != 0:
        in_q2 = tf.boolean_mask(a_q2, stripe)
        # logarithmic slope computed from the first two knots
        # (index 0 is padding — see alphas_interpolate)
        m = tf.math.log(actual_padded[2]/actual_padded[1])\
            /(padded_q2[2] - padded_q2[1])

        # extrapolate alphas(q2) with a frozen logarithmic slope
        ff_f = actual_padded[1] * tf.math.pow(
            tf.math.exp(in_q2)/tf.math.exp(padded_q2[1]),
            m)
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    return res
def alphas_last_subgrid(
    shape,
    a_q2,
    log_q2min,
    log_q2max,
    padded_q2,
    s_q2,
    actual_padded,
):
    """
    Last subgrid interpolation.
    Calls
        alphas_interpolate: (0)
    and freezes the last grid value above the grid maximum (1)

    Parameters
    ----------
        shape: tf.tensor of shape [None]
            final output shape to scatter points into
        a_q2: tf.tensor of shape [None]
            query of values of log(q2)

        For other parameters see :py:func:`pdfflow.alphas_region_interpolator.alphas_interpolate`

    Returns
    ----------
        tf.tensor, shape: `shape`
        alphas interpolated values for each query point
    """
    # Generate all conditions for all stripes
    res = tf.zeros(shape, dtype=DTYPE)

    # --------------------------------------------------------------------
    # normal interpolation
    # note: unlike the inner subgrids, the upper edge is included here
    stripe, f_idx = _condition_to_idx(a_q2 >= log_q2min, a_q2 <= log_q2max)
    # Bug fix: the previous guard ``tf.math.equal(f_idx, 0) is not None``
    # was always True (a tensor is never None); check for actual points
    # in this region, as done by the sibling functions in functions.py
    if tf.size(f_idx) != 0:
        # Check whether there are any points in this region
        # if there are, execute normal_interpolation
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = alphas_interpolate(in_q2, padded_q2, s_q2, actual_padded)
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    # --------------------------------------------------------------------
    # high q2
    stripe = a_q2 > log_q2max
    f_idx = int_me(tf.where(stripe))
    # Bug fix: same always-true guard replaced by a real emptiness check
    if tf.size(f_idx) != 0:
        # freeze the value of the last knot (index -1 is padding)
        ff_f = tf.ones_like(f_idx[:,0], dtype=DTYPE)*actual_padded[-2]
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    return res
@tf.function(
    input_signature=[
        tf.TensorSpec(shape=[None], dtype=DTYPE),
        tf.TensorSpec(shape=[None], dtype=DTYPE),
        tf.TensorSpec(shape=[None], dtype=DTYPE),
        tf.TensorSpec(shape=[None], dtype=DTYPE),
        tf.TensorSpec(shape=[None], dtype=DTYPE),
    ]
)
def cubic_interpolation(T, VL, VDL, VH, VDH):
    """Cubic Hermite interpolation on the unit interval.

    Combines the values (VL, VH) and derivatives (VDL, VDH) at the two
    edges with the standard Hermite basis polynomials evaluated at T.
    Results with magnitude >= 2 are replaced by FMAX
    (for alphas a value that large signals a runaway interpolation).
    """
    tt = T * T
    ttt = tt * T

    # Hermite basis polynomials
    h00 = 2 * ttt - 3 * tt + 1
    h10 = ttt - 2 * tt + T
    h01 = -2 * ttt + 3 * tt
    h11 = ttt - tt

    res = h00 * VL + h10 * VDL + h01 * VH + h11 * VDH

    return tf.where(tf.math.abs(res)<2., res, tf.ones_like(res)*FMAX)


@tf.function(
    input_signature=[
        tf.TensorSpec(shape=[None], dtype=DTYPEINT),
        tf.TensorSpec(shape=[], dtype=DTYPEINT),
        tf.TensorSpec(shape=[4, None], dtype=DTYPE),
        tf.TensorSpec(shape=[4, None], dtype=DTYPE),
    ]
)
def daS_dq2_func(q2_id, s_q2, corn_q2, A):
    """
    Computes the derivatives needed by the alphas cubic interpolation.

    For a query point falling in the leftmost (rightmost) q2 bin the
    one-sided forward (backward) finite difference is used, so that no
    knot outside the array boundaries is ever read; everywhere else the
    central (averaged) difference is taken. The selection is performed
    with ``tf.where`` on the ``q2_id`` bin index.

    Returns a tensor of shape (2, #points): derivative at the left and
    right knots of the interpolation interval.
    """
    # finite-difference slopes between consecutive knots (3 rows)
    slopes = (A[1:] - A[:-1]) / (corn_q2[1:] - corn_q2[:-1])

    fwd = slopes[1:]
    bwd = slopes[:-1]
    ctr = (fwd + bwd) / 2

    # first bin: no knot to the left, fall back to the forward difference
    left = tf.where(q2_id == 1, fwd[0], ctr[0])
    # last bin: no knot to the right, fall back to the backward difference
    right = tf.where(q2_id == s_q2 - 1, bwd[1], ctr[1])

    return tf.stack([left, right], 0)
@tf.function(
    input_signature=[
        tf.TensorSpec(shape=[None], dtype=DTYPE),
        tf.TensorSpec(shape=[None], dtype=DTYPEINT),
        tf.TensorSpec(shape=[4, None], dtype=DTYPE),
        tf.TensorSpec(shape=[4, None], dtype=DTYPE),
        tf.TensorSpec(shape=[], dtype=DTYPEINT),
    ]
)
def alphas_cubic_interpolation(
    a_q2, q2_id, corn_q2, A, s_q2
):
    """
    Performs the alphas log-cubic interpolation.

    The query points are first mapped to the [0, 1] parameter of their
    q2 bin; the edge derivatives come from ``daS_dq2_func``, which takes
    care of points in the first/last bin (see that function's docstring).

    Returns
    ----------
        tf.tensor of shape [None]
            LogCubic Interpolated points
    """
    # width of the interpolation bin in log(q2)
    bin_width = corn_q2[2] - corn_q2[1]
    # fractional position of each query point inside its bin
    t_frac = (a_q2 - corn_q2[1])/bin_width

    # derivatives at the two edges of the bin, rescaled to the unit interval
    derivs = daS_dq2_func(q2_id, s_q2, corn_q2, A)
    d_low = derivs[0]*bin_width
    d_high = derivs[1]*bin_width

    return cubic_interpolation(t_frac, A[1], d_low, A[2], d_high)
@tf.function(input_signature=alphas_INTERPOLATE_SIGNATURE)
def alphas_interpolate(
    a_q2, padded_q2, s_q2, actual_padded,
):
    """
    Basic cubic interpolation inside the alphas subgrid.

    First the four q2 knots surrounding each query point are gathered
    (together with the alphas values at those knots), then the cubic
    interpolation is carried out on them.

    Parameters
    ----------
        a_q2: tf.tensor of shape [None]
            query of values of log(q2)
        padded_q2: tf.tensor of shape [None]
            value for all the knots on the q2 axis,
            padded with one zero at the beginning and one at the end to
            avoid out of range errors when querying points near boundaries
        s_q2: tf.tensor of shape []
            size of q2 knots tensor without padding
        actual_padded: tf.tensor of shape [None]
            alphas values: contains the padded (q2) grid
    """
    # gather the neighbouring knots: bin index, knot positions and values
    neighbours = alphas_neighbour_knots(a_q2, padded_q2, actual_padded)
    q2_bins, corn_q2, alphas_vals = neighbours

    return alphas_cubic_interpolation(a_q2, q2_bins, corn_q2, alphas_vals, s_q2)
def run_eager(flag=True):
    """Wrapper around `run_functions_eagerly`
    When used no function is compiled

    Parameters
    ----------
        flag: bool
            whether to enable (True) or disable (False) eager execution
    """
    # Bug fix: the previous check compared version *strings*
    # (``tf.__version__ < "2.3.0"``), which is wrong lexicographically
    # for e.g. TF 2.10 ("2.10.0" < "2.3.0" is True). Compare numeric
    # (major, minor) instead; any pre-release suffix is ignored.
    version = tuple(int(part) for part in tf.__version__.split(".")[:2])
    if version < (2, 3):
        # deprecated name used by TF < 2.3
        tf.config.experimental_run_functions_eagerly(flag)
    else:
        tf.config.run_functions_eagerly(flag)
def find_pdf_path(pdfname):
    """Look in all possible directories for the given `pdfname`.

    Searches ``PDFFLOW_DATA_PATH`` (if set) and the LHAPDF data directory.
    If the set is not found anywhere, offer to install it automatically;
    if the user declines or the installation fails, raise.

    Parameters
    ----------
        pdfname: str
            name of the PDF set to look for

    Returns
    -------
        str
            path of the directory containing the PDF set

    Raises
    ------
        RuntimeError
            if the PDF set cannot be found or installed
    """
    all_paths = []
    if _data_path:
        all_paths.append(_data_path)
    all_paths.append(lhapdf_environment.datapath)

    # Return whatever path has the pdf inside
    for path in all_paths:
        if pathlib.Path(f"{path}/{pdfname}").exists():
            return path

    logger.warning("The PDF set %s could not be found in the system", pdfname)
    yn = input("Do you want to try and install it automatically? [y/n]: ")
    if yn.lower() in ("yes", "y"):
        if not pdf_install(pdfname):
            raise RuntimeError(f"Could not install {pdfname} in {lhapdf_environment.datapath}")
        # Bug fix: previously the function fell through and raised even
        # after a successful installation; return the install location
        return lhapdf_environment.datapath

    # The set was not found and the user declined the automatic installation
    if _data_path is not None:
        error_msg = f"\nPlease, download the PDF and uncompress it in {_data_path}"
    else:
        # Bug fix: this branch used to reference the undefined name
        # ``_lhapdf_data_path`` and to append to an unbound ``error_msg``
        error_msg = f"""
Please, either download the set to an appropiate folder and make the environment variable
PDFFLOW_DATA_PATH point to it or install with ``lhapdf_management install {pdfname}``"""
    raise RuntimeError(error_msg)
@tf.function(
    input_signature=[
        tf.TensorSpec(shape=[None], dtype=bool),
        tf.TensorSpec(shape=[None], dtype=bool),
    ]
)
def _condition_to_idx(cond1, cond2):
    """Combine two boolean masks with a logical AND and return both the
    combined mask and the (int) indices where it holds true."""
    both = tf.math.logical_and(cond1, cond2)
    idx = int_me(tf.where(both))
    return both, idx
def inner_subgrid(
    shape,
    a_x,
    a_q2,
    log_xmin,
    log_xmax,
    padded_x,
    s_x,
    log_q2min,
    log_q2max,
    padded_q2,
    s_q2,
    actual_padded,
):
    """
    Inner (non-first and non-last) subgrid interpolation
    Calls
        interpolate (basic interpolation) (0) && (1)
        lowx_extrapolation (1) && (2)

    Parameters
    ----------
        shape: tf.tensor of shape [None,None]
            final output shape to scatter points into
        a_x: tf.tensor of shape [None]
            query of values of log(x)
        a_q2: tf.tensor of shape [None]
            query of values of log(q2)
        For other parameters refer to subgrid.py:interpolate

    Returns
    ----------
        tf.tensor of shape [None,None]
            pdf interpolated values for each query point and query pids
    """
    # stripe (0): x inside the grid x-range
    stripe_0 = tf.math.logical_and(a_x >= log_xmin, a_x <= log_xmax)
    # stripe (1): q2 inside this subgrid; the upper edge is excluded here
    # (compare last_subgrid, which uses <=) — presumably because the edge
    # belongs to the next subgrid
    stripe_1 = tf.math.logical_and(a_q2 >= log_q2min, a_q2 < log_q2max)
    # stripe (2): x below the grid minimum (low-x extrapolation region)
    stripe_2 = a_x < log_xmin

    # accumulator: points outside every handled region stay at zero
    res = tf.zeros(shape, dtype=DTYPE)

    # --------------------------------------------------------------------
    # normal interpolation
    stripe, f_idx = _condition_to_idx(stripe_0, stripe_1)
    if tf.size(f_idx) != 0:
        in_x = tf.boolean_mask(a_x, stripe)
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = interpolate(in_x, in_q2, padded_x, s_x, padded_q2, s_q2, actual_padded,)
        # scatter the region results back into the full output tensor
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    # --------------------------------------------------------------------
    # lowx
    stripe, f_idx = _condition_to_idx(stripe_1, stripe_2)
    if tf.size(f_idx) != 0:
        in_x = tf.boolean_mask(a_x, stripe)
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = lowx_extrapolation(
            in_x, in_q2, padded_x, s_x, padded_q2, s_q2, actual_padded,
        )
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    return res
def first_subgrid(
    shape,
    a_x,
    a_q2,
    log_xmin,
    log_xmax,
    padded_x,
    s_x,
    log_q2min,
    log_q2max,
    padded_q2,
    s_q2,
    actual_padded,
):
    """
    First subgrid interpolation
    Calls
        interpolate (basic interpolation) (0) && (1)
        lowx_extrapolation (1) && (2)
        lowq2_extrapolation (0) && (4)
        lowx_lowq2_extrapolation (2) && (4)

    Parameters
    ----------
        shape: tf.tensor(int, int)
            final output shape to scatter points into
        a_x: tf.tensor of shape [None]
            query of values of log(x)
        a_q2: tf.tensor of shape [None]
            query of values of log(q2)

        For other parameters refer to subgrid.py:interpolate

    Returns
    ----------
        tf.tensor of shape `shape`
            pdf interpolated values for each query point and query pids
    """
    # stripe (0): x inside the grid x-range
    stripe_0 = tf.math.logical_and(a_x >= log_xmin, a_x <= log_xmax)
    # stripe (1): q2 inside this subgrid (upper edge excluded)
    stripe_1 = tf.math.logical_and(a_q2 >= log_q2min, a_q2 < log_q2max)
    # stripe (2): x below the grid minimum
    stripe_2 = a_x < log_xmin
    # stripe (4): q2 below the grid minimum (only the first subgrid
    # handles the low-q2 region)
    stripe_4 = a_q2 < log_q2min

    # accumulator: points outside every handled region stay at zero
    res = tf.zeros(shape, dtype=DTYPE)

    # --------------------------------------------------------------------
    # normal interpolation
    stripe, f_idx = _condition_to_idx(stripe_0, stripe_1)
    if tf.size(f_idx) != 0:
        in_x = tf.boolean_mask(a_x, stripe)
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = interpolate(in_x, in_q2, padded_x, s_x, padded_q2, s_q2, actual_padded,)
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    # --------------------------------------------------------------------
    # lowx
    stripe, f_idx = _condition_to_idx(stripe_1, stripe_2)
    if tf.size(f_idx) != 0:
        in_x = tf.boolean_mask(a_x, stripe)
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = lowx_extrapolation(
            in_x, in_q2, padded_x, s_x, padded_q2, s_q2, actual_padded,
        )
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    # --------------------------------------
    # low q2
    stripe, f_idx = _condition_to_idx(stripe_0, stripe_4)
    if tf.size(f_idx) != 0:
        in_x = tf.boolean_mask(a_x, stripe)
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = lowq2_extrapolation(
            in_x, in_q2, padded_x, s_x, padded_q2, s_q2, actual_padded,
        )
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    # --------------------------------------------------------------------
    # low x low q2
    stripe, f_idx = _condition_to_idx(stripe_2, stripe_4)
    if tf.size(f_idx) != 0:
        in_x = tf.boolean_mask(a_x, stripe)
        in_q2 = tf.boolean_mask(a_q2, stripe)
        ff_f = lowx_lowq2_extrapolation(
            in_x, in_q2, padded_x, s_x, padded_q2, s_q2, actual_padded,
        )
        res = tf.tensor_scatter_nd_update(res, f_idx, ff_f)

    return res
def last_subgrid(
    shape,
    a_x,
    a_q2,
    log_xmin,
    log_xmax,
    padded_x,
    s_x,
    log_q2min,
    log_q2max,
    padded_q2,
    s_q2,
    actual_padded,
):
    """
    Interpolation on the last subgrid of the PDF member.

    Every query point falls in exactly one of four regions and is routed
    to the corresponding routine:
        interpolate:                x and q2 both inside the grid
        lowx_extrapolation:         x below the grid, q2 inside
        highq2_extrapolation:       x inside, q2 above the grid
        lowx_highq2_extrapolation:  x below and q2 above the grid

    Parameters
    ----------
        shape: tf.tensor, rank-1 shape: (2,)
            final output shape to scatter points into

    For other parameters see :py:func:`pdfflow.region_interpolator.interpolate`

    Returns
    ----------
        tf.tensor, rank-2, shape: shape
            pdf interpolated values for each query point and queried pids
    """
    # Region masks; note q2 is inclusive at both edges on the last subgrid
    inside_x = tf.math.logical_and(a_x >= log_xmin, a_x <= log_xmax)
    inside_q2 = tf.math.logical_and(a_q2 >= log_q2min, a_q2 <= log_q2max)
    below_x = a_x < log_xmin
    above_q2 = a_q2 > log_q2max

    result = tf.zeros(shape, dtype=DTYPE)

    # (mask pair fed to _condition_to_idx, routine handling that region)
    dispatch = (
        (inside_x, inside_q2, interpolate),
        (inside_q2, below_x, lowx_extrapolation),
        (inside_x, above_q2, highq2_extrapolation),
        (below_x, above_q2, lowx_highq2_extrapolation),
    )

    for cond_a, cond_b, routine in dispatch:
        region, indices = _condition_to_idx(cond_a, cond_b)
        if tf.size(indices) != 0:
            sel_x = tf.boolean_mask(a_x, region)
            sel_q2 = tf.boolean_mask(a_q2, region)
            values = routine(
                sel_x, sel_q2, padded_x, s_x, padded_q2, s_q2, actual_padded,
            )
            result = tf.tensor_scatter_nd_update(result, indices, values)

    return result
linear_interpolation(x, xl, xh, yl, yh): 19 | """Linear extrapolation itself""" 20 | # print('lin interp') 21 | x = tf.expand_dims(x, 1) 22 | return yl + (x - xl) / (xh - xl) * (yh - yl) 23 | 24 | 25 | @tf.function( 26 | input_signature=[ 27 | tf.TensorSpec(shape=[None], dtype=DTYPE), 28 | tf.TensorSpec(shape=[], dtype=DTYPE), 29 | tf.TensorSpec(shape=[], dtype=DTYPE), 30 | tf.TensorSpec(shape=[None, None], dtype=DTYPE), 31 | tf.TensorSpec(shape=[None, None], dtype=DTYPE), 32 | ] 33 | ) 34 | def extrapolate_linear(x, xl, xh, yl, yh): 35 | """ 36 | Selects by a mask which point has yl and yh greater or lower than a threshold: 37 | for lower points a linear extrapolation is performed 38 | for greater points a log-linear extrapolation is performed 39 | Returns 40 | ---------- 41 | tf.tensor of shape [None,None] 42 | (Log)Linear Extrapolated points, with all pids queried 43 | 44 | """ 45 | mask = tf.math.logical_and(yl > 1e-3, yh > 1e-3) 46 | a = tf.where(mask, tf.math.log(yl), yl) 47 | b = tf.where(mask, tf.math.log(yh), yh) 48 | res = linear_interpolation(x, xl, xh, a, b) 49 | return tf.where(mask, tf.math.exp(res), res) 50 | 51 | 52 | @tf.function( 53 | input_signature=[ 54 | tf.TensorSpec(shape=[None, None], dtype=DTYPE), 55 | tf.TensorSpec(shape=[None, None], dtype=DTYPE), 56 | tf.TensorSpec(shape=[None, None], dtype=DTYPE), 57 | tf.TensorSpec(shape=[None, None], dtype=DTYPE), 58 | tf.TensorSpec(shape=[None, None], dtype=DTYPE), 59 | ] 60 | ) 61 | def cubic_interpolation(T, VL, VDL, VH, VDH): 62 | """Cubic extrapolation itself""" 63 | # print('cubic int') 64 | t2 = T * T 65 | t3 = t2 * T 66 | 67 | p0 = (2 * t3 - 3 * t2 + 1) * VL 68 | m0 = (t3 - 2 * t2 + T) * VDL 69 | 70 | p1 = (-2 * t3 + 3 * t2) * VH 71 | m1 = (t3 - t2) * VDH 72 | 73 | return p0 + m0 + p1 + m1 74 | 75 | 76 | @tf.function( 77 | input_signature=[ 78 | tf.TensorSpec(shape=[None], dtype=DTYPEINT), 79 | tf.TensorSpec(shape=[], dtype=DTYPEINT), 80 | tf.TensorSpec(shape=[4, None], dtype=DTYPE), 81 | 
tf.TensorSpec(shape=[4, 4, None, None], dtype=DTYPE), 82 | ] 83 | ) 84 | def df_dx_func(x_id, s_x, corn_x, A): 85 | """ 86 | Computes derivatives to make the bicubic interpolation 87 | When a query point is in the left or rightmost bin of the x axis, it 88 | automatically ignores the knots that would have gone outside array 89 | boundaries in the computation (this is done by a mask and tf.where, 90 | exploiting the x_id variable) 91 | """ 92 | # print('df_dx func') 93 | # just two kind of derivatives are useful in the x direction 94 | # if we are interpolating in the [-1,2]x[-1,2] square: 95 | # four derivatives in x = 0 for all Qs (:,0,:) 96 | # four derivatives in x = 1 for all Qs (:,1,:) 97 | # derivatives are returned in a tensor with shape (2,4,#draws) 98 | edge = (A[2] - A[1]) / tf.reshape(corn_x[2] - corn_x[1], (1, -1, 1)) 99 | 100 | lddx = (A[1] - A[0]) / tf.reshape(corn_x[1] - corn_x[0], (1, -1, 1)) 101 | default_l = (lddx + edge) / 2 102 | 103 | rddx = (A[3] - A[2]) / tf.reshape(corn_x[3] - corn_x[2], (1, -1, 1)) 104 | default_r = (edge + rddx) / 2 105 | 106 | mask_l = tf.reshape(x_id == 1, (1, -1, 1)) 107 | left = tf.where(mask_l, edge, default_l) 108 | 109 | mask_r = tf.reshape(x_id == s_x - 1, (1, -1, 1)) 110 | right = tf.where(mask_r, edge, default_r) 111 | 112 | return tf.stack([left, right], 0) 113 | 114 | 115 | @tf.function( 116 | input_signature=[ 117 | tf.TensorSpec(shape=[None], dtype=DTYPE), 118 | tf.TensorSpec(shape=[None], dtype=DTYPE), 119 | tf.TensorSpec(shape=[None], dtype=DTYPEINT), 120 | tf.TensorSpec(shape=[None], dtype=DTYPEINT), 121 | tf.TensorSpec(shape=[4, None], dtype=DTYPE), 122 | tf.TensorSpec(shape=[4, None], dtype=DTYPE), 123 | tf.TensorSpec(shape=[4, 4, None, None], dtype=DTYPE), 124 | tf.TensorSpec(shape=[], dtype=DTYPEINT), 125 | tf.TensorSpec(shape=[], dtype=DTYPEINT), 126 | ] 127 | ) 128 | def default_bicubic_interpolation( 129 | a_x, a_q2, x_id, q2_id, corn_x, corn_q2, A, s_x, s_q2 130 | ): 131 | """ 132 | Makes the 
@tf.function(
    input_signature=[
        tf.TensorSpec(shape=[None], dtype=DTYPE),
        tf.TensorSpec(shape=[None], dtype=DTYPE),
        tf.TensorSpec(shape=[None], dtype=DTYPEINT),
        tf.TensorSpec(shape=[None], dtype=DTYPEINT),
        tf.TensorSpec(shape=[4, None], dtype=DTYPE),
        tf.TensorSpec(shape=[4, None], dtype=DTYPE),
        tf.TensorSpec(shape=[4, 4, None, None], dtype=DTYPE),
        tf.TensorSpec(shape=[], dtype=DTYPEINT),
        tf.TensorSpec(shape=[], dtype=DTYPEINT),
    ]
)
def default_bicubic_interpolation(
    a_x, a_q2, x_id, q2_id, corn_x, corn_q2, A, s_x, s_q2
):
    """
    Bicubic interpolation over the 4x4 knot stencil: a cubic pass in x at
    each of the four q2 knots, then a cubic pass in q2. When a query point
    is in the lowest or uppermost bin of the q2 axis the knots that would
    fall outside the array are ignored (via a mask built from q2_id).

    Returns
    ----------
        tf.tensor of shape [None,None]
            LogBicubic interpolated points, with all pids queried
    """
    # bin widths (in log space) of the central cell
    width_x = corn_x[2] - corn_x[1]
    width_q = corn_q2[2] - corn_q2[1]

    # normalised coordinates inside the central cell
    t_q = tf.expand_dims((a_q2 - corn_q2[1]) / width_q, 1)
    t_x = tf.expand_dims((a_x - corn_x[1]) / width_x, 1)

    width_q_below = tf.expand_dims(corn_q2[1] - corn_q2[0], 1)
    width_x_col = tf.expand_dims(width_x, 1)
    width_q_col = tf.expand_dims(width_q, 1)
    width_q_above = tf.expand_dims(corn_q2[3] - corn_q2[2], 1)

    # x-derivatives rescaled to the unit cell
    df_dx = df_dx_func(x_id, s_x, corn_x, A) * width_x_col

    # NOTE(review): the four cubic passes below might be batchable along
    # the knot dimension in a single call
    f_lo = cubic_interpolation(t_x, A[1, 1], df_dx[0, 1], A[2, 1], df_dx[1, 1])
    f_hi = cubic_interpolation(t_x, A[1, 2], df_dx[0, 2], A[2, 2], df_dx[1, 2])
    central = (f_hi - f_lo) / width_q_col

    f_lolo = cubic_interpolation(t_x, A[1, 0], df_dx[0, 0], A[2, 0], df_dx[1, 0])
    avg_lo = (central + (f_lo - f_lolo) / width_q_below) / 2

    f_hihi = cubic_interpolation(t_x, A[1, 3], df_dx[0, 3], A[2, 3], df_dx[1, 3])
    avg_hi = (central + (f_hihi - f_hi) / width_q_above) / 2

    # at the q2 edges keep only the central slope
    on_lower_edge = tf.reshape(q2_id == 1, [-1, 1])
    deriv_lo = tf.where(on_lower_edge, central, avg_lo)

    on_upper_edge = tf.reshape(q2_id == s_q2 - 1, [-1, 1])
    deriv_hi = tf.where(on_upper_edge, central, avg_hi)

    deriv_lo = deriv_lo * width_q_col
    deriv_hi = deriv_hi * width_q_col
    return cubic_interpolation(t_q, f_lo, deriv_lo, f_hi, deriv_hi)
tf.tensor of shape [None] 38 | q2 bin for each query point 39 | corn_x: tf.tensor of shape [4,None] 40 | x values of the 4 knots around the query point 41 | corn_q2: tf.tensor of shape [4,None] 42 | q2 values of the 4 knots around the query point 43 | A: tf.tensor of shape [4,4,None,None] 44 | pdf values of the 4*4 grid knots around the query point 45 | (first None is for query points, second None is for query pids) 46 | """ 47 | # print('nk') 48 | x_id = tf.searchsorted(padded_x[1:-1], a_x, out_type=DTYPEINT, side='right') 49 | q2_id = tf.searchsorted(padded_q2[1:-1], a_q2, out_type=DTYPEINT, side='right') 50 | 51 | s_x = tf.size(padded_x, out_type=DTYPEINT) 52 | s = tf.size(padded_q2, out_type=DTYPEINT) 53 | 54 | x_id = tf.clip_by_value(x_id, tf.constant([0], dtype=DTYPEINT), s_x-3) 55 | q2_id = tf.clip_by_value(q2_id, tf.constant([0], dtype=DTYPEINT), s-3) 56 | 57 | s_x = tf.size(padded_x, out_type=DTYPEINT) 58 | s = tf.size(padded_q2, out_type=DTYPEINT) 59 | 60 | x_id = tf.clip_by_value(x_id, tf.constant([0], dtype=DTYPEINT), s_x-3) 61 | q2_id = tf.clip_by_value(q2_id, tf.constant([0], dtype=DTYPEINT), s-3) 62 | 63 | piu = tf.reshape(tf.range(-1, 3, dtype=DTYPEINT), (4, 1)) 64 | corn_x_id = tf.repeat(tf.reshape(x_id, (1, -1)), 4, axis=0) + piu 65 | corn_q2_id = tf.repeat(tf.reshape(q2_id, (1, -1)), 4, axis=0) + piu 66 | 67 | corn_x = tf.gather(padded_x, corn_x_id, name="fnk_1") 68 | corn_q2 = tf.gather(padded_q2, corn_q2_id, name="fnk_2") 69 | 70 | 71 | pdf_idx = tf.reshape(x_id * s + q2_id, (1, -1)) 72 | 73 | a = tf.repeat(pdf_idx - s, 4, axis=0) + piu 74 | b = tf.repeat(pdf_idx, 4, axis=0) + piu 75 | c = tf.repeat(pdf_idx + s, 4, axis=0) + piu 76 | d = tf.repeat(pdf_idx + 2 * s, 4, axis=0) + piu 77 | 78 | A_id = tf.stack([a, b, c, d]) 79 | A = tf.gather(actual_values, A_id, name="fnk_3") 80 | 81 | return x_id, q2_id, corn_x, corn_q2, A 82 | 83 | 84 | @tf.function( 85 | input_signature=[ 86 | tf.TensorSpec(shape=[None], dtype=DTYPE), 87 | 
tf.TensorSpec(shape=[None], dtype=DTYPE), 88 | tf.TensorSpec(shape=[None], dtype=DTYPE), 89 | ] 90 | ) 91 | def alphas_neighbour_knots(a_q2, padded_q2, actual_values): 92 | """ 93 | Parameters 94 | ---------- 95 | a_q2: tf.tensor 96 | tensor of values of q2 97 | padded_q2: tf.tensor 98 | values of log(q2) of the grid 99 | actual_values: tf.tensor 100 | values of the grid 101 | Returns 102 | ---------- 103 | q2_id: tf.tensor of shape [None] 104 | q2 bin for each query point 105 | corn_q2: tf.tensor of shape [4,None] 106 | q2 values of the 4 knots around the query point 107 | A: tf.tensor of shape [4,None] 108 | alphas values of the 4 grid knots around the query point 109 | """ 110 | # print('nk') 111 | q2_id = tf.searchsorted(padded_q2[1:-1], a_q2, out_type=DTYPEINT) 112 | s = tf.size(padded_q2, out_type=DTYPEINT) 113 | q2_id = tf.clip_by_value(q2_id, tf.constant([0], dtype=DTYPEINT), s-3) 114 | 115 | piu = tf.reshape(tf.range(-1, 3, dtype=DTYPEINT), (4, 1)) 116 | corn_q2_id = tf.repeat(tf.reshape(q2_id, (1, -1)), 4, axis=0) + piu 117 | 118 | corn_q2 = tf.gather(padded_q2, corn_q2_id, name="fnk_2") 119 | 120 | A = tf.gather(actual_values, corn_q2_id, name="fnk_3") 121 | 122 | return q2_id, corn_q2, A 123 | -------------------------------------------------------------------------------- /src/pdfflow/region_interpolator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Contains the extrapolation and interpolation functions wrappers for the different regions 3 | """ 4 | 5 | import tensorflow as tf 6 | from pdfflow.configflow import DTYPE, DTYPEINT, float_me, fone 7 | from pdfflow.neighbour_knots import four_neighbour_knots 8 | from pdfflow.interpolations import default_bicubic_interpolation 9 | from pdfflow.interpolations import extrapolate_linear 10 | 11 | INTERPOLATE_SIGNATURE = [ 12 | tf.TensorSpec(shape=[None], dtype=DTYPE), 13 | tf.TensorSpec(shape=[None], dtype=DTYPE), 14 | tf.TensorSpec(shape=[None], dtype=DTYPE), 
15 | tf.TensorSpec(shape=[], dtype=DTYPEINT), 16 | tf.TensorSpec(shape=[None], dtype=DTYPE), 17 | tf.TensorSpec(shape=[], dtype=DTYPEINT), 18 | tf.TensorSpec(shape=[None, None], dtype=DTYPE), 19 | ] 20 | 21 | 22 | @tf.function(input_signature=INTERPOLATE_SIGNATURE) 23 | def interpolate( 24 | a_x, a_q2, padded_x, s_x, padded_q2, s_q2, actual_padded, 25 | ): 26 | """ 27 | Basic Bicubic Interpolation inside the subgrid 28 | Four Neighbour Knots selects grid knots around each query point to 29 | make the interpolation: 4 knots on the x axis and 4 knots on the q2 30 | axis are needed for each point, plus the pdf fvalues there. 31 | Default bicubic interpolation performs the interpolation itself 32 | 33 | Parameters 34 | ---------- 35 | a_x: tf.tensor of shape [None] 36 | query of values of log(x) 37 | a_q2: tf.tensor of shape [None] 38 | query of values of log(q2) 39 | padded_x: tf.tensor of shape [None] 40 | value for all the knots on the x axis 41 | padded with one zero at the beginning and one at the end to 42 | avoid out of range errors when queryingpoints near boundaries 43 | s_x: tf.tensor of shape [] 44 | size of x knots tensor without padding 45 | padded_q2: tf.tensor of shape [None] 46 | value for all the knots on the q2 axis 47 | padded with one zero at the beginning and one at the end to 48 | avoid out of range errors when querying points near boundaries 49 | s_q2: tf.tensor of shape [] 50 | size of q2 knots tensor without padding 51 | actual_padded: tf.tensor of shape [None,None] 52 | pdf values: first axis is the flattened padded (q2,x) grid, 53 | second axis is needed pid column (dimension depends on the query) 54 | """ 55 | x_bins, q2_bins, corn_x, corn_q2, pdf_vals = four_neighbour_knots( 56 | a_x, a_q2, padded_x, padded_q2, actual_padded 57 | ) 58 | 59 | return default_bicubic_interpolation( 60 | a_x, a_q2, x_bins, q2_bins, corn_x, corn_q2, pdf_vals, s_x, s_q2 61 | ) 62 | 63 | 64 | @tf.function(input_signature=INTERPOLATE_SIGNATURE) 65 | def 
lowx_extrapolation( 66 | a_x, a_q2, padded_x, s_x, padded_q2, s_q2, actual_padded, 67 | ): 68 | """ 69 | Extrapolation in low x regime 70 | 71 | Parameters 72 | ---------- 73 | a_x: tf.tensor of shape [None] 74 | query of values of log(x) 75 | a_q2: tf.tensor of shape [None] 76 | query of values of log(q2) 77 | padded_x: tf.tensor of shape [None] 78 | value for all the knots on the x axis 79 | padded with one zero at the beginning and one at the end to 80 | avoid out of range errors when queryingpoints near boundaries 81 | s_x: tf.tensor of shape [] 82 | size of x knots tensor without padding 83 | padded_q2: tf.tensor of shape [None] 84 | value for all the knots on the q2 axis 85 | padded with one zero at the beginning and one at the end to 86 | avoid out of range errors when querying points near boundaries 87 | s_q2: tf.tensor of shape [] 88 | size of q2 knots tensor without padding 89 | actual_padded: tf.tensor of shape [None,None] 90 | pdf values: first axis is the flattened padded(q2,x) grid, 91 | second axis is needed pid column (dimension depends on the query) 92 | """ 93 | corn_x = padded_x[1:3] 94 | s = tf.size(a_x, out_type=DTYPEINT) 95 | 96 | x, q2 = tf.meshgrid(corn_x, a_q2, indexing="ij") 97 | 98 | y = interpolate( 99 | tf.reshape(x, [-1]), 100 | tf.reshape(q2, [-1]), 101 | padded_x, 102 | s_x, 103 | padded_q2, 104 | s_q2, 105 | actual_padded, 106 | ) 107 | 108 | return extrapolate_linear(a_x, corn_x[0], corn_x[1], y[:s], y[s:]) 109 | 110 | 111 | @tf.function(input_signature=INTERPOLATE_SIGNATURE) 112 | def lowq2_extrapolation( 113 | a_x, a_q2, padded_x, s_x, padded_q2, s_q2, actual_padded, 114 | ): 115 | """ 116 | Extrapolation in low q2 regime 117 | 118 | Parameters 119 | ---------- 120 | a_x: tf.tensor of shape [None] 121 | query of values of log(x) 122 | a_q2: tf.tensor of shape [None] 123 | query of values of log(q2) 124 | padded_x: tf.tensor of shape [None] 125 | value for all the knots on the x axis 126 | padded with one zero at the beginning and 
one at the end to 127 | avoid out of range errors when queryingpoints near boundaries 128 | s_x: tf.tensor of shape [] 129 | size of x knots tensor without padding 130 | padded_q2: tf.tensor of shape [None] 131 | value for all the knots on the q2 axis 132 | padded with one zero at the beginning and one at the end to 133 | avoid out of range errors when querying points near boundaries 134 | s_q2: tf.tensor of shape [] 135 | size of q2 knots tensor without padding 136 | actual_padded: tf.tensor of shape [None,None] 137 | pdf values: first axis is the flattened padded (q2,x) grid, 138 | second axis is needed pid column (dimension depends on the query) 139 | """ 140 | 141 | corn_q2 = tf.stack([padded_q2[1], 1.01 * padded_q2[1]], 0) 142 | 143 | x, q2 = tf.meshgrid(a_x, corn_q2) 144 | 145 | s = tf.size(a_x, out_type=DTYPEINT) 146 | 147 | fq2Min = interpolate( 148 | tf.reshape(x, [-1]), 149 | tf.reshape(q2, [-1]), 150 | padded_x, 151 | s_x, 152 | padded_q2, 153 | s_q2, 154 | actual_padded, 155 | ) 156 | 157 | fq2Min1 = fq2Min[s:] 158 | fq2Min = fq2Min[:s] 159 | 160 | a_q2 = tf.math.exp(a_q2) 161 | corn_q2 = tf.math.exp(corn_q2[:1]) 162 | 163 | mask = tf.math.abs(fq2Min) >= 1e-5 164 | anom = tf.where( 165 | mask, tf.maximum(float_me(-2.5), (fq2Min1 - fq2Min) / fq2Min / 0.01), fone 166 | ) 167 | corn_q2 = tf.expand_dims(corn_q2, 1) 168 | a_q2 = tf.expand_dims(a_q2, 1) 169 | 170 | return fq2Min * tf.math.pow( 171 | a_q2 / corn_q2, anom * a_q2 / corn_q2 + 1.0 - a_q2 / corn_q2 172 | ) 173 | 174 | 175 | @tf.function(input_signature=INTERPOLATE_SIGNATURE) 176 | def highq2_extrapolation( 177 | a_x, a_q2, padded_x, s_x, padded_q2, s_q2, actual_padded, 178 | ): 179 | """ 180 | Extrapolation in high q2 regime 181 | 182 | Parameters 183 | ---------- 184 | a_x: tf.tensor of shape [None] 185 | query of values of log(x) 186 | a_q2: tf.tensor of shape [None] 187 | query of values of log(q2) 188 | padded_x: tf.tensor of shape [None] 189 | value for all the knots on the x axis 190 | padded 
with one zero at the beginning and one at the end to 191 | avoid out of range errors when queryingpoints near boundaries 192 | s_x: tf.tensor of shape [] 193 | size of x knots tensor without padding 194 | padded_q2: tf.tensor of shape [None] 195 | value for all the knots on the q2 axis 196 | padded with one zero at the beginning and one at the end to 197 | avoid out of range errors when querying points near boundaries 198 | s_q2: tf.tensor of shape [] 199 | size of q2 knots tensor without padding 200 | actual_padded: tf.tensor of shape [None,None] 201 | pdf values: first axis is the flattened padded (q2,x) grid, 202 | second axis is needed pid column (dimension depends on the query) 203 | """ 204 | corn_q2 = padded_q2[-2:-4:-1] 205 | 206 | x, q2 = tf.meshgrid(a_x, corn_q2) 207 | s = tf.size(a_x, out_type=DTYPEINT) 208 | 209 | y = interpolate( 210 | tf.reshape(x, [-1]), 211 | tf.reshape(q2, [-1]), 212 | padded_x, 213 | s_x, 214 | padded_q2, 215 | s_q2, 216 | actual_padded, 217 | ) 218 | 219 | return extrapolate_linear(a_q2, corn_q2[0], corn_q2[1], y[:s], y[s:]) 220 | 221 | 222 | @tf.function(input_signature=INTERPOLATE_SIGNATURE) 223 | def lowx_highq2_extrapolation( 224 | a_x, a_q2, padded_x, s_x, padded_q2, s_q2, actual_padded, 225 | ): 226 | """ 227 | Extrapolation in high q2, low x regime 228 | 229 | Parameters 230 | ---------- 231 | a_x: tf.tensor of shape [None] 232 | query of values of log(x) 233 | a_q2: tf.tensor of shape [None] 234 | query of values of log(q2) 235 | padded_x: tf.tensor of shape [None] 236 | value for all the knots on the x axis 237 | padded with one zero at the beginning and one at the end to 238 | avoid out of range errors when queryingpoints near boundaries 239 | s_x: tf.tensor of shape [] 240 | size of x knots tensor without padding 241 | padded_q2: tf.tensor of shape [None] 242 | value for all the knots on the q2 axis 243 | padded with one zero at the beginning and one at the end to 244 | avoid out of range errors when querying points 
near boundaries 245 | s_q2: tf.tensor of shape [] 246 | size of q2 knots tensor without padding 247 | actual_padded: tf.tensor of shape [None,None] 248 | pdf values: first axis is the flattened padded (q2,x) grid, 249 | second axis is needed pid column (dimension depends on the query) 250 | """ 251 | 252 | corn_x = padded_x[1:3] 253 | corn_q2 = padded_q2[-2:-4:-1] 254 | 255 | x, q2 = tf.meshgrid(corn_x, corn_q2) 256 | 257 | f = interpolate( 258 | tf.reshape(x, [-1]), 259 | tf.reshape(q2, [-1]), 260 | padded_x, 261 | s_x, 262 | padded_q2, 263 | s_q2, 264 | actual_padded, 265 | ) 266 | 267 | fxMin = extrapolate_linear(a_q2, corn_q2[0], corn_q2[1], f[:1], f[2:3]) 268 | 269 | fxMin1 = extrapolate_linear(a_q2, corn_q2[0], corn_q2[1], f[1:2], f[3:]) 270 | 271 | return extrapolate_linear(a_x, corn_x[0], corn_x[1], fxMin, fxMin1) 272 | 273 | 274 | @tf.function(input_signature=INTERPOLATE_SIGNATURE) 275 | def lowx_lowq2_extrapolation( 276 | a_x, a_q2, padded_x, s_x, padded_q2, s_q2, actual_padded, 277 | ): 278 | """ 279 | Extrapolation in low q2, low x regime 280 | 281 | Parameters 282 | ---------- 283 | a_x: tf.tensor of shape [None] 284 | query of values of log(x) 285 | a_q2: tf.tensor of shape [None] 286 | query of values of log(q2) 287 | padded_x: tf.tensor of shape [None] 288 | value for all the knots on the x axis 289 | padded with one zero at the beginning and one at the end to 290 | avoid out of range errors when queryingpoints near boundaries 291 | s_x: tf.tensor of shape [] 292 | size of x knots tensor without padding 293 | padded_q2: tf.tensor of shape [None] 294 | value for all the knots on the q2 axis 295 | padded with one zero at the beginning and one at the end to 296 | avoid out of range errors when querying points near boundaries 297 | s_q2: tf.tensor of shape [] 298 | size of q2 knots tensor without padding 299 | actual_padded: tf.tensor of shape [None,None] 300 | pdf values: first axis is the flattened padded (q2,x) grid, 301 | second axis is needed pid 
column (dimension depends on the query) 302 | """ 303 | corn_x = padded_x[1:3] 304 | corn_q2 = tf.stack([padded_q2[1], padded_q2[1]], 0) 305 | 306 | f = interpolate( 307 | tf.concat([corn_x, corn_x], 0), 308 | tf.concat([corn_q2, 1.01 * corn_q2], 0), 309 | padded_x, 310 | s_x, 311 | padded_q2, 312 | s_q2, 313 | actual_padded, 314 | ) 315 | 316 | fq2Min = extrapolate_linear(a_x, corn_x[0], corn_x[1], f[:1], f[1:2]) 317 | 318 | fq2Min1 = extrapolate_linear(a_x, corn_x[0], corn_x[1], f[2:3], f[3:]) 319 | 320 | a_q2 = tf.expand_dims(tf.math.exp(a_q2), 1) 321 | corn_q2 = tf.math.exp(corn_q2[0]) 322 | 323 | mask = tf.math.abs(fq2Min) >= 1e-5 324 | anom = tf.where( 325 | mask, tf.maximum(float_me(-2.5), (fq2Min1 - fq2Min) / fq2Min / 0.01), fone 326 | ) 327 | 328 | factor = tf.math.pow(a_q2 / corn_q2, anom * a_q2 / corn_q2 + 1.0 - a_q2 / corn_q2) 329 | 330 | return fq2Min * factor 331 | -------------------------------------------------------------------------------- /src/pdfflow/subgrid.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the Subgrid main class. 3 | 4 | Upon instantiation the subgrid class will select one of the 5 | interpolation functions defined in :py:module:`functions` and 6 | compile it using the `GRID_FUNCTION_SIGNATURE` defined in this function. 
import numpy as np
import tensorflow as tf
from pdfflow.configflow import DTYPE, DTYPEINT, float_me, int_me
from pdfflow.functions import inner_subgrid
from pdfflow.functions import first_subgrid
from pdfflow.functions import last_subgrid
from pdfflow.alphas_functions import alphas_first_subgrid
from pdfflow.alphas_functions import alphas_inner_subgrid
from pdfflow.alphas_functions import alphas_last_subgrid

# Compilation signature and options of the subgrid functions
GRID_FUNCTION_SIGNATURE = [
    tf.TensorSpec(shape=[2], dtype=DTYPEINT),  # shape
    tf.TensorSpec(shape=[None], dtype=DTYPE),  # a_x
    tf.TensorSpec(shape=[None], dtype=DTYPE),  # a_q2
    tf.TensorSpec(shape=[], dtype=DTYPE),  # xmin
    tf.TensorSpec(shape=[], dtype=DTYPE),  # xmax
    tf.TensorSpec(shape=[None], dtype=DTYPE),  # padded_x
    tf.TensorSpec(shape=[], dtype=DTYPEINT),  # s_x
    tf.TensorSpec(shape=[], dtype=DTYPE),  # q2min
    tf.TensorSpec(shape=[], dtype=DTYPE),  # q2max
    tf.TensorSpec(shape=[None], dtype=DTYPE),  # padded_q2
    tf.TensorSpec(shape=[], dtype=DTYPEINT),  # s_q2
    tf.TensorSpec(shape=[None, None], dtype=DTYPE),  # grid
]

ALPHAS_GRID_FUNCTION_SIGNATURE = [
    tf.TensorSpec(shape=[], dtype=DTYPEINT),  # shape
    tf.TensorSpec(shape=[None], dtype=DTYPE),  # a_q2
    tf.TensorSpec(shape=[], dtype=DTYPE),  # q2min
    tf.TensorSpec(shape=[], dtype=DTYPE),  # q2max
    tf.TensorSpec(shape=[None], dtype=DTYPE),  # padded_q2
    tf.TensorSpec(shape=[], dtype=DTYPEINT),  # s_q2
    tf.TensorSpec(shape=[None], dtype=DTYPE),  # grid
]

AUTOGRAPH_OPT = tf.autograph.experimental.Feature.ALL
OPT = {
    "experimental_autograph_options": AUTOGRAPH_OPT,
    "input_signature": GRID_FUNCTION_SIGNATURE,
}


class Subgrid(tf.Module):
    """
    Wrapper class around subgrids.
    This class reads the LHAPDF grid, parses it and stores all necessary
    information as tensorflow tensors.

    Saving this information as tf.tensors allows to reuse tf.function compiled function
    with no retracing.

    Note:
        the x and q2 arrays are padded with an extra value next to the boundaries
        to avoid out of bound errors driven by numerical precision
        The size we save for the arrays correspond to the size of the arrays before padding

    Parameters
    ----------
        grid: collections.namedtuple
            tuple containing (x, q2, flav, grid)
            which correspond to the x and q2 arrays
            the flavour scheme and the pdf grid values respectively
        i: int
            index of the subgrid
        total: int
            total number of subgrids of the family
        compile_functions: bool
            whether to tf-compile the interpolation function(default True)
        alpha_s: bool
            whether the function to compile is for a PDF grid or an alpha_s grid

    Attributes
    ----------
        log(x)
        log(Q2)
        Q2
        values of the pdf grid
    """

    def __init__(self, grid, i=0, total=0, compile_functions=True, alpha_s=False):
        # Unique module name: grid index, with an "_alpha" suffix for
        # alpha_s grids
        name_sg = f"grid_{i}"
        self.alpha_s = alpha_s
        if alpha_s:
            name_sg += "_alpha"
        self.name_sg = name_sg
        super().__init__(name=f"Parent_{name_sg}")
        q2min = min(grid.q2)
        q2max = max(grid.q2)
        self.log_q2min = float_me(np.log(q2min))
        self.log_q2max = float_me(np.log(q2max))

        # Save grid shape information (size before padding)
        self.s_q2 = int_me(grid.q2.size)

        # Insert a padding at the beginning and the end,
        # then nudge the pad values off the edge knots (0.99/1.01 factors)
        # so boundary queries never fall outside the padded array
        log_q2pad = np.pad(np.log(grid.q2), 1, mode="edge")
        log_q2pad[0] *= 0.99
        log_q2pad[-1] *= 1.01

        self.padded_q2 = float_me(log_q2pad)

        # Depending on whether it is an alpha_s grid or a pdf grid
        # we might need to change some options

        compilation_options = OPT.copy()

        if alpha_s:
            # the grid is sized (q.size), pad it with 0s
            self.padded_grid = float_me(np.pad(grid.grid, (1, 1)))

            # First/last subgrids need dedicated extrapolation regions
            if i == 0:
                self.fn_interpolation = alphas_first_subgrid
            elif i == (total - 1):
                self.fn_interpolation = alphas_last_subgrid
            else:
                self.fn_interpolation = alphas_inner_subgrid

            # Change the function signature to that of alpha_s
            compilation_options["input_signature"] = ALPHAS_GRID_FUNCTION_SIGNATURE
        else:
            # If this is a pdf grid, save also the x information
            xmin = min(grid.x)
            xmax = max(grid.x)
            self.log_xmin = float_me(np.log(xmin))
            self.log_xmax = float_me(np.log(xmax))

            self.s_x = int_me(grid.x.size)

            # Same padding scheme as for q2 above
            log_xpad = np.pad(np.log(grid.x), 1, mode="edge")
            log_xpad[0] *= 0.99
            log_xpad[-1] *= 1.01

            self.padded_x = float_me(log_xpad)

            # Finally parse the grid
            # the grid is sized (x.size * q.size, flavours)
            reshaped_grid = grid.grid.reshape(grid.x.size, grid.q2.size, -1)

            # and pad it with 0s in x and q
            padded_grid = np.pad(reshaped_grid, ((1, 1), (1, 1), (0, 0)))
            # flatten the x and q dimensions again and store it
            self.padded_grid = float_me(padded_grid.reshape(-1, grid.flav.size))

            # Depending on the index of the grid, select which interpolation function should be run
            if i == 0:
                self.fn_interpolation = first_subgrid
            elif i == (total - 1):
                self.fn_interpolation = last_subgrid
            else:
                self.fn_interpolation = inner_subgrid


        if compile_functions:
            self.fn_interpolation = tf.function(self.fn_interpolation, **compilation_options)

    def __call__(self, shape, arr_q2, pids=None, arr_x=None):
        """Evaluate the subgrid interpolation.

        For alpha_s grids only ``shape`` and ``arr_q2`` are accepted;
        for pdf grids the flavour columns selected by ``pids`` are
        gathered from the stored grid before interpolating.

        Raises
        ------
            ValueError: if ``pids`` or ``arr_x`` are given for an
            alpha_s grid
        """
        if self.alpha_s:
            if pids is not None or arr_x is not None:
                raise ValueError("alpha_s interpolation does not accept x-input or flavours")

            result = self.fn_interpolation(
                shape,
                arr_q2,
                self.log_q2min,
                self.log_q2max,
                self.padded_q2,
                self.s_q2,
                self.padded_grid,
            )
        else:
            # Select only the queried flavour columns of the stored grid
            padded_grid = tf.gather(self.padded_grid, pids, axis=-1, name=self.name_sg)
            result = self.fn_interpolation(
                shape,
                arr_x,
                arr_q2,
                self.log_xmin,
                self.log_xmax,
                self.padded_x,
                self.s_x,
                self.log_q2min,
                self.log_q2max,
                self.padded_q2,
                self.s_q2,
                padded_grid,
            )
        return result
# Set up the PDF
LIST_PDF = ["NNPDF31_nnlo_as_0118", "cteq61"]
MEMBERS = 2
DIRNAME = environment.datapath

# Make sure every set used below is locally available
for pdfset in LIST_PDF:
    install_lhapdf(pdfset)

# Q ranges (low, high) probed by the accuracy checks
QS = [(1, 10), (100, 10000), (10, 100)]

# utilities
def gen_q2(qmin, qmax):
    """Draw SIZE uniformly distributed q2 points inside [qmin, qmax)."""
    width = qmax - qmin
    return qmin + width * np.random.rand(SIZE)


def get_alphavals(q2arr, pdfset, sq2=False):
    """Evaluate alpha_s with LHAPDF for every entry of q2arr.

    When ``sq2`` is True the input is interpreted as Q^2, otherwise as Q.
    """
    reference_pdf = lhapdf.mkPDF(pdfset)
    alpha_fn = reference_pdf.alphasQ2 if sq2 else reference_pdf.alphasQ
    return np.array([alpha_fn(iq) for iq in q2arr])


def test_accuracy_alphas(atol=1e-6):
    """Check the accuracy for all PDF sets for all members given
    when computing alpha_s given Q is compatible within atol
    between pdfflow and LHAPDF.
    This test runs eagerly
    """
    run_eager(True)
    for set_name in LIST_PDF:
        for member in range(MEMBERS):
            pdf_id = f"{set_name}/{member}"
            logger.info(" > Checking %s", pdf_id)
            pflow = pdf.mkPDF(pdf_id, f"{DIRNAME}/")
            for q_low, q_high in QS:
                q_low = max(q_low, pflow.q2min)
                q_high = min(q_high, pflow.q2max)
                points = gen_q2(q_low, q_high)
                logger.info(" Q2 from %f to %f", q_low, q_high)
                flow_values = pflow.py_alphasQ(points)
                lhapdf_values = get_alphavals(points, pdf_id, sq2=False)
                np.testing.assert_allclose(flow_values, lhapdf_values, atol=atol)
    run_eager(False)


def test_alphas_q2(atol=1e-6):
    """Check the accuracy for all PDF sets for all members given
    when computing alpha_s given Q is compatible within atol
    between pdfflow and LHAPDF
    This test does not run eagerly
    """
    for set_name in LIST_PDF:
        for member in range(MEMBERS):
            pdf_id = f"{set_name}/{member}"
            logger.info(" > Checking %s", pdf_id)
            pflow = pdf.mkPDF(pdf_id, f"{DIRNAME}/")
            for q_low, q_high in QS:
                q_low = max(q_low, pflow.q2min)
                q_high = min(q_high, pflow.q2max)
                points = gen_q2(q_low, q_high)
                logger.info(" Q2 from %f to %f", q_low, q_high)
                flow_values = pflow.py_alphasQ2(points)
                lhapdf_values = get_alphavals(points, pdf_id, sq2=True)
                np.testing.assert_allclose(flow_values, lhapdf_values, atol=atol)


def test_alpha_trace():
    """Check that the alpha_s can be traced and then instantiated"""
    # Make sure nothing runs eagerly for this check
    run_eager(False)
    set_name = LIST_PDF[0]
    # A single central replica first...
    single_pdf = pdf.mkPDF(f"{set_name}/0", f"{DIRNAME}/")
    single_pdf.alphas_trace()
    # ...then a multi-member instance
    multi_pdf = pdf.mkPDFs(set_name, [0, 1, 2])
    multi_pdf.alphas_trace()


if __name__ == "__main__":
    test_alpha_trace()
    importlib.reload(pdfflow.configflow)


def test_int_env():
    """Check that PDFFLOW_INT switches the integer dtype between int32 and int64."""
    os.environ["PDFFLOW_INT"] = "32"
    importlib.reload(pdfflow.configflow)
    from pdfflow.configflow import DTYPEINT

    assert DTYPEINT.as_numpy_dtype == np.int32
    os.environ["PDFFLOW_INT"] = "64"
    importlib.reload(pdfflow.configflow)
    from pdfflow.configflow import DTYPEINT

    assert DTYPEINT.as_numpy_dtype == np.int64
    # Reset to default
    os.environ["PDFFLOW_INT"] = "32"
    importlib.reload(pdfflow.configflow)


# ---------------------------------------------------------------------------
# File: src/pdfflow/tests/test_lhapdf.py
# ---------------------------------------------------------------------------
"""
Ensures pdfflow produces results which are compatible with lhpdf
this code is made to run eagerly as @tf.function is tested by test_pflow
running eagerly means less overhead on the CI which is running on CPU
"""
import pdfflow.pflow as pdf
import logging
from lhapdf_management import pdf_install
from lhapdf_management.configuration import environment

logger = logging.getLogger("pdfflow.test")

import os

# Run tests in CPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import numpy as np

try:
    import lhapdf
except ModuleNotFoundError as e:
    raise ModuleNotFoundError("Tests against lhapdf need an installation of LHAPDF")


SIZE = 200

# Set up the PDF
LIST_PDF = [
    "PDF4LHC15_nnlo_100",
    "NNPDF31_nlo_as_0118",  # some problem for the first bin
    "MSTW2008lo68cl_nf3",
    "NNPDF30_nnlo_as_0121_nf_6",
    "cteq61",
]
MEMBERS = 2
FLAVS = list(range(-3, 4))
FLAVS[FLAVS.index(0)] = 21
DIRNAME = environment.datapath

# Utility to install lhapdf sets
def install_lhapdf(pdfset):
    # Try to load the set with LHAPDF first; only download it when loading fails
    try:
        lhapdf.mkPDF(pdfset)
    except RuntimeError:
        pdf_install(pdfset)
Install the pdfs if they don't exist 49 | for pdfset in LIST_PDF: 50 | install_lhapdf(pdfset) 51 | 52 | # Set up the xarr 53 | XARR = np.random.rand(SIZE) 54 | # ensure there is at least a point with a very low x 55 | XARR[0] = 1e-10 56 | 57 | # Set up the Q2 arr 58 | QS = [(1, 10), (100, 10000), (10, 100)] 59 | 60 | # utilities 61 | def gen_q2(qmin, qmax): 62 | """generate an array of q2 between qmin and qmax""" 63 | return np.random.rand(SIZE) * (qmax - qmin) + qmin 64 | 65 | 66 | def dict_update(old_dict, new_dict): 67 | if not old_dict: 68 | for key, item in new_dict.items(): 69 | old_dict[key] = [item] 70 | else: 71 | for key, item in new_dict.items(): 72 | old_dict[key].append(item) 73 | 74 | 75 | def get_pdfvals(xarr, qarr, pdfset): 76 | """Get the pdf values from LHAPDF""" 77 | lhapdf_pdf = lhapdf.mkPDF(pdfset) 78 | res = {} 79 | for x, q in zip(xarr, qarr): 80 | dict_update(res, lhapdf_pdf.xfxQ2(x, q)) 81 | return res 82 | 83 | 84 | def test_accuracy(atol=1e-6): 85 | """Check the accuracy for all PDF sets for all members 86 | in the lists LIST_PDF and MEMBERS 87 | for all defined ranges of Q for all flavours 88 | is better than atol. 
89 | 90 | This test doesnt care about Q extrapolation 91 | """ 92 | import tensorflow as tf 93 | 94 | tf.config.experimental_run_functions_eagerly(True) 95 | for setname in LIST_PDF: 96 | for member in range(MEMBERS): 97 | pdfset = f"{setname}/{member}" 98 | logger.info(" > Checking %s", pdfset) 99 | pdfflow = pdf.mkPDF(pdfset, f"{DIRNAME}/") 100 | for qi, qf in QS: 101 | # Dont test extrapolation 102 | qi = max(qi, pdfflow.q2min) 103 | qf = min(qf, pdfflow.q2max) 104 | q2arr = gen_q2(qi, qf) 105 | logger.info(" Q2 from %f to %f", qi, qf) 106 | flow_values = pdfflow.py_xfxQ2(FLAVS, XARR, q2arr) 107 | lhapdf_values = get_pdfvals(XARR, q2arr, pdfset) 108 | for i, f in enumerate(FLAVS): 109 | np.testing.assert_allclose(flow_values[:, i], lhapdf_values[f], atol=atol) 110 | tf.config.experimental_run_functions_eagerly(False) 111 | 112 | 113 | if __name__ == "__main__": 114 | test_accuracy() 115 | -------------------------------------------------------------------------------- /src/pdfflow/tests/test_pflow.py: -------------------------------------------------------------------------------- 1 | """ 2 | Checks pdflow can run with no errors 3 | 4 | This file also checks that functions can indeed compile 5 | 6 | Note that this test file does not install LHAPDF and (should) 7 | use a PDF set that is not installed by any other test. 
This ensures that pdfflow can indeed run independently of LHAPDF
"""
from pdfflow.pflow import mkPDF, mkPDFs
from pdfflow.configflow import run_eager, int_me, float_me
import logging

logger = logging.getLogger("pdfflow.test")
import os
import subprocess as sp
import numpy as np

# Run tests in CPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import tensorflow as tf

PDFNAME = "NNPDF31_nnlo_as_0118"


def pdfflow_tester(pdf, members=None):
    """Test several pdfflow features:
    - Check the single/many/all-pid signatures
    - Checks the python and TF signatures
    - Checks the output of the python and TF signatures are the same
    - Checks the expected shape of the signatures is correct

    When ``members`` is given, results are expected to carry a leading
    member axis of that size; otherwise a single-member shape is expected.
    """
    grid_size = 7
    x = np.random.rand(grid_size)
    q2 = 1000.0 * np.random.rand(grid_size)
    xtf = float_me(x)
    q2tf = float_me(q2)
    # Check I can get just one pid
    for i in range(-1, 2):
        # as int
        res_1 = pdf.py_xfxQ2(i, x, q2)
        # as list
        res_2 = pdf.py_xfxQ2([i], x, q2)
        np.testing.assert_allclose(res_1, res_2)
        # as tf objects
        tfpid = int_me([i])
        res_3 = pdf.xfxQ2(tfpid, xtf, q2tf)
        np.testing.assert_allclose(res_2, res_3)

        # Check shape
        if members is None:
            assert res_1.numpy().shape == (grid_size,)
        else:
            assert res_1.numpy().shape == (
                members,
                grid_size,
            )

    # Check I can get more than one pid
    nfl_size = 6
    fl_scheme = pdf.flavor_scheme.numpy()
    nfl_total = fl_scheme.size
    # Random selection of flavours from the pdf's own flavour scheme
    many_pid = np.random.choice(fl_scheme, nfl_size)

    res_1 = pdf.py_xfxQ2(many_pid, x, q2)
    res_2 = pdf.xfxQ2(int_me(many_pid), xtf, q2tf)
    np.testing.assert_allclose(res_1, res_2)
    # Check shape
    if members is None:
        assert res_1.numpy().shape == (grid_size, nfl_size)
    else:
        assert res_1.numpy().shape == (members, grid_size, nfl_size)

    # Check I can actually get all PID
    res_1 = pdf.py_xfxQ2_allpid(x, q2)
    res_2 = pdf.xfxQ2_allpid(xtf, q2tf)

    np.testing.assert_allclose(res_1, res_2)
    # Check shape
    if members is None:
        assert res_1.numpy().shape == (grid_size, nfl_total)
    else:
        assert res_1.numpy().shape == (members, grid_size, nfl_total)


def test_onemember():
    """Test the one-central-member of pdfflow"""
    # Check the central member
    pdf = mkPDF(f"{PDFNAME}/0")
    pdfflow_tester(pdf)
    # Try a non-central member, but trace first
    # Ensure it is not running eagerly
    run_eager(False)
    pdf = mkPDF(f"{PDFNAME}/1")
    pdf.trace()
    pdfflow_tester(pdf)


def test_multimember():
    """Test the multi-member capabilities of pdfflow"""
    run_eager(False)
    members = 5
    pdf = mkPDFs(PDFNAME, range(members))
    assert pdf.nmembers == members
    pdf.trace()
    assert len(pdf.active_members) == members
    pdfflow_tester(pdf, members=members)


def test_one_multi():
    """Test that the multimember-I is indeed the same as just the Ith instance"""
    run_eager(True)
    pdf = mkPDF(f"{PDFNAME}/0")
    # member 0 sits at index 1 of the multi-member instance below
    multi_pdf = mkPDFs(PDFNAME, [4, 0, 6])
    grid_size = 4
    x = np.random.rand(grid_size)
    q2 = np.random.rand(grid_size) * 1000.0
    res_1 = pdf.py_xfxQ2_allpid(x, q2)
    res_2 = multi_pdf.py_xfxQ2_allpid(x, q2)
    np.testing.assert_allclose(res_1, res_2[1])


if __name__ == "__main__":
    test_one_multi()