├── .coveragerc ├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── help-request.md ├── .gitignore ├── .readthedocs.yml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── CONTRIBUTORS.rst ├── COPYING ├── LICENSE ├── MANIFEST.in ├── PyPWA ├── __init__.py ├── info.py ├── libs │ ├── __init__.py │ ├── binning.py │ ├── common.py │ ├── file │ │ ├── __init__.py │ │ ├── cache.py │ │ ├── misc.py │ │ └── processor │ │ │ ├── __init__.py │ │ │ ├── main.py │ │ │ └── templates.py │ ├── fit │ │ ├── __init__.py │ │ ├── likelihoods.py │ │ ├── mcmc.py │ │ └── minuit.py │ ├── plotting.py │ ├── process.py │ ├── simulate.py │ └── vectors │ │ ├── __init__.py │ │ ├── _base_vector.py │ │ ├── four_vector.py │ │ ├── particle.py │ │ └── three_vector.py ├── plugins │ ├── __init__.py │ ├── _load.py │ └── data │ │ ├── __init__.py │ │ ├── gamp.py │ │ ├── kv.py │ │ ├── numpy.py │ │ ├── pgz.py │ │ └── sv.py └── progs │ ├── __init__.py │ └── masking.py ├── README.md ├── anaconda-environment.yml ├── dev-environment.yml ├── docs ├── Makefile ├── requirements.txt └── source │ ├── CHANGELOG.md │ ├── about.rst │ ├── conf.py │ ├── examples │ ├── 2Dgauss.ipynb │ ├── 2Dgauss_mcmc.ipynb │ ├── NewMinuit.ipynb │ ├── demo_JPAC_fit.ipynb │ ├── demo_JPAC_pre.ipynb │ └── demo_JPAC_sim.ipynb │ ├── index.rst │ ├── installing.rst │ ├── pypwa.bib │ └── references │ ├── data.rst │ ├── plotting.rst │ └── sim_fit.rst ├── helper ├── setup.cfg ├── setup.py └── tests ├── conftest.py ├── libs ├── file │ ├── test_misc.py │ └── test_processor.py ├── test_binning.py ├── test_process.py └── vectors │ ├── test_basic_vectors.py │ └── test_particle.py ├── plugins ├── data │ ├── conftest.py │ ├── test_gamp.py │ ├── test_kv.py │ ├── test_numpy.py │ ├── test_pgz.py │ └── test_sv.py └── test_load.py ├── system_tests ├── test_2dgauss.py └── test_masking_prog.py ├── test_data ├── docs │ ├── bad_set.csv │ ├── bad_set.kvars │ ├── bad_set.pf │ ├── bad_set.txt │ ├── configuration.yml │ ├── large.gamp │ ├── multiple.gamp 
│ ├── program_data │ │ ├── data │ │ │ ├── data.csv │ │ │ ├── internal_names.csv │ │ │ ├── monte_carlo.csv │ │ │ └── qfactor.txt │ │ └── rho │ │ │ ├── RHOfit │ │ │ ├── RHOint │ │ │ ├── RHOint_intensities.txt │ │ │ ├── RHOsim │ │ │ ├── RHOw.csv │ │ │ ├── RHOweg.json │ │ │ ├── flat_data.csv │ │ │ └── testRHO.py │ ├── set1.csv │ ├── set1.kvars │ ├── set1.npy │ ├── set1.pf │ ├── set1.tsv │ ├── set1.txt │ ├── set2.csv │ ├── set2.kvars │ ├── set2.npy │ ├── set2.pf │ ├── set2.tsv │ └── set2.txt └── source_files │ ├── functions_without_math.py │ ├── simple_option_object.py │ └── simple_prior.py └── test_sanity.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | omit = 3 | */python?.?/* 4 | */site-packages/nose/* 5 | */tests/* 6 | *.pyi 7 | PyPWA/libs/logger.py 8 | 9 | exclude_lines = 10 | raise NotImplementedError 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve PyPWA 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **System Information:** 20 | - OS: [e.g. Redhat 7] 21 | - Python Version: 22 | - Anaconda, System, or Virtualenv: 23 | - PyPWA Version: 24 | - Personal Machine, JLab, or other (Explain): 25 | 26 | **Additional context** 27 | Add any other context about the problem here. 
28 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for PyPWA 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Additional information** 17 | Add any other context or screenshots about the feature request here. 18 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/help-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Help Request 3 | about: Get help from us for your PyPWA use-case 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **What is your use-case, what are you analyzing?** 11 | A clear and concise description of what problem you're trying to solve. 12 | 13 | **Describe in detail what you are having issues with.** 14 | Are you having issues loading data? Are you getting unexpected results? 15 | 16 | **Additional information** 17 | Add any other context or screenshots about the feature request here. 
18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | .pytest_cache/ 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | .eggs/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # Limbo Distribution / packaging 28 | limbo/.Python 29 | limbo/env/ 30 | limbo/build/ 31 | limbo/develop-eggs/ 32 | limbo/dist/ 33 | limbo/downloads/ 34 | limbo/eggs/ 35 | limbo/lib/ 36 | limbo/lib64/ 37 | limbo/parts/ 38 | limbo/sdist/ 39 | limbo/var/ 40 | limbo/.eggs/ 41 | limbo/*.egg-info/ 42 | limbo/.installed.cfg 43 | limbo/*.egg 44 | 45 | # PyInstaller 46 | # Usually these files are written by a python script from a template 47 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
48 | *.manifest 49 | *.spec 50 | 51 | # Installer logs 52 | pip-log.txt 53 | pip-delete-this-directory.txt 54 | 55 | # Unit test / coverage reports 56 | htmlcov/ 57 | .tox/ 58 | .coverage 59 | .cache 60 | nosetests.xml 61 | coverage.xml 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | docs/build/ 73 | /docs/output/ 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | # MyPy 79 | /.mypy_cache/ 80 | 81 | # Jupyter 82 | .ipynb_checkpoints/ 83 | 84 | # Vscode 85 | .vscode/ 86 | 87 | # PyCharm 88 | .idea/ 89 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | sphinx: 4 | configuration: docs/source/conf.py 5 | 6 | formats: 7 | - htmlzip 8 | - epub 9 | - pdf 10 | 11 | python: 12 | version: 3.8 13 | install: 14 | - requirements: docs/requirements.txt 15 | - method: pip 16 | path: . 17 | extra_requirements: 18 | - torch 19 | - emcee 20 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thank you for contributing to PyPWA! With the help of people like you, 4 | we can make PyPWA a great Python based Partial Wave Analysis and High 5 | Energy Physics toolkit. 6 | 7 | ## Where to start? 8 | 9 | You don't need to have any physics or programming experience to start 10 | contributing to PyPWA. If you are a physists who has never written code, 11 | a software developer with little physics background, or even a new 12 | student with little experience in either, we have projects that you can 13 | help us with. 14 | 15 | If you want to help, or get help, lets us know in the Issues section! 
16 | 17 | ## Filing issues, Feature Requests, or requesting help 18 | 19 | If you have a feature request, discovered a bug, a flaw in the 20 | documentation, or a use-case where the toolkit runs slowly, please let 21 | us know in the Issue tracker on Github. 22 | 23 | When you are making a new issue on Github, you'll have a choice between 24 | filing a bug report, feature request, or help request. Select which one 25 | you want, and we will get reply as quickly as we can. 26 | 27 | ## Developing for PyPWA 28 | 29 | PyPWA follows the PEP standard, which if you're using PyCharm the IDE 30 | should help you conform to the format if you're new to it. We also 31 | write all our documenation in numpydoc. 32 | 33 | Any functions or objects that are meant to be used by the users should 34 | be imported directly into PyPWA/__init__.py and should be thoroughly 35 | documented including examples. If it's an internal function however, it 36 | should be moreso documentated in the code itself. 37 | 38 | ### Forking and branching 39 | 40 | If you are working directly with us, and have access to the repository, 41 | you will be forking all your branches off of the `development` branch. 42 | All your additions will happen on that branch, and after your branch 43 | passes reviews, you're branch will be merged into the `development` 44 | branch to be included in the next release. 45 | 46 | If you are a contributor from outside Jefferson Lab, please fork PyPWA, 47 | make your changes to the `development` branch, and then submit a pull 48 | request. We'll review your pull request, suggest changes, and then 49 | accept your pull request. 50 | 51 | ### Running the test suite 52 | 53 | We use PyTest for our testing. All code contributed to PyPWA should have 54 | tests included in the fork or branch before it's merged into 55 | `development`. This is so that we can continue to deliver as stable of 56 | an experience as possible. 
We have no strict rules on the tests, but 57 | please do your best to keep them short and concise. 58 | 59 | ### Merging or combining branches (Core Devs Only) 60 | 61 | When you are combining a feature, fix, or documentation branch into 62 | PyPWA, you squash all commits into a single commit, and then merge into 63 | `development` with complete patch notes. The command to do this from a 64 | standard machine is `git merge --squash`. This is to prevent the git 65 | log from being polluted with small uninformative commits, and to keep 66 | tracking changes concise. 67 | 68 | ### Creating a release (Core Devs Only) 69 | 70 | When preparing for a release, a release branch for the new release 71 | should be forked off of master, and all it's documentation should be 72 | updated for the new release version. This should also be used as a final 73 | attempt to catch any bugs that may have yet to been patched. Once all 74 | bugs are patched, documentation is updated, tests passing, that is when 75 | you would merge the branch into both master and development without 76 | squash. It is desirable to maintain the full git log from development 77 | when merging into master. 78 | -------------------------------------------------------------------------------- /CONTRIBUTORS.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _contributors: 3 | 4 | 5 | Team Members 6 | ------------ 7 | 8 | **Current PyPWA Team members** 9 | 10 | - `Dr. Carlos Salgado `_ Norfolk State University 11 | - `Dr. Will Phelps `_ Christopher Newport University 12 | - `Mark Jones `_ VPCC and Old Dominion University 13 | - `Dr. 
Peter Hurck `_ University of Glasgow 14 | 15 | 16 | **Previous PyPWA Team members** 17 | 18 | - `Brandon DeMello `_ Old Dominion University 19 | - `Stephanie Bramlett `_ William and Mary 20 | - `Josh Pond `_ Virginia Peninsula Community College (VPCC) 21 | - `LaRay Hare `_ Norfolk State University 22 | - `Christopher Banks `_ Norfolk State University 23 | - `Michael Harris Jr `_ Norfolk State University 24 | 25 | 26 | **High School Interns** 27 | 28 | - `Ryan Wright `_ Hampton Governor's School for Science and Technology 29 | 30 | - Ran Amplitude benchmarks on the XeonPhi 31 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | * The PyPWA application core, and other portions of the official PyPWA 2 | distribution not explicitly licensed otherwise, are licensed under 3 | the GNU GENERAL PUBLIC LICENSE v3 -- see the 'COPYING' file in this 4 | directory for details. 5 | 6 | * Any research data generated, used, or obtained using this program are not 7 | not required to be released under the GPLv3 or any other Copyleft licences, 8 | however we do request that you cite PyPWA in your research to help promote 9 | the program and our efforts to provide a user friendly and efficient way 10 | to do Partial Wave Analysis. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include CHANGELOG.md 3 | include LICENSE 4 | include COPYING 5 | -------------------------------------------------------------------------------- /PyPWA/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 
4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | Welcome to PyPWA! 21 | ================= 22 | To make using PyPWA easier from IPython or Jupyter, useful modules have 23 | been imported directly into this package so that you can get up and 24 | running as quickly as possible. To know more about the following modules, 25 | use help. i.e. ? read or help(read) 26 | 27 | Fitting and Simulation: 28 | ----------------------- 29 | - NestedFunction: Abstract object that should be used to define whatever 30 | function you want to simulate or fit. 31 | - FunctionAmplitude: Fallback for old functions for PyPWA 2.0, don't 32 | use unless you need. 33 | - monte_carlo_simulation: Function used for rejection sampling. 34 | - simulate.process_user_function: Processes the user function and returns 35 | the functions final values and max value. 36 | - simulate.make_rejection_list: Takes the final values and max values to 37 | produce a rejection list that can be used to mask the source data. 38 | - LogLikelihood: Sets up the log likelihood. Supports both the extended, 39 | binned, and standard likelihood. 40 | - ChiSquared: Sets up the ChiSquared likelihood, supports using working 41 | with expected values or binned 42 | - EmptyLikelihood: Sets up an empty likelihood. 
For use when you want 43 | to use the multiprocessing without a likelihood, or have included 44 | a likelihood directly into your NestedFunction. 45 | - minuit: A wrapper around iminuit to make it easier to use with our 46 | likelihoods. 47 | 48 | Reading and Writing data: 49 | ------------------------- 50 | Note: Data can be loaded and writen with Pandas or Numpy if preferred, 51 | however, read and write support caching which can make subsequent 52 | reads significantly quicker. You can use the caching module separately 53 | though if preferred. 54 | - read: Reads data from a file or path 55 | - write: Writes data from a file or path 56 | - DataType: Enum to select type for get_writer and get_reader 57 | - get_writer: Returns an object that supports writing one event at a time 58 | - get_reader: Returns an object that supports reading one event at a time 59 | - ProjectDatabase: A numerical database based off of HDF5 that allows for 60 | working with data larger than memory. Only recommended if you have 61 | to use it. 62 | - to_contiguous: Converts an numpy array to contiguous arrays to be used 63 | with Cython modules. 64 | - pandas_to_numpy: Converts a pandas dataframe to a contiguous numpy 65 | structured array. 66 | - cache.read: Reads the cache for a specific source file, or for an 67 | intermediate step. 68 | - cache.write: Writes the cache for a specific source file, or for an 69 | intermediate step. 70 | 71 | Tools: 72 | ------ 73 | - bin_with_fixed_widths: Supports binning any dataset into a bins with 74 | a fixed number of events per bin 75 | - bin_by_range: Supports binning any dataset into a fixed number of bins 76 | - make_lego: Produces a lego plot 77 | 78 | Provided Data Types: 79 | -------------------- 80 | - FourVector: Represents 4 vectors 81 | - ThreeVector: Represents 3 vectors 82 | - Particle: A 4 vector that includes extra particle data 83 | - ParticlePool: A collection of Particles. 
84 | - ResonanceData: Represents Resonances, this is not stable and could 85 | change at any point in the future. 86 | """ 87 | 88 | from PyPWA import info as _info 89 | from PyPWA.libs import simulate 90 | from PyPWA.libs.binning import ( 91 | bin_by_range, bin_with_fixed_widths, bin_by_list 92 | ) 93 | from PyPWA.libs.common import to_contiguous, pandas_to_numpy 94 | from PyPWA.libs.file import ( 95 | get_reader, get_writer, read, write, cache, DataType 96 | ) 97 | from PyPWA.libs.fit import ( 98 | minuit, ChiSquared, LogLikelihood, EmptyLikelihood, 99 | sweightedLogLikelihood, NestedFunction, FunctionAmplitude 100 | ) 101 | from PyPWA.libs.plotting import make_lego 102 | from PyPWA.libs.simulate import monte_carlo_simulation 103 | from PyPWA.libs.vectors import FourVector, ThreeVector, ParticlePool, Particle 104 | 105 | __all__ = [ 106 | 'ChiSquared', 'DataType', 'EmptyLikelihood', 107 | 'FourVector', 'FunctionAmplitude', 'LogLikelihood', 108 | 'NestedFunction', 'Particle', 'ParticlePool', 109 | 'ThreeVector', 'bin_by_list', 'bin_by_range', 'bin_with_fixed_widths', 110 | 'cache', 'get_reader', 'get_writer', 'make_lego', 'mcmc', 111 | 'minuit', 'monte_carlo_simulation', 'pandas_to_numpy', 112 | 'read', 'simulate', 'sweightedLogLikelihood', 113 | 'to_contiguous', 'write' 114 | ] 115 | 116 | try: 117 | from PyPWA.libs.fit import mcmc 118 | __all__.append('mcmc') 119 | except ImportError: 120 | # EMCEE not installed, pass over the module 121 | pass 122 | 123 | __author__ = _info.AUTHOR 124 | __credits__ = ["Mark Jones"] 125 | __version__ = _info.VERSION 126 | __release__ = _info.RELEASE 127 | __license__ = _info.LICENSE 128 | -------------------------------------------------------------------------------- /PyPWA/info.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 
4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | ****************** 21 | How to get started 22 | ****************** 23 | 24 | To make using PyPWA easier from IPython or Jupyter, useful modules have 25 | been imported directly into this package so that you can get up and 26 | running as quickly as possible. To know more about the following modules, 27 | use help. 28 | 29 | Working with data: 30 | - get_reader, get_writer: Returns an object that can read/write data 31 | one event at a time. 32 | - read, write: Reads/Writes data to and from memory 33 | 34 | Programs: 35 | - monte_carlo_simulation: Simulates data using the Monte Carlo Rejection 36 | method. 37 | 38 | Complex Data Types: 39 | - FourVector ThreeVector Particle ParticlePool: Handles working 40 | with particle data. 41 | 42 | General Docs 43 | ============ 44 | 45 | PyPWA is statistical analysis toolkit that was built with Partial Wave 46 | Analysis in mind, however you should be able to use the tools included for 47 | anything statistical analysis. 48 | 49 | Currently, there are 4 different applications defined inside this package: 50 | 51 | - pyfit - Fitting with any likelihood. 52 | - pysimulate - Monte-Carlo something or the other. 
53 | - pymask - Simple masking and conversion of data 54 | - pybin - Multi-Variable binning utility utilizing PyTables 55 | 56 | For information about how to use each of the programs, look in the docs 57 | folder included with the source code, or check the user docs at 58 | ReadTheDocs.io. 59 | 60 | Developer Docs 61 | ============== 62 | 63 | To attempt to achieve a flexible fitter that could be quickly adapted to 64 | changing needs, we actually built the entire package around a generalized 65 | plugin loader. The "main" objects ore defined as plugins, along with each 66 | task that needed to be solved. This means that fitting, data loading, 67 | the processing module, simulation, optimizers, etc are all defined as 68 | plugins internally. 69 | 70 | Package purposes 71 | ---------------- 72 | 73 | - plugins - Plug and play functionality for PyPWA. These are modular 74 | metadata based plugins that can be user defined to add support for 75 | new data types and likelihoods for pypwa. 76 | 77 | - libs - The main libraries for the program. Core file libs, interfaces, 78 | and mathematics are defined here. If you're writing a script to 79 | interface with PyPWA, this will be the package you'll want to use. 80 | 81 | - progs - This is where the various shell programs that PyPWA provides 82 | are defined. These programs can also be used as an example on how 83 | their respective lib component is used. 84 | 85 | For more information on how each package works, view their documentation. 86 | """ 87 | 88 | __credits__ = ["Mark Jones"] 89 | 90 | AUTHOR = "PyPWA Team and Contributors" 91 | VERSION = "4.0.1" 92 | RELEASE = f"{VERSION}" 93 | LICENSE = "GPLv3" 94 | STATUS = "production" 95 | -------------------------------------------------------------------------------- /PyPWA/libs/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 
4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | The core libraries of PyPWA 21 | --------------------------- 22 | Any core libraries that are needed for PyPWA that wouldn't or shouldn't 23 | function as a plugin, are located here. This means anything whose internal 24 | function shouldn't really change should be here, such as hash generation, 25 | data location, and such. 26 | 27 | - data_handler - Handles the data parsing and iteration for the entire 28 | package. 29 | 30 | - interfaces - The interfaces to the program, while the configurator and 31 | other initializing packages may offer their own interfaces to load into 32 | the program. These interfaces actually define how the objects should 33 | interact with each other instead. 34 | 35 | - initial_logging - Controls how logging works in PyPWA. Currently is 36 | limited to string logging. 37 | 38 | - misc_file_libs - File containing a collection of functions whose focus is 39 | handling file data for the package. This includes hashing, locations, 40 | and file length. 41 | 42 | - plugin_loader - The main plugin loading module inside PyPWA. It's generic 43 | enough to be used anywhere but also powerful enough to handle all plugin 44 | needs. 
45 | """ 46 | 47 | from PyPWA import info as _info 48 | 49 | __credits__ = ["Mark Jones"] 50 | __author__ = _info.AUTHOR 51 | __version__ = _info.VERSION -------------------------------------------------------------------------------- /PyPWA/libs/common.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | Miscellaneous file tools. 21 | ------------------------- 22 | """ 23 | 24 | from typing import Dict, List, Tuple, Union 25 | 26 | import numpy as np 27 | import pandas as pd 28 | 29 | from PyPWA import info as _info 30 | 31 | __credits__ = ["Mark Jones"] 32 | __author__ = _info.AUTHOR 33 | __version__ = _info.VERSION 34 | 35 | 36 | def pandas_to_numpy(df: Union[pd.Series, pd.DataFrame]) -> np.ndarray: 37 | """Converts Pandas DataTypes to Numpy 38 | 39 | Takes a Pandas Series or DataFrame and converts it to Numpy. Pandas 40 | does have a built in `to_records` function, however records are slower 41 | than Structured Arrays, while containing much of the same 42 | functionality. 
43 | 44 | Parameters 45 | ---------- 46 | df : Pandas Series or DataFrame 47 | The pandas data structure that you wish to be converted to 48 | standard Numpy Structured Arrays 49 | 50 | Returns 51 | ------- 52 | Numpy ArrayLike 53 | The resulting Numpy array or structured array containing the data 54 | from the original DataFrame or Series. If it was a Series with 55 | each row named (like an element from a DataFrame) it'll be a 56 | Structured Array with length=1, if it was a standard Series it'll 57 | return a single Numpy Array, and if it was a DataFrame the results 58 | will be stored in Structured array matching the types and names 59 | from the DataFrame. 60 | """ 61 | 62 | if isinstance(df, pd.Series) and isinstance(df.name, (type(None), str)): 63 | return df.to_numpy() 64 | 65 | names = list(df.keys()) 66 | types = df.dtypes if len(df.dtypes) else [df.dtypes] * len(names) 67 | 68 | array_type = [] 69 | for name, dtype in zip(names, types): 70 | array_type.append((name, dtype)) 71 | 72 | if isinstance(df, pd.Series): 73 | length = 1 74 | else: 75 | length = len(df) 76 | 77 | array = np.empty(length, np.dtype(array_type, align=True)) 78 | for name in names: 79 | array[name] = df[name] 80 | return array 81 | 82 | 83 | def to_contiguous( 84 | data: Union[pd.DataFrame, np.ndarray, Dict[str, np.ndarray]], 85 | names: List[str] 86 | ) -> Union[np.ndarray, Tuple[np.ndarray, ...]]: 87 | """Convert DataFrame or Structured Array to List of Contiguous Arrays 88 | 89 | This takes a data-set and a list of column names and converts those 90 | columns into Contiguous arrays. The reason to use Contiguous arrays 91 | over DataFrames or Structured arrays is that the memory is better 92 | aligned to improve speed of computation. However, this does double 93 | the memory requirements of your data-set since this copies all the 94 | events over to the new array. Use only in amplitudes where you need 95 | to maximize the speed of your amplitude. 
96 | 97 | Parameters 98 | ---------- 99 | data : Structured Array, DataFrame, or Dict-like 100 | This is the data frame or Structured array that you want to 101 | extract columns from 102 | names : List of Column Names or str 103 | This is either a list of columns you want from the array, or a 104 | single column you want from the array 105 | 106 | Returns 107 | ------- 108 | ArrayLike or Tuple[ArrayLike] 109 | If you provide only a single column, it'll only return a single 110 | array with the data from that array. However, if you have supplied 111 | multiple columns in a list or tuple, it'll return a tuple of 112 | arrays in the same order as the supplied names. 113 | """ 114 | 115 | if isinstance(names, str): 116 | return np.ascontiguousarray(data[names]) 117 | 118 | contiguous_data = [] 119 | for name in names: 120 | try: 121 | contiguous_data.append(np.ascontiguousarray(data[name])) 122 | except IndexError: 123 | if isinstance(data, np.ndarray) and not data.dtype.names: 124 | raise ValueError( 125 | f"to_contiguous doesn't support numpy flat arrays!" 126 | ) 127 | else: 128 | raise ValueError( 129 | f"to_contiguous doesn't support {type(data)}!" 130 | ) 131 | 132 | return tuple(contiguous_data) 133 | -------------------------------------------------------------------------------- /PyPWA/libs/file/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | ************ 21 | File Package 22 | ************ 23 | This package handles reading and writing of all data inside PyPWA, from 24 | GAMP files to HD5 tables. In this init file are all the functions, and 25 | objects you'll need to quickly start loading and writing data with PyPWA. 26 | 27 | Contained packages: 28 | =================== 29 | - processor: This parses and writes files either one event at a time 30 | through a reader or writer, or parses the entire file with a DataFrame 31 | through read or write. 32 | - project: This package handles reading and writing to a HD5 file in the 33 | PyPWA data structure. This handles multiple data types, predetermined 34 | data types, and unknown data types. 35 | This is also the package used for binning with large amounts of data 36 | 37 | Contained modules: 38 | ================== 39 | - cache: This module handles caching data for reads and writes. It stores 40 | the sha512 sum of each file in the cache so that any changes to the 41 | original file can be caught and invalidate the cache. This is 42 | primarily used by `processor` but should be usable by anything. 43 | - misc: This is a collection of useful functions that help the rest of 44 | the files work as intended. 
It provides the sha-sums, cache location, 45 | and file length for other modules in PyPWA 46 | """ 47 | 48 | from PyPWA import info as _info 49 | from .processor import DataProcessor as _Data, DataType 50 | from .processor import templates as _templates 51 | from PyPWA.libs.vectors import ParticlePool as _pp 52 | import numpy as _npy 53 | import pandas as _pd 54 | from typing import Union as _U 55 | 56 | __credits__ = ["Mark Jones"] 57 | __author__ = _info.AUTHOR 58 | __version__ = _info.VERSION 59 | 60 | 61 | __all__ = [ 62 | "get_reader", "get_writer", "read", "write" 63 | ] 64 | 65 | 66 | def get_reader(filename: str, use_pandas=False) -> _templates.ReaderBase: 67 | """Returns a reader that can read the file one event at a time 68 | 69 | .. note:: 70 | The return value from the reader coule bd a pointer, if you need 71 | to keep the event without it being overwrote on the next call, you 72 | must call the copy method on the returned data to get a unique 73 | copy. 74 | 75 | Parameters 76 | ---------- 77 | filename : str, Path 78 | File to read 79 | use_pandas : bool 80 | Determines if a numpy data type or pandas data type is returned. 
81 | 82 | Returns 83 | ------- 84 | templates.ReaderBase 85 | A reader that can read the file, defined in PyPWA.plugins.data 86 | 87 | Raises 88 | ------ 89 | RuntimeError 90 | If there is no plugin that can load the data found 91 | 92 | See Also 93 | -------- 94 | read : Reads an entire file into a DataFrame, ParticlePool, or array 95 | 96 | Examples 97 | -------- 98 | The reader can be used inside a standard `for` loop 99 | 100 | >>> reader = get_reader("example.gamp") 101 | >>> for event in reader: 102 | >>> my_kept_event = event.copy() 103 | >>> regular_event = event 104 | >>> reader.close() 105 | """ 106 | data = _Data(True, False) 107 | return data.get_reader(filename, use_pandas) 108 | 109 | 110 | def get_writer(filename: str, dtype: DataType) -> _templates.WriterBase: 111 | """Returns a writer that can write to the file one event at a time 112 | 113 | Parameters 114 | ---------- 115 | filename : str, Path 116 | The file that you want to write to 117 | dtype : DataType 118 | Specifies the type of that needs to be written. TREE_VECTOR is 119 | used for ParticlePools and only works with the '.gamp' extension 120 | for now. STRUCTURED_ARRAY is used for both numpy structured arrays 121 | and pandas DataFrames. BASIC is used for standard numpy arrays. 
122 | 123 | Returns 124 | ------- 125 | templates.WriterBase 126 | A writer that can read the file, defined in PyPWA.plugins.data 127 | 128 | Raises 129 | ------ 130 | RuntimeError 131 | If there is no plugin that can write the data found 132 | 133 | See Also 134 | -------- 135 | write : Writes a ParticlePool, DataFrame, or array to file 136 | 137 | Examples 138 | -------- 139 | The writer can be used to write a ParticlePool one event at a time 140 | 141 | >>> writer = get_writer("example.gamp", DataType.TREE_VECTOR) 142 | >>> for event in particles.iter_events(): 143 | >>> writer.write(event) 144 | >>> writer.close() 145 | """ 146 | data = _Data(True, False) 147 | return data.get_writer(filename, dtype) 148 | 149 | 150 | def read( 151 | filename: str, use_pandas=False, cache=True, clear_cache=False 152 | ) -> _U[_pd.DataFrame, _pp, _npy.ndarray]: 153 | """Reads the entire file and returns either DaataFrame, ParticlePool, 154 | or standard numpy array depending on the data found inside the file. 155 | 156 | Parameters 157 | ---------- 158 | filename : Path, str 159 | File to read. 160 | use_pandas : bool 161 | Determines if a numpy data type or pandas data type is returned. 162 | cache : bool, optional 163 | Enables or disables caching. Defaults to the enabled. Leaving this 164 | enabled should do no harm unless there something is broken with 165 | caching. Disable this if returning the wrong data for debug 166 | purposes. If it continues to return the incorrect data when 167 | disabled then caching isn't the issue. 168 | clear_cache : bool, optional 169 | Forcefully clears the cache for the files that are parsed. Instead 170 | of loading the cache, it'll delete the cache and write a new cache 171 | object instead if cache is enabled. 
172 | 173 | Returns 174 | ------- 175 | DataFrame 176 | If the file is a kVars file, CSV, or TSV 177 | npy.ndarray 178 | If the file is a numpy file, PF file, or single column txt file 179 | ParticlePool 180 | If parsing a gamp file 181 | 182 | Raises 183 | ------ 184 | RuntimeError 185 | If there is no plugin that can load the data found 186 | """ 187 | data = _Data(cache, clear_cache) 188 | return data.parse(filename, use_pandas) 189 | 190 | 191 | def write(filename: str, data, cache=True, clear_cache=False): 192 | """Reads the entire file and returns either DaataFrame, ParticlePool, 193 | or standard numpy array depending on the data found inside the file. 194 | 195 | Parameters 196 | ---------- 197 | filename : Path, str 198 | The filename of the file you wish to write 199 | cache : bool, optional 200 | Enables or disables caching. Defaults to the enabled. Leaving this 201 | enabled should do no harm unless there something is broken with 202 | caching. 203 | clear_cache : bool, optional 204 | Forcefully clears the cache for the files that are parsed. It'll 205 | delete the cache and write a new cache object instead when cache 206 | is enabled. 207 | 208 | Raises 209 | ------ 210 | RuntimeError 211 | If there is no plugin that can load the data found 212 | """ 213 | writer = _Data(cache, clear_cache) 214 | return writer.write(filename, data) 215 | -------------------------------------------------------------------------------- /PyPWA/libs/file/cache.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 
10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | Caching 21 | ------- 22 | Stores the data in a pickle cache. By taking advantage of SHA-512 and 23 | python's pickle, caches can be loaded and stored rapidly, while allowing 24 | us to automatically invalidate the cache when the source file has changed 25 | in any way. 26 | 27 | Both read and write support being used as an intermediate step. The 28 | intermediate step would allow you to save your data quickly for loading 29 | later. The resulting file would only be readable in Python. This method 30 | is very fast. 31 | """ 32 | 33 | import pickle 34 | from dataclasses import dataclass 35 | from pathlib import Path 36 | from typing import Any, Tuple, Union 37 | 38 | from PyPWA import info as _info 39 | from PyPWA.libs.file import misc 40 | 41 | __credits__ = ["Mark Jones"] 42 | __author__ = _info.AUTHOR 43 | __version__ = _info.VERSION 44 | 45 | 46 | @dataclass 47 | class _Package: 48 | """ 49 | Stores the contents of any data that is loaded and the files hash. 50 | """ 51 | hash: str = None 52 | data: Any = None 53 | version: int = 1 54 | 55 | 56 | def read( 57 | path: Union[str, Path], intermediate=True, remove_cache=False 58 | ) -> Tuple[bool, Any]: 59 | """Reads a cache object 60 | 61 | This reads caches objects from the disk. With its default settings 62 | it'll read the file as if it were a cache file. If intermediate is 63 | set to False, the path will be the source file, and it'll load the cache 64 | file as long as the source file's hash hasn't changed. 
It can also be 65 | 66 | Parameters 67 | ---------- 68 | path : Path or str 69 | The path of the source file, or path where you want the 70 | intermediate step to be stored. 71 | intermediate : bool 72 | If set to true, the cache will be treated as an intermediate step, 73 | this means it will assume there is no data file associated with 74 | the data, and will not check file hashes. By default this is True 75 | remove_cache : bool 76 | Setting this to true will remove the cache. 77 | 78 | Returns 79 | ------- 80 | Tuple[bool, any] 81 | The first value in the tuple is whether the cache is valid or not 82 | and the second value in the returned tuple is whatever data was 83 | stored in the cache. 84 | """ 85 | path = Path(path) 86 | cache_path = path.parent / (path.stem + ".cache") 87 | 88 | if intermediate: 89 | cache_path = Path(path.stem + ".intermediate") 90 | file_hash = "" 91 | else: 92 | try: 93 | file_hash = misc.get_sha512_hash(path) 94 | except FileNotFoundError: 95 | return False, None 96 | 97 | if remove_cache: 98 | cache_path.unlink() 99 | return False, None 100 | 101 | try: 102 | with cache_path.open("rb") as stream: 103 | data_package = pickle.load(stream) # type: _Package 104 | except Exception: 105 | return False, None 106 | 107 | if data_package.hash == file_hash or intermediate: 108 | return True, data_package.data 109 | else: 110 | return False, None 111 | 112 | 113 | def write(path, data, intermediate=True): 114 | """Writes a cache file 115 | 116 | With its default settings, it will treat the path as a save location for 117 | the cache as an intermediate step. If intermediate is set to false, 118 | it'll write the cache file into a computed cache location and store the 119 | source file's hash in the cache for future comparison. 120 | 121 | Parameters 122 | ---------- 123 | path : Path or str 124 | The path of the source file, or path where you want the 125 | intermediate step t0 be stored. 
126 | data : Any 127 | Whatever data you wish to be stored in the cache. Almost anything 128 | that can be stored in a variable, can be stored on disk. 129 | intermediate : bool 130 | If set to true, the cache will be treated as an intermediate step, 131 | this means it will assume there is no data file associated with 132 | the data, and will not check file hashes. 133 | """ 134 | path = Path(path) 135 | cache_path = Path(path.stem + ".intermediate") 136 | file_hash = "" 137 | if not intermediate: 138 | file_hash = misc.get_sha512_hash(path) 139 | cache_path = path.parent / Path(path.stem + ".cache") 140 | 141 | data_package = _Package(file_hash, data) 142 | 143 | try: 144 | with cache_path.open("wb") as stream: 145 | pickle.dump(data_package, stream) 146 | except Exception: 147 | if cache_path.exists(): 148 | cache_path.unlink() 149 | raise RuntimeWarning("Your data can not be saved in cache!") 150 | -------------------------------------------------------------------------------- /PyPWA/libs/file/misc.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | Miscellaneous file tools. 
21 | ------------------------- 22 | """ 23 | 24 | import hashlib 25 | from pathlib import Path 26 | 27 | from PyPWA import info as _info 28 | 29 | __credits__ = ["Mark Jones"] 30 | __author__ = _info.AUTHOR 31 | __version__ = _info.VERSION 32 | 33 | 34 | _BUFFER = 40960 35 | 36 | 37 | def get_sha512_hash(file_location: Path) -> str: 38 | file_hash = hashlib.sha512() 39 | with file_location.open("rb") as stream: 40 | for chunk in iter(lambda: stream.read(_BUFFER), b""): 41 | file_hash.update(chunk) 42 | return file_hash.hexdigest() 43 | 44 | 45 | def get_file_length(file_location: Path) -> int: 46 | with file_location.open("rb") as binary_stream: 47 | last_chunk = binary_stream.raw.read(_BUFFER) 48 | lines = last_chunk.count(b'\n') 49 | 50 | for chunk in iter(lambda: binary_stream.raw.read(_BUFFER), b""): 51 | lines += chunk.count(b'\n') 52 | if not chunk and not last_chunk.endswith(b'\n'): 53 | lines += 1 54 | 55 | if last_chunk.endswith(b'\n\n'): 56 | lines -= 1 57 | return lines 58 | -------------------------------------------------------------------------------- /PyPWA/libs/file/processor/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 
18 | 19 | """ 20 | ************************ 21 | Processes data for PyPWA 22 | ************************ 23 | This package reads and writes data for PyPWA in a variety of formats 24 | defined by the plugins in PyPWA/plugins/data 25 | 26 | .. seealso:: 27 | ::mod:: `PyPWA.plugins.data` 28 | 29 | Examples: 30 | ========= 31 | - To load data from file:: 32 | data = DataProcessor() 33 | data.parse(path_to_file) 34 | reader = data.get_reader(path_to_file) 35 | 36 | - To write data to file:: 37 | data = DataProcessor() 38 | data.write(path_to_file, the_data) 39 | writer = data.get_writer(path_to_file, DataType.BASIC) 40 | 41 | Writer takes in DataType as an argument so that it can select which writer 42 | to use depending on the type of data the user wants to write. You can 43 | see what types are supported at :class:`PyPWA.libs.file.templates.DataType`. 44 | """ 45 | 46 | from PyPWA import info as _info 47 | 48 | __credits__ = ["Mark Jones"] 49 | __author__ = _info.AUTHOR 50 | __version__ = _info.VERSION 51 | 52 | from .main import DataProcessor, SUPPORTED_DATA, INPUT_TYPE 53 | from .templates import DataType, ReaderBase, WriterBase 54 | -------------------------------------------------------------------------------- /PyPWA/libs/file/processor/main.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 
15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | Main object for Parsing Data 21 | """ 22 | 23 | import logging 24 | from pathlib import Path 25 | from typing import Union 26 | 27 | import numpy as npy 28 | import pandas as pd 29 | 30 | from PyPWA import info as _info 31 | from PyPWA.libs import common 32 | from PyPWA.libs.file import cache 33 | from PyPWA.plugins import load, data as data_plugins 34 | from . import templates 35 | from ... import vectors 36 | 37 | __credits__ = ["Mark Jones"] 38 | __author__ = _info.AUTHOR 39 | __version__ = _info.VERSION 40 | 41 | 42 | SUPPORTED_DATA = Union[npy.ndarray, vectors.ParticlePool, pd.DataFrame] 43 | INPUT_TYPE = Union[Path, str] 44 | 45 | 46 | def _get_read_plugin( 47 | filename: Path, needs_iterator: bool 48 | ) -> templates.IDataPlugin: 49 | found_plugins = load(data_plugins, "Data") 50 | for plugin in found_plugins: 51 | if plugin.get_read_test().can_read(filename): 52 | if not needs_iterator or plugin.supports_iterators: 53 | return plugin 54 | raise RuntimeError("Couldn't find plugin for {0}".format(filename)) 55 | 56 | 57 | def _get_write_plugin( 58 | filename: Path, data_type: templates.DataType, needs_iterator: bool 59 | ) -> templates.IDataPlugin: 60 | found_plugins = load(data_plugins, "Data") 61 | for plugin in found_plugins: 62 | if data_type in plugin.supported_data_types: 63 | extension = filename.suffix 64 | if not extension or extension in plugin.supported_extensions: 65 | return plugin 66 | raise RuntimeError("Couldn't find plugin for {0}".format(filename)) 67 | 68 | 69 | class _DataLoader: 70 | 71 | __LOGGER = logging.getLogger(__name__ + "._DataLoader") 72 | 73 | def __init__(self, use_cache: bool, clear_cache: bool): 74 | self.__use_cache = use_cache 75 | self.__clear_cache = clear_cache 76 | 77 | def __repr__(self): 78 | return (f"{self.__class__.__name__}" 79 | f"({self.__use_cache}, 
{self.__clear_cache})") 80 | 81 | def parse( 82 | self, filename: Path, use_pandas: bool 83 | ) -> Union[pd.DataFrame, pd.Series]: 84 | valid, cache_obj = cache.read( 85 | filename, intermediate=False, remove_cache=self.__clear_cache 86 | ) 87 | if valid and self.__use_cache: 88 | self.__LOGGER.info("Loading cache for %s" % filename) 89 | data = cache_obj 90 | else: 91 | self.__LOGGER.info("No cache found, loading file directly.") 92 | data = self.__read_data(filename) 93 | 94 | if use_pandas: 95 | if data.dtype.names: 96 | return pd.DataFrame(data) 97 | return pd.Series(data) 98 | else: 99 | return data 100 | 101 | def __read_data(self, filename): 102 | plugin = _get_read_plugin(filename, False) 103 | data = plugin.get_memory_parser().parse(filename) 104 | if self.__use_cache and plugin.use_caching: 105 | cache.write(filename, data, intermediate=False) 106 | return data 107 | 108 | 109 | class _DataDumper: 110 | 111 | def __init__(self, use_cache: bool, clear_cache: bool): 112 | self.__use_cache = use_cache 113 | self.__clear_cache = clear_cache 114 | 115 | def __repr__(self): 116 | return (f"{self.__class__.__name__}" 117 | f"({self.__use_cache}, {self.__clear_cache})") 118 | 119 | def write(self, filename: Path, data: SUPPORTED_DATA): 120 | plugin = self.__get_write_plugin(filename, data) 121 | parser = plugin.get_memory_parser() 122 | parser.write(filename, data) 123 | if self.__use_cache and plugin.use_caching: 124 | if isinstance(data, (pd.DataFrame, pd.Series)): 125 | data = common.pandas_to_numpy(data) 126 | cache.write(filename, data, intermediate=False) 127 | 128 | @staticmethod 129 | def __get_write_plugin(filename: Path, 130 | data: SUPPORTED_DATA) -> templates.IDataPlugin: 131 | if isinstance(data, vectors.ParticlePool): 132 | data_type = templates.DataType.TREE_VECTOR 133 | elif isinstance(data, pd.DataFrame): 134 | data_type = templates.DataType.STRUCTURED 135 | elif isinstance(data, pd.Series) or not data.dtype.names: 136 | data_type = 
templates.DataType.BASIC 137 | else: 138 | data_type = templates.DataType.STRUCTURED 139 | 140 | return _get_write_plugin(filename, data_type, False) 141 | 142 | 143 | class DataProcessor: 144 | 145 | def __init__(self, enable_cache=True, clear_cache=False): 146 | self.__args = (enable_cache, clear_cache) 147 | self.__loader = _DataLoader(enable_cache, clear_cache) 148 | self.__dumper = _DataDumper(enable_cache, clear_cache) 149 | 150 | def __repr__(self): 151 | return (f"{self.__class__.__name__}" 152 | f"({self.__args[0]}, {self.__args[1]})") 153 | 154 | def parse( 155 | self, filename: INPUT_TYPE, use_pandas: bool = False 156 | ) -> SUPPORTED_DATA: 157 | filename = Path(filename) 158 | return self.__loader.parse(filename, use_pandas) 159 | 160 | @staticmethod 161 | def get_reader( 162 | filename: INPUT_TYPE, use_pandas: bool = False 163 | ) -> templates.ReaderBase: 164 | filename = Path(filename) 165 | plugin = _get_read_plugin(filename, True) 166 | return plugin.get_reader(filename, use_pandas) 167 | 168 | def write(self, filename: INPUT_TYPE, data: SUPPORTED_DATA): 169 | filename = Path(filename) 170 | self.__dumper.write(filename, data) 171 | 172 | @staticmethod 173 | def get_writer(filename: INPUT_TYPE, 174 | data_type=templates.DataType.STRUCTURED 175 | ) -> templates.WriterBase: 176 | filename = Path(filename) 177 | plugin = _get_write_plugin(filename, data_type, True) 178 | return plugin.get_writer(filename) 179 | -------------------------------------------------------------------------------- /PyPWA/libs/file/processor/templates.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 
4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | Holds the different implementation interfaces that are needed to interface 21 | data module. 22 | """ 23 | 24 | import enum 25 | from abc import ABC, abstractmethod 26 | from pathlib import Path 27 | from typing import List, Union 28 | 29 | import pandas as pd 30 | import numpy as np 31 | 32 | from PyPWA import info as _info 33 | 34 | __credits__ = ["Mark Jones"] 35 | __author__ = _info.AUTHOR 36 | __version__ = _info.VERSION 37 | 38 | 39 | class DataType(enum.Enum): 40 | """ 41 | Enumeration for type of data to be read or written using the reader 42 | and writer. 43 | 44 | Because of how the reader and writer are designed they can not 45 | inspect the data before it starts working with the data. This enum 46 | is used to specify the type of data you're working with. 47 | 48 | * BASIC = Standard arrays with no columns 49 | * STRUCTURED = Columned array (CSV, TSV, DataFrames) 50 | * TREE_VECTOR = Particle Data (GAMP) 51 | """ 52 | 53 | # Single arrays with no attached data names 54 | BASIC = 0 55 | 56 | # Structured data, such as CSV or Evil data 57 | STRUCTURED = 1 58 | 59 | # Tree-like data. At this moment, only GAMP 60 | TREE_VECTOR = 2 61 | 62 | 63 | class IMemory(ABC): 64 | 65 | @abstractmethod 66 | def parse(self, filename: Path) -> np.ndarray: 67 | ... 
68 | 69 | @abstractmethod 70 | def write(self, filename: Path, data: Union[pd.DataFrame, np.ndarray]): 71 | ... 72 | 73 | 74 | class ReaderBase(ABC): 75 | 76 | @abstractmethod 77 | def next(self) -> pd.DataFrame: 78 | """ 79 | Called to get the next event from the reader. 80 | 81 | :return: A single event. 82 | :rtype: numpy.ndarray 83 | """ 84 | ... 85 | 86 | def __next__(self): 87 | return self.next() 88 | 89 | def __iter__(self): 90 | return self 91 | 92 | def __enter__(self): 93 | return self 94 | 95 | def __len__(self): 96 | return self.get_event_count() 97 | 98 | def __exit__(self, *args): 99 | self.close() 100 | 101 | @abstractmethod 102 | def get_event_count(self) -> int: 103 | """ 104 | Called to get the total number of events in the file. 105 | 106 | :return: Count of the events 107 | :rtype: int 108 | """ 109 | ... 110 | 111 | @abstractmethod 112 | def reset(self): 113 | """ 114 | Resets the reader back to the first event 115 | """ 116 | ... 117 | 118 | @abstractmethod 119 | def close(self): 120 | """ 121 | Should close any open objects or streams. 122 | """ 123 | ... 124 | 125 | @property 126 | def is_particle_pool(self) -> bool: 127 | return False 128 | 129 | @property 130 | @abstractmethod 131 | def fields(self) -> List[str]: 132 | ... 133 | 134 | @property 135 | @abstractmethod 136 | def data_type(self) -> DataType: 137 | ... 138 | 139 | @property 140 | @abstractmethod 141 | def input_path(self) -> Path: 142 | ... 143 | 144 | 145 | class WriterBase(ABC): 146 | 147 | @abstractmethod 148 | def write(self, data: Union[pd.DataFrame, np.ndarray]): 149 | """ 150 | Should write the received event to the stream. 151 | 152 | :param numpy.ndarray data: The event data stored in a numpy array. 153 | """ 154 | ... 155 | 156 | def __enter__(self): 157 | return self 158 | 159 | def __exit__(self, *args): 160 | self.close() 161 | 162 | @abstractmethod 163 | def close(self): 164 | """ 165 | Should close the stream and any open streams or objects. 166 | """ 167 | ... 
168 | 169 | @property 170 | @abstractmethod 171 | def output_path(self) -> Path: 172 | ... 173 | 174 | 175 | class IReadTest(ABC): 176 | 177 | @abstractmethod 178 | def can_read(self, filename: Path) -> bool: 179 | ... 180 | 181 | 182 | class IDataPlugin: 183 | 184 | @property 185 | @abstractmethod 186 | def plugin_name(self) -> str: 187 | ... 188 | 189 | @abstractmethod 190 | def get_memory_parser(self) -> IMemory: 191 | ... 192 | 193 | @abstractmethod 194 | def get_reader(self, filename: Path, use_pandas: bool) -> ReaderBase: 195 | ... 196 | 197 | @abstractmethod 198 | def get_writer(self, filename: Path) -> WriterBase: 199 | ... 200 | 201 | @abstractmethod 202 | def get_read_test(self) -> IReadTest: 203 | ... 204 | 205 | @property 206 | @abstractmethod 207 | def supported_extensions(self) -> List[str]: 208 | ... 209 | 210 | @property 211 | @abstractmethod 212 | def supported_data_types(self) -> List[DataType]: 213 | ... 214 | 215 | @property 216 | def use_caching(self) -> bool: 217 | return True 218 | 219 | @property 220 | def supports_iterators(self) -> bool: 221 | return True 222 | -------------------------------------------------------------------------------- /PyPWA/libs/fit/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 
15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | 21 | """ 22 | 23 | from PyPWA import info as _info 24 | 25 | __credits__ = ["Mark Jones"] 26 | __author__ = _info.AUTHOR 27 | __version__ = _info.VERSION 28 | 29 | 30 | from .likelihoods import ( 31 | ChiSquared, LogLikelihood, EmptyLikelihood, 32 | NestedFunction, FunctionAmplitude, sweightedLogLikelihood 33 | ) 34 | 35 | from .minuit import minuit 36 | 37 | try: 38 | from .mcmc import mcmc 39 | except ImportError: 40 | # EMCEE not installed, so pass over it 41 | pass 42 | 43 | -------------------------------------------------------------------------------- /PyPWA/libs/fit/mcmc.py: -------------------------------------------------------------------------------- 1 | from typing import Any as _Any, Callable as _Call, List as _List 2 | 3 | import numpy as np 4 | 5 | from PyPWA import info as _info 6 | from PyPWA.libs.fit import likelihoods as _likelihoods 7 | 8 | try: 9 | import emcee as _emcee 10 | except ImportError: 11 | raise ImportError("Emcee must be installed!") 12 | 13 | # modelled after minuit.py 14 | 15 | __credits__ = ["Peter Pauli"] 16 | __author__ = _info.AUTHOR 17 | __version__ = _info.VERSION 18 | 19 | 20 | class _Translator: 21 | 22 | def __init__( 23 | self, parameters: _List[str], 24 | parameterlimits: _List[str], 25 | function_call: _Call[[_Any], float], 26 | prior: _Call[[_Any, _List[str]], float] 27 | ): 28 | self.__parameters = parameters 29 | self.__parameter_limits = parameterlimits 30 | self.__function = function_call 31 | self.__prior = prior 32 | 33 | def __call__(self, args: _List[float]) -> float: 34 | parameters_with_values = {} 35 | for parameter, arg in zip(self.__parameters, args): 36 | parameters_with_values[parameter] = arg 37 | prior = self.__prior(args, self.__parameter_limits) 38 | if not np.isfinite(prior): 39 | return -np.inf 40 | nll = self.__function(parameters_with_values) + prior 
41 | if np.any(np.isnan(nll)): 42 | return -np.inf 43 | return nll 44 | 45 | 46 | def mcmc( 47 | parlist: _List[str], 48 | likelihood: _likelihoods.ChiSquared, 49 | nwalker=20, 50 | prior=1, 51 | nsteps=100, 52 | startpars=None, 53 | parlimits=None, 54 | emceemoves=_emcee.moves.GaussianMove(0.05, mode='vector', factor=None) 55 | ): 56 | """Inference using the emcee package () 57 | Parameters 58 | ---------- 59 | parlist : List[str] 60 | List of parameter names 61 | likelihood : Likelihood object from likelihoods or single function 62 | startpars : nparray with dim nwalker x len(parlist) 63 | Set the start parameters for all chains 64 | parlimits : list of tuples (lower limit and upper limit) with 65 | length = number of parameters 66 | nwalker : int (optional) 67 | Choose the number of walkers for the Markov chains (default = 20) 68 | prior : int (optional) 69 | Set the prior that is used during the walk 70 | uniform prior : 1 (default, currently only option) 71 | nsteps : int (optional) 72 | Choose the number of steps to generate with each walker 73 | (default = 100) 74 | emceemoves : Move from emcee.moves (optional) 75 | Choose a suitable move to create chain. 76 | Default: GaussianMove(0.05, mode='vector', factor=None) 77 | (see emcee docs) 78 | Returns 79 | ------- 80 | emcee.EnsembleSampler.run_mcmc 81 | Contains the whole chain. See emcee documentation for more info. 82 | See Also 83 | -------- 84 | emcee's documentation : Should explain the various options that can 85 | be passed to emcee, and how to use the resulting object after 86 | the chain has been produced. 
87 | """ 88 | 89 | if prior == 1: 90 | translator = _Translator( 91 | parlist, parlimits, likelihood, log_uniform_prior 92 | ) 93 | else: 94 | print("So far only uniform prior is implemented.") 95 | return 0 96 | 97 | ndimension = len(parlist) 98 | 99 | if startpars.any() is None: 100 | startpars = np.zeros((nwalker, ndimension)) 101 | 102 | optimizer = _emcee.EnsembleSampler( 103 | nwalker, ndimension, translator, moves=emceemoves 104 | ) 105 | output = optimizer.run_mcmc( 106 | startpars, nsteps, progress=True, skip_initial_state_check=True 107 | ) 108 | return optimizer 109 | 110 | 111 | def log_uniform_prior(pars, parlimits): 112 | for index, par in enumerate(pars): 113 | if par < parlimits[index][0] or par > parlimits[index][1]: 114 | return -np.inf 115 | return 0. 116 | -------------------------------------------------------------------------------- /PyPWA/libs/fit/minuit.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | 21 | """ 22 | 23 | from typing import Any, Callable as Call, Dict, List, Union 24 | 25 | import iminuit as _iminuit 26 | import numpy as np 27 | 28 | from PyPWA import info 29 | from . 
class _Translator:
    """Adapts a PyPWA likelihood to iminuit's calling convention.

    When a parameter-name list is supplied, positional arguments from
    iminuit are zipped into a ``{name: value}`` dict before the wrapped
    function is called; with no names, the single array argument is
    forwarded untouched.
    """

    def __init__(
            self,
            parameters: Union[List[str], None],
            function_call: Call[[Any], float]
    ):
        self.__parameters = parameters
        self.__function = function_call

    def __call__(self, *args):
        if self.__parameters is None:
            # Array mode: iminuit hands us one ndarray of values.
            return self.__function(args[0])
        # Named mode: map each positional value onto its parameter name.
        return self.__function(dict(zip(self.__parameters, args)))


def minuit(
    settings: Union[Dict[str, Any], np.ndarray],
    likelihood: "likelihoods.ChiSquared"
):
    """Optimization using iminuit

    Parameters
    ----------
    settings : Dict[str, Any]
        The settings to be passed to iminuit. Look into the documentation
        for iminuit for specifics
    likelihood : Likelihood object from likelihoods or single function

    Returns
    -------
    iminuit.Minuit
        The minuit object after the fit has been completed.

    Note
    ----
    See `Iminuit's documentation <https://iminuit.readthedocs.io/>`_
    for more imformation, as it should explain the various options
    that can be passed to iminuit, and how to use the resulting object
    after a fit has been completed.
    """
    if isinstance(settings, np.ndarray):
        name = None
    else:
        name = settings["name"] if "name" in settings else list(settings.keys())

    translator = _Translator(name, likelihood)

    if name is None:
        optimizer = _iminuit.Minuit(translator, settings)
    else:
        optimizer = _iminuit.Minuit(translator, name=name, **settings)

    # Set error for Likelihood, Migrad defaults to ChiSquared
    likelihood_type = getattr(likelihood, "TYPE", None)
    if likelihood_type is not None:
        if likelihood_type == likelihoods.LikelihoodType.LIKELIHOOD:
            optimizer.errordef = _iminuit.Minuit.LIKELIHOOD
        elif likelihood_type == likelihoods.LikelihoodType.CHI_SQUARED:
            optimizer.errordef = _iminuit.Minuit.LEAST_SQUARES

    return optimizer
def make_lego(
    x_data: Union[npy.ndarray, pd.Series],
    y_data: Union[npy.ndarray, pd.Series],
    bins: Opt[int] = None,
    cmap: Opt[Union[str, colors.ListedColormap]] = "jet",
    ax: Opt[Axes3D] = None, elev: Opt[int] = 10, azim: Opt[int] = 215
):
    r"""
    Produces a 3D Lego plot, similar to what is produced by ROOT. This is
    similar to a 2D Histogram, but treats x and y as x and z, and projects
    the occurrences into the y dimension.

    Parameters
    ----------
    x_data : ndarray or Series
        X data for the lego plot
    y_data : ndarray or Series
        Y data for the lego plot
    bins : int, optional
        Number of bins to create when making the lego plot.
    cmap : str or matplotlib.colors.Colormap, optional
        cmap to use when creating the lego plot. It takes either a string
        of the name for matplotlib, or a matplotlib cmap
    ax : Axes3D, optional
        An axes object to place the lego plot into. The axes must be an
        axes that supports 3d projection or it will cause the function
        to error.
    elev : int, optional
        Adjusts the elevation of the lego-plot
    azim : int, optional
        Adjusts the azimuth of the resulting image. It's value is a angle
        between 0 and 360 degrees.

    Returns
    -------
    Axes3D
        The axes object of the plot

    Notes
    -----
    If the number of bins isn't provided, it's instead calculated using
    one half of Sturge's Rule rounded up:

    .. math::
        \lceil (1/2)(1 + 3.322 \cdot log(N_{events})\rceil
    """
    if bins is None:
        # Bug fix: np.ceil returns a float, but histogram2d requires an
        # integer bin count on modern NumPy.
        bins = int(npy.ceil((1 + 3.322 * npy.log(len(x_data))) / 2))

    if ax is None:
        # Bug fix: `Axes3D(fig)` stopped auto-attaching its axes to the
        # figure in recent matplotlib; add_subplot is the supported way.
        fig = plt.figure()
        ax = fig.add_subplot(projection="3d")

    hist, xedges, yedges = npy.histogram2d(x_data, y_data, bins=(bins, bins))

    # Bin centers: midpoints of adjacent edges (the / 2 happens below).
    xpos, ypos = npy.meshgrid(
        xedges[:-1] + xedges[1:], yedges[:-1] + yedges[1:]
    )

    xpos = xpos.flatten() / 2
    ypos = ypos.flatten() / 2
    zpos = npy.zeros_like(xpos)

    dx = xedges[1] - xedges[0]
    dy = yedges[1] - yedges[0]
    dz = hist.flatten()

    if isinstance(cmap, str):
        cmap = cm.get_cmap(cmap)
    elif not isinstance(cmap, colors.Colormap):
        # Bug fix: accept any Colormap. The old ListedColormap check
        # rejected valid colormap objects such as LinearSegmentedColormap
        # (which is what e.g. cm.jet actually is).
        raise ValueError("cmap not understood!")

    max_height = npy.max(dz)
    min_height = npy.min(dz)
    # Guard against an all-zero histogram to avoid dividing by zero.
    height_span = max_height if max_height else 1
    rgba = [cmap((k - min_height) / height_span) for k in dz]

    ax.view_init(elev, azim)
    ax.bar3d(xpos, ypos, zpos, dx, dy, dz, alpha=1, color=rgba, zsort="average")
    return ax
If not, see . 18 | 19 | """ 20 | 21 | """ 22 | 23 | from PyPWA import info as _info 24 | 25 | __credits__ = ["Mark Jones"] 26 | __author__ = _info.AUTHOR 27 | __version__ = _info.VERSION 28 | 29 | from PyPWA.libs.vectors.three_vector import ThreeVector 30 | from PyPWA.libs.vectors.four_vector import FourVector 31 | from PyPWA.libs.vectors.particle import Particle, ParticlePool 32 | -------------------------------------------------------------------------------- /PyPWA/libs/vectors/_base_vector.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | The backbone of all vectors 21 | --------------------------- 22 | This provides the method for vector inputs to be sanitized into something 23 | usable, and the base object for math that is similar for all vector types. 
def sanitize_vector_input(a, b=None, c=None, d=None, has_e=False):
    """Normalize the many accepted vector inputs into component storage.

    Accepts an integer length (returns zeroed storage), numpy structured
    arrays / records, plain scalars, per-component ndarrays, DataFrames,
    or a single pandas Series, and returns the x/y/z (and optionally e)
    components as C-contiguous arrays or numpy scalars.

    Parameters
    ----------
    a, b, c, d :
        Either a single container in ``a``, or one value per component.
        With ``has_e`` the order is (e, x, y, z); without it, (x, y, z).
    has_e : bool
        Whether a fourth (energy) component is expected.

    Raises
    ------
    ValueError
        If the input type is not understood, or the fourth component is
        missing when ``has_e`` is set.
    """
    names = ["x", "y", "z"]
    if has_e:
        names.insert(0, "e")

    # Produce empty (zeroed) storage of length `a`
    if isinstance(a, int) and b is None:
        if a == 0:
            # Length zero means scalar-backed storage
            if has_e:
                return np.float64(0), np.float64(0), np.float64(0), np.float64(0)
            return np.float64(0), np.float64(0), np.float64(0)

        return tuple(np.zeros(a) for _ in range(4 if has_e else 3))

    # Convert numpy structured storage types to contiguous arrays
    elif isinstance(a, (np.void, np.record)) or \
            isinstance(a, np.ndarray) and a.dtype.names:
        return common.to_contiguous(a, names)

    # Pass through single values
    elif all(isinstance(var, (int, float)) for var in [a, b, c]):
        returns = [np.float64(a), np.float64(b), np.float64(c)]
        if has_e:
            if not isinstance(d, (int, float)):
                raise ValueError("No Z value provided!")
            returns.append(np.float64(d))
        return returns

    # Plain per-component arrays: ensure each one is C-contiguous
    elif all(isinstance(var, np.ndarray) for var in [a, b, c]):
        if has_e:
            if not isinstance(d, np.ndarray):
                raise ValueError("No Z Value provided!")
            # Bug fix: the old code wrote `all([arr.flags[...]] for ...)`,
            # a generator of one-element *lists*, which is always True, so
            # non-contiguous arrays were never converted.
            if all(arr.flags["C_CONTIGUOUS"] for arr in (a, b, c, d)):
                return a, b, c, d
            return common.to_contiguous(
                {"e": a, "x": b, "y": c, "z": d}, names
            )
        if all(arr.flags["C_CONTIGUOUS"] for arr in (a, b, c)):
            return a, b, c
        return common.to_contiguous({"x": a, "y": b, "z": c}, names)

    # Convert DataFrame to contiguous arrays
    elif isinstance(a, pd.DataFrame):
        return common.to_contiguous(a, names)

    # Single pandas row: pass the record tuple through
    elif isinstance(a, pd.Series):
        # Bug fix: the old code ignored the return value of
        # DataFrame.append (which returned a *copy*, and was removed in
        # pandas 2.0) and then indexed an empty records array.
        return a.to_frame().T.to_records(False)[0]

    else:
        raise ValueError(
            f"Can't sanitize vector input! Unknown data type {type(a)}!"
        )


class VectorMath:
    """Math shared by every vector type, working on x/y/z components.

    Components may be numpy scalars or ndarrays; every derived quantity
    (length, angles) follows elementwise.
    """

    __slots__ = ["_x", "_y", "_z"]

    def __init__(self, x, y, z: np.ndarray):
        self._x = x
        self._y = y
        self._z = z

    def get_length(self) -> Union[pd.Series, float]:
        """Euclidean length sqrt(x^2 + y^2 + z^2)."""
        return np.sqrt(self._x**2 + self._y**2 + self._z**2)

    def get_theta(self) -> Union[pd.Series, float]:
        """Polar angle theta, measured from the +z axis."""
        return np.arccos(self.get_cos_theta())

    def get_phi(self) -> Union[pd.Series, float]:
        """Azimuthal angle phi in the x-y plane."""
        return np.arctan2(self._y, self._x)

    def get_sin_theta(self) -> Union[pd.Series, float]:
        # Bug fix: sin(theta) is sqrt(x^2 + y^2) / r; the old code divided
        # the *squared* transverse component by r, yielding r*sin^2(theta).
        return np.sqrt(self._x**2 + self._y**2) / self.get_length()

    def get_cos_theta(self) -> Union[pd.Series, float]:
        """cos(theta) = z / r."""
        return self._z / self.get_length()

    @property
    def dataframe(self) -> pd.DataFrame:
        """The components repackaged as a pandas DataFrame."""
        return pd.DataFrame({"x": self._x, "y": self._y, "z": self._z})

    @property
    def x(self) -> Union[float, np.ndarray]:
        # Copy arrays so callers can't mutate internal storage in place.
        if isinstance(self._x, np.ndarray):
            return self._x.copy()
        return self._x

    @x.setter
    def x(self, value: Union[np.ndarray, float, pd.Series, str]):
        if isinstance(value, np.ndarray):
            if len(value) != len(self._x):
                raise ValueError("Size does not match vector!")
            self._x = value
        else:
            # Broadcast a scalar over the existing storage in place.
            self._x *= 0
            self._x += np.float64(value)

    @property
    def y(self) -> Union[float, np.ndarray]:
        if isinstance(self._y, np.ndarray):
            return self._y.copy()
        return self._y

    @y.setter
    def y(self, value: Union[np.ndarray, float, pd.Series]):
        if isinstance(value, np.ndarray):
            if len(value) != len(self._y):
                raise ValueError("Size does not match vector!")
            self._y = value
        else:
            self._y *= 0
            self._y += np.float64(value)

    @property
    def z(self) -> Union[float, np.ndarray]:
        if isinstance(self._z, np.ndarray):
            return self._z.copy()
        return self._z

    @z.setter
    def z(self, value: Union[np.ndarray, float, pd.Series]):
        if isinstance(value, np.ndarray):
            if len(value) != len(self._z):
                raise ValueError("Size does not match vector!")
            self._z = value
        else:
            self._z *= 0
            self._z += np.float64(value)
class ThreeVector(_base_vector.VectorMath):
    """DataFrame backed ThreeVector object for vector operations inside
    PyPWA.

    Parameters
    ----------
    x : int, npy.ndarray, float, or DataFrame
        Can be an integer to specify size, a structured array or DataFrame
        with x y and z values, a single float value, or a Series or
        single dimensional array, If you provide a float, series, or
        array, you need to provide a float for the other options as well.
    y : int, npy.ndarray, float, or DataFrame, optional
    z : int, npy.ndarray, float, or DataFrame, optional

    See Also
    --------
    FourVector : For storing a vector with it's energy.
    """

    # All component storage lives in VectorMath's slots; no extra
    # attributes are added here. (The old "_vector" slot was never used.)
    __slots__ = []

    def __init__(
            self,
            x: Union[int, np.ndarray, float, pd.DataFrame],
            y: Opt[Union[float, pd.Series, np.ndarray]] = None,
            z: Opt[Union[float, pd.Series, np.ndarray]] = None
    ):
        # Bug fix: copy-construction used to fall through into
        # sanitize_vector_input with the ThreeVector itself, which raised
        # ValueError. Unpack the components first instead.
        if isinstance(x, ThreeVector):
            x, y, z = x._x, x._y, x._z
        self._x, self._y, self._z = _base_vector.sanitize_vector_input(x, y, z)
        super(ThreeVector, self).__init__(self._x, self._y, self._z)

    def __repr__(self) -> str:
        return f"ThreeVector(x={self._x}, y={self._y}, z={self._z})"

    def _repr_pretty_(self, p, cycle):
        # IPython pretty printer: show mean angles for array vectors.
        if cycle:
            p.text("ThreeVector( ?.)")
            return

        if isinstance(self._x, np.ndarray):
            # Bug fix: the old test was `all(self._z) == 0.0`, which is
            # true whenever *any* element is zero. Theta is undefined only
            # when every z component is zero.
            if not np.any(self._z):
                theta = np.nan  # np.NaN was removed in NumPy 2.0
            else:
                theta = self.get_theta().mean()

            phi = self.get_phi().mean()
        else:
            theta = np.nan if self._z == 0 else self.get_theta()
            phi = self.get_phi()

        p.text(f"ThreeVector(x̅Θ={theta}, x̅ϕ={phi})")

    def __eq__(self, vector: "ThreeVector") -> bool:
        if not isinstance(vector, ThreeVector):
            return False
        if isinstance(self._x, np.ndarray):
            return (
                all(self._x == vector._x) and all(self._y == vector._y) and
                all(self._z == vector._z)
            )
        return (
            self._x == vector._x and self._y == vector._y and
            self._z == vector._z
        )

    def __add__(self, vector: Union["ThreeVector", float]) -> "ThreeVector":
        if isinstance(vector, ThreeVector):
            # Only array-backed vectors have a length to compare.
            if isinstance(self._x, np.ndarray) and len(vector) != len(self):
                raise ValueError("Vectors have different lengths!")
            return ThreeVector(
                self._x + vector._x, self._y + vector._y,
                self._z + vector._z
            )
        # np.float was removed from NumPy (1.24+); np.floating covers all
        # numpy floating scalar types.
        elif isinstance(vector, (int, float, np.floating)):
            return ThreeVector(
                self._x + vector, self._y + vector, self._z + vector
            )
        else:
            raise ValueError(f"Can not add ThreeVector and {type(vector)}")

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, vector: Union["ThreeVector", float]) -> "ThreeVector":
        if isinstance(vector, (int, float, np.floating, ThreeVector)):
            return self.__add__(-1 * vector)
        else:
            raise ValueError(f"Can not subtract ThreeVector and {type(vector)}")

    def __rsub__(self, other):
        # Bug fix: reflected subtraction is `other - self`; the old
        # implementation returned `self - other`.
        return -1 * self.__sub__(other)

    def __mul__(self, vector: Union["ThreeVector", float]) -> "ThreeVector":
        if isinstance(vector, ThreeVector):
            # Vector * vector is the cross product.
            new_x = self._y * vector._z - self._z * vector._y
            new_y = self._z * vector._x - self._x * vector._z
            new_z = self._x * vector._y - self._y * vector._x
            return ThreeVector(new_x, new_y, new_z)
        elif isinstance(vector, (int, float, np.floating)):
            return ThreeVector(
                self._x * vector, self._y * vector, self._z * vector
            )
        else:
            raise ValueError(f"Can not multiply ThreeVector by {type(vector)}")

    def __rmul__(self, other):
        return self.__mul__(other)

    def __len__(self):
        # Bug fix: the old code returned len(self._vector), but _vector
        # was never assigned, so len() always raised AttributeError.
        return len(self._x)

    def __getitem__(
            self, item: Union[int, str, slice]
    ) -> Union["ThreeVector", pd.Series]:
        if isinstance(item, (slice, int)) or \
                isinstance(item, np.ndarray) and item.dtype == bool:
            return ThreeVector(
                self._x[item], self._y[item], self._z[item]
            )
        elif isinstance(item, str) and item in ("x", "y", "z"):
            return getattr(self, f"_{item}").copy()
        else:
            raise ValueError(f"Can not index with {item!r}")

    def split(self, count) -> List["ThreeVector"]:
        """Split the vector into `count` equally sized sub-vectors."""
        vectors = []
        xs = np.split(self._x, count)
        ys = np.split(self._y, count)
        zs = np.split(self._z, count)
        for x, y, z in zip(xs, ys, zs):
            vectors.append(ThreeVector(x, y, z))
        return vectors

    def get_copy(self):
        """Return a deep copy of this vector."""
        return ThreeVector(self._x.copy(), self._y.copy(), self._z.copy())

    def get_dot(self, vector: "ThreeVector") -> np.ndarray:
        """Dot product with another ThreeVector."""
        if isinstance(vector, ThreeVector):
            return (
                self._x * vector._x + self._y * vector._y +
                self._z * vector._z
            )
        else:
            raise ValueError("Dot product only works with another ThreeVector")

    def get_length_squared(self) -> Union[np.ndarray, float]:
        """Squared Euclidean length (avoids the sqrt)."""
        return self._x**2 + self._y**2 + self._z**2
10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | from ._load import load 20 | -------------------------------------------------------------------------------- /PyPWA/plugins/_load.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 
_LOGGER = logging.getLogger(__name__)


def load(root: type(importlib), plugin_type: str) -> List[Any]:
    """Scan a package for plugin modules and collect their metadata.

    Parameters
    ----------
    root : module
        The package whose submodules should be scanned; must expose
        ``__path__`` and ``__name__``.
    plugin_type : str
        Human readable plugin category, used only in error messages.

    Returns
    -------
    List[Any]
        The ``metadata`` objects of every submodule that imported
        cleanly and defined one; failed submodules are dropped.
    """
    plugins = []
    plugin_path, plugin_name = (root.__path__, root.__name__ + ".")

    for _finder, name, _is_pkg in pkgutil.iter_modules(plugin_path, plugin_name):
        plugins.append(_import_plugin(name, plugin_type))
        # Lazy %-style args so formatting only happens if DEBUG is on.
        _LOGGER.debug("Loaded Data Plugin: %s", name)

    return [plugin for plugin in plugins if plugin]  # Remove Nones


def _import_plugin(name: str, plugin_type: str) -> Any:
    """Import `name` and return its module-level ``metadata`` object.

    Returns None (after logging) when the module fails to import or has
    no ``metadata`` attribute, so `load` can filter it out.
    """
    try:
        return importlib.import_module(name).metadata
    except ImportError as error:
        _LOGGER.exception(error)
    except AttributeError as error:
        _LOGGER.error(
            f"{plugin_type} plugin {name} has no metadata object! {error}"
        )
    return None
10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | """ 20 | Handles EVIL to / from memory. 21 | 22 | The objects in this file are dedicated to reading the EVIL files from disk 23 | and into memory. This file type is being depreciated for many reasons, and 24 | will live here until it shrivels away, is completely forgotten, and dies. 25 | 26 | EVIL (Expanded Variable Identification Lists) earned their name from their 27 | inefficient nature when it comes to reading in, writing out, or simply 28 | existing, its a name given to these EVIL formats out of a mixture of spite 29 | and love by current and former developers alike. 30 | 31 | This format exists currently only as backwards compatibility, and may not 32 | be bug free or entirely optimized, and may never be. If you are a user 33 | trying to figure out what you should export your data to, or a developer 34 | trying to learn the nature of data within PyPWA, you should move your 35 | attention to CSV/TSV in the SV object and forget that this ever existed. 
class _EVILDataPlugin(templates.IDataPlugin):
    """Registers the EVIL (comma separated ``name=value``) format with
    the data processor plugin system."""

    def __repr__(self):
        return f"{self.__class__.__name__}()"

    @property
    def plugin_name(self):
        return "EVIL"

    def get_memory_parser(self):
        # Whole-file parser producing a structured numpy array
        return _EVILMemory()

    def get_reader(self, filename, use_pandas):
        # Event-at-a-time iterator over the file
        return _EVILReader(filename, use_pandas)

    def get_writer(self, filename):
        return _EVILWriter(filename)

    def get_read_test(self):
        return _EVILDataTest()

    @property
    def supported_extensions(self):
        return [".txt", ".kvars"]

    @property
    def supported_data_types(self):
        return [DataType.STRUCTURED]


metadata = _EVILDataPlugin()


class _EVILDataTest(templates.IReadTest):
    """Cheap sniff test: does the first line look like ``a=1,b=2,...``?"""

    def __repr__(self):
        return f"{self.__class__.__name__}()"

    def can_read(self, file_location: Path) -> bool:
        try:
            with file_location.open() as stream:
                line = stream.readline()
            # Each column is `name=value` and columns are comma separated,
            # so a valid line has exactly one more "=" than ",".
            equal_count = line.count("=")
            comma_count = line.count(",") + 1
            # Bug fix: return an actual bool; the old expression returned
            # the integer equal_count despite the annotation.
            return bool(equal_count and equal_count == comma_count)
        except Exception:
            # Binary / unreadable files simply are not EVIL data.
            return False


class _EVILReader(templates.ReaderBase):
    """Reads an EVIL file one event (one line) at a time."""

    def __init__(self, filename: Path, use_pandas):
        self.__use_pandas = use_pandas
        self.__num_event: Union[int, None] = None  # lazy line-count cache
        self.__current_event_count: int = 0
        self.__filename = filename
        self.__file_handle = filename.open()
        self.__numpy_array = self.__get_numpy_array()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.__filename})"

    def __get_numpy_array(self) -> np.ndarray:
        # Peek at the first line to learn the column names, then rewind.
        names = [column.split("=")[0] for column in self.__get_columns()]
        types = [(str(name), "f8") for name in names]
        self.__file_handle.seek(0)
        return np.empty(1, types)

    def next(self) -> Union[pd.Series, np.ndarray]:
        for column in self.__get_columns():
            name, value = column.split("=")
            self.__numpy_array[name] = value

        if not self.__use_pandas:
            # NOTE: this is a view into a reused one-event buffer; it is
            # overwritten by the next call, so callers must copy to keep it.
            return self.__numpy_array[0]

        # If you don't copy the Series will break next call
        self.__current_event_count += 1
        return pd.Series(
            [a for a in self.__numpy_array[0]],
            self.__numpy_array.dtype.names,
            name=self.__current_event_count - 1
        )

    def __get_columns(self) -> List[str]:
        string = self.__file_handle.readline().strip("\n").strip(" ")
        if string == "":
            # An empty read means end of file for line based data.
            raise StopIteration
        return string.split(",")

    def get_event_count(self) -> int:
        if not self.__num_event:
            self.__num_event = misc.get_file_length(self.__filename)
        return self.__num_event

    def reset(self):
        self.__file_handle.seek(0)

    def close(self):
        self.__file_handle.close()

    @property
    def fields(self):
        return [name for name in self.__numpy_array.dtype.names]

    @property
    def data_type(self) -> DataType:
        return DataType.STRUCTURED

    @property
    def input_path(self) -> Path:
        return self.__filename


class _EVILWriter(templates.WriterBase):
    """Writes events out as comma separated ``name=value`` lines."""

    def __init__(self, filename: Path):
        self.__column_names: Union[List[str], None] = None
        self.__filename = filename
        self.__file_handle = filename.open("w")

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.__filename})"

    def write(self, data: Union[np.ndarray, pd.Series]):
        self.__error_check(data)
        line = self.__get_line(data)
        self.__file_handle.write(line)

    def __error_check(self, data: Union[np.ndarray, pd.Series]):
        # The column names are learned from the first event written.
        if not self.__column_names:
            if isinstance(data, pd.Series):
                self.__column_names = list(data.keys())
            elif hasattr(data, "dtype") and data.dtype.names:
                self.__column_names = data.dtype.names
            else:
                raise ValueError(f"KV doesn't understand type {type(data)}")

    def __get_line(self, data: Union[pd.DataFrame, np.ndarray]) -> str:
        line = ""
        for column_index, column in enumerate(self.__column_names):
            line += "," if column_index > 0 else ""
            line += "%s=%.20f" % (column, data[column])
        return line + "\n"

    def close(self):
        self.__file_handle.close()

    @property
    def output_path(self) -> Path:
        return self.__filename


class _EVILMemory(templates.IMemory):
    """Parses or writes an entire EVIL file in one shot."""

    def __init__(self):
        super(_EVILMemory, self).__init__()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"

    def parse(self, filename: Path) -> np.ndarray:
        with _EVILReader(filename, False) as reader:
            data = self.__get_empty_array(filename, len(reader))
            for index, event in enumerate(reader):
                # Assigning into the array copies the reader's reused buffer.
                data[index] = event
        return data

    @staticmethod
    def __get_empty_array(filename: Path, array_length: int) -> np.ndarray:
        with filename.open() as stream:
            # Consistency fix: strip the header line exactly the way
            # _EVILReader does, so the column names (and dtype) match the
            # reader's even when the line has surrounding whitespace.
            split = stream.readline().strip("\n").strip(" ").split(",")
        types = [(column.split("=")[0], "f8") for column in split]
        return np.empty(array_length, np.dtype(types, align=True))

    def write(self, filename: Path, data: Union[pd.DataFrame, np.ndarray]):
        with _EVILWriter(filename) as iterator:
            if isinstance(data, pd.DataFrame):
                for index, event in data.iterrows():
                    iterator.write(event)
            else:
                for i in range(len(data)):
                    iterator.write(data[i])
".txt", ".sel", ".bamp"] 58 | 59 | @property 60 | def supported_data_types(self): 61 | return [DataType.BASIC, DataType.STRUCTURED] 62 | 63 | 64 | metadata = _NumpyDataPlugin() 65 | 66 | 67 | class _NumpyDataTest(templates.IReadTest): 68 | 69 | def __repr__(self): 70 | return f"{self.__class__.__name__}()" 71 | 72 | def can_read(self, filename): 73 | if self.__can_load_binary(filename) or self.__can_load_text(filename): 74 | return True 75 | else: 76 | return False 77 | 78 | @staticmethod 79 | def __can_load_binary(file_location: Path) -> bool: 80 | try: 81 | np.load(str(file_location)) 82 | return True 83 | except Exception: 84 | return False 85 | 86 | @staticmethod 87 | def __can_load_text(file_location: Path) -> bool: 88 | try: 89 | np.loadtxt(str(file_location)) 90 | return True 91 | except Exception: 92 | return False 93 | 94 | 95 | class _NumpyReader(templates.ReaderBase): 96 | 97 | def __init__(self, filename: Path, use_pandas): 98 | self.__filename = filename 99 | self.__array = _NumpyMemory().parse(self.__filename) 100 | self.__counter = 0 101 | self.__use_pandas = use_pandas 102 | 103 | def __repr__(self) -> str: 104 | return f"{self.__class__.__name__}({self.__filename})" 105 | 106 | def get_event_count(self) -> int: 107 | return len(self.__array) 108 | 109 | def next(self) -> Union[pd.Series, np.ndarray]: 110 | if self.__counter < len(self): 111 | self.__counter += 1 112 | if self.__use_pandas: 113 | if self.__array.dtype.names: 114 | return pd.DataFrame(self.__array).iloc[self.__counter-1] 115 | else: 116 | return pd.Series(self.__array).iloc[self.__counter-1] 117 | else: 118 | return self.__array[self.__counter-1] 119 | else: 120 | raise StopIteration 121 | 122 | def reset(self): 123 | self.__counter = 0 124 | 125 | def close(self): 126 | del self.__array 127 | 128 | @property 129 | def fields(self): 130 | return [name for name in self.__array.dtype.names] 131 | 132 | @property 133 | def data_type(self) -> DataType: 134 | if self.__array.dtype.names: 
class _NumpyWriter(templates.WriterBase):
    """Accumulates events in memory and writes them to disk on close.

    The output format is chosen by the filename's suffix: plain text for
    .txt, integer text for .pf/.sel, raw binary for .bamp, and numpy's
    binary .npy format for everything else.
    """

    def __init__(self, filename: Path):
        self.__array: np.ndarray = None  # lazily allocated on first write
        self.__event_count = 0  # number of valid events stored in __array
        self.__filename = filename

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"

    def write(self, data: np.void):
        """Append a single event; pandas rows are converted to numpy first."""
        if isinstance(data, (pd.Series, pd.DataFrame)):
            data = common.pandas_to_numpy(data)

        if not isinstance(self.__array, np.ndarray):
            self.__array = np.zeros(1, dtype=data.dtype)
        elif self.__event_count == len(self.__array):
            # Double the capacity so repeated writes stay O(n) amortized;
            # growing by one element per write was accidentally O(n^2).
            self.__array = np.resize(self.__array, 2 * len(self.__array))

        self.__array[self.__event_count] = data
        self.__event_count += 1

    def close(self):
        """Flush the buffered events to disk in the suffix-selected format."""
        if isinstance(self.__array, np.ndarray):
            # Trim the over-allocated capacity down to the written events.
            self.__array = self.__array[:self.__event_count]

        if self.__filename.suffix == ".txt":
            np.savetxt(str(self.__filename), self.__array)
        elif self.__filename.suffix in (".pf", ".sel"):
            np.savetxt(str(self.__filename), self.__array, fmt="%d")
        elif self.__filename.suffix == ".bamp":
            with self.__filename.open("wb") as stream:
                self.__array.tofile(stream)
        else:
            np.save(str(self.__filename), self.__array)

    @property
    def output_path(self) -> Path:
        return self.__filename
filename: Path, 203 | data: Union[np.ndarray, pd.DataFrame, pd.Series] 204 | ): 205 | 206 | if not isinstance(data, np.ndarray): 207 | data = common.pandas_to_numpy(data) 208 | 209 | if filename.suffix in (".pf", ".sel"): 210 | np.savetxt(str(filename), data, fmt="%d") 211 | elif filename.suffix == ".bamp": 212 | with filename.open("wb") as stream: 213 | data.tofile(stream) 214 | elif filename.suffix == ".txt": 215 | np.savetxt(str(filename), data) 216 | else: 217 | np.save(str(filename), data) 218 | -------------------------------------------------------------------------------- /PyPWA/plugins/data/sv.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 
class _SvDataTest(templates.IReadTest):
    """Checks whether a file looks like a delimited sheet with a header row."""

    def __repr__(self):
        return f"{self.__class__.__name__}()"

    def can_read(self, filename: Path) -> bool:
        """Return True when csv.Sniffer detects a header in the file's sample."""
        sniffer = csv.Sniffer()
        # Bias the sniffer toward the delimiters this plugin supports.
        sniffer.preferred = ['\t', ',']

        try:
            with filename.open() as stream:
                return sniffer.has_header(stream.read(HEADER_SEARCH_BITS))
        except Exception:
            # Unreadable or non-text files simply are not SV sheets.
            return False
= next(self.__reader) # First call is header 99 | self.__array = self.__get_data_array() 100 | self.__use_series = use_pandas 101 | 102 | def __repr__(self) -> str: 103 | return f"{self.__class__.__name__}({self.__filename})" 104 | 105 | def __get_reader(self) -> csv.DictReader: 106 | search_bits = self.__file_handle.read(HEADER_SEARCH_BITS) 107 | dialect = csv.Sniffer().sniff(search_bits, delimiters=[",", "\t"]) 108 | self.__file_handle.seek(0) 109 | reader = csv.reader(self.__file_handle, dialect) 110 | return reader 111 | 112 | def __get_data_array(self): 113 | array_type = [(name, "f8") for name in self.__elements] 114 | return np.zeros(1, array_type) 115 | 116 | def next(self) -> Union[pd.Series, np.ndarray]: 117 | values = next(self.__reader) 118 | if not len(values): 119 | raise StopIteration 120 | 121 | for column_index, element in enumerate(self.__elements): 122 | self.__array[0][element] = values[column_index] 123 | 124 | if self.__use_series: 125 | self.__current_count += 1 126 | return pd.Series( 127 | [a for a in self.__array[0]], self.__array.dtype.names, 128 | name=self.__current_count - 1 129 | ) 130 | else: 131 | return self.__array[0] 132 | 133 | def get_event_count(self) -> int: 134 | if not self.__event_count: 135 | length = misc.get_file_length(self.__filename) 136 | self.__event_count = length - 1 # Exclude header 137 | return self.__event_count 138 | 139 | def reset(self): 140 | self.__file_handle.seek(0) 141 | self.__reader = self.__get_reader() 142 | 143 | def close(self): 144 | self.__file_handle.close() 145 | 146 | @property 147 | def fields(self): 148 | return [name for name in self.__array.dtype.names] 149 | 150 | @property 151 | def data_type(self) -> DataType: 152 | return DataType.STRUCTURED 153 | 154 | @property 155 | def input_path(self) -> Path: 156 | return self.__filename 157 | 158 | 159 | class _SvWriter(templates.WriterBase): 160 | 161 | def __init__(self, filename: Path): 162 | self.__filename = filename 163 | 
class _SvMemory(templates.IMemory):
    """Parses and writes whole delimited-variable (.csv/.tsv) files via pandas."""

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"

    def parse(self, filename: Path) -> np.ndarray:
        """Load a .tsv/.csv file into a structured numpy array."""
        separator = "\t" if filename.suffix == ".tsv" else ","
        data = pd.read_csv(filename, sep=separator)
        return common.pandas_to_numpy(data)

    def write(
            self, filename: Path,
            data: Union[pd.DataFrame, pd.Series, np.ndarray]
    ):
        """Write *data* as a delimited sheet; the suffix selects the delimiter.

        Accepts anything ``pd.DataFrame`` can wrap, including numpy
        structured arrays (the previous one-member ``Union[pd.DataFrame]``
        annotation under-described this).
        """
        data = pd.DataFrame(data)
        if filename.suffix == ".tsv":
            data.to_csv(filename, sep="\t", index=False)
        else:
            data.to_csv(filename, index=False)
def start_masking(arguments: List[str] = sys.argv[1:]):
    """Masks or converts an input data file using .pf/.sel mask files.

    Returns 1 on user error (missing files, conflicting flags, or a mask
    whose length doesn't match the input); otherwise returns None after
    writing the selected events to the output file.
    """
    args = _arguments(arguments)
    file_manager = processor.DataProcessor(False)

    # Load the input file
    if args.input.exists():
        input_file = file_manager.get_reader(args.input)
        output_file = file_manager.get_writer(args.output, input_file.data_type)
    else:
        print(f"{args.input} must exist!")
        return 1

    # Select the mask-combining logic. The conflict check must come first:
    # testing it after `if args.use_or` made that branch unreachable.
    if args.use_or and args.use_xor:
        print("Only select OR or XOR, not both!")
        return 1
    elif args.use_or:
        logic = npy.logical_or
    elif args.use_xor:
        logic = npy.logical_xor
    else:
        logic = npy.logical_and

    # argparse's "append" action leaves mask as None when -m is never given
    mask_files = args.mask or []

    # Only show the mask-merging progress bar when there are several masks
    disable = len(mask_files) <= 1

    # Merge together masks
    pf = None
    for mask_file in tqdm.tqdm(mask_files, disable=disable):  # type: Path
        if mask_file.exists():
            current_pf = file_manager.parse(mask_file)
        else:
            print(f"{mask_file} must exist!")
            return 1

        # Convert selection (index) arrays to a boolean mask.
        if current_pf.dtype in ("u4", "u8"):
            new_pf = npy.zeros(len(input_file), bool)
            new_pf[current_pf] = True
            current_pf = new_pf

        pf = current_pf if pf is None else logic(pf, current_pf)

    # Handle no masks provided: pass every event through
    if pf is None:
        pf = npy.ones(len(input_file), bool)

    if len(pf) != len(input_file):
        print(
            f"Masking data isn't the same length as input!"
            f" Mask is {len(pf)} and input is {len(input_file)}."
        )
        return 1

    # An all-true mask means we're only converting between formats
    if pf.all():
        description = "Converting:"
    else:
        description = "Masking:"

    # Write the selected events to the output
    progress = tqdm.tqdm(
        zip(pf, input_file), total=len(pf), unit="Events",
        desc=description
    )
    for do_write, event in progress:
        if do_write:
            output_file.write(event)

    input_file.close()
    output_file.close()
It is constantly tested to work with Python version 3.7 and newer.
We've set up a user channel on Anaconda so that you can install PyPWA
into your Anaconda installation with the following command
78 | 79 | Funding 80 | ------- 81 | 82 | This project is partially supported by NSF Grants #1507208 and #1820235 83 | -------------------------------------------------------------------------------- /anaconda-environment.yml: -------------------------------------------------------------------------------- 1 | name: PyPWA 2 | channels: 3 | - defaults 4 | dependencies: 5 | - python # Current latest Python 6 | - conda-forge::emcee>3.0 # Optional dependency for MCMC 7 | - jupyterlab # Jupyter and Jupyter Lab support 8 | - ipython # Quick CLI python interface 9 | - tensorflow # Machine Learning 10 | - pytorch # Machine Learning using Torch 11 | - tabulate # Better iMinuit tables 12 | - scipy # Scientific Python 13 | - matplotlib # Graphing 14 | - seaborn # Optional; Nicer Graphs 15 | - ipympl # 3D Graphs 16 | - tqdm # Progress Bars 17 | - iminuit 18 | - numpy 19 | - pandas 20 | - numexpr 21 | - pytest 22 | 23 | -------------------------------------------------------------------------------- /dev-environment.yml: -------------------------------------------------------------------------------- 1 | name: PyPWA-dev 2 | dependencies: 3 | - python=3.9 # Current latest Python 4 | - conda-forge::emcee # MCMC Fitting Optional dependency 5 | - jupyterlab # Jupyter and Jupyter Lab support 6 | - ipython # Quick CLI python interface 7 | - pytorch # Machine Learning using Torch 8 | - tabulate # Better iMinuit tables 9 | - tqdm # Progress Bars 10 | - iminuit # Default minimizer 11 | - numpy # Arrays and optimizations 12 | - pandas # A powerful statistics package that's used everywhere 13 | - matplotlib # Adds support for plotting 14 | - numexpr # Accelerates numpy by removing intermediate steps 15 | - pytest # Used to run the PyPWA unit tests 16 | - pytest-cov # Coverage reports for PyTest 17 | - sphinx # Document generating engine 18 | - sphinx_rtd_theme # The theme used by PyPWA for Sphinx 19 | - sphinx-autobuild # Sphinx Autobuilder for the Makefile 20 | - sphinx-autodoc-typehints 
# Support for Python 3+ typehints 21 | - sphinxcontrib-bibtex # LaTeX Bibtex References 22 | - nbsphinx # Allows for some pages to be Notebooks 23 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = python -msphinx 7 | SPHINXPROJ = PyPWA 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | livehtml: 18 | sphinx-autobuild -b html "$(SOURCEDIR)" "$(BUILDDIR)"/html 19 | 20 | # Catch-all target: route all unknown targets to Sphinx using the new 21 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 22 | %: Makefile 23 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 24 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | recommonmark~=0.7.1 2 | Sphinx~=5.0.2 3 | sphinx-autodoc-typehints~=1.18.3 4 | sphinxcontrib-bibtex~=2.4.2 5 | nbsphinx~=0.8.9 6 | jinja2~=3.1.2 7 | ipython~=8.4.0 8 | -------------------------------------------------------------------------------- /docs/source/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ../../CHANGELOG.md -------------------------------------------------------------------------------- /docs/source/about.rst: -------------------------------------------------------------------------------- 1 | ##### 2 | About 3 | ##### 4 | 5 | The PyPWA Project aims to develop a software framework that can be used to 6 | perform parametric model fitting 
PyPWA extracts model parameters from data by performing
extended likelihood fits. Two versions of the software are developed: one
bibliography:: pypwa.bib 56 | :list: bullet 57 | :all: -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from datetime import datetime 4 | import sphinx_rtd_theme 5 | 6 | 7 | # Extensions 8 | extensions = [ 9 | 'sphinx.ext.mathjax', 'sphinx.ext.todo', 10 | 'sphinx.ext.autodoc', 'sphinx_autodoc_typehints', 11 | 'sphinxcontrib.bibtex', 'recommonmark', 12 | 'sphinx.ext.napoleon', 'nbsphinx' 13 | ] 14 | 15 | # We put the returns in the docstring, this prevents duplication 16 | typehints_document_rtype = False 17 | 18 | 19 | # Basic file information 20 | source_suffix = ['.rst', '.md'] 21 | master_doc = 'index' 22 | 23 | 24 | # Project information 25 | project = 'PyPWA' 26 | copyright = f'{datetime.now().year}, Norfolk State University' 27 | author = "PyPWA Team" 28 | version = "3.3.0" 29 | release = "Development" 30 | 31 | 32 | # Sphinx Extra Options 33 | language = "en" 34 | exclude_patterns = ['_build', '**.ipynb_checkpoints'] 35 | pygments_style = 'sphinx' 36 | todo_include_todos = False 37 | 38 | 39 | # HTML settings 40 | html_theme = 'sphinx_rtd_theme' 41 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 42 | htmlhelp_basename = 'PyPWAdoc' 43 | 44 | 45 | # LaTeX settings 46 | latex_elements = { 47 | 'papersize': 'letterpaper', 48 | 'pointsize': '11pt', 49 | 'preamble': r''' 50 | \usepackage{charter} 51 | \usepackage[defaultsans]{lato} 52 | \usepackage{inconsolata} 53 | ''', 54 | } 55 | latex_documents = [ 56 | (master_doc, 'PyPWA.tex', 'PyPWA Documentation', 'PyPWA Team', 'manual'), 57 | ] 58 | 59 | 60 | # manpages settings 61 | man_pages = [(master_doc, 'pypwa', 'PyPWA Documentation', [author], 1)] 62 | 63 | 64 | # Biblitex info 65 | texinfo_documents = [ 66 | ( 67 | master_doc, 'PyPWA', 'PyPWA Documentation', 68 | author, 'PyPWA', 'Python Partial Wave Analysis Toolkit.', 69 | 
'Scientific Studies' 70 | ), 71 | ] 72 | bibtex_bibfiles = ["pypwa.bib"] 73 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | ##### 2 | PyPWA 3 | ##### 4 | 5 | Partial Wave Analysis done right. 6 | 7 | PyPWA is a Python partial wave analysis package that utilizes Numpy, 8 | iminuit, and PyTables to provide a high speed analysis framework that 9 | strives to help you get your work done without getting in your way. 10 | 11 | It's a package that can be used either as a standalone program inside 12 | your terminal, or as Python script or Jupyter Notebook, whatever your 13 | preference may be. 14 | 15 | You can take a look at our code directly 16 | `here `_ 17 | 18 | .. toctree:: 19 | :hidden: 20 | 21 | about 22 | installing 23 | CHANGELOG 24 | 25 | .. toctree:: 26 | :caption: Examples 27 | :hidden: 28 | 29 | examples/2Dgauss 30 | examples/demo_JPAC_sim 31 | examples/demo_JPAC_fit 32 | examples/demo_JPAC_pre 33 | 34 | .. toctree:: 35 | :caption: References 36 | :hidden: 37 | 38 | references/data 39 | references/sim_fit 40 | references/plotting 41 | -------------------------------------------------------------------------------- /docs/source/installing.rst: -------------------------------------------------------------------------------- 1 | 2 | ############ 3 | Installation 4 | ############ 5 | 6 | PyPWA can be installed with ``pip`` or ``conda`` with Python 3.7 or newer 7 | 8 | 9 | 10 | Conda 11 | ##### 12 | 13 | Thanks to tools provided by Anaconda, you can easily install PyPWA and all 14 | it's dependencies with a simple one line command. Check out `Anaconda's 15 | user guide `_ if you're 16 | new to using Anaconda. 17 | 18 | .. code-block:: sh 19 | 20 | conda install -c markjonestx pypwa 21 | 22 | If you want tools from PWA2000 (GAMP, HGAMP, VAMP, PPGEN) we've included them 23 | as well 24 | 25 | .. 
note:: 26 | PWA2000 is currently only available on Linux installs of Anaconda. 27 | 28 | .. code-block:: sh 29 | 30 | conda install -c markjonesyx pwa2000 31 | 32 | Pip 33 | ### 34 | 35 | .. warning:: 36 | 37 | Pip can interfere with your system python. Make sure to never run 38 | pip as root, and only perform local installs. 39 | 40 | **Fetch the latest version of PyPWA and install locally** 41 | 42 | .. note:: 43 | 44 | If you are using pip somewhere behind a firewall, you may need to 45 | pin pip's servers using 46 | ``pip install --trusted-host pypi.org --trusted-host pythonhosted.org`` 47 | 48 | .. code-block:: sh 49 | 50 | git clone --depth=1 https://github.com/JeffersonLab/PyPWA.git 51 | cd PyPWA 52 | pip install --local . 53 | -------------------------------------------------------------------------------- /docs/source/pypwa.bib: -------------------------------------------------------------------------------- 1 | 2 | 3 | @techreport{chung1971spin, 4 | title={Spin formalisms}, 5 | author={Chung, Suh Urk}, 6 | year={1971}, 7 | institution={CERN} 8 | } 9 | 10 | @article{jacob1959general, 11 | title={On the general theory of collisions for particles with spin}, 12 | author={Jacob, M and Wick, Gr C}, 13 | journal={Annals of Physics}, 14 | volume={7}, 15 | number={4}, 16 | pages={404--428}, 17 | year={1959}, 18 | publisher={Elsevier} 19 | } 20 | 21 | @article{schilling1970analysis, 22 | title={On the analysis of vector-meson production by polarized photons}, 23 | author={Schilling, K and Seyboth, P and Wolf, G}, 24 | journal={Nuclear Physics B}, 25 | volume={15}, 26 | number={2}, 27 | pages={397--412}, 28 | year={1970}, 29 | publisher={Elsevier} 30 | } 31 | 32 | @article{zemach1965use, 33 | title={Use of angular-momentum tensors}, 34 | author={Zemach, Charles}, 35 | journal={Physical Review}, 36 | volume={140}, 37 | number={1B}, 38 | pages={B97}, 39 | year={1965}, 40 | publisher={APS} 41 | } 42 | 43 | @article{barlow1990extended, 44 | title={Extended maximum 
likelihood}, 45 | author={Barlow, Roger}, 46 | journal={Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment}, 47 | volume={297}, 48 | number={3}, 49 | pages={496--506}, 50 | year={1990}, 51 | publisher={Elsevier} 52 | } 53 | 54 | @article{orear1982notes, 55 | title={Notes on statistics for physicists (1958)}, 56 | author={Orear, J}, 57 | journal={UCRL-8417}, 58 | year={1982} 59 | } 60 | 61 | @misc{eadie1982statistical, 62 | title={Statistical Methods in Experimental Physics, 2nd reprint}, 63 | author={Eadie, WT and Drijard, D and James, FE and Roos, M and Sadoulet, B}, 64 | year={1982}, 65 | publisher={North-Holland, Amsterdam} 66 | } 67 | 68 | @techreport{bevington1969data, 69 | title={Data reduction and error analysis for the physical sciences}, 70 | author={BEVINGTON, Ph R}, 71 | year={1969}, 72 | institution={McGraw-Hill} 73 | } 74 | 75 | @article{james2004minuit, 76 | title={MINUIT Reference Manual, CERN Program Library Long Writeup D506}, 77 | author={James, F}, 78 | journal={James and M. 
Winkler, MINUIT User’s Guide, CERN}, 79 | year={1994} 80 | } 81 | 82 | @article{jamesminuit, 83 | title={MINUIT User’s Guide}, 84 | author={James, Fred and Winkler, Matthias and others}, 85 | year = {2004}, 86 | journal={MIGRAD CERN} 87 | } 88 | 89 | @book{mackay2003information, 90 | title={Information theory, inference and learning algorithms}, 91 | author={MacKay, David JC and Mac Kay, David JC}, 92 | year={2003}, 93 | publisher={Cambridge university press} 94 | } 95 | 96 | @article{salgado2014partial, 97 | title={On the partial-wave analysis of mesonic resonances decaying to multiparticle final states produced by polarized photons}, 98 | author={Salgado, Carlos W and Weygand, Dennis P}, 99 | journal={Physics Reports}, 100 | volume={537}, 101 | number={1}, 102 | pages={1--58}, 103 | year={2014}, 104 | publisher={Elsevier} 105 | } 106 | 107 | @inproceedings{skilling2004nested, 108 | title={Nested sampling}, 109 | author={Skilling, John}, 110 | booktitle={AIP Conference Proceedings}, 111 | volume={735}, 112 | number={1}, 113 | pages={395--405}, 114 | year={2004}, 115 | organization={AIP} 116 | } 117 | -------------------------------------------------------------------------------- /docs/source/references/data.rst: -------------------------------------------------------------------------------- 1 | 2 | ================= 3 | Working with Data 4 | ================= 5 | 6 | 7 | Reading and Writing Data 8 | ======================== 9 | 10 | This is the reference documentation for the functions and classes 11 | inside PyPWA that can be used for parsing and writing data to disk. 
There exist five different methods to do so:
This is a good way to 76 | save essential data for a Jupyter Notebook so that if the kernel is 77 | rebooted, data isn't lost. 78 | 79 | .. autofunction:: PyPWA.cache.read 80 | .. autofunction:: PyPWA.cache.write 81 | 82 | 83 | 84 | Binning 85 | ======= 86 | 87 | We provide functions that make binning data in memory an easy process, 88 | however for HDF5 a future more in-depth example and documentation 89 | will be made available. 90 | 91 | .. autofunction:: PyPWA.bin_with_fixed_widths 92 | .. autofunction:: PyPWA.bin_by_range 93 | .. autofunction:: PyPWA.bin_by_list 94 | 95 | 96 | .. _vectors: 97 | 98 | Builtin Vectors 99 | =============== 100 | 101 | PyPWA includes support for both 3 and 4 vector classes, complete with 102 | methods to aid operating with vector data. Each vector utilizes Numpy 103 | for arrays and numerical operations. 104 | 105 | .. autoclass:: PyPWA.ParticlePool 106 | :members: 107 | :undoc-members: 108 | :inherited-members: 109 | 110 | .. autoclass:: PyPWA.Particle 111 | :members: 112 | :inherited-members: 113 | 114 | .. autoclass:: PyPWA.FourVector 115 | :members: 116 | :inherited-members: 117 | 118 | .. autoclass:: PyPWA.ThreeVector 119 | :members: 120 | :inherited-members: 121 | -------------------------------------------------------------------------------- /docs/source/references/plotting.rst: -------------------------------------------------------------------------------- 1 | 2 | ======== 3 | Plotting 4 | ======== 5 | 6 | As an attempt to make plotting in Python easier, we are building a 7 | plotting library that attempts to solve the more specific plotting needs 8 | when working with high energy physics. The first plotting tool we have is 9 | to reproduce ROOT's LEGO plot, but more will come in the future. 10 | 11 | .. 
autofunction:: PyPWA.make_lego 12 | -------------------------------------------------------------------------------- /docs/source/references/sim_fit.rst: -------------------------------------------------------------------------------- 1 | ====================== 2 | Simulation and Fitting 3 | ====================== 4 | 5 | PyPWA defines both the monte carlo simulation method as well as the 6 | several likelihoods. To use these, the cost function or amplitude needs 7 | to be defined in a support object. 8 | 9 | * :ref:`Defining an Amplitude` describes how to define a 10 | function for use with the simulation and likelihoods. 11 | * :ref:`Simulating` describes the Monte Carlo Simulation 12 | methods. 13 | * :ref:`Likelihoods` describes the built in likelihoods. 14 | These likelihoods also automatically distribute the fitting function 15 | across several processors. 16 | * :ref:`Fitting` describes the built in minuit wrapper, as well 17 | as how to use the Likelihood objects with other optimizers. 18 | 19 | 20 | .. _amplitude: 21 | 22 | Defining an Amplitude 23 | --------------------- 24 | 25 | Amplitudes or cost functions can be defined for using either an Object 26 | Oriented approach, or a Functional programming approach. If using pure 27 | functions for the function, wrap the calculation function and optional 28 | setup function in `PyPWA.FunctionalAmplitude`, if using the OOP approach, 29 | extend the `PyPWA.NestedFunction` abstract class when defining the 30 | amplitude. 31 | 32 | It is assumed by both the Likelihoods and Monte Carlo that the calculate 33 | functions of either methods will return a standard numpy array of final 34 | values. 35 | 36 | .. autoclass:: PyPWA.NestedFunction 37 | :members: 38 | 39 | .. autoclass:: PyPWA.FunctionAmplitude 40 | :members: 41 | 42 | 43 | .. 
_simulation: 44 | 45 | Simulating 46 | ---------- 47 | 48 | There are two choices when using the Monte Carlo Simulation method 49 | defined in PyPWA: Simulation in one pass producing the rejection list, 50 | or simulation in two passes to produce the intensities and finally the 51 | rejection list. Both methods will take advantage of SMP where available. 52 | 53 | * If doing a single pass, just use the `PyPWA.monte_carlo_simulation` 54 | function. This will take the fitting function defined from 55 | :ref:`Defining an Amplitude` along with the data, and return 56 | a single rejection list. 57 | * If doing two passes for more control over when the intensities and 58 | rejection list, use both `PyPWA.simulate.process_user_function` to 59 | calculate the intensity and local max value, and 60 | `PyPWA.simulate.make_rejection_list` to take the global max value and 61 | local intensity to produce the local rejection list. 62 | 63 | .. autofunction:: PyPWA.monte_carlo_simulation 64 | .. autofunction:: PyPWA.simulate.process_user_function 65 | .. autofunction:: PyPWA.simulate.make_rejection_list 66 | 67 | 68 | .. _likelihoods: 69 | 70 | Likelihoods 71 | ----------- 72 | 73 | PyPWA supports 3 unique likelihood types for use with either the Minuit 74 | wrapper or any optimizer that expects a function. All likelihoods have 75 | built in support for SMP when they're called, and require to be closed 76 | when no longer needed. 77 | 78 | * `PyPWA.LogLikelihood` defines the likelihood, and works with either 79 | the standard log likelihood, the binned log likelihood, or the extended 80 | log likelihood. 81 | * `PyPWA.ChiSquared` defines the ChiSquared method, supporting both the 82 | binned and standard ChiSquare. 83 | * `PyPWA.EmptyLikelihood` does no post operation on the final values 84 | except sum the array and return the final sum. 
This allows for defining 85 | unique likelihoods that have not already been defined, fitting functions 86 | that do not require a likelihood, or using the builtin multi processing 87 | without the weight of a standard likelihood. 88 | 89 | .. autoclass:: PyPWA.LogLikelihood 90 | :members: 91 | 92 | .. autoclass:: PyPWA.ChiSquared 93 | :members: 94 | 95 | .. autoclass:: PyPWA.EmptyLikelihood 96 | :members: 97 | 98 | 99 | .. _fitting: 100 | 101 | Fitting 102 | ------- 103 | 104 | PyPWA supplies a single wrapper around iMinuit's module. This is a 105 | convenience function to make working with Minuit's parameters easier. 106 | However, if wanting to use a different fitting function, like Scikit or 107 | Scipy, the likelihoods should work natively with them. 108 | 109 | Most optimizers built in Python assume the data is some sort of global 110 | variable, and the function passed to them is just accepting parameters 111 | to fit against. The Likelihoods take advantage of this by wrapping the 112 | data and the defined functions a wrapper that attempts to scale the 113 | function to several processors, while providing function-like capabilities 114 | by taking advantage of Python's builtin `__call__` magic function. 115 | 116 | This should allow the likelihoods to work with any optimizer, as long as 117 | they're expecting a function or callable object, and as long as the 118 | parameters they pass are pickle-able. 119 | 120 | .. autofunction:: PyPWA.minuit 121 | -------------------------------------------------------------------------------- /helper: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # coding=utf-8 4 | # 5 | # PyPWA, a scientific analysis toolkit. 
6 | # Copyright (C) 2016 JLab 7 | # 8 | # This program is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # This program is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with this program. If not, see . 20 | 21 | import argparse 22 | import os 23 | import shutil 24 | import subprocess 25 | import sys 26 | 27 | __credits__ = ["Mark Jones"] 28 | __version__ = "0.0.1" 29 | 30 | 31 | class _HelperArguments(object): 32 | 33 | def __init__(self): 34 | self.__parser = None 35 | self.__arguments = None 36 | 37 | def parse_arguments(self): 38 | self.__set_arguments() 39 | self.__parse_arguments() 40 | self.__quit_if_no_arguments() 41 | 42 | def __set_arguments(self): 43 | self.__set_parser() 44 | self.__add_clean_argument() 45 | self.__add_version_argument() 46 | 47 | def __set_parser(self): 48 | self.__parser = argparse.ArgumentParser( 49 | description="A helper utility for developers of PyPWA." 50 | ) 51 | 52 | def __add_clean_argument(self): 53 | self.__parser.add_argument( 54 | "--clean", "-c", action="store_true", 55 | help="Clean the development folder of build and cache files." 
56 | ) 57 | 58 | def __add_version_argument(self): 59 | self.__parser.add_argument( 60 | "--version", action="version", 61 | version="%(prog)s (version " + __version__ +")" 62 | ) 63 | 64 | def __parse_arguments(self): 65 | self.__arguments = self.__parser.parse_args() 66 | 67 | def __quit_if_no_arguments(self): 68 | if not self.clean: 69 | self.__parser.print_help() 70 | sys.exit() 71 | 72 | @property 73 | def clean(self): 74 | return self.__arguments.clean 75 | 76 | 77 | class _ProjectRoot(object): 78 | 79 | def root(self): 80 | # type: () -> str 81 | out, error = self.__git_command() 82 | self.__check_error(error) 83 | return out.decode().strip() 84 | 85 | def __git_command(self): 86 | # type: () -> Tuple[bytes, bytes] 87 | git = subprocess.Popen( 88 | "git rev-parse --show-toplevel", shell=True, 89 | stdout=subprocess.PIPE, stderr=subprocess.PIPE 90 | ) 91 | 92 | return git.communicate() 93 | 94 | def __check_error(self, error): 95 | # type: (bytes) -> None 96 | if error.decode(): 97 | raise RuntimeError(error) 98 | 99 | 100 | class _CleanProject(object): 101 | 102 | __CLEAN_FILES = [ 103 | ".coverage", "build", "dist", "lib", ".cache", "docs/_build", "docs/build" 104 | ] 105 | 106 | def clean(self, root_dir): 107 | self.__clean_files() 108 | self.__clean_pypwa_cache(root_dir) 109 | self.__clean_tests_cache(root_dir) 110 | 111 | def __clean_files(self): 112 | for item in self.__CLEAN_FILES: 113 | self.__try_to_remove_item(item) 114 | 115 | def __try_to_remove_item(self, item): 116 | try: 117 | shutil.rmtree(item) 118 | except OSError: 119 | pass 120 | 121 | def __clean_pypwa_cache(self, root_dir): 122 | for root, directories, items in os.walk(root_dir + "/PyPWA"): 123 | self.__sanitize(root, directories, items) 124 | 125 | def __clean_tests_cache(self, root_dir): 126 | for root, directories, items in os.walk(root_dir + "/tests"): 127 | self.__sanitize(root, directories, items) 128 | 129 | def __sanitize(self, root, directories, items): 130 | 
self.__sanitize_directories(root, directories) 131 | self.__sanitize_items(root, items) 132 | 133 | def __sanitize_directories(self, root, directories): 134 | if "__pycache__" in directories: 135 | shutil.rmtree(root + "/__pycache__") 136 | 137 | def __sanitize_items(self, root, items): 138 | for item in items: 139 | if ".pyc" in item: 140 | os.remove(root + "/" + item) 141 | 142 | 143 | class _GitStatus(object): 144 | 145 | def status(self): 146 | # type: () -> bool 147 | out, error = self.__git_command() 148 | self.__check_error(error) 149 | return self.__check_cleanliness(out) 150 | 151 | def __git_command(self): 152 | # type: () -> Tuple[bytes, bytes] 153 | git = subprocess.Popen( 154 | "git status --porcelain", shell=True, 155 | stdout=subprocess.PIPE, stderr=subprocess.PIPE 156 | ) 157 | 158 | return git.communicate() 159 | 160 | def __check_error(self, error): 161 | # type: (bytes) -> None 162 | if error.decode(): 163 | raise RuntimeError(error) 164 | 165 | def __check_cleanliness(self, results): 166 | if len(results) == 0: 167 | return True 168 | else: 169 | return False 170 | 171 | 172 | class HelperUtil(object): 173 | 174 | def __init__(self): 175 | self.__arguments = _HelperArguments() 176 | self.__root = _ProjectRoot() 177 | self.__cleaner = _CleanProject() 178 | 179 | def run(self): 180 | self.__arguments.parse_arguments() 181 | if self.__arguments.clean: 182 | self.__clean_program() 183 | 184 | def __clean_program(self): 185 | self.__cleaner.clean(self.__root.root()) 186 | 187 | if __name__ == "__main__": 188 | program = HelperUtil() 189 | program.run() 190 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal = 1 3 | 4 | [tool:pytest] 5 | norecursedirs=test_data/ 6 | addopts = --cov=PyPWA --no-cov-on-fail 7 | testpaths = 8 | tests 9 | 
-------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | 19 | import setuptools 20 | 21 | __author__ = "PyPWA Team and Contributors" 22 | __license__ = "GPLv3" 23 | __version__ = "4.0.1" 24 | __email__ = "salgado@jlab.org" 25 | __status__ = "Production" 26 | 27 | 28 | progs = "PyPWA.progs" 29 | 30 | entry_points = { 31 | "console_scripts": [ 32 | f"pymask = {progs}.masking:start_masking", 33 | ] 34 | } 35 | 36 | requires = [ 37 | "tqdm", # Progress Bars, used for PyMask 38 | "iminuit", # Default minimizer 39 | "numpy", # Arrays and optimizations 40 | "pandas", # A powerful statistics package that's used everywhere 41 | "matplotlib", # Adds support for plotting 42 | "numexpr", # Accelerates numpy by removing intermediate steps 43 | ] 44 | 45 | extras = { 46 | "emcee": ["emcee"], # Provides MCMC fitting 47 | "torch": ["torch"] # Provides PyTorch support 48 | } 49 | 50 | tests = [ 51 | 'pytest', 52 | 'pytest-runner', 53 | "pytest-cov" 54 | ] 55 | 56 | 57 | setuptools.setup( 58 | name="PyPWA", 59 | version=__version__, 60 | author=__author__, 61 | author_email=__email__, 62 | 
packages=setuptools.find_packages(), 63 | url="http//pypwa.jlab.org", 64 | license=__license__, 65 | description="General Partial Wave Analysis", 66 | test_suite="tests", 67 | entry_points=entry_points, 68 | keywords="PyPWA GeneralFitting Partial Wave Analysis Minimization", 69 | install_requires=requires, 70 | tests_require=tests, 71 | extras_require=extras, 72 | classifiers=[ 73 | "Development Status :: 5 - Production/Stable", 74 | "Environment :: Console", 75 | "Intended Audience :: Science/Research", 76 | "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", 77 | "Natural Language :: English", 78 | "Operating System :: POSIX :: Linux", 79 | "Operating System :: MacOS :: MacOS X", 80 | "Programming Language :: Python :: 3.8", 81 | "Programming Language :: Python :: 3.9", 82 | "Programming Language :: Python :: 3.10", 83 | "Topic :: Scientific/Engineering :: Mathematics", 84 | "Topic :: Scientific/Engineering :: Physics" 85 | ] 86 | ) 87 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | import numpy 4 | import pytest 5 | 6 | from PyPWA.libs import vectors 7 | 8 | 9 | def make_new_particle(geant_id): 10 | charge = numpy.random.choice([0, 1]) 11 | p = vectors.Particle(geant_id, charge, 500) 12 | p.x, p.y = numpy.random.rand(500), numpy.random.rand(500) 13 | p.z, p.e = numpy.random.rand(500), numpy.random.rand(500) 14 | return p 15 | 16 | 17 | def make_new_particle_pool(): 18 | new_particle_pool = [] 19 | for geant_id in [1, 3, 4, 7, 13]: 20 | new_particle_pool.append(make_new_particle(geant_id)) 21 | return vectors.ParticlePool(new_particle_pool) 22 | 23 | 24 | @pytest.fixture(scope="module") 25 | def random_particle_pool(): 26 | return make_new_particle_pool() 27 | -------------------------------------------------------------------------------- /tests/libs/file/test_misc.py: 
-------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from PyPWA.libs.file import misc 3 | 4 | 5 | ROOT = (Path(__file__).parent / "../../test_data/docs").resolve() 6 | SET1 = ROOT / "set1.csv" 7 | SET2 = ROOT / "set2.kvars" 8 | 9 | 10 | """ 11 | Tests File Hash 12 | """ 13 | 14 | 15 | def test_hash_is_string(): 16 | the_hash = misc.get_sha512_hash(SET1) 17 | assert isinstance(the_hash, str) 18 | assert len(the_hash) != 0 # We don't want an empty string 19 | 20 | 21 | """ 22 | Tests File Length 23 | """ 24 | 25 | 26 | def test_file_length_set1_csv(): 27 | assert misc.get_file_length(SET1) == 1001 28 | 29 | 30 | def test_file_length_set2_kvars(): 31 | assert misc.get_file_length(SET2) == 12 32 | -------------------------------------------------------------------------------- /tests/libs/file/test_processor.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pandas as pd 4 | import numpy as np 5 | import pytest 6 | 7 | from PyPWA.libs.file import processor 8 | 9 | DATA_DIR = (Path(__file__).parent / "../../test_data/docs").resolve() 10 | CSV = DATA_DIR / "set1.csv" 11 | EVIL = DATA_DIR / "set1.kvars" 12 | BOOL = DATA_DIR / "set1.pf" 13 | GAMP = DATA_DIR / "large.gamp" 14 | 15 | 16 | """ 17 | We actually check that all the plugins load the write data in 18 | tests/plugins/data. All these tests are for is to ensure that the 19 | data processor selects the right plugin for each of the known types. 
20 | """ 21 | 22 | 23 | @pytest.fixture 24 | def parser(): 25 | return processor.DataProcessor(False, False) 26 | 27 | 28 | def test_can_load_csv(parser): 29 | assert isinstance(parser.parse(CSV), np.ndarray) 30 | assert isinstance(parser.parse(CSV, True), pd.DataFrame) 31 | 32 | 33 | def test_can_read_evil(parser): 34 | with parser.get_reader(EVIL) as reader: 35 | assert isinstance(next(reader), np.void) 36 | 37 | with parser.get_reader(EVIL, True) as reader: 38 | assert isinstance(next(reader), pd.Series) 39 | 40 | 41 | def test_can_load_pass_fail(parser): 42 | assert isinstance(parser.parse(BOOL, True), pd.Series) 43 | assert parser.parse(BOOL, True).dtype == bool 44 | assert isinstance(parser.parse(BOOL), np.ndarray) 45 | assert parser.parse(BOOL).dtype == bool 46 | -------------------------------------------------------------------------------- /tests/libs/test_binning.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 
18 | 19 | import numpy as npy 20 | import pandas as pd 21 | import pytest 22 | 23 | from PyPWA.libs import binning 24 | 25 | 26 | """ 27 | Fixtures for Binning Tests 28 | """ 29 | 30 | @pytest.fixture() 31 | def dataframe(): 32 | df = pd.DataFrame() 33 | df['x'] = npy.random.rand(10_000) 34 | df['y'] = npy.random.rand(10_000) 35 | df['z'] = npy.random.rand(10_000) 36 | 37 | return df 38 | 39 | 40 | @pytest.fixture() 41 | def array(): 42 | array = npy.empty(10_000, [('x', 'f8'), ('y', 'f8'), ('z', 'f8')]) 43 | array['x'] = npy.random.rand(10_000) 44 | array['y'] = npy.random.rand(10_000) 45 | array['z'] = npy.random.rand(10_000) 46 | 47 | return array 48 | 49 | 50 | @pytest.fixture(params=["dataframe", "structured"]) 51 | def data(request, dataframe, array): 52 | if request.param == "dataframe": 53 | return dataframe 54 | elif request.param == "structured": 55 | return array 56 | else: 57 | assert "datatype is not set as expected!" 58 | 59 | 60 | """ 61 | Tests for the bin_by_range function 62 | """ 63 | 64 | 65 | def test_range_throws_error(array): 66 | wrong_size = npy.random.rand(15_000) 67 | with pytest.raises(ValueError): 68 | binning.bin_by_range(array, wrong_size, 10) 69 | 70 | 71 | def test_bin_by_range_trims_data(data): 72 | results = binning.bin_by_range( 73 | data, 'x', 10, lower_cut=0.1, upper_cut=0.9 74 | ) 75 | for result in results: 76 | assert npy.all(result['x'] >= 0.1) 77 | assert npy.all(result['x'] <= 0.9) 78 | 79 | 80 | def test_range_correct_number_of_bins(data): 81 | results = binning.bin_by_range(data, 'x', 10) 82 | assert len(results) == 10 83 | 84 | 85 | def test_range_sample_size_reduces_data_size(data): 86 | results = binning.bin_by_range(data, 'x', 10, sample_size=100) 87 | assert npy.sum([len(x) for x in results]) == 1000 88 | 89 | 90 | def test_sum_of_all_range_lengths_matches_original(data): 91 | results = binning.bin_by_range(data, 'x', 10) 92 | assert npy.sum([len(x) for x in results]) == len(data) 93 | 94 | 95 | """ 96 | Tests for 
the bin_with_fixed_widths function 97 | """ 98 | 99 | 100 | def test_fixed_widths_throws_error(array): 101 | wrong_size = npy.random.rand(15_000) 102 | with pytest.raises(ValueError): 103 | binning.bin_with_fixed_widths(array, wrong_size, 10) 104 | 105 | 106 | def test_fixed_widths_returns_expected_number_of_bins(data): 107 | results = binning.bin_with_fixed_widths(data, 'x', 1000) 108 | assert len(results) == 10 109 | 110 | 111 | def test_fixed_widths_handles_bin_overflows(data): 112 | results = binning.bin_with_fixed_widths(data, 'x', 900) 113 | # Check first and last element of results has length of 50 114 | assert len(results[0]) == 50 115 | assert len(results[-1]) == 50 116 | 117 | # Check that the rest of the bins have length of 900 118 | for result in results[1:-1]: 119 | assert len(result) == 900 120 | 121 | 122 | """ 123 | Finally, test bin_by_lists 124 | """ 125 | 126 | -------------------------------------------------------------------------------- /tests/libs/test_process.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # 3 | # PyPWA, a scientific analysis toolkit. 4 | # Copyright (C) 2016 JLab 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 
18 | 19 | import numpy as npy 20 | import pytest 21 | 22 | from PyPWA.libs import process 23 | 24 | TEST_DATA = {"data": npy.random.rand(100)} 25 | 26 | 27 | """ 28 | Test Simplex Processes 29 | """ 30 | 31 | 32 | class SimplexKernel(process.Kernel): 33 | 34 | def __init__(self): 35 | self.data: npy.ndarray = False 36 | 37 | def setup(self): 38 | pass 39 | 40 | def process(self, data=False): 41 | return npy.sum(self.data) 42 | 43 | 44 | class SimplexInterface(process.Interface): 45 | 46 | def run(self, connections, *args): 47 | value = npy.zeros(len(connections)) 48 | for index, connection in enumerate(connections): 49 | value[index] = connection.recv() 50 | 51 | return npy.sum(value) 52 | 53 | 54 | @pytest.fixture 55 | def simplex_interface(request): 56 | interface = process.make_processes( 57 | TEST_DATA, SimplexKernel(), SimplexInterface(), 3, False 58 | ) 59 | 60 | yield interface 61 | interface.close() 62 | 63 | 64 | def test_simplex_sum_matches_expected(simplex_interface): 65 | calculated_sum = simplex_interface.run() 66 | npy.testing.assert_approx_equal(calculated_sum, npy.sum(TEST_DATA['data'])) 67 | 68 | 69 | """ 70 | Test Duplex 71 | """ 72 | 73 | 74 | class DuplexKernel(process.Kernel): 75 | 76 | def __init__(self): 77 | self.data: npy.ndarray = None 78 | 79 | def setup(self): 80 | pass 81 | 82 | def process(self, data: str = False) -> npy.ndarray: 83 | return npy.sum(self.data) 84 | 85 | 86 | class DuplexInterface(process.Interface): 87 | 88 | def run(self, connections, *arguments): 89 | for connection in connections: 90 | connection.send(arguments) 91 | 92 | value = npy.zeros(len(connections)) 93 | for index, connection in enumerate(connections): 94 | value[index] = connection.recv() 95 | 96 | return npy.sum(value) 97 | 98 | 99 | @pytest.fixture 100 | def duplex_interface(request): 101 | interface = process.make_processes( 102 | TEST_DATA, DuplexKernel(), DuplexInterface(), 3, True 103 | ) 104 | yield interface 105 | interface.close() 106 | 107 | 108 | 
def test_duplex_calculated_matches_expected(duplex_interface): 109 | final_value = duplex_interface.run("go") 110 | npy.testing.assert_approx_equal( 111 | final_value, npy.sum(TEST_DATA['data']) 112 | ) 113 | 114 | 115 | def test_duplex_reports_is_alive(duplex_interface): 116 | assert duplex_interface.is_alive 117 | 118 | 119 | """ 120 | Test Errors 121 | """ 122 | 123 | 124 | class KernelError(process.Kernel): 125 | 126 | def __init__(self): 127 | self.data: npy.ndarray = False 128 | 129 | def setup(self): 130 | pass 131 | 132 | def process(self, data=False): 133 | raise RuntimeError("Testing Errors are caught in processing") 134 | 135 | 136 | class InterfaceError(process.Interface): 137 | 138 | def __init__(self, is_duplex: bool): 139 | self.__duplex = is_duplex 140 | 141 | def run(self, connections, *args): 142 | if self.__duplex: 143 | for connection in connections: 144 | connection.send("go") 145 | 146 | returned = [0] * len(connections) 147 | for index, connection in enumerate(connections): 148 | returned[index] = connection.recv() 149 | 150 | return returned 151 | 152 | 153 | @pytest.fixture(params=[True, False]) 154 | def get_duplex_state(request): 155 | interface = InterfaceError(request.param) 156 | return interface, request.param 157 | 158 | 159 | def test_process_error_handling(get_duplex_state): 160 | interface = process.make_processes( 161 | TEST_DATA, KernelError(), get_duplex_state[0], 3, get_duplex_state[1] 162 | ) 163 | values = interface.run() 164 | assert process.ProcessCodes.ERROR in values 165 | interface.close() 166 | -------------------------------------------------------------------------------- /tests/libs/vectors/test_basic_vectors.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy 3 | import pandas 4 | 5 | from PyPWA.libs import vectors 6 | from PyPWA.libs.vectors import _base_vector 7 | 8 | ARRAY_LENGTH = 20 9 | 10 | @pytest.fixture() 11 | def four_vector(): 12 | v = 
vectors.FourVector(ARRAY_LENGTH) 13 | v.e = numpy.random.rand(ARRAY_LENGTH) 14 | v.x = numpy.random.rand(ARRAY_LENGTH) 15 | v.y = numpy.random.rand(ARRAY_LENGTH) 16 | v.z = numpy.random.rand(ARRAY_LENGTH) 17 | return v 18 | 19 | 20 | @pytest.fixture() 21 | def three_vector(four_vector): 22 | return four_vector.get_three_vector() 23 | 24 | 25 | """ 26 | Abstract Vector Tests 27 | """ 28 | # Test String 29 | 30 | def test_four_vector_str(four_vector): 31 | assert str(four_vector) 32 | 33 | 34 | # Test Magic Math 35 | 36 | def test_four_vector_addition(four_vector): 37 | assert isinstance(four_vector + four_vector, vectors.FourVector) 38 | assert isinstance(four_vector + four_vector, vectors.FourVector) 39 | 40 | 41 | def test_four_vector_subtraction(four_vector): 42 | assert isinstance(four_vector - four_vector, vectors.FourVector) 43 | 44 | 45 | def test_four_vector_scalar_multiplication(four_vector): 46 | assert isinstance(four_vector * 2, vectors.FourVector) 47 | 48 | 49 | # Test Iterators and length 50 | 51 | def test_four_vector_len(four_vector): 52 | assert len(four_vector) == ARRAY_LENGTH 53 | 54 | 55 | def test_four_vector_iterable(four_vector): 56 | for event in four_vector: 57 | assert isinstance(event, vectors.FourVector) 58 | 59 | 60 | # Test setters and getters 61 | 62 | def test_three_vector_can_not_set_energy(three_vector): 63 | with pytest.raises(AttributeError): 64 | three_vector.e = numpy.random.rand(ARRAY_LENGTH) 65 | 66 | 67 | def test_three_vector_errors_with_different_size_array(three_vector): 68 | with pytest.raises(ValueError): 69 | three_vector.z = numpy.random.rand(ARRAY_LENGTH+1) 70 | 71 | 72 | def test_four_vector_set_scalar(four_vector): 73 | vector = four_vector.get_copy() 74 | vector.e = 5 75 | for event in vector.e: 76 | assert event == 5 77 | 78 | 79 | def test_three_vector_set_array(three_vector): 80 | vector = three_vector.get_copy() 81 | new_array = numpy.random.rand(ARRAY_LENGTH) 82 | vector.x = new_array 83 | 
numpy.testing.assert_array_equal(vector.x, new_array)


def test_four_vector_can_not_get_q(four_vector):
    """FourVector exposes no 'q' attribute."""
    with pytest.raises(AttributeError):
        four_vector.q


def test_create_four_vector_without_numpy():
    """A FourVector can be built directly from four plain floats."""
    vec = vectors.FourVector(1.0, 2.0, 3.0, 4.0)
    for component, expected in (("e", 1.0), ("x", 2.0), ("y", 3.0), ("z", 4.0)):
        assert getattr(vec, component) == expected


# Test Utilities

def test_three_vector_copy_is_real(three_vector):
    """Mutating a copy must not leak back into the source vector."""
    duplicate = three_vector.get_copy()
    duplicate.x = 5
    assert not (duplicate.x == three_vector.x).all()


def test_four_vector_get_array_returns_array(four_vector):
    assert isinstance(four_vector.dataframe, pandas.DataFrame)


def test_three_vector_splits(three_vector):
    chunks = three_vector.split(4)
    assert all(isinstance(chunk, vectors.ThreeVector) for chunk in chunks)


def test_dot_between_different_vector_types(four_vector, three_vector):
    """Dotting a FourVector against a ThreeVector is rejected."""
    with pytest.raises(ValueError):
        four_vector.get_dot(three_vector)


# Test Builtin Properties

def test_four_vector_length(four_vector):
    assert isinstance(four_vector.get_length(), numpy.ndarray)


def test_three_vector_theta(three_vector):
    assert isinstance(three_vector.get_theta(), numpy.ndarray)


def test_four_vector_phi(four_vector):
    assert isinstance(four_vector.get_phi(), numpy.ndarray)


def test_three_vector_sin_theta(three_vector):
    assert isinstance(three_vector.get_sin_theta(), numpy.ndarray)


def test_four_vector_cos_theta(four_vector):
    assert isinstance(four_vector.get_cos_theta(), numpy.ndarray)


def test_three_vector_setters(three_vector):
    """Scalar assignment broadcasts across every element of a component."""
    fresh = three_vector.get_copy()
    fresh.x, fresh.y, fresh.z = 3, 2, 1
    assert (fresh.x == 3).all()
    assert (fresh.y == 2).all()
    assert (fresh.z == 1).all()
152 | 153 | 154 | """ 155 | Four and Three Vector Tests 156 | """ 157 | # Test Cross Multiplication 158 | 159 | def test_four_vector_cross_multiplication(four_vector): 160 | with pytest.raises(ValueError): 161 | four_vector * four_vector 162 | 163 | 164 | def test_three_vector_multiplication(three_vector): 165 | assert isinstance(three_vector * three_vector, vectors.ThreeVector) 166 | 167 | 168 | # Test Dot Multiplication 169 | 170 | def test_four_vector_dot(four_vector): 171 | assert isinstance(four_vector.get_dot(four_vector), numpy.ndarray) 172 | 173 | 174 | def test_three_vector_dot(three_vector): 175 | assert isinstance(three_vector.get_dot(three_vector), numpy.ndarray) 176 | 177 | 178 | # Test Representation 179 | 180 | def test_four_vector_repr(four_vector): 181 | assert isinstance(repr(four_vector), str) 182 | 183 | 184 | def test_three_vector_repr(three_vector): 185 | assert isinstance(repr(three_vector), str) 186 | 187 | 188 | # Test Length Squared 189 | 190 | def test_three_vector_length_squared(three_vector): 191 | assert isinstance(three_vector.get_length_squared(), numpy.ndarray) 192 | 193 | 194 | def test_four_vector_length_squared(four_vector): 195 | assert isinstance(four_vector.get_length_squared(), numpy.ndarray) 196 | 197 | 198 | # Test sanitization 199 | 200 | def test_three_integers_results_in_floats(): 201 | result = _base_vector.sanitize_vector_input(1, 2, 3) 202 | 203 | assert isinstance(result[0], float) 204 | assert result[0] == 1.0 205 | assert result[1] == 2.0 206 | assert result[2] == 3.0 207 | 208 | 209 | # Test with standard floats 210 | 211 | @pytest.fixture(params=[True, False]) 212 | def four_vector_without_arrays(request): 213 | if request.param: 214 | return vectors.FourVector( 215 | numpy.random.random(), numpy.random.random(), 216 | numpy.random.random(), numpy.random.random() 217 | ) 218 | else: 219 | return vectors.FourVector(0.0, 0.0, 0.0, 0.0) 220 | 221 | 222 | def test_four_vector_math(four_vector_without_arrays): 223 | 
result = four_vector_without_arrays + four_vector_without_arrays 224 | 225 | assert result.e == 2 * four_vector_without_arrays.e 226 | assert result.x == 2 * four_vector_without_arrays.x 227 | assert result.y == 2 * four_vector_without_arrays.y 228 | assert result.z == 2 * four_vector_without_arrays.z 229 | -------------------------------------------------------------------------------- /tests/libs/vectors/test_particle.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from PyPWA.libs import vectors 4 | 5 | 6 | def test_particle_pool_can_get_by_id(random_particle_pool): 7 | fetched_particle = random_particle_pool.get_particles_by_id(1) 8 | assert fetched_particle[0].id == 1 9 | 10 | 11 | def test_particle_pool_can_get_by_name(random_particle_pool): 12 | fetched_particle = random_particle_pool.get_particles_by_name("Gamma") 13 | assert fetched_particle[0].name == "Gamma" 14 | 15 | 16 | def test_particle_pool_length(random_particle_pool): 17 | assert random_particle_pool.particle_count == 5 18 | 19 | 20 | def test_particle_pool_event_count(random_particle_pool): 21 | assert random_particle_pool.event_count == 500 22 | 23 | 24 | def test_particle_pool_event_iterator(random_particle_pool): 25 | assert len(list(random_particle_pool.iter_events())) == 500 26 | 27 | 28 | def test_particle_pool_particle_iterator(random_particle_pool): 29 | assert len(list(random_particle_pool.iter_particles())) == 5 30 | 31 | 32 | def test_particle_pool_iterates_over_events(random_particle_pool): 33 | for index, particle_event in enumerate(random_particle_pool.iter_events()): 34 | assert isinstance(particle_event, vectors.ParticlePool) 35 | assert index == 499 36 | 37 | 38 | def test_particle_pool_split(random_particle_pool): 39 | split = random_particle_pool.split(4) 40 | for chunk in split: 41 | particle_length = 0 42 | for index, particle in enumerate(chunk.iter_particles()): 43 | if index == 0: 44 | particle_length = 
len(particle) 45 | assert len(particle) == particle_length 46 | 47 | 48 | def test_particle_can_do_math(): 49 | a = vectors.Particle(1, 1, 1.0, 2.0, 3.0, 4.0) 50 | b = vectors.Particle(14, 1, 1.1, 2.1, 3.1, 4.1) 51 | c = (a + b) * 2 52 | 53 | assert c.e == 4.2 54 | assert c.x == 8.2 55 | assert c.y == 12.2 56 | assert c.z == 16.2 57 | 58 | 59 | def test_particle_raw_display(random_particle_pool): 60 | random_particle_pool.display_raw() 61 | 62 | 63 | def test_particle_pool_can_be_masked(random_particle_pool): 64 | mask = np.random.choice([True, False], random_particle_pool.event_count) 65 | result = random_particle_pool[mask] 66 | assert result.event_count == sum(mask) 67 | -------------------------------------------------------------------------------- /tests/plugins/data/conftest.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | import pytest 5 | 6 | 7 | @pytest.fixture 8 | def structured_data(): 9 | data = np.empty(1000, [("a", "f8"), ("b", "f8")]) 10 | data["a"] = np.random.rand(1000) 11 | data["b"] = np.random.rand(1000) 12 | return data 13 | 14 | 15 | """ 16 | Since all the plugins _should_ use the same interface, the actual tests 17 | for them have been generalized here. If you feel that a test needs to be 18 | altered to fit a specific plugin, then you're probably not following the 19 | interface closely, and will more than likely break compatibility with 20 | the Data Module. 21 | """ 22 | 23 | 24 | @pytest.fixture 25 | def check_iterator_passes_data(): 26 | """ 27 | This is the only test here that is _optional_ for iterators. An 28 | iterator is not required to pass a pointer, though it is strongly 29 | encouraged for speed reasons. If you have a file type that only 30 | supports parsing the entire file, and then you have to fake iterating 31 | over it, then this can be ignored. 32 | 33 | .. 
seealso:: 34 | The numpy plugin, since it has to fake iteration and can not 35 | pass a single pointer. 36 | """ 37 | def iterator_check(initialized_iterator): 38 | first_result = next(initialized_iterator) 39 | second_result = next(initialized_iterator) 40 | return first_result == second_result 41 | return iterator_check 42 | 43 | 44 | @pytest.fixture 45 | def check_memory_read_write(): 46 | def check_memory(parser, data, location): 47 | # Write and parse data 48 | parser.write(location, data) 49 | parsed_data = parser.parse(location) 50 | 51 | # Remove temporary file 52 | location.unlink() 53 | 54 | # Check data matches 55 | for name in data.dtype.names: 56 | np.testing.assert_array_almost_equal( 57 | data[name], parsed_data[name] 58 | ) 59 | return check_memory 60 | 61 | 62 | @pytest.fixture 63 | def iterate_numpy_arrays(): 64 | def numpy_array(iterator, data, location): 65 | # Write data with iterator 66 | with iterator["writer"](location) as writer: 67 | for row in data: 68 | writer.write(row) 69 | 70 | # Read data from file 71 | read_rows = [] 72 | with iterator["reader"](location, False) as reader: 73 | for event in reader: 74 | read_rows.append(event.copy()) 75 | read_data = np.array(read_rows) 76 | 77 | # Compare read data with structured data 78 | for name in read_data.dtype.names: 79 | np.testing.assert_array_almost_equal( 80 | data[name], 81 | read_data[name] 82 | ) 83 | 84 | # Remove temporary file 85 | location.unlink() 86 | return numpy_array 87 | 88 | 89 | @pytest.fixture 90 | def iterate_dataframe(): 91 | def dataframe_check(iterator, data, location): 92 | dataframe = pd.DataFrame(data) 93 | 94 | # Write data with iterator 95 | with iterator["writer"](location) as writer: 96 | for index, row in dataframe.iterrows(): 97 | writer.write(row) 98 | 99 | # Read data from file 100 | read_data = [] 101 | with iterator["reader"](location, True) as reader: 102 | for row in reader: 103 | read_data.append(row) 104 | read_data = pd.DataFrame(read_data) 105 | 
106 | pd.testing.assert_frame_equal(dataframe, read_data) 107 | 108 | # Remove temporary file 109 | location.unlink() 110 | return dataframe_check 111 | 112 | -------------------------------------------------------------------------------- /tests/plugins/data/test_gamp.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | 5 | from PyPWA.plugins.data import gamp 6 | 7 | ROOT = (Path(__file__).parent / "../../test_data/docs").resolve() 8 | TEMP_LOCATION = ROOT / "temporary_write_data.gamp" 9 | LARGE = ROOT / "large.gamp" 10 | MULTI = ROOT / "multiple.gamp" 11 | 12 | 13 | @pytest.fixture() 14 | def gamp_mem(): 15 | return gamp._GampMemory() 16 | 17 | 18 | @pytest.fixture() 19 | def large_gamp(gamp_mem): 20 | return gamp_mem.parse(LARGE) 21 | 22 | 23 | @pytest.fixture() 24 | def multi_gamp(gamp_mem): 25 | return gamp_mem.parse(MULTI) 26 | 27 | 28 | def test_large_has_all_events(large_gamp): 29 | assert 1000 == large_gamp.event_count 30 | 31 | 32 | def test_multi_has_all_events(multi_gamp): 33 | assert 5 == multi_gamp.event_count 34 | 35 | 36 | def test_large_has_four_particles(large_gamp): 37 | assert 4 == len(large_gamp) 38 | 39 | 40 | def test_multi_has_six_particles(multi_gamp): 41 | assert 6 == len(multi_gamp) 42 | 43 | 44 | def test_write_data(multi_gamp, gamp_mem): 45 | gamp_mem.write(TEMP_LOCATION, multi_gamp) 46 | intermediate = gamp_mem.parse(TEMP_LOCATION) 47 | TEMP_LOCATION.unlink(True) # clear the space after it's loaded. 
48 | 49 | assert intermediate == multi_gamp 50 | -------------------------------------------------------------------------------- /tests/plugins/data/test_kv.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | 5 | from PyPWA.plugins.data import kv 6 | 7 | ROOT = (Path(__file__).parent / "../../test_data/docs").resolve() 8 | 9 | TEMP_LOCATION = ROOT / "temporary_write_data" 10 | 11 | GOOD_DATA = [ 12 | ROOT / "set1.kvars", 13 | ROOT / "set2.kvars" 14 | ] 15 | 16 | BAD_DATA = [ 17 | ROOT / "set1.csv", 18 | ROOT / "set2.npy", 19 | ROOT / "bad_set.kvars" 20 | ] 21 | 22 | 23 | @pytest.fixture 24 | def parser(): 25 | return kv.metadata.get_memory_parser() 26 | 27 | 28 | @pytest.fixture 29 | def iterator(): 30 | return { 31 | "reader": kv.metadata.get_reader, 32 | "writer": kv.metadata.get_writer 33 | } 34 | 35 | 36 | @pytest.fixture 37 | def can_read(): 38 | return kv.metadata.get_read_test() 39 | 40 | 41 | @pytest.fixture(params=GOOD_DATA) 42 | def good_data(request): 43 | return request.param 44 | 45 | 46 | def test_can_read_known_good_data(good_data, can_read): 47 | assert can_read.can_read(good_data) 48 | 49 | 50 | def test_parser_and_writer(parser, structured_data, check_memory_read_write): 51 | check_memory_read_write(parser, structured_data, TEMP_LOCATION) 52 | 53 | 54 | def test_iterator_does_not_copy_data( 55 | iterator, structured_data, check_iterator_passes_data 56 | ): 57 | assert check_iterator_passes_data(iterator["reader"](GOOD_DATA[0], False)) 58 | 59 | 60 | def test_iterator_with_numpy_arrays( 61 | iterator, structured_data, iterate_numpy_arrays 62 | ): 63 | iterate_numpy_arrays(iterator, structured_data, TEMP_LOCATION) 64 | 65 | 66 | def test_iterator_with_pandas_dataframe( 67 | iterator, structured_data, iterate_dataframe 68 | ): 69 | iterate_dataframe(iterator, structured_data, TEMP_LOCATION) 
-------------------------------------------------------------------------------- /tests/plugins/data/test_numpy.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | import pandas as pd 5 | import pytest 6 | 7 | from PyPWA.plugins.data import numpy 8 | 9 | ROOT = (Path(__file__).parent / "../../test_data/docs").resolve() 10 | 11 | TEMP_LOCATION = ROOT / "temporary_write_data.npy" 12 | 13 | GOOD_DATA = [ 14 | ROOT / "set1.txt", 15 | ROOT / "set1.pf", 16 | ROOT / "set1.npy", 17 | ROOT / "set2.txt", 18 | ROOT / "set2.pf", 19 | ROOT / "set2.npy" 20 | ] 21 | 22 | BAD_DATA = [ 23 | ROOT / "bad_set.txt", 24 | ROOT / "set1.kvars", 25 | ROOT / "set2.csv" 26 | ] 27 | 28 | 29 | @pytest.fixture 30 | def parser(): 31 | return numpy.metadata.get_memory_parser() 32 | 33 | 34 | @pytest.fixture 35 | def iterator(): 36 | return { 37 | "reader": numpy.metadata.get_reader, 38 | "writer": numpy.metadata.get_writer 39 | } 40 | 41 | 42 | @pytest.fixture 43 | def can_read(): 44 | return numpy.metadata.get_read_test() 45 | 46 | 47 | @pytest.fixture(params=GOOD_DATA) 48 | def good_data(request): 49 | return request.param 50 | 51 | 52 | def test_can_read_known_good_data(good_data, can_read): 53 | assert can_read.can_read(good_data) 54 | 55 | 56 | def test_parser_and_writer(parser, structured_data, check_memory_read_write): 57 | check_memory_read_write(parser, structured_data, TEMP_LOCATION) 58 | 59 | 60 | def test_iterator_with_numpy_arrays( 61 | iterator, structured_data, iterate_numpy_arrays 62 | ): 63 | iterate_numpy_arrays(iterator, structured_data, TEMP_LOCATION) 64 | 65 | 66 | def test_iterator_with_pandas_dataframe( 67 | iterator, structured_data, iterate_dataframe 68 | ): 69 | iterate_dataframe(iterator, structured_data, TEMP_LOCATION) 70 | 71 | 72 | def test_numpy_read_and_write_pf(): 73 | pf_file = Path(TEMP_LOCATION.stem + ".pf") 74 | pass_fail = np.random.choice([True, False], 1000) 75 | 76 
| numpy.metadata.get_memory_parser().write(pf_file, pass_fail) 77 | read = numpy.metadata.get_memory_parser().parse(pf_file) 78 | pf_file.unlink() 79 | 80 | np.testing.assert_array_equal(read, pass_fail) 81 | 82 | 83 | def test_numpy_read_and_write_floats(): 84 | float_file = Path(TEMP_LOCATION.stem + ".txt") 85 | floats = np.random.random(1000) 86 | 87 | numpy.metadata.get_memory_parser().write(float_file, floats) 88 | read = numpy.metadata.get_memory_parser().parse(float_file) 89 | float_file.unlink() 90 | 91 | np.testing.assert_array_equal(floats, read) 92 | 93 | 94 | @pytest.fixture(scope="module") 95 | def numpy_and_pandas(): 96 | data = np.zeros(30, [(name, "f8") for name in ['x', 'y', 'z']]) 97 | for column in data.dtype.names: 98 | data[column] = np.random.rand(30) 99 | return data, pd.DataFrame(data) 100 | 101 | 102 | def test_numpy_reader_and_writer(numpy_and_pandas): 103 | npy_file = Path(TEMP_LOCATION.stem + ".npy") 104 | data = numpy_and_pandas[1][:100] 105 | 106 | with numpy.metadata.get_writer(npy_file) as writer: 107 | for index, event in data.iterrows(): 108 | writer.write(event) 109 | 110 | with numpy.metadata.get_reader(npy_file, True) as reader: 111 | for index, event in enumerate(reader): 112 | pd.testing.assert_series_equal(data.iloc[index], event) 113 | 114 | npy_file.unlink() 115 | 116 | -------------------------------------------------------------------------------- /tests/plugins/data/test_pgz.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | 5 | from PyPWA.plugins.data import pgz 6 | 7 | ROOT = (Path(__file__).parent / "../../test_data/docs").resolve() 8 | 9 | TEMP_LOCATION = ROOT / "temporary_write_data.pgz" 10 | 11 | 12 | @pytest.fixture 13 | def parser(): 14 | return pgz.metadata.get_memory_parser() 15 | 16 | 17 | def test_parser_and_writer(parser, structured_data, check_memory_read_write): 18 | check_memory_read_write(parser, structured_data, 
TEMP_LOCATION) 19 | -------------------------------------------------------------------------------- /tests/plugins/data/test_sv.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | 5 | from PyPWA.plugins.data import sv 6 | 7 | ROOT = (Path(__file__).parent / "../../test_data/docs").resolve() 8 | 9 | TEMP_LOCATION = ROOT / "temporary_write_data" 10 | 11 | GOOD_DATA = [ 12 | ROOT / "set1.csv", 13 | ROOT / "set1.tsv", 14 | ROOT / "set2.csv", 15 | ROOT / "set2.tsv" 16 | ] 17 | 18 | BAD_DATA = [ 19 | ROOT / "set1.kvars", 20 | ROOT / "set2.npy", 21 | ROOT / "bad_set.csv" 22 | ] 23 | 24 | 25 | @pytest.fixture 26 | def parser(): 27 | return sv.metadata.get_memory_parser() 28 | 29 | 30 | @pytest.fixture 31 | def iterator(): 32 | return { 33 | "reader": sv.metadata.get_reader, 34 | "writer": sv.metadata.get_writer 35 | } 36 | 37 | 38 | @pytest.fixture 39 | def can_read(): 40 | return sv.metadata.get_read_test() 41 | 42 | 43 | @pytest.fixture(params=GOOD_DATA) 44 | def good_data(request): 45 | return request.param 46 | 47 | 48 | def test_can_read_known_good_data(good_data, can_read): 49 | assert can_read.can_read(good_data) 50 | 51 | 52 | def test_parser_and_writer(parser, structured_data, check_memory_read_write): 53 | check_memory_read_write(parser, structured_data, TEMP_LOCATION) 54 | 55 | 56 | def test_iterator_does_not_copy_data( 57 | iterator, structured_data, check_iterator_passes_data 58 | ): 59 | assert check_iterator_passes_data(iterator["reader"](GOOD_DATA[0], False)) 60 | 61 | 62 | def test_iterator_with_numpy_arrays( 63 | iterator, structured_data, iterate_numpy_arrays 64 | ): 65 | iterate_numpy_arrays(iterator, structured_data, TEMP_LOCATION) 66 | 67 | 68 | def test_iterator_with_pandas_dataframe( 69 | iterator, structured_data, iterate_dataframe 70 | ): 71 | iterate_dataframe(iterator, structured_data, TEMP_LOCATION) 72 | 
-------------------------------------------------------------------------------- /tests/plugins/test_load.py: -------------------------------------------------------------------------------- 1 | # PyPWA, a scientific analysis toolkit. 2 | # Copyright (C) 2016 JLab 3 | # 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU General Public License as published by 6 | # the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | # 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU General Public License 15 | # along with this program. If not, see . 16 | 17 | import pytest 18 | 19 | from PyPWA.plugins import data, load 20 | 21 | 22 | @pytest.fixture 23 | def found_data_plugins(): 24 | return load(data, "Data Test") 25 | 26 | 27 | def find_data_name(plugins, name): 28 | found = False 29 | for plugin in plugins: 30 | if plugin.plugin_name == name: 31 | found = True 32 | return found 33 | 34 | 35 | def test_found_sv_data(found_data_plugins): 36 | assert find_data_name( 37 | found_data_plugins, "Delimiter Separated Variable sheets" 38 | ) 39 | 40 | 41 | def test_found_gamp_data(found_data_plugins): 42 | assert find_data_name(found_data_plugins, "gamp") 43 | -------------------------------------------------------------------------------- /tests/system_tests/test_2dgauss.py: -------------------------------------------------------------------------------- 1 | import numpy as npy 2 | import pandas as pd 3 | 4 | import PyPWA as pwa 5 | import numexpr as ne 6 | import pytest 7 | 8 | 9 | class Gauss2dAmplitude(pwa.NestedFunction): 10 | 11 | def setup(self, array, initial_params=""): 12 | self.__data = array 13 | 
14 | def calculate(self, params): 15 | valueA = (self.__data["x"] - params["A1"]) ** 2 16 | valueA /= params["A2"] ** 2 17 | valueB = (self.__data["y"] - params["A3"]) ** 2 18 | valueB /= params["A4"] ** 2 19 | 20 | values = 1 / (params["A2"] * params["A4"]) 21 | values *= npy.exp(-(valueA + valueB)) 22 | return values 23 | 24 | 25 | class NeGauss2dAmplitude(pwa.NestedFunction): 26 | 27 | USE_MP = False 28 | 29 | def setup(self, array, initial_params=""): 30 | self.__data = array 31 | 32 | def calculate(self, params): 33 | return ne.evaluate( 34 | "(1/(a2*a4)) * exp(-((((x-a1)**2)/(a2**2))+(((y-a3)**2)/(a4**2))))", 35 | local_dict={ 36 | "a1": params["A1"], "a2": params["A2"], "a3": params["A3"], 37 | "a4": params["A4"], "x": self.__data["x"], "y": self.__data["y"] 38 | } 39 | ) 40 | 41 | 42 | @pytest.fixture(params=[Gauss2dAmplitude, NeGauss2dAmplitude]) 43 | def gauss_function(request): 44 | return request.param 45 | 46 | 47 | def test_2d_gauss(gauss_function): 48 | flat_data = pd.DataFrame() 49 | flat_data["x"] = npy.random.rand(10000) * 20 50 | flat_data["y"] = npy.random.rand(10000) * 20 51 | 52 | simulation_params = {"A1": 10, "A2": 3, "A3": 10, "A4": 3} 53 | rejection = pwa.monte_carlo_simulation( 54 | gauss_function(), flat_data, simulation_params 55 | ) 56 | 57 | carved_data = flat_data[rejection] 58 | 59 | fitting_settings = {"A1": 1, "A2": 1, "A3": 1, "A4": 1} 60 | 61 | param_names = ["A1", "A2", "A3", "A4"] 62 | 63 | with pwa.LogLikelihood(gauss_function(), carved_data) as likelihood: 64 | optimizer = pwa.minuit(fitting_settings, likelihood) 65 | 66 | for param in param_names: 67 | optimizer.limits[param] = (.1, None) 68 | 69 | optimizer.migrad(1000) 70 | 71 | for param in optimizer.params: 72 | assert simulation_params[param.name] == round(param.value) 73 | 74 | 75 | -------------------------------------------------------------------------------- /tests/system_tests/test_masking_prog.py: 
-------------------------------------------------------------------------------- 1 | from PyPWA.progs import masking 2 | from pathlib import Path 3 | 4 | 5 | def test_masking_errors_with_too_many_flags(): 6 | result = masking.start_masking( 7 | [ 8 | "--use_or", "--use_xor", 9 | "-i", "tests/test_data/docs/large.gamp", 10 | "-m", "tests/test_data/docs/set2.pf", 11 | "-o", "error_masked.gamp" 12 | ] 13 | ) 14 | assert result == 1 15 | Path("error_masked.gamp").unlink(True) 16 | 17 | 18 | def test_masking_errors_with_unmatching_data(): 19 | result = masking.start_masking( 20 | [ 21 | "-i", "tests/test_data/docs/large.gamp", 22 | "-m", "tests/test_data/docs/set2.pf", 23 | "-o", "error_masked.gamp" 24 | ] 25 | ) 26 | assert result == 1 27 | Path("error_masked.gamp").unlink(True) 28 | 29 | 30 | def test_can_mask_data(): 31 | result = masking.start_masking( 32 | [ 33 | "-i", "tests/test_data/docs/large.gamp", 34 | "-m", "tests/test_data/docs/set1.pf", 35 | "-o", "masked.gamp" 36 | ] 37 | ) 38 | assert result != 1 39 | Path("masked.gamp").unlink() 40 | -------------------------------------------------------------------------------- /tests/test_data/docs/bad_set.csv: -------------------------------------------------------------------------------- 1 | x y z qf m 2 | 0.1700785345883843 0.8835545486715198 0.13935589263696535 0.33306110602811123 0.5712175366545377 3 | 0.7492472558825094 0.07964444533355208 0.7165261261532554 0.9607257615839396 0.9576933621094333 4 | 0.7496163304138005 0.5568268984485388 0.4993211014080208 0.5358262187865556 0.2291978114390456 5 | 0.8744773586050184 0.957810826520311 0.6951874298074182 0.1997663121364408 0.5884533560032446 6 | 0.33505800183205137 0.7117290610854213 0.6568227375130311 0.742303290467644 0.29609793535639817 7 | 0.5434347192891926 0.61712189077223 0.28454555730215025 0.3706391644281748 0.32848657751955657 8 | 0.4733574773274378 0.8327954154798881 0.0662661193895645 0.8795339812977899 0.5095118873325768 9 | 0.18386134548075939 
0.26370755997134254 0.6156644259640816 0.23335600865015782 0.5742245393941757 10 | 0.16340289609245817 0.0735488481552673 0.6660297952967044 0.7234876899220862 0.039611741549858914 11 | 0.4917471444541621 0.14882605972069818 0.2973375068703369 0.5009020158244134, 0.7621506721503396 -------------------------------------------------------------------------------- /tests/test_data/docs/bad_set.kvars: -------------------------------------------------------------------------------- 1 | x=0.1700785345883843,y=0.8835545486715198,z=0.1393558926369654,qf=0.3330611060281112,m=0.5712175366545377 2 | x=0.7492472558825094,y=0.0796444453335521,z=0.7165261261532554,qf=0.9607257615839396,m=0.9576933621094333 3 | x=0.7496163304138005,y=0.5568268984485388,z=0.4993211014080208,qf=0.5358262187865556,m=0.2291978114390456 4 | x=0.8744773586050184,y=0.9578108265203110,z=0.6951874298074182,qf=0.1997663121364408,m=0.5884533560032446 5 | x=0.3350580018320514,y,z=0.6568227375130311,qf=0.7423032904676440,m=0.2960979353563982 6 | x=0.5434347192891926,y=0.6171218907722300,z=0.2845455573021503,qf=0.3706391644281748,m=0.3284865775195566 7 | x=0.4733574773274378,y=0.8327954154798881,z=0.0662661193895645,qf=0.8795339812977899,m=0.5095118873325768 8 | x=0.1838613454807594,y=0.2637075599713425,z=0.6156644259640816,qf=0.2333560086501578,m=0.5742245393941757 9 | x=0.1634028960924582,y=0.0735488481552673,z=0.6660297952967044,qf=0.7234876899220862,m=0.0396117415498589 10 | x=0.4917471444541621,y=0.1488260597206982;z=0.2973375068703369,0.5009020158244134,m=0.7621506721503396 -------------------------------------------------------------------------------- /tests/test_data/docs/bad_set.pf: -------------------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 4 | 2 5 | 0 6 | 1 7 | 0 8 | 1 9 | 1 10 | 8 -------------------------------------------------------------------------------- /tests/test_data/docs/bad_set.txt: 
-------------------------------------------------------------------------------- 1 | 2.742714374659507293e-01 2 | 9.485081587125938629e-01 3 | 6.068605870218348741e-01 4 | 3.736345569539551992e-01 5 | 6.806017258343411980e-01 6 | 6.860791249803817049e-01 7 | 9.144314926430581192e-01 8 | 3.964134800564039773e-01 9 | eight 10 | 9.796475221103184694e-01 -------------------------------------------------------------------------------- /tests/test_data/docs/configuration.yml: -------------------------------------------------------------------------------- 1 | buitin parse: 2 | enable cache: true 3 | Builtin Multiprocessing: 4 | number of processes: 4 5 | miniut: 6 | parameters: 7 | - O1 8 | - O2 9 | - O3 10 | - O4 11 | - O5 12 | setttings: { 13 | 'O1':6E+8, 'fix_O1':True, 'error_O1':0.1, 'limit_O1':[0,10.E+12], 14 | 'O2':0.3, 'fix_O2':True, 'error_O2':0.01, 15 | 'O3':-0.1, 'fix_O3':True, 'error_O3':0.01, 16 | 'O4':-0.1, 'fix_O4':True, 'error_O4':0.01, 17 | 'O5':0.1, 'fix_O5':False, 'error_O5':0.01, 'limit_O5':[-15.,10.] 
18 | } 19 | stratigy: 1 20 | General Fitting: 21 | likelihoodtype: Chi Squared 22 | generated length: 23 | function location: test.py 24 | process name: weighting 25 | setup name: setup_fortran 26 | qfactors location: 27 | datas location: kvars.csv 28 | accepted montecarlo location: 29 | savename: output 30 | -------------------------------------------------------------------------------- /tests/test_data/docs/multiple.gamp: -------------------------------------------------------------------------------- 1 | 6 2 | 1 0 0 0 9 9 3 | 14 1 0.489366 1.16687 1.53077 2.19651 4 | 11 1 -0.120747 -0.252622 1.82881 1.91485 5 | 12 -1 -0.0471136 -0.468373 2.86627 2.94632 6 | 1 0 0.144554 -0.0924801 0.396306 0.431864 7 | 1 0 -0.46606 -0.353392 2.37784 2.44872 8 | 6 9 | 1 0 0 0 9 9 10 | 14 1 0.0482325 0.574326 0.588864 1.24871 11 | 11 1 0.147839 0.175565 3.20347 3.2494 12 | 12 -1 0.113935 0.0947125 1.91567 1.9838 13 | 1 0 0.116443 -0.468739 1.61144 1.68227 14 | 1 0 -0.426449 -0.375865 1.68055 1.77409 15 | 6 16 | 1 0 0 0 9 9 17 | 14 1 -0.603583 0.264803 7.6721 7.75731 18 | 11 1 0.277164 -0.0236316 0.372865 0.678319 19 | 12 -1 0.0582607 -0.150773 0.285835 0.592907 20 | 1 0 0.0214529 0.210537 0.337733 0.39856 21 | 1 0 0.246705 -0.300936 0.331466 0.511171 22 | 6 23 | 1 0 0 0 9 9 24 | 14 1 -0.6922 -0.280769 4.1272 4.29791 25 | 11 1 0.59143 0.0433621 2.47691 2.59431 26 | 12 -1 0.56086 0.262788 2.14699 2.28843 27 | 1 0 -0.220055 0.120091 0.374064 0.450299 28 | 1 0 -0.240035 -0.145471 -0.125159 0.307316 29 | 6 30 | 1 0 0 0 9 9 31 | 14 1 -1.08832 -0.149006 2.61188 2.98478 32 | 11 1 0.620169 -0.015586 2.69034 2.80472 33 | 12 -1 0.534044 -0.126931 3.35861 3.43879 34 | 1 0 -0.00930206 -0.143393 0.206859 0.251871 35 | 1 0 -0.0565951 0.434916 0.132317 0.458108 36 | -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/data/data.csv: -------------------------------------------------------------------------------- 1 | 
v,w,x,y,z,qfactor,BinN 2 | 0.95640452907820617,0.21553526105646836,0.16407093023482777,0.12917578294625076,0.3277699191994905,0.28041932989857055,0.84998371921222848 3 | 0.80598402056754426,0.38728379815484604,0.29578880746974667,0.53141542336721759,0.10895804013846477,0.64469559045314351,0.32189726311280187 4 | 0.25283927365157555,0.37968555334144727,0.24670113813959382,0.51082329073127553,0.38610236024870437,0.071017165260841009,0.36260228126169958 5 | 0.19246017603470056,0.33124506762492678,0.033570479121473595,0.96636350804057547,0.10966705191766124,0.68702340852520594,0.44159486213574817 6 | 0.440289547555061,0.79627591145624876,0.30641909772021036,0.55645226129198189,0.23347743001004262,0.45175139403431597,0.11418383151963885 7 | 0.10349442409841814,0.61344808072453805,0.34948450916011553,0.27957796915269062,0.15791979718640503,0.4742566290970277,0.5564488507687253 8 | 0.90655146797831399,0.52387637563707123,0.66503387391341984,0.18102447250586695,0.39525276251408548,0.8351333522172194,0.080595762290830208 9 | 0.60164049233224648,0.39796362513843486,0.68099246663658508,0.92522704746829409,0.14223355424444406,0.60312693237607806,0.66682065619078379 10 | 0.23145590491001633,0.48753505150594745,0.6062693680852772,0.95783647687862528,0.31142773877116825,0.71495123814156181,0.68137501593732963 11 | 0.70909873558506664,0.0014399776344070636,0.95768870177463405,0.079711361062450026,0.61673380795627764,0.35358705139883939,0.28909350776671894 12 | 0.37859768930387649,0.94000852328470819,0.29733708728340769,0.1720713596772826,0.82677440074274677,0.54359816175375386,0.81891869463421207 13 | 0.092150346723865129,0.92996491914382096,0.44493318493816336,0.25508004443981924,0.56481936085781259,0.89014586030965659,0.35751493885459895 14 | 0.68062419292472742,0.38258910949052405,0.17578187173963111,0.54623635142157501,0.1288308243892432,0.087735681032279356,0.91079604716055795 15 | 
0.16220121650292763,0.96143417684021693,0.46822657124104206,0.69765434288806405,0.86287516134800446,0.28982648084247509,0.5434155113307314 16 | 0.99327247900951221,0.13455516749270524,0.55807967673977599,0.67394545233920167,0.10369212636070346,0.25544970675990053,0.78552319219947342 17 | 0.18824029656456109,0.81097494247815727,0.23640988643794969,0.68319512597674426,0.85599773909458476,0.32010307399624627,0.27486748791132287 18 | 0.50389447756107197,0.82886466041265872,0.66047118242838943,0.9467308377162118,0.32181678197407759,0.66895566225084491,0.088571782579902614 19 | 0.32021511880146525,0.25275217822756602,0.89386692895300679,0.19849312430727384,0.42953146752536897,0.09595619764479224,0.97295600769318324 20 | 0.60570990636006095,0.94679372733035638,0.90467686311465767,0.42592955818049039,0.11716346715227333,0.62557490316340258,0.95976656086260048 21 | 0.54318149251464309,0.47126421542167096,0.26874315308424745,0.022952542723720382,0.2018792323214349,0.26071271018522868,0.62922192919775599 -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/data/internal_names.csv: -------------------------------------------------------------------------------- 1 | x,y,qf,bn,exp,err 2 | 0.22103004237267732,0.19963131489364483,0.1931923516437426,0.9138648807444445,0.74691968932464348,0.034934761040754325 3 | 0.48643269532046229,0.02354315487698766,0.76136937293783036,0.,0.92977055626776883,0.19704036050381257 4 | 0.82899457747428951,0.72303121933782333,0.096044095978928601,0.10498435784555171,0.86245668108739393,0.32963709491544158 5 | 0.4717218456158726,0.75158736586676,0.13927900997841769,0.14072262275954006,0.41567564307425919,0.78418342753457071 6 | 0.32245112927429009,0.781271184992266,0.14433401005211699,0.70645993497548398,0.35783843756383882,0.6637430475944136 7 | 0.62815607633523463,0.97297295994112554,0.021080057853820056,0.19570370519755564,0.47015088854402676,0.16456571327243275 8 | 
0.58907790600068721,0.89503984669988401,0.42530528529592515,0.69540027713580266,0.49486282083939248,0.50132492278719898 9 | 0.87054542167089011,0.10172287480074105,0.96362223573227956,0.14893951298039099,0.68350586386298684,0.32909323307219196 10 | 0.1467500782594281,0.99823349465040179,0.94504909714417895,0.33753358747662177,0.020305677100872277,0.581386624438051 11 | 0.26222556363577543,0.15475972852863307,0.63345058323478443,0.91125627399455189,0.69156329957950213,0.6997932362475997 12 | -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/data/monte_carlo.csv: -------------------------------------------------------------------------------- 1 | v,w,x,y,z 2 | 0.047160308400970408,0.71517976270039296,0.65294419519521496,0.74245531017027699,0.79282027558702328 3 | 0.23767253289263301,0.54925237360659507,0.88412446578222414,0.77076894239994664,0.48482401350281878 4 | 0.88344853738381934,0.76417951095295789,0.61128553092684967,0.66257529144051019,0.26807470599629868 5 | 0.35381424103133985,0.53762824811116616,0.088993310269751102,0.46596680592249395,0.28426659402006516 6 | 0.46628769212947174,0.034875663810313595,0.99298928095414118,0.46013565119956734,0.25937649730195378 7 | 0.7652716419579737,0.087930206453984971,0.20883163843615349,0.32243271424955944,0.60277048838974645 8 | 0.75702650630520085,0.63682620975804061,0.52497498635704853,0.64469233599164177,0.67675399024867622 9 | 0.30506068570757439,0.52906317173342698,0.14955112111667745,0.88513588174166558,0.84507553236368638 10 | 0.73519512717957325,0.6737064246345339,0.48279544112646389,0.06924134559269679,0.6009917025389484 11 | 0.78350803907744748,0.95168511098257236,0.94457581241492961,0.49321388614855133,0.4665842652194867 12 | 0.025473481408703869,0.98412558149380835,0.30188206106240201,0.43911561982934033,0.64924551066281244 13 | 0.018207132121802916,0.67998582354990256,0.66093797226057105,0.50003874811834437,0.36167395357898369 14 | 
0.31753226950226177,0.077170569206215012,0.36072221530905402,0.73904865379442508,0.40685211609198879 15 | 0.8592771198287118,0.96695213090337151,0.40232316630925358,0.17622491689356501,0.78231681939914177 16 | 0.57809595470620367,0.81525685075844767,0.11833606369487537,0.77588522876158872,0.42296248649310964 17 | 0.84297786877708003,0.5549619273334363,0.19926241037039327,0.76608521602669133,0.57553869348020725 18 | 0.58572313694012157,0.51518005930214117,0.75546494074295789,0.84765842886732889,0.56913108623614284 19 | 0.8412773347966721,0.76291045577846095,0.7330905761300075,0.14858317832809931,0.15496574234745863 20 | 0.21666570998815826,0.16002896890891871,0.81529620694859217,0.23989462812561024,0.65669536395122174 21 | 0.24061902160471482,0.6761525383726178,0.056091881245030617,0.979183210980205,0.79299760774319705 22 | -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/data/qfactor.txt: -------------------------------------------------------------------------------- 1 | 0.87621533489525172 2 | 0.85928937552977247 3 | 0.71297409400025213 4 | 0.76479424656200823 5 | 0.22815998399355497 6 | 0.066430853969000614 7 | 0.87906172616923861 8 | 0.076794351265381589 9 | 0.63643570165221197 10 | 0.61120065681082614 11 | 0.71742672473533997 12 | 0.065797210543766749 13 | 0.72079349991285424 14 | 0.47687137144262959 15 | 0.29073031552893747 16 | 0.45023203912688536 17 | 0.41298455996334615 18 | 0.90510661038206386 19 | 0.72824489258438263 20 | 0.31514546276911348 21 | -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/rho/RHOfit: -------------------------------------------------------------------------------- 1 | Optimizer: 2 | parameters: 3 | - A1 4 | - A2 5 | selected optimizer: minuit 6 | configuration: 7 | settings: 8 | errordef: 1 9 | A1: 5 10 | fix_A1: true 11 | error_A1: 1 12 | A2: .375 13 | limit_A2: [.2,1] 14 | error_A2: 1 15 | strategy: 
0 16 | number of calls: 1 17 | Data Processor: 18 | use cache: false 19 | Multiprocessing: 20 | number of processes: 2 21 | General Fitting: 22 | likelihood type: log-likelihood 23 | function's location: tests/test_data/docs/program_data/rho/testRHO.py 24 | processing name: processing_function 25 | setup name: setup_function 26 | data location: tests/test_data/docs/program_data/rho/RHOw.csv 27 | save name: outputRHOFIT 28 | -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/rho/RHOint: -------------------------------------------------------------------------------- 1 | Multiprocessing: 2 | number of processes: 2 3 | Data Processor: 4 | use cache: false 5 | Intensities: 6 | function's location: tests/test_data/docs/program_data/rho/testRHO.py 7 | processing name: processing_function 8 | setup name: setup_function 9 | data location: tests/test_data/docs/program_data/rho/flat_data.csv 10 | parameters: 11 | A1: 100 12 | A2: 0.4 13 | save name: outputINT 14 | -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/rho/RHOsim: -------------------------------------------------------------------------------- 1 | Version: 1 2 | Processes: 2 3 | Function: 4 | Path: tests/test_data/docs/program_data/rho/testRHO.py 5 | Intensity Name: processing_function 6 | Setup Name: setup_function 7 | Parameters: 8 | A1: 100. 
9 | A2: 0.4 10 | Data: 11 | Path: tests/test_data/docs/program_data/rho/flat_data.csv 12 | -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/rho/RHOweg.json: -------------------------------------------------------------------------------- 1 | { 2 | "Multiprocessing": { 3 | "number of processes": 2 4 | }, 5 | "Data Processor": { 6 | "use cache": false 7 | }, 8 | "Rejection Method": { 9 | "data location": "tests/test_data/docs/program_data/rho/RHOint_intensities.txt", 10 | "max intensity": "134.217784821", 11 | "save name": "output" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /tests/test_data/docs/program_data/rho/testRHO.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | 4 | def processing_function(the_array, the_params): 5 | if isinstance(the_params, dict) and 'A1' in the_params: 6 | return actual_function(the_array, the_params) 7 | elif isinstance(the_params, numpy.ndarray): 8 | params = { 9 | "A1": the_params[0], 10 | "A2": the_params[1] 11 | } 12 | return actual_function(the_array, params) 13 | else: 14 | raise ValueError("Recieved unknown %s" % the_params) 15 | 16 | 17 | def actual_function(the_array, the_params): 18 | wConst= the_params['A1'] 19 | polar = the_params['A2'] 20 | B= 6. 
21 | 22 | theta = numpy.arccos(the_array["ctheta"]) 23 | values = wConst*numpy.exp(B*the_array["tM"])*numpy.sin(theta)**2.*(1+polar*numpy.cos(2*the_array["psi"])) 24 | return values 25 | 26 | 27 | def setup_function(): 28 | pass 29 | 30 | 31 | def prior_function(x): 32 | y = numpy.array([5000.*x[0], x[1]]) 33 | return y 34 | -------------------------------------------------------------------------------- /tests/test_data/docs/set1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JeffersonLab/PyPWA/29987a0471cb127dd2d352feccecc8d1f3210bf4/tests/test_data/docs/set1.npy -------------------------------------------------------------------------------- /tests/test_data/docs/set1.pf: -------------------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 4 | 0 5 | 1 6 | 0 7 | 0 8 | 0 9 | 1 10 | 0 11 | 1 12 | 0 13 | 0 14 | 0 15 | 1 16 | 0 17 | 0 18 | 1 19 | 0 20 | 1 21 | 0 22 | 1 23 | 1 24 | 1 25 | 0 26 | 0 27 | 1 28 | 0 29 | 0 30 | 1 31 | 1 32 | 1 33 | 1 34 | 0 35 | 1 36 | 0 37 | 1 38 | 1 39 | 0 40 | 1 41 | 1 42 | 1 43 | 1 44 | 1 45 | 1 46 | 0 47 | 1 48 | 0 49 | 1 50 | 1 51 | 1 52 | 1 53 | 0 54 | 1 55 | 1 56 | 0 57 | 1 58 | 0 59 | 1 60 | 1 61 | 0 62 | 1 63 | 1 64 | 0 65 | 1 66 | 1 67 | 1 68 | 0 69 | 1 70 | 0 71 | 1 72 | 0 73 | 0 74 | 0 75 | 0 76 | 1 77 | 1 78 | 1 79 | 1 80 | 0 81 | 1 82 | 0 83 | 0 84 | 0 85 | 0 86 | 0 87 | 1 88 | 0 89 | 0 90 | 1 91 | 1 92 | 0 93 | 1 94 | 1 95 | 0 96 | 1 97 | 0 98 | 0 99 | 0 100 | 1 101 | 0 102 | 0 103 | 1 104 | 1 105 | 0 106 | 1 107 | 0 108 | 0 109 | 0 110 | 0 111 | 0 112 | 0 113 | 1 114 | 1 115 | 1 116 | 0 117 | 1 118 | 1 119 | 0 120 | 0 121 | 0 122 | 0 123 | 0 124 | 1 125 | 0 126 | 1 127 | 0 128 | 0 129 | 0 130 | 1 131 | 0 132 | 1 133 | 1 134 | 0 135 | 1 136 | 0 137 | 1 138 | 0 139 | 0 140 | 1 141 | 0 142 | 0 143 | 0 144 | 1 145 | 0 146 | 0 147 | 1 148 | 1 149 | 1 150 | 0 151 | 1 152 | 1 153 | 1 154 | 0 155 | 0 156 | 0 157 | 0 158 | 0 159 | 
0 160 | 1 161 | 1 162 | 1 163 | 1 164 | 0 165 | 0 166 | 1 167 | 1 168 | 0 169 | 1 170 | 0 171 | 1 172 | 0 173 | 1 174 | 0 175 | 0 176 | 1 177 | 0 178 | 1 179 | 0 180 | 1 181 | 1 182 | 1 183 | 0 184 | 0 185 | 0 186 | 1 187 | 0 188 | 1 189 | 1 190 | 1 191 | 1 192 | 0 193 | 1 194 | 1 195 | 0 196 | 0 197 | 1 198 | 1 199 | 0 200 | 0 201 | 1 202 | 1 203 | 0 204 | 0 205 | 1 206 | 0 207 | 0 208 | 0 209 | 0 210 | 1 211 | 0 212 | 1 213 | 0 214 | 0 215 | 0 216 | 0 217 | 1 218 | 1 219 | 1 220 | 1 221 | 0 222 | 0 223 | 0 224 | 1 225 | 1 226 | 1 227 | 0 228 | 0 229 | 1 230 | 1 231 | 1 232 | 0 233 | 0 234 | 0 235 | 1 236 | 0 237 | 1 238 | 1 239 | 0 240 | 0 241 | 0 242 | 1 243 | 1 244 | 1 245 | 0 246 | 0 247 | 1 248 | 0 249 | 1 250 | 0 251 | 1 252 | 1 253 | 1 254 | 0 255 | 1 256 | 0 257 | 1 258 | 1 259 | 0 260 | 0 261 | 1 262 | 0 263 | 0 264 | 0 265 | 1 266 | 1 267 | 1 268 | 1 269 | 1 270 | 0 271 | 0 272 | 1 273 | 0 274 | 0 275 | 0 276 | 0 277 | 0 278 | 0 279 | 0 280 | 1 281 | 0 282 | 0 283 | 0 284 | 1 285 | 1 286 | 1 287 | 1 288 | 1 289 | 0 290 | 0 291 | 0 292 | 1 293 | 0 294 | 1 295 | 0 296 | 0 297 | 1 298 | 1 299 | 1 300 | 0 301 | 1 302 | 1 303 | 1 304 | 1 305 | 0 306 | 1 307 | 1 308 | 0 309 | 0 310 | 0 311 | 1 312 | 1 313 | 1 314 | 1 315 | 1 316 | 0 317 | 0 318 | 1 319 | 0 320 | 0 321 | 0 322 | 0 323 | 1 324 | 1 325 | 0 326 | 1 327 | 0 328 | 1 329 | 1 330 | 0 331 | 0 332 | 1 333 | 0 334 | 1 335 | 0 336 | 0 337 | 1 338 | 1 339 | 0 340 | 1 341 | 1 342 | 0 343 | 1 344 | 0 345 | 0 346 | 1 347 | 0 348 | 0 349 | 1 350 | 0 351 | 0 352 | 0 353 | 0 354 | 1 355 | 0 356 | 0 357 | 0 358 | 1 359 | 0 360 | 0 361 | 0 362 | 0 363 | 1 364 | 1 365 | 1 366 | 0 367 | 1 368 | 0 369 | 1 370 | 0 371 | 1 372 | 0 373 | 0 374 | 0 375 | 1 376 | 0 377 | 0 378 | 1 379 | 1 380 | 0 381 | 1 382 | 0 383 | 1 384 | 0 385 | 0 386 | 0 387 | 1 388 | 1 389 | 1 390 | 0 391 | 1 392 | 1 393 | 1 394 | 1 395 | 1 396 | 0 397 | 1 398 | 0 399 | 0 400 | 1 401 | 0 402 | 1 403 | 1 404 | 1 405 | 1 406 | 1 407 | 0 408 | 0 409 | 
1 410 | 0 411 | 1 412 | 1 413 | 0 414 | 0 415 | 0 416 | 1 417 | 1 418 | 0 419 | 0 420 | 0 421 | 1 422 | 1 423 | 0 424 | 0 425 | 0 426 | 1 427 | 0 428 | 0 429 | 0 430 | 1 431 | 1 432 | 0 433 | 0 434 | 0 435 | 1 436 | 1 437 | 1 438 | 0 439 | 0 440 | 0 441 | 0 442 | 1 443 | 1 444 | 1 445 | 0 446 | 1 447 | 1 448 | 1 449 | 1 450 | 0 451 | 0 452 | 1 453 | 0 454 | 0 455 | 1 456 | 1 457 | 0 458 | 1 459 | 1 460 | 1 461 | 0 462 | 1 463 | 1 464 | 0 465 | 1 466 | 0 467 | 1 468 | 0 469 | 1 470 | 0 471 | 1 472 | 0 473 | 1 474 | 0 475 | 0 476 | 1 477 | 1 478 | 0 479 | 1 480 | 1 481 | 1 482 | 1 483 | 0 484 | 1 485 | 1 486 | 0 487 | 0 488 | 0 489 | 0 490 | 0 491 | 0 492 | 0 493 | 0 494 | 0 495 | 0 496 | 0 497 | 0 498 | 0 499 | 0 500 | 1 501 | 0 502 | 1 503 | 1 504 | 1 505 | 1 506 | 1 507 | 1 508 | 1 509 | 1 510 | 0 511 | 0 512 | 0 513 | 0 514 | 0 515 | 0 516 | 1 517 | 0 518 | 0 519 | 0 520 | 0 521 | 0 522 | 1 523 | 0 524 | 1 525 | 0 526 | 1 527 | 0 528 | 1 529 | 1 530 | 1 531 | 1 532 | 0 533 | 0 534 | 1 535 | 1 536 | 1 537 | 0 538 | 1 539 | 0 540 | 0 541 | 1 542 | 1 543 | 0 544 | 1 545 | 1 546 | 0 547 | 1 548 | 0 549 | 0 550 | 0 551 | 0 552 | 0 553 | 0 554 | 0 555 | 1 556 | 1 557 | 1 558 | 0 559 | 0 560 | 1 561 | 1 562 | 1 563 | 0 564 | 0 565 | 1 566 | 1 567 | 1 568 | 0 569 | 0 570 | 1 571 | 0 572 | 1 573 | 0 574 | 1 575 | 0 576 | 0 577 | 0 578 | 1 579 | 0 580 | 0 581 | 0 582 | 1 583 | 1 584 | 1 585 | 0 586 | 0 587 | 0 588 | 0 589 | 0 590 | 1 591 | 1 592 | 1 593 | 0 594 | 1 595 | 1 596 | 1 597 | 0 598 | 1 599 | 1 600 | 0 601 | 0 602 | 1 603 | 0 604 | 1 605 | 0 606 | 1 607 | 0 608 | 0 609 | 1 610 | 1 611 | 0 612 | 1 613 | 0 614 | 0 615 | 0 616 | 0 617 | 0 618 | 0 619 | 1 620 | 1 621 | 1 622 | 1 623 | 1 624 | 0 625 | 0 626 | 1 627 | 0 628 | 0 629 | 1 630 | 0 631 | 0 632 | 0 633 | 1 634 | 0 635 | 0 636 | 0 637 | 0 638 | 1 639 | 1 640 | 1 641 | 0 642 | 1 643 | 0 644 | 0 645 | 1 646 | 1 647 | 0 648 | 0 649 | 1 650 | 0 651 | 0 652 | 0 653 | 0 654 | 1 655 | 0 656 | 0 657 | 0 658 | 0 659 | 
1 660 | 1 661 | 1 662 | 1 663 | 1 664 | 1 665 | 1 666 | 0 667 | 0 668 | 1 669 | 1 670 | 0 671 | 1 672 | 0 673 | 1 674 | 0 675 | 0 676 | 1 677 | 1 678 | 0 679 | 0 680 | 0 681 | 1 682 | 1 683 | 0 684 | 1 685 | 0 686 | 0 687 | 1 688 | 0 689 | 1 690 | 1 691 | 1 692 | 0 693 | 1 694 | 1 695 | 1 696 | 0 697 | 1 698 | 0 699 | 1 700 | 0 701 | 1 702 | 0 703 | 1 704 | 0 705 | 1 706 | 0 707 | 0 708 | 1 709 | 0 710 | 1 711 | 1 712 | 0 713 | 0 714 | 0 715 | 1 716 | 1 717 | 0 718 | 0 719 | 0 720 | 1 721 | 1 722 | 0 723 | 0 724 | 0 725 | 0 726 | 0 727 | 1 728 | 1 729 | 1 730 | 0 731 | 0 732 | 0 733 | 0 734 | 0 735 | 1 736 | 0 737 | 0 738 | 0 739 | 1 740 | 0 741 | 0 742 | 0 743 | 0 744 | 1 745 | 1 746 | 1 747 | 1 748 | 1 749 | 0 750 | 0 751 | 1 752 | 0 753 | 0 754 | 0 755 | 1 756 | 1 757 | 0 758 | 0 759 | 0 760 | 1 761 | 1 762 | 1 763 | 1 764 | 1 765 | 1 766 | 1 767 | 0 768 | 0 769 | 1 770 | 1 771 | 0 772 | 1 773 | 0 774 | 1 775 | 0 776 | 0 777 | 1 778 | 0 779 | 0 780 | 0 781 | 0 782 | 0 783 | 1 784 | 1 785 | 1 786 | 1 787 | 0 788 | 0 789 | 1 790 | 1 791 | 1 792 | 1 793 | 1 794 | 0 795 | 0 796 | 0 797 | 1 798 | 0 799 | 1 800 | 1 801 | 0 802 | 0 803 | 0 804 | 0 805 | 0 806 | 1 807 | 1 808 | 0 809 | 1 810 | 1 811 | 1 812 | 0 813 | 1 814 | 1 815 | 1 816 | 1 817 | 1 818 | 0 819 | 0 820 | 1 821 | 0 822 | 0 823 | 0 824 | 1 825 | 0 826 | 0 827 | 0 828 | 1 829 | 1 830 | 0 831 | 0 832 | 0 833 | 1 834 | 1 835 | 1 836 | 0 837 | 1 838 | 1 839 | 1 840 | 1 841 | 0 842 | 1 843 | 0 844 | 0 845 | 0 846 | 1 847 | 1 848 | 1 849 | 0 850 | 0 851 | 1 852 | 0 853 | 0 854 | 1 855 | 1 856 | 0 857 | 1 858 | 1 859 | 1 860 | 0 861 | 1 862 | 1 863 | 0 864 | 0 865 | 1 866 | 0 867 | 1 868 | 1 869 | 1 870 | 0 871 | 1 872 | 1 873 | 1 874 | 1 875 | 1 876 | 0 877 | 0 878 | 1 879 | 1 880 | 0 881 | 1 882 | 0 883 | 0 884 | 1 885 | 1 886 | 0 887 | 0 888 | 1 889 | 0 890 | 1 891 | 1 892 | 0 893 | 1 894 | 0 895 | 0 896 | 1 897 | 0 898 | 1 899 | 0 900 | 1 901 | 0 902 | 0 903 | 1 904 | 1 905 | 1 906 | 0 907 | 0 908 | 0 909 | 
0 910 | 0 911 | 0 912 | 1 913 | 1 914 | 0 915 | 0 916 | 0 917 | 1 918 | 1 919 | 0 920 | 1 921 | 0 922 | 1 923 | 0 924 | 0 925 | 0 926 | 1 927 | 1 928 | 1 929 | 1 930 | 1 931 | 1 932 | 0 933 | 1 934 | 0 935 | 0 936 | 1 937 | 0 938 | 0 939 | 1 940 | 0 941 | 1 942 | 1 943 | 1 944 | 0 945 | 1 946 | 0 947 | 0 948 | 0 949 | 1 950 | 1 951 | 1 952 | 1 953 | 0 954 | 1 955 | 0 956 | 0 957 | 0 958 | 0 959 | 0 960 | 0 961 | 1 962 | 0 963 | 1 964 | 1 965 | 1 966 | 0 967 | 0 968 | 0 969 | 0 970 | 0 971 | 0 972 | 1 973 | 0 974 | 0 975 | 0 976 | 0 977 | 0 978 | 0 979 | 0 980 | 0 981 | 0 982 | 0 983 | 0 984 | 0 985 | 1 986 | 1 987 | 0 988 | 0 989 | 1 990 | 1 991 | 0 992 | 1 993 | 0 994 | 1 995 | 0 996 | 0 997 | 1 998 | 1 999 | 0 1000 | 1 1001 | -------------------------------------------------------------------------------- /tests/test_data/docs/set2.csv: -------------------------------------------------------------------------------- 1 | x,y,z,qf,m 2 | 0.06528549195530009,0.8369724164812704,0.7376395041257414,0.5342383650704045,0.4298372537794186 3 | 0.6788840590370191,0.6073966210109847,0.5477652393821227,0.15217837049706318,0.3725206479081735 4 | 0.10492928999701223,0.3770082395800236,0.9488167674539995,0.19415195857411183,0.2387573549366777 5 | 0.6730480605363391,0.8746272956460867,0.8711640305362608,0.18870638146829177,0.17795973418600752 6 | 0.15059133821678028,0.4600052757396622,0.6927924762624414,0.012747694387929442,0.18375306104239986 7 | 0.7844715997868086,0.1492420020198586,0.7054848969840937,0.6964282288573892,0.23412562369543433 8 | 0.9088388197170559,0.16064597278310944,0.6347742403493505,0.5903056213698752,0.8424657194075013 9 | 0.6713453859643527,0.6043516376562633,0.09172058548310258,0.13979793682304253,0.2988431088132103 10 | 0.8816950524345414,0.29870272603258097,0.26315138232768154,0.46598638726332087,0.6248901503783735 11 | 0.15081465270124828,0.9981895862427472,0.18922083910907606,0.3730963639461383,0.8322126261807726 12 | 
0.2720990052692801,0.6768548500180815,0.39306320694951824,0.265278971818128,0.3815521267659049 13 | 0.6815171304142238,0.6834261501275253,0.5423651193397708,0.7605831646953355,0.859659662612897 14 | 15 | -------------------------------------------------------------------------------- /tests/test_data/docs/set2.kvars: -------------------------------------------------------------------------------- 1 | x=0.0652854919553001,y=0.8369724164812704,z=0.7376395041257414,qf=0.5342383650704045,m=0.4298372537794186 2 | x=0.6788840590370191,y=0.6073966210109847,z=0.5477652393821227,qf=0.1521783704970632,m=0.3725206479081735 3 | x=0.1049292899970122,y=0.3770082395800236,z=0.9488167674539995,qf=0.1941519585741118,m=0.2387573549366777 4 | x=0.6730480605363391,y=0.8746272956460867,z=0.8711640305362608,qf=0.1887063814682918,m=0.1779597341860075 5 | x=0.1505913382167803,y=0.4600052757396622,z=0.6927924762624414,qf=0.0127476943879294,m=0.1837530610423999 6 | x=0.7844715997868086,y=0.1492420020198586,z=0.7054848969840937,qf=0.6964282288573892,m=0.2341256236954343 7 | x=0.9088388197170559,y=0.1606459727831094,z=0.6347742403493505,qf=0.5903056213698752,m=0.8424657194075013 8 | x=0.6713453859643527,y=0.6043516376562633,z=0.0917205854831026,qf=0.1397979368230425,m=0.2988431088132103 9 | x=0.8816950524345414,y=0.2987027260325810,z=0.2631513823276815,qf=0.4659863872633209,m=0.6248901503783735 10 | x=0.1508146527012483,y=0.9981895862427472,z=0.1892208391090761,qf=0.3730963639461383,m=0.8322126261807726 11 | x=0.2720990052692801,y=0.6768548500180815,z=0.3930632069495182,qf=0.2652789718181280,m=0.3815521267659049 12 | x=0.6815171304142238,y=0.6834261501275253,z=0.5423651193397708,qf=0.7605831646953355,m=0.8596596626128969 13 | 14 | -------------------------------------------------------------------------------- /tests/test_data/docs/set2.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/JeffersonLab/PyPWA/29987a0471cb127dd2d352feccecc8d1f3210bf4/tests/test_data/docs/set2.npy -------------------------------------------------------------------------------- /tests/test_data/docs/set2.pf: -------------------------------------------------------------------------------- 1 | 1 2 | 1 3 | 0 4 | 0 5 | 1 6 | 0 7 | 0 8 | 1 9 | 1 10 | 1 11 | 1 12 | 1 13 | 14 | -------------------------------------------------------------------------------- /tests/test_data/docs/set2.tsv: -------------------------------------------------------------------------------- 1 | x y z qf m 2 | 0.06528549195530009 0.8369724164812704 0.7376395041257414 0.5342383650704045 0.4298372537794186 3 | 0.6788840590370191 0.6073966210109847 0.5477652393821227 0.15217837049706318 0.3725206479081735 4 | 0.10492928999701223 0.3770082395800236 0.9488167674539995 0.19415195857411183 0.2387573549366777 5 | 0.6730480605363391 0.8746272956460867 0.8711640305362608 0.18870638146829177 0.17795973418600752 6 | 0.15059133821678028 0.4600052757396622 0.6927924762624414 0.012747694387929442 0.18375306104239986 7 | 0.7844715997868086 0.1492420020198586 0.7054848969840937 0.6964282288573892 0.23412562369543433 8 | 0.9088388197170559 0.16064597278310944 0.6347742403493505 0.5903056213698752 0.8424657194075013 9 | 0.6713453859643527 0.6043516376562633 0.09172058548310258 0.13979793682304253 0.2988431088132103 10 | 0.8816950524345414 0.29870272603258097 0.26315138232768154 0.46598638726332087 0.6248901503783735 11 | 0.15081465270124828 0.9981895862427472 0.18922083910907606 0.3730963639461383 0.8322126261807726 12 | 0.2720990052692801 0.6768548500180815 0.39306320694951824 0.265278971818128 0.3815521267659049 13 | 0.6815171304142238 0.6834261501275253 0.5423651193397708 0.7605831646953355 0.859659662612897 14 | 15 | -------------------------------------------------------------------------------- /tests/test_data/docs/set2.txt: 
-------------------------------------------------------------------------------- 1 | 5.320511385178094965e-01 2 | 8.132495295051267448e-02 3 | 9.320195404466951050e-01 4 | 4.328077073661902308e-01 5 | 7.879215649146626976e-01 6 | 6.057108875154645355e-01 7 | 1.916194266622841536e-01 8 | 9.812752746616826283e-01 9 | 6.804440232551101087e-02 10 | 8.482130163090950692e-01 11 | 2.260512354197400375e-01 12 | 6.729969893864380381e-01 13 | 14 | -------------------------------------------------------------------------------- /tests/test_data/source_files/functions_without_math.py: -------------------------------------------------------------------------------- 1 | def processing(values, parameters): 2 | return True 3 | 4 | 5 | def setup(): 6 | return True 7 | -------------------------------------------------------------------------------- /tests/test_data/source_files/simple_option_object.py: -------------------------------------------------------------------------------- 1 | from PyPWA.initializers.configurator import options 2 | 3 | 4 | class SimpleOptions(options.Component): 5 | 6 | name = "SimpleOptions" 7 | module_comment = "A Simple test plugin" 8 | 9 | def get_default_options(self): 10 | return { 11 | "Option1": "item 1", 12 | "Option2": 3, 13 | "Option3": "A string value" 14 | } 15 | 16 | def get_option_difficulties(self): 17 | return { 18 | "Option1": options.Levels.REQUIRED, 19 | "Option2": options.Levels.OPTIONAL, 20 | "Option3": options.Levels.ADVANCED 21 | } 22 | 23 | def get_option_types(self): 24 | return { 25 | "Option1": ["item1", "item2", "item3"], 26 | "Option2": int, 27 | "Option3": str 28 | } 29 | 30 | def get_option_comments(self): 31 | return { 32 | "Option1": "A specific item, predefined", 33 | "Option2": "Any integer", 34 | "Option3": "Anything as a string" 35 | } 36 | 37 | 38 | -------------------------------------------------------------------------------- /tests/test_data/source_files/simple_prior.py: 
-------------------------------------------------------------------------------- 1 | def prior(x): 2 | return x 3 | -------------------------------------------------------------------------------- /tests/test_sanity.py: -------------------------------------------------------------------------------- 1 | # PyPWA, a scientific analysis toolkit. 2 | # Copyright (C) 2016 JLab 3 | # 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU General Public License as published by 6 | # the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | # 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU General Public License 15 | # along with this program. If not, see . 
16 | 17 | import io 18 | import os 19 | 20 | import pytest 21 | 22 | from pathlib import Path 23 | 24 | 25 | @pytest.fixture( 26 | scope="module", params=[Path(".testfile"), Path(".anotherfile")] 27 | ) 28 | def io_open(request): 29 | request.param.open("w").close() 30 | opened_file = request.param.open() 31 | 32 | yield opened_file 33 | 34 | opened_file.close() 35 | request.param.unlink() 36 | 37 | 38 | def test_empty_file_has_no_lines(io_open): 39 | """ 40 | Args: 41 | io_open (io.FileIO) 42 | """ 43 | assert io_open.read() == "" 44 | 45 | 46 | @pytest.mark.xfail(reason="Empty files should have no lines.", strict=True) 47 | def test_empty_file_has_lines(io_open): 48 | """ 49 | Args: 50 | io_open (io.FileIO) 51 | """ 52 | assert len(io_open.readlines()) > 1 53 | 54 | 55 | @pytest.fixture 56 | def os_remove_equals_3(monkeypatch): 57 | def returns_3(*args): 58 | # We don't actually care about the arguments 59 | return 3 60 | 61 | monkeypatch.setattr("os.remove", returns_3) 62 | 63 | 64 | def test_os_remove_returns_3(os_remove_equals_3): 65 | assert os.remove("A random path.") == 3 66 | 67 | 68 | def test_error_catching_works(): 69 | with pytest.raises(IOError): 70 | raise IOError 71 | --------------------------------------------------------------------------------