├── .github ├── scripts │ ├── install.sh │ ├── install_gitpython.sh │ └── runtests.sh └── workflows │ └── tests.yml ├── .gitignore ├── CHANGES.txt ├── LICENSE ├── MANIFEST.in ├── README.md ├── doc ├── Makefile └── source │ ├── bbd_2013_poster │ ├── example.py │ ├── meyer_bbd_2013.pdf │ ├── meyer_bbd_2013.png │ ├── meyer_bbd_2013_small.png │ └── multiply.hdf5 │ ├── conf.py │ ├── contact_license.rst │ ├── cookbook │ ├── brian.rst │ ├── concept.rst │ ├── environment.rst │ ├── parameter.rst │ └── trajectory.rst │ ├── examplesdoc │ ├── example_01.rst │ ├── example_02.rst │ ├── example_03.rst │ ├── example_04.rst │ ├── example_05.rst │ ├── example_06.rst │ ├── example_08.rst │ ├── example_09.rst │ ├── example_10.rst │ ├── example_12.rst │ ├── example_13.rst │ ├── example_14.rst │ ├── example_15.rst │ ├── example_16.rst │ ├── example_17.rst │ ├── example_18.rst │ ├── example_19.rst │ ├── example_20.rst │ ├── example_21.rst │ ├── example_22.rst │ ├── example_23.rst │ └── example_24.rst │ ├── figures │ ├── example_01.png │ ├── experiment_phases.png │ ├── layout.odp │ ├── layout.png │ ├── main_script.png │ ├── network_managing.odp │ ├── network_managing.png │ ├── network_run.png │ ├── tmp.png │ └── tutorial.png │ ├── index.rst │ ├── latex.rst │ ├── latex │ └── latex_preamble.tex │ ├── latex_contact_license.rst │ ├── manual │ ├── changelog.rst │ ├── code_toc.rst │ ├── cookbook_toc.rst │ ├── examples_toc.rst │ ├── faqs.rst │ ├── introduction.rst │ ├── manual_toc.rst │ ├── misc.rst │ ├── misc_toc.rst │ ├── optimization_tips.rst │ └── tutorial.rst │ ├── other │ └── to_new_tree.rst │ └── pypetdoc │ ├── annotationsdoc.rst │ ├── brian2networkdoc.rst │ ├── brian2parameterdoc.rst │ ├── environmentdoc.rst │ ├── parameterdoc.rst │ ├── pypetconstantsdoc.rst │ ├── pypetexceptionsdoc.rst │ ├── slotsloggingdoc.rst │ ├── storageservicedoc.rst │ ├── trajectorydoc.rst │ └── utilsdoc.rst ├── examples ├── .gitignore ├── __init__.py ├── example_01_first_steps.py ├── example_02_trajectory_access_and_storage.py ├── example_03_trajectory_merging.py ├── example_04_multiprocessing.py ├── example_05_custom_parameter.py ├── example_06_parameter_presetting.py ├── example_08_f_find_idx.py ├── example_09_large_results.py ├── example_10_get_items_from_all_runs.py ├── example_12_sharing_data_between_processes.py ├── example_13_post_processing │ ├── __init__.py │ ├── analysis.py │ ├── main.py │ └── pipeline.py ├── example_14_links.py ├── example_15_more_ways_to_add_data.py ├── example_16_multiproc_context.py ├── example_17_wrapping_an_existing_project │ ├── .gitignore │ ├── __init__.py │ ├── original.py │ └── pypetwrap.py ├── example_18_many_runs.py ├── example_19_using_deap.py ├── example_19b_using_deap_less_overhead.py ├── example_19c_using_deap_with_post_processing.py ├── example_20_using_deap_manual_runs.py ├── example_21_scoop_multiprocessing.py ├── example_22_saga_python │ ├── __init__.py │ ├── merge_trajs.py │ ├── saga_0.hdf5 │ ├── start_saga.py │ └── the_task.py ├── example_23_brian2_network.py └── example_24_large_scale_brian2_simulation │ ├── .gitignore │ ├── __init__.py │ ├── clusternet.py │ ├── plotff.py │ └── runscript.py ├── pylint ├── .gitignore ├── .pylintrc └── pylint.sh ├── pypet ├── TODO.txt ├── __init__.py ├── _version.py ├── annotations.py ├── brian2 │ ├── __init__.py │ ├── network.py │ └── parameter.py ├── environment.py ├── logging │ ├── debug.ini │ ├── default.ini │ ├── env_config_test.ini │ └── test.ini ├── naturalnaming.py ├── parameter.py ├── pypetconstants.py ├── pypetexceptions.py ├── pypetlogging.py ├── 
shareddata.py ├── slots.py ├── storageservice.py ├── tests │ ├── __init__.py │ ├── _atworema.py │ ├── all_examples.py │ ├── all_multi_core_tests.py │ ├── all_single_core_tests.py │ ├── all_tests.py │ ├── coverage_run.py │ ├── integration │ │ ├── __init__.py │ │ ├── brian2tests │ │ │ ├── __init__.py │ │ │ ├── another_network_test.py │ │ │ ├── hdf5_brian2_test.py │ │ │ └── network_test.py │ │ ├── envSCOOPdebug.py │ │ ├── environment_multiproc_test.py │ │ ├── environment_scoop_test.py │ │ ├── environment_test.py │ │ ├── git_check.py │ │ ├── link_multiproc_test.py │ │ ├── link_test.py │ │ ├── logging_multiproc_test.py │ │ ├── logging_test.py │ │ ├── merge_test.py │ │ ├── pipeline_test.py │ │ ├── removal_and_continue_test.py │ │ └── shared_data_test.py │ ├── profiling │ │ ├── __init__.py │ │ ├── creating_run_table.py │ │ ├── profiling.py │ │ ├── readable.txt │ │ ├── readable2.txt │ │ └── speed_analysis │ │ │ ├── __init__.py │ │ │ ├── avg_runtima_as_function_of_length.py │ │ │ ├── avg_runtima_improved_as_function_of_length.py │ │ │ ├── merge_analysis.py │ │ │ ├── pytables_testing_append.py │ │ │ ├── pytables_testing_iterrow.py │ │ │ ├── pytables_testing_many_children.py │ │ │ └── storage_analysis │ │ │ ├── __init__.py │ │ │ └── avg_runtima_as_function_of_length_plot_times.py │ ├── scoop_run.py │ ├── test_all_nose.py │ ├── testdata │ │ └── pypet_v0_1b_6.hdf5 │ ├── testutils │ │ ├── __init__.py │ │ ├── data.py │ │ └── ioutils.py │ └── unittests │ │ ├── __init__.py │ │ ├── annotations_test.py │ │ ├── brian2tests │ │ ├── __init__.py │ │ ├── brian2_monitor_test.py │ │ ├── brian2_parameter_test.py │ │ ├── module_test.py │ │ └── run_a_brian2_network.py │ │ ├── configparse_test.py │ │ ├── link_test.py │ │ ├── module_test.py │ │ ├── mpwrappers_test.py │ │ ├── parameter_test.py │ │ ├── pypetlogging_test.py │ │ ├── shared_data_test.py │ │ ├── storage_test.py │ │ ├── trajectory_test.py │ │ └── utils_test.py ├── trajectory.py └── utils │ ├── __init__.py │ ├── comparisons.py │ ├── configparsing.py │ ├── decorators.py │ ├── dynamicimports.py │ ├── explore.py │ ├── gitintegration.py │ ├── hdf5compression.py │ ├── helpful_classes.py │ ├── helpful_functions.py │ ├── mpwrappers.py │ ├── pypettest.py │ ├── siginthandling.py │ ├── storagefactory.py │ └── trajectory_utils.py └── setup.py /.github/scripts/install.sh: -------------------------------------------------------------------------------- 1 | echo "+++++++++++ Installing libraries +++++++++++++" 2 | sudo apt-get install gfortran libopenblas-dev liblapack-dev libhdf5-serial-dev 3 | echo "+++++++++++ Installing Stuff for Python $PYTHON_VERSION +++++++++++" 4 | conda install pip numpy scipy numexpr cython pandas pytables 5 | echo "+++++++ Conda Info and activate ++++++" 6 | conda info -a 7 | echo "+++++++++++ Installing Coveralls if coverage +++++++++++" 8 | if [[ $COVERAGE == ON ]]; then pip install coveralls; fi 9 | echo "+++++++++++ Installing Brian2 +++++++++++" 10 | pip install brian2 11 | echo "+++++++++++ Installing psutil +++++++++++" 12 | pip install psutil 13 | echo "+++++++++++ Installing dill ++++++++++++" 14 | pip install dill 15 | echo "+++++++++++ Installing GitPython and Sumatra if needed ++++++++++++" 16 | if [[ $GIT_TEST == ON ]]; then chmod +x ./.github/scripts/install_gitpython.sh; ./.github/scripts/install_gitpython.sh; fi 17 | echo "+++++++++++ Installing matplotlib and deap if needed ++++++++++++" 18 | if [[ $EXAMPLES == ON ]]; then conda install matplotlib; pip install deap; fi 19 | echo "++++++++++++ Installing SCOOP 
+++++++++++++++++++++++++" 20 | pip install scoop 21 | echo "+++++++++++ Installing PYPET unless coverage or append pwd to path +++++++++++" 22 | if [[ $COVERAGE == OFF ]]; then python setup.py install; else export PATH="./:$PATH"; fi 23 | echo "+++++++++++ FINISHED INSTALL +++++++++++" 24 | pip freeze -------------------------------------------------------------------------------- /.github/scripts/install_gitpython.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Installing Git and Sumatra Test" 4 | # sudo apt-get install git 5 | pip install GitPython 6 | if [[ $PYTHON_VERSION == 3* ]] 7 | then 8 | pip install django 9 | pip install pyyaml # otherwise smt init fails with yaml not defined error 10 | pip install Sumatra 11 | fi -------------------------------------------------------------------------------- /.github/scripts/runtests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # To exit upon any error 4 | set -u # Treat references to unset variables as an error 5 | 6 | if [[ $TEST_SUITE == ON ]] 7 | then 8 | echo "Running full test suite (with SCOOP)" 9 | python -m scoop -n 3 ./pypet/tests/all_tests.py 10 | fi 11 | 12 | if [[ $TEST_SUITE == MULTIPROC ]] 13 | then 14 | echo "Running multiprocessing test suite (with SCOOP)" 15 | python -m scoop -n 3 ./pypet/tests/all_multi_core_tests.py 16 | fi 17 | 18 | if [[ $TEST_SUITE == SINGLECORE ]] 19 | then 20 | echo "Running single-core test suite (with SCOOP)" 21 | python -m scoop -n 3 ./pypet/tests/all_single_core_tests.py 22 | fi 23 | 24 | if [[ $GIT_TEST == ON ]] 25 | then 26 | 27 | mkdir git_sumatra_test 28 | cp ./pypet/tests/integration/git_check.py git_sumatra_test 29 | cd git_sumatra_test 30 | echo "Initialise git repo" 31 | git init 32 | echo "Initialise Sumatra Repo" 33 | smt init GitTest 34 | git config --global user.email "you@example.com" 35 | git config --global user.name "Your Name" 36 | echo "DummyDummyDummy">>dummy.txt # Create a new dummy file 37 | git add dummy.txt 38 | git add git_check.py 39 | git commit -m "First Commit" 40 | echo "Dummy2">>dummy.txt # Change the file 41 | echo "Running First Git Test" 42 | if [[ $COVERAGE == ON ]] 43 | then 44 | echo "Running git coverage" 45 | coverage run --parallel-mode --source=./pypet --omit=*/compat.py,*/ptcompat.py,*/pypet/tests/*,*/shareddata.py git_check.py 46 | else 47 | python git_check.py -f # Also test failing of git 48 | fi 49 | rm -rvf experiments 50 | echo "Running Second Git Test (without actual commit)" 51 | if [[ $COVERAGE == ON ]] 52 | then 53 | echo "Running git coverage" 54 | coverage run --parallel-mode --source=./pypet --omit=*/compat.py,*/ptcompat.py,*/pypet/tests/*,*/shareddata.py git_check.py 55 | mv -v .coverage* ../../../ 56 | else 57 | python git_check.py -n # Test that git is not failing 58 | fi 59 | echo "Git Test complete, removing folder" 60 | cd .. 61 | rm -rvf git_sumatra_test 62 | echo "Removal complete" 63 | fi 64 | 65 | if [[ $COVERAGE == ON ]] 66 | then 67 | coverage run --parallel-mode --source=pypet --omit=*/compat.py,*/ptcompat.py,*/pypet/tests/*,*/shareddata.py ./pypet/tests/coverage_run.py 68 | coverage combine 69 | coveralls --verbose 70 | fi 71 | 72 | if [[ $EXAMPLES == ON ]] 73 | then 74 | cd pypet/tests 75 | python all_examples.py 76 | cd ../../ 77 | if [[ $SCOOP == ON ]] 78 | then 79 | cd examples 80 | echo "Running SCOOP example" 81 | python -m scoop -n 3 example_21_scoop_multiprocessing.py 82 | echo "SCOOP example successful" 83 | cd .. 
84 | fi 85 | fi 86 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: pypet-test-suite 2 | 3 | on: 4 | push: 5 | branches: [ master, develop ] 6 | pull_request: 7 | branches: [ master, develop ] 8 | 9 | jobs: 10 | # Install dependencies for PyTables 11 | # command to install dependencies 12 | test: 13 | runs-on: ubuntu-latest 14 | defaults: 15 | run: 16 | shell: bash -l {0} 17 | strategy: 18 | matrix: 19 | env: 20 | - {PYTHON_VERSION: "3.10", COVERAGE: OFF, GIT_TEST: OFF, EXAMPLES: OFF, TEST_SUITE: SINGLECORE, SCOOP: OFF} 21 | - {PYTHON_VERSION: "3.8", COVERAGE: OFF, GIT_TEST: OFF, EXAMPLES: OFF, TEST_SUITE: MULTIPROC, SCOOP: OFF } 22 | # TODO figure out how to run the rest here eventually 23 | # - {PYTHON_VERSION: "3.9", COVERAGE: ON, GIT_TEST: ON, EXAMPLES: OFF, TEST_SUITE: OFF, SCOOP: OFF} 24 | # - {PYTHON_VERSION: "3.9", COVERAGE: OFF, GIT_TEST: ON, EXAMPLES: ON, TEST_SUITE: OFF, SCOOP: ON} 25 | env: ${{ matrix.env }} 26 | steps: 27 | - uses: conda-incubator/setup-miniconda@v2 28 | with: 29 | auto-update-conda: true 30 | python-version: ${{ matrix.env.PYTHON_VERSION }} 31 | auto-activate-base: true 32 | - uses: actions/checkout@v3 33 | with: 34 | fetch-depth: 1 35 | 36 | - run: chmod +x ./.github/scripts/install.sh; ./.github/scripts/install.sh 37 | - run: chmod +x ./.github/scripts/runtests.sh; ./.github/scripts/runtests.sh 38 | 39 | 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | *.py~ 3 | 4 | # C extensions 5 | *.so 6 | 7 | # Packages 8 | *.egg 9 | *.egg-info 10 | dist 11 | build 12 | eggs 13 | parts 14 | bin 15 | var 16 | sdist 17 | develop-eggs 18 | .installed.cfg 19 | lib 20 | lib64 21 | 22 | 23 | # Installer logs 24 | pip-log.txt 25 | 26 | # Unit test / coverage reports 27 | .coverage 28 | .tox 29 | nosetests.xml 30 | 31 | # Translations 32 | *.mo 33 | 34 | # Mr Developer 35 | .mr.developer.cfg 36 | .project 37 | .pydevproject 38 | .idea 39 | 40 | # My folders to ignore 41 | experiments 42 | Test 43 | Brian 44 | notebooks 45 | logs 46 | LOGS 47 | log 48 | LOG 49 | hdf5 50 | HDF5 51 | tmp 52 | 53 | # profiling 54 | .profile* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013-2023, Robert Meyer 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | Redistributions in binary form must reproduce the above copyright notice, this 11 | list of conditions and the following disclaimer in the documentation and/or 12 | other materials provided with the distribution. 13 | 14 | Neither the name of the author nor the names of other contributors 15 | may be used to endorse or promote products 16 | derived from this software without specific prior written permission. 
17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 22 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 25 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE -------------------------------------------------------------------------------- /doc/source/bbd_2013_poster/example.py: -------------------------------------------------------------------------------- 1 | from pypet import Environment, cartesian_product 2 | 3 | def multiply(traj): 4 | """Example of a sophisticated numerical experiment 5 | that involves multiplying two integer values. 6 | 7 | :param traj: 8 | Trajectory containing the parameters in a particular 9 | combination, it also serves as a container for results. 10 | """ 11 | z = traj.x * traj.y 12 | traj.f_add_result('z', z, comment='Result of x*y') 13 | 14 | # Create an environment that handles running the experiment 15 | env = Environment(trajectory='Multiplication', 16 | filename='multiply.hdf5', 17 | comment='A simulation of multiplication') 18 | # The environment provides a trajectory container for us 19 | traj = env.v_trajectory 20 | # Add two parameters, both with default value 0 21 | traj.f_add_parameter('x', 0, comment='First dimension') 22 | traj.f_add_parameter('y', 0, comment='Second dimension') 23 | # Explore the Cartesian product of x in {1,2,3,4} and y in {6,7,8} 24 | traj.f_explore(cartesian_product({'x': [1, 2, 3, 4], 25 | 'y': [6, 7, 8]})) 26 | # Run simulation function `multiply` with all parameter combinations 27 | env.f_run(multiply) -------------------------------------------------------------------------------- /doc/source/bbd_2013_poster/meyer_bbd_2013.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/bbd_2013_poster/meyer_bbd_2013.pdf -------------------------------------------------------------------------------- /doc/source/bbd_2013_poster/meyer_bbd_2013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/bbd_2013_poster/meyer_bbd_2013.png -------------------------------------------------------------------------------- /doc/source/bbd_2013_poster/meyer_bbd_2013_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/bbd_2013_poster/meyer_bbd_2013_small.png -------------------------------------------------------------------------------- 
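The poster example above stores its parameters and results into `multiply.hdf5` (the data file listed next). As a minimal sketch, loading the stored data back for analysis could look like the following; the trajectory name follows `example.py`, and `run_00000000` is assumed to be the name of the first run:

.. code-block:: python

    from pypet import Trajectory

    # Create an empty trajectory container and load the data from disk
    traj = Trajectory('Multiplication')
    traj.f_load(filename='multiply.hdf5',
                load_parameters=2, load_results=2)  # 2 means load all data

    # Natural naming access to the result of the first run,
    # which should be z = 1 * 6 = 6 for this exploration
    print(traj.results.runs.run_00000000.z)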
/doc/source/bbd_2013_poster/multiply.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/bbd_2013_poster/multiply.hdf5 -------------------------------------------------------------------------------- /doc/source/contact_license.rst: -------------------------------------------------------------------------------- 1 | ------- 2 | Contact 3 | ------- 4 | 5 | Robert Meyer 6 | 7 | robert.meyer (at) ni.tu-berlin.de 8 | 9 | Marchstr. 23 10 | 11 | TU-Berlin, MAR 5.046 12 | 13 | D-10587 Berlin 14 | 15 | 16 | ------- 17 | License 18 | ------- 19 | 20 | .. literalinclude:: ../../LICENSE 21 | -------------------------------------------------------------------------------- /doc/source/cookbook/concept.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _more-on-concept: 3 | 4 | ===================================== 5 | How to Structure your Simulations 6 | ===================================== 7 | 8 | This project was born out of the need for a tool to manage and store results of spiking neural 9 | network simulations. In particular, to manage results obtained with the BRIAN_ neural network 10 | simulator (yet, this does not mean this project is restricted to BRIAN, you can use the 11 | package for any simulation or numerical experiment in Python). 12 | The more complex simulations become, the more complicated the sets of parameters get, 13 | and the harder it becomes to qualitatively compare results obtained from different 14 | simulation runs. There was a huge need to standardize the simulations and parameter 15 | exploration, to structure and group different parameters, and especially to disentangle the 16 | scientific simulation from the environment that runs it. So when I designed 17 | this tool I always had in mind how I wanted to use it later on in my project. 18 | I do not want to withhold these conceptual ideas from you. 19 | 20 | So, I would like to present some remarks on how to effectively use this tool. 21 | I would divide any numerical simulation into three phases: 22 | 23 | 1. Parameter Definition Phase 24 | 25 | Here you will only add parameters (see :func:`pypet.naturalnaming.ParameterNode.f_add_parameter`, 26 | and :ref:`more-on-adding`) to your trajectory, no results, no derived parameters, 27 | no building of other objects used during your simulation. 28 | **ONLY parameters**. You could write a `conf.py` 29 | file that adds all parameters to your trajectory. To exclude parameter sets and 30 | to allow some control flow, you can consider :ref:`more-on-presetting`. 31 | 32 | 2. Preparation Phase 33 | 34 | Here you will prepare stuff before the actual runtime, 35 | e.g. create objects needed in your simulations. 36 | This encompasses stuff that only needs to be built once and is used 37 | during all individual runs. 38 | Here you can also start adding derived parameters. 39 | 40 | At the end of your preparation phase you define which parameters should be explored and 41 | how via :func:`pypet.trajectory.f_explore` (and take a look at :ref:`parameter-exploration`). 42 | 43 | 3. The Run Phase 44 | 45 | This is the phase where individual parameter space points along the trajectory that you 46 | explore are evaluated. Here you produce your main numerical results and maybe some 47 | derived parameters. 
48 | You have a top-level function that uses a single run object (maybe called `traj`) 49 | and accesses the parameters needed during the single run 50 | to make some calculations (see :ref:`more-on-single-runs`). 51 | 52 | This top-level function is handed over to the runtime environment along with 53 | some other arguments (like some objects not managed by your trajectory) to carry out the 54 | simulation (see :func:`pypet.environment.Environment.run`, and :ref:`more-on-running`). 55 | 56 | Usually, to speed up your simulations and compute several runs in parallel, you can 57 | use multiprocessing at this stage, see :ref:`more-on-multiprocessing`. 58 | 59 | 60 | After your parameter exploration is finished you might have a fourth phase of post-processing, 61 | for instance, calculating summary statistics over all your simulation runs. 62 | Yet, I would separate this phase entirely from the previous ones. You can do this in a separate 63 | program that loads the trajectory. 64 | 65 | 66 | Well, that's it, so thanks for using *pypet*, 67 | 68 | Robert 69 | 70 | .. 71 | PS: If you use *pypet* for your research, I would be grateful if you 72 | follow the :ref:`citation_policy`. 73 | 74 | PS: If you use *pypet* for BRIAN_ simulations, also check out 75 | :ref:`brian-framework`. 76 | 77 | 78 | .. _BRIAN: http://briansimulator.org/ -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_01.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-01: 3 | 4 | =========== 5 | First Steps 6 | =========== 7 | 8 | Download: :download:`example_01_first_steps.py <../../../examples/example_01_first_steps.py>` 9 | 10 | This is a basic overview of the usage of the tool, nothing fancy. 11 | 12 | 13 | .. literalinclude:: ../../../examples/example_01_first_steps.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_02.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-02: 3 | 4 | ======================================= 5 | Natural Naming, Storage and Loading 6 | ======================================= 7 | 8 | Download: :download:`example_02_trajectory_access_and_storage.py <../../../examples/example_02_trajectory_access_and_storage.py>` 9 | 10 | The following code snippet shows how natural naming works, and how you can store and load 11 | a trajectory. 12 | 13 | .. literalinclude:: ../../../examples/example_02_trajectory_access_and_storage.py 14 | 15 | -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_03.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-03: 3 | 4 | ======================= 5 | Merging of Trajectories 6 | ======================= 7 | 8 | Download: :download:`example_03_trajectory_merging.py <../../../examples/example_03_trajectory_merging.py>` 9 | 10 | The code snippet below shows how to merge trajectories. 11 | 12 | 13 | .. literalinclude:: ../../../examples/example_03_trajectory_merging.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_04.rst: -------------------------------------------------------------------------------- 1 | 2 | .. 
_example-04: 3 | 4 | =============== 5 | Multiprocessing 6 | =============== 7 | 8 | Download: :download:`example_04_multiprocessing.py <../../../examples/example_04_multiprocessing.py>` 9 | 10 | This code snippet shows how to use multiprocessing with locks. 11 | In order to use the queue-based multiprocessing, one simply needs to make the following change 12 | for the environment creation: 13 | 14 | ``wrap_mode=pypetconstants.WRAP_MODE_QUEUE``. 15 | 16 | .. literalinclude:: ../../../examples/example_04_multiprocessing.py 17 | 18 | -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_05.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-05: 3 | 4 | ============================================ 5 | Custom Parameter (Strange Attractor Inside!) 6 | ============================================ 7 | 8 | Download: :download:`example_05_custom_parameter.py <../../../examples/example_05_custom_parameter.py>` 9 | 10 | Here you can see an example of a custom parameter and how to reload results and use them for analysis. 11 | We will simulate the `Lorenz Attractor`_ and integrate with a simple Euler method. We 12 | will explore three different initial conditions. 13 | 14 | .. _`Lorenz Attractor`: https://en.wikipedia.org/wiki/Lorenz_attractor 15 | 16 | .. literalinclude:: ../../../examples/example_05_custom_parameter.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_06.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-06: 3 | 4 | ==================== 5 | Parameter Presetting 6 | ==================== 7 | 8 | Download: :download:`example_06_parameter_presetting.py <../../../examples/example_06_parameter_presetting.py>` 9 | 10 | We will reuse some stuff from the previous example :ref:`example-05`: 11 | 12 | * Our main Euler simulation job `euler_scheme` 13 | 14 | * The `FunctionParameter` to store source code 15 | 16 | We will execute the same Euler simulation as before, but now with a different 17 | differential equation yielding the `Roessler Attractor`_. 18 | If you erase the statement 19 | 20 | `traj.f_preset_parameter('diff_name', 'diff_roessler')` 21 | 22 | you will end up with the same results as in the previous example. 23 | 24 | .. _Roessler Attractor: https://en.wikipedia.org/wiki/R%C3%B6ssler_attractor 25 | 26 | .. literalinclude:: ../../../examples/example_06_parameter_presetting.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_08.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-08: 3 | 4 | ============================= 5 | Using the f_find_idx Function 6 | ============================= 7 | 8 | Download: :download:`example_08_f_find_idx.py <../../../examples/example_08_f_find_idx.py>` 9 | 10 | Here you can see how you can search for particular parameter combinations and the corresponding 11 | run indices using the :func:`~pypet.Trajectory.f_find_idx` function. 12 | 13 | 14 | .. literalinclude:: ../../../examples/example_08_f_find_idx.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_09.rst: -------------------------------------------------------------------------------- 1 | 2 | .. 
_example-09: 3 | 4 | ========================================================= 5 | Storing and Loading Large Results (or just parts of them) 6 | ========================================================= 7 | 8 | Download: :download:`example_09_large_results.py <../../../examples/example_09_large_results.py>` 9 | 10 | Want to know how to load large results in parts? See below: 11 | 12 | 13 | .. literalinclude:: ../../../examples/example_09_large_results.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_10.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-10: 3 | 4 | ======================================= 5 | Accessing Results from All Runs at Once 6 | ======================================= 7 | 8 | Download: :download:`example_10_get_items_from_all_runs.py <../../../examples/example_10_get_items_from_all_runs.py>` 9 | 10 | Want to know how to access all data from results at once? 11 | Check out :func:`~pypet.trajectory.Trajectory.f_get_from_runs` and the 12 | code below: 13 | 14 | 15 | .. literalinclude:: ../../../examples/example_10_get_items_from_all_runs.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_12.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-12: 3 | 4 | =================================== 5 | Sharing Data during Multiprocessing 6 | =================================== 7 | 8 | Here we show how data can be shared among multiple processes. 9 | Mind, however, that this is conceptually a rather bad design 10 | since the single runs are no longer independent of each other. 11 | A better solution would be to simply return the data and 12 | sort it into a list during post-processing. 13 | 14 | Download: :download:`example_12_sharing_data_between_processes.py <../../../examples/example_12_sharing_data_between_processes.py>` 15 | 16 | 17 | .. literalinclude:: ../../../examples/example_12_sharing_data_between_processes.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_13.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-13: 3 | 4 | ================================================== 5 | Post-Processing and Pipelining (from the Tutorial) 6 | ================================================== 7 | 8 | Here you find an example of post-processing. 9 | 10 | It consists of a main script `main.py` for the three phases 11 | *pre-processing*, *run phase* and *post-processing* of a single neuron 12 | simulation, and an `analysis.py` file 13 | giving an example of a potential data analysis that encompasses plotting the results. 14 | Moreover, there exists a `pipeline.py` file to crunch the first three phases into 15 | a single function. 16 | 17 | A detailed explanation of the example can be found in the :ref:`tutorial` section. 18 | 19 | 20 | Download: :download:`main.py <../../../examples/example_13_post_processing/main.py>` 21 | 22 | Download: :download:`analysis.py <../../../examples/example_13_post_processing/analysis.py>` 23 | 24 | Download: :download:`pipeline.py <../../../examples/example_13_post_processing/pipeline.py>` 25 | 26 | ------------ 27 | Main 28 | ------------ 29 | 30 | .. literalinclude:: ../../../examples/example_13_post_processing/main.py 31 | 32 | 33 | ---------- 34 | Analysis 35 | ---------- 36 | 37 | .. 
literalinclude:: ../../../examples/example_13_post_processing/analysis.py 38 | 39 | 40 | ----------- 41 | Pipelining 42 | ----------- 43 | 44 | Additionally, you can use pipelining. 45 | 46 | Since these three steps, pre-processing, run phase, and post-processing, define a common pipeline, 47 | you can actually also make *pypet* supervise all three steps at once. 48 | 49 | You can define a pipeline function that does the pre-processing and returns 50 | the job function plus some optional arguments, as well as the post-processing function 51 | with some other optional arguments. 52 | 53 | So, you could define the following pipeline function. 54 | The pipeline function only has to accept the trajectory as its first argument and 55 | has to return two tuples, one for the run function and one for the 56 | post-processing. Since none of our functions takes any other arguments than the trajectory 57 | (and the post-processing function the result list) we simply return an empty 58 | tuple ``()`` for no arguments and an empty dictionary ``{}`` for no keyword arguments. 59 | 60 | 61 | And that's it; then everything, including the pre-processing and the addition of parameters, 62 | is supervised by *pypet*. Check out the source code below: 63 | 64 | .. literalinclude:: ../../../examples/example_13_post_processing/pipeline.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_14.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-14: 3 | 4 | =========== 5 | Using Links 6 | =========== 7 | 8 | Download: :download:`example_14_links.py <../../../examples/example_14_links.py>` 9 | 10 | You can also link between different nodes of your :class:`~pypet.trajectory.Trajectory`: 11 | 12 | 13 | .. literalinclude:: ../../../examples/example_14_links.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_15.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-15: 3 | 4 | ============================= 5 | Adding Data to the Trajectory 6 | ============================= 7 | 8 | Download: :download:`example_15_more_ways_to_add_data.py <../../../examples/example_15_more_ways_to_add_data.py>` 9 | 10 | Here are the different ways to add data to your :class:`~pypet.trajectory.Trajectory` container: 11 | 12 | 13 | .. literalinclude:: ../../../examples/example_15_more_ways_to_add_data.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_16.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-16: 3 | 4 | =========================== 5 | Lightweight Multiprocessing 6 | =========================== 7 | 8 | Download: :download:`example_16_multiproc_context.py <../../../examples/example_16_multiproc_context.py>` 9 | 10 | This example shows you how to use a :class:`~pypet.environment.MultiprocContext`. 11 | 12 | 13 | .. literalinclude:: ../../../examples/example_16_multiproc_context.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_17.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-17: 3 | 4 | ======================================================== 5 | Wrapping an Existing Project (Cellular Automata Inside!) 
6 | ======================================================== 7 | 8 | Here you can find out how to wrap *pypet* around an already existing simulation. 9 | The original project (``original.py``) simulates `elementary cellular automata`_. 10 | 11 | The code explores different starting conditions and automata rules. 12 | ``pypetwrap.py`` shows how to include *pypet* into the project without 13 | changing much of the original code. Basically, the core code of the simulation is left 14 | untouched. Only the *boilerplate* of the main script changes and a short wrapper function 15 | is needed that passes parameters from the *trajectory* to the core simulation. 16 | 17 | Moreover, introducing *pypet* allows 18 | much easier exploration of the parameter space. Now exploring different 19 | parameter sets requires no further code changes. 20 | 21 | Download: :download:`original.py <../../../examples/example_17_wrapping_an_existing_project/original.py>` 22 | 23 | Download: :download:`pypetwrap.py <../../../examples/example_17_wrapping_an_existing_project/pypetwrap.py>` 24 | 25 | ---------------- 26 | Original Project 27 | ---------------- 28 | 29 | .. literalinclude:: ../../../examples/example_17_wrapping_an_existing_project/original.py 30 | 31 | 32 | ------------- 33 | Using *pypet* 34 | ------------- 35 | 36 | .. literalinclude:: ../../../examples/example_17_wrapping_an_existing_project/pypetwrap.py 37 | 38 | 39 | .. _`elementary cellular automata`: http://en.wikipedia.org/wiki/Elementary_cellular_automaton -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_18.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-18: 3 | 4 | ================================= 5 | Large Explorations with Many Runs 6 | ================================= 7 | 8 | Download: :download:`example_18_many_runs.py <../../../examples/example_18_many_runs.py>` 9 | 10 | This example shows how to group many results into buckets: 11 | 12 | 13 | .. literalinclude:: ../../../examples/example_18_many_runs.py 14 | -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_19.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-19: 3 | 4 | ================================================= 5 | Using DEAP the evolutionary computation framework 6 | ================================================= 7 | 8 | Download: :download:`example_19_using_deap.py <../../../examples/example_19_using_deap.py>` 9 | 10 | Less overhead version: :download:`example_19b_using_deap_less_overhead.py <../../../examples/example_19b_using_deap_less_overhead.py>` 11 | 12 | Less overhead and *post-processing* version: :download:`example_19c_using_deap_with_post_processing.py <../../../examples/example_19c_using_deap_with_post_processing.py>` 13 | 14 | 15 | This shows an example of how to use *pypet* in combination with 16 | the evolutionary computation framework DEAP_. 17 | 18 | Note that storing during a single run, as in the example, adds a lot of overhead and only makes sense 19 | if your fitness evaluation takes quite long. There's also an example with less 20 | overhead in the middle section. 21 | 22 | Moreover, if you are interested in using DEAP_ in a post-processing scheme 23 | (:ref:`more-about-postproc`), at the very bottom you can find an example using 24 | post-processing. 25 | 26 | 27 | .. 
literalinclude:: ../../../examples/example_19_using_deap.py 28 | 29 | 30 | ^^^^^^^^^^^^^^^^^^^^^ 31 | Less Overhead Version 32 | ^^^^^^^^^^^^^^^^^^^^^ 33 | 34 | .. literalinclude:: ../../../examples/example_19b_using_deap_less_overhead.py 35 | 36 | 37 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 38 | Less Overhead and Post-Processing Version 39 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 40 | 41 | .. literalinclude:: ../../../examples/example_19c_using_deap_with_post_processing.py 42 | 43 | 44 | .. _DEAP: http://deap.readthedocs.org/ -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_20.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-20: 3 | 4 | ==================================== 5 | Starting runs WITHOUT an Environment 6 | ==================================== 7 | 8 | Download: :download:`example_20_using_deap_manual_runs.py <../../../examples/example_20_using_deap_manual_runs.py>` 9 | 10 | This shows an example of how to use *pypet* without an Environment and 11 | how to start runs manually. It is a modified version of :ref:`example-19` using the 12 | DEAP_ framework. 13 | 14 | 15 | .. literalinclude:: ../../../examples/example_20_using_deap_manual_runs.py 16 | 17 | 18 | .. _DEAP: http://deap.readthedocs.org/ -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_21.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-21: 3 | 4 | =========================== 5 | Using SCOOP multiprocessing 6 | =========================== 7 | 8 | Download: :download:`example_21_scoop_multiprocessing.py <../../../examples/example_21_scoop_multiprocessing.py>` 9 | 10 | Here you learn how to use *pypet* in combination with SCOOP_. 11 | If your SCOOP_ framework is configured correctly (see the `SCOOP docs`_ on how to set up 12 | start-up scripts for grid engines and/or multiple hosts), you can easily use 13 | *pypet* in a multi-server or cluster framework. 14 | 15 | Start the script via ``python -m scoop example_21_scoop_multiprocessing.py`` to run 16 | *pypet* with SCOOP_. 17 | 18 | By the way, if using SCOOP_, the only multiprocessing wrap mode supported is 19 | ``'LOCAL'``, i.e. all your data is actually stored 20 | by your main Python process and results are collected from all workers. 21 | 22 | 23 | .. literalinclude:: ../../../examples/example_21_scoop_multiprocessing.py 24 | 25 | .. _SCOOP: http://scoop.readthedocs.org/ 26 | 27 | .. _SCOOP docs: http://scoop.readthedocs.org/en/0.7/usage.html#use-with-a-scheduler 28 | -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_22.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-22: 3 | 4 | ============================== 5 | Using *pypet* with SAGA-Python 6 | ============================== 7 | 8 | This example shows how to use *pypet* in combination with `SAGA Python`_. 9 | It shows how to establish an **ssh** connection to a given server (`start_saga.py`) and then 10 | 11 | 1. Upload all necessary scripts 2 | 2. Start several batches of trajectories 13 | 3. Merge all trajectories into a single one 14 | 15 | There are only a few modifications necessary to switch from just using **ssh** to 16 | actually submitting jobs on a cluster (like a Sun Grid Engine with ``qsub``), see the 17 | `SAGA Python`_ documentation. 
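The gist of such an **ssh** submission, as a rough sketch only (the server address, user name, and working directory are placeholders; the complete and authoritative version is `start_saga.py` below):

.. code-block:: python

    import saga

    # SSH credentials; user name and server address are placeholders
    ctx = saga.Context('ssh')
    ctx.user_id = 'myuser'

    session = saga.Session()
    session.add_context(ctx)

    # A URL like 'sge+ssh://myserver' would submit via qsub instead
    job_service = saga.job.Service('ssh://myserver', session=session)

    job_description = saga.job.Description()
    job_description.executable = 'python'
    job_description.arguments = ['the_task.py']
    job_description.working_directory = '/home/myuser/pypet22'

    job = job_service.create_job(job_description)
    job.run()
    job.wait()  # Block until the remote batch has finished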
18 | 19 | To run the example, you only need to add your server address, user name, password, and 20 | working directory (on the server) to the `start_saga.py` file and then 21 | execute ``python start_saga.py``. `the_task.py` and `merge_trajs.py` 22 | are used on the server side and you don't need to touch these at all, but they need to 23 | be in the same folder as your `start_saga.py` file. 24 | 25 | Download: :download:`start_saga.py <../../../examples/example_22_saga_python/start_saga.py>` 26 | 27 | Download: :download:`the_task.py <../../../examples/example_22_saga_python/the_task.py>` 28 | 29 | Download: :download:`merge_trajs.py <../../../examples/example_22_saga_python/merge_trajs.py>` 30 | 31 | 32 | --------------- 33 | Start Up Script 34 | --------------- 35 | 36 | .. literalinclude:: ../../../examples/example_22_saga_python/start_saga.py 37 | 38 | ------------------- 39 | The Experiment File 40 | ------------------- 41 | 42 | .. literalinclude:: ../../../examples/example_22_saga_python/the_task.py 43 | 44 | ---------------------------- 45 | Script to merge Trajectories 46 | ---------------------------- 47 | 48 | .. literalinclude:: ../../../examples/example_22_saga_python/merge_trajs.py 49 | 50 | 51 | .. _SAGA Python: http://saga-python.readthedocs.org/ -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_23.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-23: 3 | 4 | ==================== 5 | Short BRIAN2 Example 6 | ==================== 7 | 8 | Download: :download:`example_23_brian2_network.py <../../../examples/example_23_brian2_network.py>` 9 | 10 | Find an example usage with BRIAN2_ below. 11 | 12 | .. _BRIAN2: https://brian2.readthedocs.org/ 13 | 14 | .. literalinclude:: ../../../examples/example_23_brian2_network.py -------------------------------------------------------------------------------- /doc/source/examplesdoc/example_24.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _example-24: 3 | 4 | ============================= 5 | Large scale BRIAN2 simulation 6 | ============================= 7 | 8 | This example involves a large scale simulation of a BRIAN2_ network (see :ref:`brian-framework`). 9 | The example is taken from the `Litwin-Kumar and Doiron paper`_ from Nature Neuroscience 2012. 10 | 11 | It is split into three different modules: The `clusternet.py` file containing 12 | the network specification, the `runscript.py` file to start a simulation 13 | (you have to be patient, BRIAN2 simulations can take some time), and 14 | the `plotff.py` file to plot the results of the parameter exploration, i.e. the 15 | Fano Factor as a function of the clustering parameter `R_ee`. 16 | 17 | Download: :download:`clusternet.py <../../../examples/example_24_large_scale_brian2_simulation/clusternet.py>` 18 | 19 | Download: :download:`runscript.py <../../../examples/example_24_large_scale_brian2_simulation/runscript.py>` 20 | 21 | Download: :download:`plotff.py <../../../examples/example_24_large_scale_brian2_simulation/plotff.py>` 22 | 23 | 24 | ------------------- 25 | Clusternet 26 | ------------------- 27 | 28 | .. literalinclude:: ../../../examples/example_24_large_scale_brian2_simulation/clusternet.py 29 | 30 | ---------- 31 | Runscript 32 | ---------- 33 | 34 | .. literalinclude:: ../../../examples/example_24_large_scale_brian2_simulation/runscript.py 35 | 36 | ------- 37 | Plotff 38 | ------- 39 | 40 | .. 
literalinclude:: ../../../examples/example_24_large_scale_brian2_simulation/plotff.py 41 | 42 | .. _`Litwin-Kumar and Doiron paper`: http://www.nature.com/neuro/journal/v15/n11/full/nn.3220.html 43 | 44 | .. _BRIAN2: http://brian2.readthedocs.org/ -------------------------------------------------------------------------------- /doc/source/figures/example_01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/example_01.png -------------------------------------------------------------------------------- /doc/source/figures/experiment_phases.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/experiment_phases.png -------------------------------------------------------------------------------- /doc/source/figures/layout.odp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/layout.odp -------------------------------------------------------------------------------- /doc/source/figures/layout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/layout.png -------------------------------------------------------------------------------- /doc/source/figures/main_script.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/main_script.png -------------------------------------------------------------------------------- /doc/source/figures/network_managing.odp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/network_managing.odp -------------------------------------------------------------------------------- /doc/source/figures/network_managing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/network_managing.png -------------------------------------------------------------------------------- /doc/source/figures/network_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/network_run.png -------------------------------------------------------------------------------- /doc/source/figures/tmp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/tmp.png -------------------------------------------------------------------------------- /doc/source/figures/tutorial.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/doc/source/figures/tutorial.png -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. pypet documentation master file, created by 2 | sphinx-quickstart on Wed Sep 4 12:12:59 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ================================== 7 | Welcome to *pypet*'s documentation 8 | ================================== 9 | 10 | .. image:: https://travis-ci.org/SmokinCaterpillar/pypet.svg?branch=master 11 | :target: https://travis-ci.org/SmokinCaterpillar/pypet 12 | .. image:: https://ci.appveyor.com/api/projects/status/9amhj3iyf105xa2y/branch/master?svg=true 13 | :target: https://ci.appveyor.com/project/SmokinCaterpillar/pypet/branch/master 14 | .. image:: https://coveralls.io/repos/github/SmokinCaterpillar/pypet/badge.svg?branch=master 15 | :target: https://coveralls.io/github/SmokinCaterpillar/pypet?branch=master 16 | .. image:: https://api.codacy.com/project/badge/grade/86268960751442799fcf6192b36e386f 17 | :target: https://www.codacy.com/app/robert-meyer/pypet 18 | .. image:: http://depsy.org/api/package/pypi/pypet/badge.svg 19 | :target: http://depsy.org/package/python/pypet 20 | 21 | The new Python parameter exploration toolkit: 22 | *pypet* manages exploration of the parameter space 23 | of any numerical simulation in Python, 24 | thereby storing your data into HDF5_ files for you. 25 | Moreover, *pypet* offers a new data container which 26 | lets you access all your parameters and results 27 | from a single source. Data I/O of your simulations and 28 | analyses becomes a piece of cake! 29 | 30 | Latest version: 31 | 32 | .. image:: https://badge.fury.io/py/pypet.svg 33 | :target: https://badge.fury.io/py/pypet 34 | 35 | .. _HDF5: http://www.hdfgroup.org/HDF5/ 36 | 37 | 38 | --------- 39 | IMPORTANT 40 | --------- 41 | 42 | This version **no** longer supports **Python 2**. If you are still using 43 | **Python 2** please download `pypet 0.3.0`_. 44 | 45 | .. _`pypet 0.3.0`: https://pypi.python.org/pypi/pypet/0.3.0 46 | 47 | 48 | ------------- 49 | Documentation 50 | ------------- 51 | 52 | .. toctree:: 53 | :maxdepth: 2 54 | 55 | manual/manual_toc 56 | manual/misc_toc 57 | manual/code_toc 58 | 59 | 60 | The documentation is also available for `download in PDF format`_. 61 | 62 | .. _download in PDF format: https://media.readthedocs.org/pdf/pypet/latest/pypet.pdf 63 | 64 | 65 | -------- 66 | Feedback 67 | -------- 68 | 69 | Please feel free to give feedback, 70 | make suggestions, and report bugs. 71 | Use **github** (https://github.com/SmokinCaterpillar/pypet) issues or 72 | write to the `pypet Google Group`_. 73 | 74 | Thanks! 75 | 76 | .. _`pypet Google Group`: https://groups.google.com/forum/?hl=de#!forum/pypet 77 | 78 | .. include:: contact_license.rst 79 | 80 | 81 | ----- 82 | Index 83 | ----- 84 | 85 | * :ref:`genindex` 86 | 87 | 88 | 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /doc/source/latex.rst: -------------------------------------------------------------------------------- 1 | .. 
toctree:: 2 | :maxdepth: 2 3 | 4 | ../manual/manual_toc 5 | ../manual/misc_toc 6 | ../manual/code_toc 7 | latex_contact_license -------------------------------------------------------------------------------- /doc/source/latex/latex_preamble.tex: -------------------------------------------------------------------------------- 1 | 2 | \usepackage{enumitem} 3 | \setlistdepth{99} 4 | -------------------------------------------------------------------------------- /doc/source/latex_contact_license.rst: -------------------------------------------------------------------------------- 1 | =================== 2 | Contact and License 3 | =================== 4 | 5 | .. include:: contact_license.rst -------------------------------------------------------------------------------- /doc/source/manual/changelog.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | .. include:: ../../../CHANGES.txt -------------------------------------------------------------------------------- /doc/source/manual/code_toc.rst: -------------------------------------------------------------------------------- 1 | ----------------- 2 | Library Reference 3 | ----------------- 4 | 5 | 6 | .. toctree:: 7 | :maxdepth: 3 8 | 9 | ../pypetdoc/environmentdoc 10 | ../pypetdoc/trajectorydoc 11 | ../pypetdoc/parameterdoc 12 | ../pypetdoc/annotationsdoc 13 | ../pypetdoc/utilsdoc 14 | ../pypetdoc/pypetexceptionsdoc 15 | ../pypetdoc/pypetconstantsdoc 16 | ../pypetdoc/slotsloggingdoc 17 | ../pypetdoc/storageservicedoc 18 | ../pypetdoc/brian2parameterdoc 19 | ../pypetdoc/brian2networkdoc 20 | 21 | -------------------------------------------------------------------------------- /doc/source/manual/cookbook_toc.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _cookbook: 3 | 4 | ========================== 5 | Cookbook (Detailed Manual) 6 | ========================== 7 | 8 | Here you can find some more detailed explanations of various concepts of *pypet*. 9 | 10 | 11 | .. toctree:: 12 | :maxdepth: 3 13 | :numbered: 14 | 15 | ../cookbook/trajectory 16 | ../cookbook/parameter 17 | ../cookbook/environment 18 | ../cookbook/brian.rst -------------------------------------------------------------------------------- /doc/source/manual/examples_toc.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _theexamples: 3 | 4 | ======== 5 | Examples 6 | ======== 7 | 8 | Here you can find some example code showing how to use *pypet*. 9 | All examples were written and tested with Python 3, 10 | since this version of *pypet* no longer supports Python 2. 11 | 12 | 13 | -------------- 14 | Basic Concepts 15 | -------------- 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | 20 | ../examplesdoc/example_01 21 | ../examplesdoc/example_02 22 | ../examplesdoc/example_14 23 | ../examplesdoc/example_15 24 | ../examplesdoc/example_04 25 | ../examplesdoc/example_21 26 | ../examplesdoc/example_09 27 | ../examplesdoc/example_13 28 | ../examplesdoc/example_17 29 | ../examplesdoc/example_18 30 | 31 | .. 32 | Ex1: First steps 33 | Ex2: Trajectory Access and Storage 34 | Ex14: Links 35 | Ex15: More ways to add data 36 | Ex4: Multiprocessing 37 | Ex21: SCOOP multiprocessing 38 | Ex9: Large Results 39 | Ex13: Pipeline and post-processing 40 | Ex17: Wrapping an existing project 41 | Ex18: Many runs 42 | 43 | 44 | ----------------- 45 | Advanced Concepts 46 | ----------------- 47 | 48 | .. 
toctree:: 49 | :maxdepth: 1 50 | 51 | ../examplesdoc/example_03 52 | ../examplesdoc/example_05 53 | ../examplesdoc/example_06 54 | ../examplesdoc/example_08 55 | ../examplesdoc/example_10 56 | ../examplesdoc/example_12 57 | ../examplesdoc/example_16 58 | ../examplesdoc/example_19 59 | ../examplesdoc/example_20 60 | ../examplesdoc/example_22 61 | 62 | .. 63 | Ex3: Merging 64 | Ex5: Custom parameters 65 | Ex6: Presetting 66 | Ex8: f_find_idx 67 | Ex10: Items from all runs 68 | Ex12: Sharing Data between Processes 69 | Ex16: Multiprocessing Context 70 | Ex19: DEAP 71 | Ex20: Manual runs (DEAP) 72 | Ex22: SAGA Python 73 | 74 | 75 | ----------------- 76 | BRIAN2 Examples 77 | ----------------- 78 | 79 | .. toctree:: 80 | :maxdepth: 1 81 | 82 | ../examplesdoc/example_23 83 | ../examplesdoc/example_24 84 | 85 | 86 | .. 87 | Ex23: Small Brian2 network 88 | Ex24: Large Complex Brian2 Example -------------------------------------------------------------------------------- /doc/source/manual/manual_toc.rst: -------------------------------------------------------------------------------- 1 | =================== 2 | *pypet* User Manual 3 | =================== 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | introduction 9 | tutorial 10 | cookbook_toc 11 | examples_toc 12 | optimization_tips 13 | faqs -------------------------------------------------------------------------------- /doc/source/manual/misc_toc.rst: -------------------------------------------------------------------------------- 1 | ------------- 2 | Miscellaneous 3 | ------------- 4 | 5 | .. toctree:: 6 | :maxdepth: 3 7 | 8 | misc 9 | changelog -------------------------------------------------------------------------------- /doc/source/manual/optimization_tips.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _optimization-tips: 3 | 4 | ================= 5 | Optimization Tips 6 | ================= 7 | 8 | ------------------------------------ 9 | Group your Results into Buckets/Sets 10 | ------------------------------------ 11 | 12 | HDF5 has a hard time managing nodes with more than 20,000 children. 13 | Accordingly, file I/O, that is reading or writing data, can become very inefficient 14 | if one of your trajectory groups has more than 20,000 children. 15 | For instance, this may happen to you if you explore many runs. 16 | 17 | Suppose in every run you add the following result: 18 | 19 | >>> traj.f_add_result('some_group.$.z', 42, comment='Universal answer.') 20 | 21 | If this line is executed in each of, let's say, 100,000 runs, the node ``some_group`` 22 | will have at least 100k children. Hence, storage and loading become extremely slow. 23 | 24 | The simplest way around this problem is to group your results into buckets using the 25 | ``'$set'`` wildcard, see also :ref:`more-on-wildcards`. Accordingly, your result addition becomes: 26 | 27 | >>> traj.f_add_result('some_group.$set.$.z', 42, comment='Universal answer.') 28 | 29 | Hence, even running 100k runs, `some_group` has only 100 children, each having only 1000 children 30 | themselves. 31 | 32 | 33 | ----------------- 34 | Huge Explorations 35 | ----------------- 36 | 37 | Yet, this approach will still fall short in case you have a parameter exploration of more than 38 | 1,000,000 runs, because loading the meta-data of your trajectory may already take more than 39 | a minute, which can be annoying. In case of such huge explorations, I would 40 | advise you to tailor your parameter space and split it among several individual trajectories. 
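A minimal sketch of such a split (the run function ``my_job`` is a placeholder; each part gets its own trajectory and HDF5 file):

.. code-block:: python

    from pypet import Environment

    def my_job(traj):
        # Placeholder for your actual single run function
        traj.f_add_result('y', traj.x ** 2, comment='Dummy computation')

    x_values = list(range(2000000))  # too large for a single trajectory
    chunk_size = 400000              # runs per individual trajectory

    for start in range(0, len(x_values), chunk_size):
        part = start // chunk_size
        env = Environment(trajectory='huge_part_%02d' % part,
                          filename='huge_part_%02d.hdf5' % part,
                          comment='Part %d of a huge exploration' % part)
        traj = env.v_trajectory
        traj.f_add_parameter('x', 0, comment='First dimension')
        traj.f_explore({'x': x_values[start:start + chunk_size]})
        env.f_run(my_job)
        env.f_disable_logging()  # close log handlers before the next part

Each part stays well below the critical trajectory size, and you can still merge or compare the parts later on (see :ref:`example-03`).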
41 | 42 | 43 | --------------------- 44 | Collect Small Results 45 | --------------------- 46 | 47 | In case you compute only small results during your runs, like a single value, 48 | but you do this quite often (100k+), it might be more convenient to return 49 | the result instead of storing it into the trajectory directly. 50 | As a consequence, you can collect these single values later on during the 51 | post-processing phase and store all of them together into a single result. 52 | This has also been done for the estimated firing rate in the :ref:`tutorial`. 53 | 54 | 55 | ------------------------- 56 | Many and Fast Single Runs 57 | ------------------------- 58 | 59 | In case you perform many single runs and milliseconds matter, use a pool (``use_pool=True``) in 60 | combination with a queue (``wrap_mode='QUEUE'``, see :ref:`more-on-multiprocessing`) or the even 61 | faster - but potentially unreliable - method of using a shared pipe (``wrap_mode='PIPE'``). 62 | Moreover, to avoid re-pickling of unnecessary data of your trajectory, 63 | store and remove all data that is not needed during single runs. 64 | 65 | For instance, if you don't really need config data during the runs, use the following 66 | **before** calling the environment's :func:`~pypet.environment.Environment.run` function: 67 | 68 | .. code-block:: python 69 | 70 | traj.f_store() 71 | traj.config.f_remove(recursive=True) 72 | 73 | 74 | This may save a couple of milliseconds each run because 75 | the config data no longer needs to be pickled and sent over the queue for storage. 76 | 77 | Moreover, you can further avoid unnecessary pickling for the pool and SCOOP_ by setting 78 | ``freeze_input=True``. 79 | Accordingly, the trajectory, your target function, and all additional arguments are passed 80 | to each pool or SCOOP_ process at initialisation and not for each run individually. However, 81 | in order to use this feature, you must make sure that neither your target function nor the 82 | additional arguments are mutated over the course of your runs. 83 | In case you use SCOOP_, try to avoid running many batches of experiments 84 | in one go with ``freeze_input=True`` because memory consumption of all the SCOOP_ workers 85 | may increase with every batch, see also :ref:`pypet-and-scoop`. 86 | 87 | .. _SCOOP: http://scoop.readthedocs.org/ 88 | -------------------------------------------------------------------------------- /doc/source/other/to_new_tree.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _tree-migrating: 3 | 4 | ------------------------------------------------- 5 | Migrating from Old Tree-Structure to the New One 6 | ------------------------------------------------- 7 | 8 | The trajectory underwent a small change from version 0.1b.3 to 9 | 0.1b.4. It became clear that the old default tree structure with 10 | `traj.results.trajectory` and `traj.results.run_00000000` (etc. for more single runs) 11 | is less useful than having all single run results in one group to make browsing the tree easier. 12 | 13 | So now everything that is computed in a single run is found under the new subbranch `runs`. Thus 14 | `traj.results.run_00000000` becomes `traj.results.runs.run_00000000`, etc. 15 | 16 | This also renders the subbranch `trajectory` obsolete. Thus, everything that was originally 17 | stored under `traj.results.trajectory` is now moved one node up in the hierarchy to 18 | `traj.results`. All this, of course, happens analogously for derived parameters as well.
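In short, the change looks like this (a schematic illustration; ``z`` and ``some_data`` are just placeholder names):

.. code-block:: python

    # old structure (version <= 0.1b.3)
    traj.results.run_00000000.z
    traj.results.trajectory.some_data

    # new structure (version >= 0.1b.4)
    traj.results.runs.run_00000000.z
    traj.results.some_data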
19 | 20 | If you have many trajectories computed with the old-style structure, you can use the following 21 | file to change their structure: 22 | 23 | Download: :download:`to_new_tree.py <../../../pypet/utils/to_new_tree.py>` 24 | 25 | 26 | Just execute 27 | ``python to_new_tree.py -b --file=myhdf5file.hdf5`` 28 | within the terminal and the file will automatically be converted to the new structure. 29 | Use ``--file=`` to specify the filename and ``-b`` to back up the file before updating it 30 | (omit ``-b`` if you don't want a backup of the original file). 31 | 32 | 33 | -------------------------------------------------------------------------------- /doc/source/pypetdoc/annotationsdoc.rst: -------------------------------------------------------------------------------- 1 | ============================= 2 | Annotations 3 | ============================= 4 | .. autoclass:: pypet.annotations.Annotations 5 | :members: 6 | -------------------------------------------------------------------------------- /doc/source/pypetdoc/brian2networkdoc.rst: -------------------------------------------------------------------------------- 1 | ======================== 2 | Brian2 Network Framework 3 | ======================== 4 | 5 | .. automodule:: pypet.brian2.network 6 | 7 | 8 | ---------- 9 | Quicklinks 10 | ---------- 11 | 12 | These functions can be called or used directly by the user. 13 | 14 | .. currentmodule:: pypet.brian2.network 15 | 16 | 17 | .. autosummary:: 18 | :nosignatures: 19 | 20 | NetworkManager.add_parameters 21 | NetworkManager.pre_run_network 22 | NetworkManager.pre_build 23 | 24 | The private functions of the runner and the manager are also listed below 25 | to allow fast browsing of the source code. 26 | 27 | ----------------------------------------------- 28 | Functions that can be implemented by a Subclass 29 | ----------------------------------------------- 30 | 31 | These functions can be implemented in the subclasses: 32 | 33 | .. autosummary:: 34 | :nosignatures: 35 | 36 | NetworkComponent.build 37 | NetworkComponent.add_to_network 38 | NetworkComponent.remove_from_network 39 | NetworkComponent.pre_build 40 | NetworkAnalyser.analyse 41 | 42 | 43 | -------------- 44 | NetworkManager 45 | -------------- 46 | 47 | .. autoclass:: pypet.brian2.network.NetworkManager 48 | :members: 49 | :private-members: 50 | :special-members: 51 | 52 | ------------- 53 | NetworkRunner 54 | ------------- 55 | 56 | .. autoclass:: pypet.brian2.network.NetworkRunner 57 | :members: 58 | :private-members: 59 | :special-members: 60 | 61 | ---------------- 62 | NetworkComponent 63 | ---------------- 64 | 65 | .. autoclass:: pypet.brian2.network.NetworkComponent 66 | :members: 67 | 68 | --------------- 69 | NetworkAnalyser 70 | --------------- 71 | 72 | .. autoclass:: pypet.brian2.network.NetworkAnalyser 73 | :members: -------------------------------------------------------------------------------- /doc/source/pypetdoc/brian2parameterdoc.rst: -------------------------------------------------------------------------------- 1 | ======================================= 2 | Brian2 Parameters, Results and Monitors 3 | ======================================= 4 | 5 | .. automodule:: pypet.brian2.parameter 6 | 7 | --------------- 8 | Brian2Parameter 9 | --------------- 10 | 11 | .. autoclass:: pypet.brian2.parameter.Brian2Parameter 12 | :members: 13 | 14 | ------------ 15 | Brian2Result 16 | ------------ 17 | 18 | ..
autoclass:: pypet.brian2.parameter.Brian2Result 19 | :members: 20 | 21 | ------------------- 22 | Brian2MonitorResult 23 | ------------------- 24 | 25 | .. autoclass:: pypet.brian2.parameter.Brian2MonitorResult 26 | :members: 27 | -------------------------------------------------------------------------------- /doc/source/pypetdoc/environmentdoc.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | The Environment 3 | =============== 4 | 5 | :ref:`genindex` 6 | 7 | ---------- 8 | Quicklinks 9 | ---------- 10 | 11 | .. currentmodule:: pypet.environment 12 | 13 | 14 | .. autosummary:: 15 | :nosignatures: 16 | 17 | Environment 18 | ~Environment.run 19 | ~Environment.resume 20 | ~Environment.pipeline 21 | ~Environment.trajectory 22 | 23 | ----------- 24 | Environment 25 | ----------- 26 | 27 | .. automodule:: pypet.environment 28 | 29 | .. autoclass:: pypet.environment.Environment 30 | :members: 31 | 32 | 33 | ---------------- 34 | MultiprocContext 35 | ---------------- 36 | 37 | .. autoclass:: pypet.environment.MultiprocContext 38 | :members: 39 | -------------------------------------------------------------------------------- /doc/source/pypetdoc/parameterdoc.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Parameters and Results 3 | ========================= 4 | 5 | .. automodule:: pypet.parameter 6 | 7 | ------------------------- 8 | Parameter Quicklinks 9 | ------------------------- 10 | 11 | .. currentmodule:: pypet.parameter 12 | 13 | 14 | .. autosummary:: 15 | :nosignatures: 16 | 17 | ~Parameter.f_set 18 | ~Parameter.f_get 19 | ~Parameter.f_empty 20 | ~Parameter.f_get_range 21 | ~Parameter.f_has_range 22 | ~Parameter.f_supports 23 | 24 | 25 | ------------------------- 26 | Result Quicklinks 27 | ------------------------- 28 | 29 | .. autosummary:: 30 | :nosignatures: 31 | 32 | ~Result.f_set 33 | ~Result.f_get 34 | ~Result.f_empty 35 | ~Result.f_to_dict 36 | 37 | -------------------- 38 | Parameter 39 | -------------------- 40 | .. autoclass:: pypet.parameter.Parameter 41 | :members: 42 | :inherited-members: 43 | 44 | ---------------------- 45 | ArrayParameter 46 | ---------------------- 47 | 48 | .. autoclass:: pypet.parameter.ArrayParameter 49 | :members: 50 | 51 | ---------------------- 52 | SparseParameter 53 | ---------------------- 54 | 55 | .. autoclass:: pypet.parameter.SparseParameter 56 | :members: 57 | 58 | ---------------------- 59 | PickleParameter 60 | ---------------------- 61 | 62 | .. autoclass:: pypet.parameter.PickleParameter 63 | :members: 64 | 65 | 66 | ----------------------- 67 | Result 68 | ----------------------- 69 | 70 | .. autoclass:: pypet.parameter.Result 71 | :members: 72 | :inherited-members: 73 | 74 | ---------------------------- 75 | SparseResult 76 | ---------------------------- 77 | 78 | .. autoclass:: pypet.parameter.SparseResult 79 | :members: 80 | :inherited-members: 81 | 82 | ---------------------------- 83 | PickleResult 84 | ---------------------------- 85 | 86 | .. autoclass:: pypet.parameter.PickleResult 87 | :members: 88 | 89 | ----------------------------- 90 | Object Table 91 | ----------------------------- 92 | 93 | .. 
autoclass:: pypet.parameter.ObjectTable 94 | :members: 95 | 96 | ----------------------------------------------------------------------- 97 | The Abstract Base Classes of Parameters and Results 98 | ----------------------------------------------------------------------- 99 | 100 | These classes serve as a reference if you want to implement your own parameter or result. 101 | Therefore, private functions are also listed. 102 | 103 | .. autoclass:: pypet.parameter.BaseParameter 104 | :members: 105 | :inherited-members: 106 | :private-members: 107 | :special-members: 108 | :undoc-members: 109 | 110 | .. autoclass:: pypet.parameter.BaseResult 111 | :members: 112 | :inherited-members: 113 | :private-members: 114 | :special-members: 115 | :undoc-members: 116 | 117 | 118 | 119 | -------------------------------------------------------------------------------- /doc/source/pypetdoc/pypetconstantsdoc.rst: -------------------------------------------------------------------------------- 1 | ============================= 2 | Global Constants 3 | ============================= 4 | 5 | Here you can find global constants. These constants define the data supported by the storage 6 | service and the standard parameter, the maximum length of comments, messages for storing and loading, 7 | etc. 8 | 9 | .. automodule:: pypet.pypetconstants 10 | :members: 11 | :member-order: bysource 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /doc/source/pypetdoc/pypetexceptionsdoc.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | Exceptions 3 | ==================== 4 | 5 | .. automodule:: pypet.pypetexceptions 6 | :members: -------------------------------------------------------------------------------- /doc/source/pypetdoc/slotsloggingdoc.rst: -------------------------------------------------------------------------------- 1 | 2 | ===== 3 | Slots 4 | ===== 5 | 6 | For performance reasons all tree nodes support slots_. 7 | They all sub-class the ``HasSlots`` class, which is the top-level class of *pypet* 8 | (its direct descendant is ``HasLogger``, see below). 9 | This class provides an ``__all_slots__`` property 10 | (with the help of the ``MetaSlotMachine`` metaclass) 11 | that lists all existing ``__slots__`` of a class including the inherited ones. 12 | Moreover, via ``__getstate__`` and ``__setstate__`` ``HasSlots`` takes care that all 13 | sub-classes can be pickled with the lowest protocol and don't need to implement 14 | ``__getstate__`` and ``__setstate__`` themselves even when they have ``__slots__``. 15 | However, sub-classes that still implement these 16 | functions should call the parent ones via ``super``. Sub-classes are not required to 17 | define ``__slots__``. If they don't, ``HasSlots`` will also automatically 18 | handle their ``__dict__`` in ``__getstate__`` and ``__setstate__``. 19 | 20 | .. autoclass:: pypet.slots.HasSlots 21 | :members: 22 | :private-members: 23 | :special-members: 24 | 25 | 26 | .. autofunction:: pypet.slots.get_all_slots 27 | 28 | .. autoclass:: pypet.slots.MetaSlotMachine 29 | :members: 30 | :private-members: 31 | :special-members: 32 | 33 | 34 | ======= 35 | Logging 36 | ======= 37 | 38 | ``HasLogger`` can be sub-classed to allow per-class or even 39 | per-instance logging. The logger is initialized via ``_set_logger()`` and is available via 40 | the ``_logger`` attribute.
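A minimal usage sketch (the class name ``MyNode`` is made up purely for illustration):

.. code-block:: python

    from pypet.pypetlogging import HasLogger

    class MyNode(HasLogger):
        def __init__(self):
            self._set_logger()  # creates the ``_logger`` attribute

        def do_something(self):
            self._logger.info('Doing something!')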
41 | ``HasLogger`` also takes care that the logger does not get pickled when ``__getstate__`` and 42 | ``__setstate__`` are called. 43 | Thus, in sub-classes that also implement these functions, you are 44 | advised to call the parent ones via ``super``. ``HasLogger`` is a direct sub-class of ``HasSlots``. 45 | Hence, support for ``__slots__`` is ensured. 46 | 47 | 48 | .. autoclass:: pypet.pypetlogging.HasLogger 49 | :members: 50 | :private-members: 51 | :special-members: 52 | 53 | 54 | .. autofunction:: pypet.pypetlogging.rename_log_file 55 | 56 | .. _slots: https://docs.python.org/2/reference/datamodel.html#slots -------------------------------------------------------------------------------- /doc/source/pypetdoc/storageservicedoc.rst: -------------------------------------------------------------------------------- 1 | ================ 2 | Storage Services 3 | ================ 4 | ------------------------ 5 | The HDF5 Storage Service 6 | ------------------------ 7 | 8 | .. autoclass:: pypet.storageservice.HDF5StorageService 9 | :members: 10 | :member-order: bysource 11 | 12 | 13 | ----------------------------------- 14 | Empty Storage Service for Debugging 15 | ----------------------------------- 16 | 17 | .. autoclass:: pypet.storageservice.LazyStorageService 18 | :members: 19 | 20 | 21 | ---------------------------- 22 | The Multiprocessing Wrappers 23 | ---------------------------- 24 | 25 | .. autoclass:: pypet.utils.mpwrappers.LockWrapper 26 | :members: 27 | 28 | .. autoclass:: pypet.utils.mpwrappers.QueueStorageServiceSender 29 | :members: 30 | 31 | .. autoclass:: pypet.utils.mpwrappers.QueueStorageServiceWriter 32 | :members: 33 | 34 | .. autoclass:: pypet.utils.mpwrappers.PipeStorageServiceSender 35 | :members: 36 | 37 | .. autoclass:: pypet.utils.mpwrappers.PipeStorageServiceWriter 38 | :members: 39 | 40 | .. autoclass:: pypet.utils.mpwrappers.ReferenceWrapper 41 | :members: 42 | 43 | .. autoclass:: pypet.utils.mpwrappers.ReferenceStore 44 | :members: 45 | 46 | .. autoclass:: pypet.utils.mpwrappers.LockerServer 47 | :members: 48 | 49 | .. autoclass:: pypet.utils.mpwrappers.LockerClient 50 | :members: 51 | 52 | .. autoclass:: pypet.utils.mpwrappers.TimeOutLockerServer 53 | :members: 54 | 55 | .. autoclass:: pypet.utils.mpwrappers.ForkAwareLockerClient 56 | :members: -------------------------------------------------------------------------------- /doc/source/pypetdoc/trajectorydoc.rst: -------------------------------------------------------------------------------- 1 | ============================================ 2 | The Trajectory and Group Nodes 3 | ============================================ 4 | 5 | 6 | ---------------- 7 | Quicklinks 8 | ---------------- 9 | Here are some links to important functions: 10 | 11 | .. currentmodule:: pypet 12 | 13 | 14 | ..
autosummary:: 15 | :nosignatures: 16 | 17 | ~trajectory.Trajectory 18 | ~naturalnaming.ParameterGroup.f_add_parameter 19 | ~naturalnaming.DerivedParameterGroup.f_add_derived_parameter 20 | ~naturalnaming.ResultGroup.f_add_result 21 | ~naturalnaming.NNGroupNode.f_add_link 22 | ~naturalnaming.NNGroupNode.f_add_leaf 23 | ~naturalnaming.NNGroupNode.f_iter_leaves 24 | ~naturalnaming.NNGroupNode.f_iter_nodes 25 | ~naturalnaming.NNGroupNode.f_get 26 | ~naturalnaming.NNGroupNode.f_store_child 27 | ~naturalnaming.NNGroupNode.f_store 28 | ~naturalnaming.NNGroupNode.f_load_child 29 | ~naturalnaming.NNGroupNode.f_load 30 | ~trajectory.Trajectory.f_explore 31 | ~trajectory.Trajectory.f_store 32 | ~trajectory.Trajectory.f_load 33 | ~trajectory.Trajectory.f_load_skeleton 34 | ~trajectory.Trajectory.f_preset_parameter 35 | ~trajectory.Trajectory.f_get_from_runs 36 | ~trajectory.Trajectory.f_load_items 37 | ~trajectory.Trajectory.f_store_items 38 | ~trajectory.Trajectory.f_remove_items 39 | ~trajectory.Trajectory.f_delete_items 40 | ~trajectory.Trajectory.f_find_idx 41 | ~trajectory.Trajectory.f_get_run_information 42 | ~trajectory.Trajectory.v_crun 43 | ~trajectory.Trajectory.v_idx 44 | ~trajectory.Trajectory.v_standard_parameter 45 | ~trajectory.Trajectory.v_standard_result 46 | ~naturalnaming.NNGroupNode.v_annotations 47 | ~trajectory.load_trajectory 48 | 49 | 50 | -------------------- 51 | Trajectory 52 | -------------------- 53 | .. autoclass:: pypet.trajectory.Trajectory 54 | :members: 55 | 56 | .. autofunction:: pypet.trajectory.load_trajectory 57 | 58 | ------------------- 59 | NNGroupNode 60 | ------------------- 61 | 62 | .. autoclass:: pypet.naturalnaming.NNGroupNode 63 | :members: 64 | :inherited-members: 65 | 66 | 67 | ------------------- 68 | ParameterGroup 69 | ------------------- 70 | 71 | .. autoclass:: pypet.naturalnaming.ParameterGroup 72 | :members: 73 | 74 | 75 | ------------------- 76 | ConfigGroup 77 | ------------------- 78 | 79 | .. autoclass:: pypet.naturalnaming.ConfigGroup 80 | :members: 81 | 82 | ----------------------- 83 | DerivedParameterGroup 84 | ----------------------- 85 | 86 | .. autoclass:: pypet.naturalnaming.DerivedParameterGroup 87 | :members: 88 | 89 | ------------------- 90 | ResultGroup 91 | ------------------- 92 | 93 | .. autoclass:: pypet.naturalnaming.ResultGroup 94 | :members: 95 | 96 | -------------------------------------------------------------------------------- /doc/source/pypetdoc/utilsdoc.rst: -------------------------------------------------------------------------------- 1 | ===== 2 | Utils 3 | ===== 4 | 5 | --------------------- 6 | Exploration Functions 7 | --------------------- 8 | 9 | .. automodule:: pypet.utils.explore 10 | :members: cartesian_product, find_unique_points 11 | 12 | 13 | ----------------- 14 | Utility Functions 15 | ----------------- 16 | 17 | ^^^^^^^^^^^^^^^^^^^^^ 18 | HDF5 File Compression 19 | ^^^^^^^^^^^^^^^^^^^^^ 20 | 21 | You can use the following function to compress an existing HDF5 file 22 | that already contains a trajectory. This only works under **Linux**. 23 | 24 | .. autofunction:: pypet.compact_hdf5_file 25 | 26 | ^^^^^^^^^^^ 27 | Progressbar 28 | ^^^^^^^^^^^ 29 | 30 | Simple progressbar that can be used during a for-loop (no initialisation necessary). 31 | It displays progress and estimates remaining time. 32 | 33 | .. 
autofunction:: pypet.progressbar 34 | 35 | 36 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 37 | Multiprocessing Directory Creation 38 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 39 | 40 | Function that calls ``os.makedirs`` but takes care of race conditions if multiple 41 | processes or threads try to create the directories at the same time. 42 | 43 | .. autofunction:: pypet.racedirs 44 | 45 | ^^^^^^^^^^^^^^^^^^^^^^^^^ 46 | Merging many Trajectories 47 | ^^^^^^^^^^^^^^^^^^^^^^^^^ 48 | 49 | You can easily merge several trajectories located in one directory into one with 50 | 51 | .. autofunction:: pypet.merge_all_in_folder 52 | 53 | 54 | ^^^^^^^^^^^ 55 | Manual Runs 56 | ^^^^^^^^^^^ 57 | 58 | If you don't want to use an Environment but manually schedule runs, take a look at the 59 | following decorator: 60 | 61 | .. autofunction:: pypet.manual_run 62 | 63 | 64 | ------------------------------------------------------------------- 65 | General Equality Function and Comparisons of Parameters and Results 66 | ------------------------------------------------------------------- 67 | 68 | .. automodule:: pypet.utils.comparisons 69 | :members: 70 | 71 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | experiments 2 | tmp -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | -------------------------------------------------------------------------------- /examples/example_01_first_steps.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os # To allow file paths working under Windows and Linux 4 | 5 | from pypet import Environment 6 | from pypet.utils.explore import cartesian_product 7 | 8 | def multiply(traj): 9 | """Example of a sophisticated simulation that involves multiplying two values. 10 | 11 | :param traj: 12 | 13 | Trajectory containing 14 | the parameters in a particular combination; 15 | it also serves as a container for results. 16 | 17 | """ 18 | z = traj.x * traj.y 19 | traj.f_add_result('z', z, comment='Result of our simulation!') 20 | 21 | 22 | # Create an environment that handles running 23 | filename = os.path.join('hdf5','example_01.hdf5') 24 | env = Environment(trajectory='Multiplication', 25 | filename=filename, 26 | overwrite_file=True, 27 | file_title='Example_01_First_Steps', 28 | comment='The first example!', 29 | large_overview_tables=True, # To see a nice overview of all 30 | # computed `z` values in the resulting HDF5 file. 31 | # Disabled by default for more compact HDF5 files. 32 | ) 33 | 34 | # The environment has created a trajectory container for us 35 | traj = env.trajectory 36 | 37 | # Add both parameters 38 | traj.f_add_parameter('x', 1, comment='I am the first dimension!') 39 | traj.f_add_parameter('y', 1, comment='I am the second dimension!') 40 | 41 | # Explore the parameters with a cartesian product 42 | traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]})) 43 | 44 | # Run the simulation 45 | env.run(multiply) 46 | 47 | 48 | 49 | # Now let's see how we can reload the stored data from above. 50 | # We do not need an environment for that, just a trajectory.
51 | from pypet.trajectory import Trajectory 52 | 53 | # So, first let's create a new trajectory and pass it the path and name of the HDF5 file. 54 | # Yet, to be very clear, let's delete all the old stuff. 55 | del traj 56 | # Before deleting the environment, let's disable logging and close all log-files 57 | env.disable_logging() 58 | del env 59 | 60 | traj = Trajectory(filename=filename) 61 | 62 | # Now we want to load all stored data. 63 | traj.f_load(index=-1, load_parameters=2, load_results=2) 64 | 65 | # Above `index` specifies that we want to load the trajectory with that particular index 66 | # within the HDF5 file. We could instead also specify a `name`. 67 | # Counting also works backwards, so `-1` yields the last or newest trajectory in the file. 68 | # 69 | # Next we need to specify how the data is loaded. 70 | # Therefore, we have to set the keyword arguments `load_parameters` and `load_results`, 71 | # here we chose both to be `2`. 72 | # `0` would mean we do not want to load anything at all. 73 | # `1` would mean we only want to load the empty hulls or skeletons of our parameters 74 | # or results. Accordingly, we would add parameters or results to our trajectory 75 | # but they would not contain any data. 76 | # Instead `2` means we want to load the parameters and results including the data they contain. 77 | 78 | # Finally, we want to print a result of a particular run. 79 | # Let's take the second run named `run_00000001` (Note that counting starts at 0!). 80 | print('The result of `run_00000001` is: ') 81 | print(traj.run_00000001.z) 82 | -------------------------------------------------------------------------------- /examples/example_02_trajectory_access_and_storage.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os # To allow pathnames under Windows and Linux 4 | 5 | from pypet import Trajectory, NotUniqueNodeError 6 | 7 | 8 | # We first generate a new Trajectory 9 | filename = os.path.join('hdf5', 'example_02.hdf5') 10 | traj = Trajectory('Example', filename=filename, 11 | overwrite_file=True, 12 | comment='Access and Storage!') 13 | 14 | 15 | # We add our first parameter with the data 'Harrison Ford' 16 | traj.f_add_parameter('starwars.characters.han_solo', 'Harrison Ford') 17 | 18 | # This automatically added the groups 'starwars' and the subgroup 'characters' 19 | # Let's get the characters subgroup 20 | characters = traj.parameters.starwars.characters 21 | 22 | # Since characters is unique we could also use shortcuts 23 | characters = traj.characters 24 | 25 | # Or the get method 26 | characters = traj.f_get('characters') 27 | 28 | # Or square brackets 29 | characters = traj['characters'] 30 | 31 | # Let's add another character 32 | characters.f_add_parameter('luke_skywalker', 'Mark Hamill', comment='May the force be with you!') 33 | 34 | # The full name of Luke Skywalker is now `parameters.starwars.characters.luke_skywalker`: 35 | print('The full name of the new Skywalker Parameter is %s' % 36 | traj.f_get('luke_skywalker').v_full_name) 37 | 38 | # Let's see what happens if we have non-unique entries: 39 | traj.f_add_parameter_group('spaceballs.characters') 40 | 41 | # Now our shortcuts no longer work, since we have two character groups!
42 | try: 43 | traj.characters 44 | except NotUniqueNodeError as exc: 45 | print('Damn it, there are two character groups in the trajectory: %s' % repr(exc)) 46 | 47 | # But if we are more specific, we again get a unique match 48 | characters = traj.starwars.characters 49 | 50 | # Now let's see what fast access is: 51 | print('The name of the actor playing Luke is %s.' % traj.luke_skywalker) 52 | 53 | # And now see what happens if you forbid it 54 | traj.v_fast_access=False 55 | print('The object found for luke_skywalker is `%s`.' % str(traj.luke_skywalker)) 56 | 57 | # Let's store the trajectory: 58 | traj.f_store() 59 | 60 | # That was easy, let's assume we already completed a simulation and now we add a veeeery large 61 | # result that we want to store to disk immediately and then empty it 62 | traj.f_add_result('starwars.gross_income_of_film', amount=10.1 ** 11, currency='$$$', 63 | comment='George Lucas is rich, dude!') 64 | 65 | # This is a large number, we'd better store it and then free the memory: 66 | traj.f_store_item('gross_income_of_film') 67 | traj.gross_income_of_film.f_empty() 68 | 69 | # Moreover, if you don't like prefixes `f_` and `v_` you can also use `func` and `vars`: 70 | traj.func.add_result('starwars.robots', c3p0='android', r2d2='beeep!', comment='Help me Obiwan!') 71 | print(traj.results.starwars.robots.vars.comment) 72 | 73 | # Now let's reload the trajectory 74 | del traj 75 | traj = Trajectory(filename=filename) 76 | # We want to load the last trajectory in the file, therefore index = -1 77 | # We want to load the parameters, therefore load_parameters=2 78 | # We only want to load the skeleton of the results, so load_results=1 79 | traj.f_load(index=-1, load_parameters=2, load_results=1) 80 | 81 | # Let's check if our result is really empty 82 | if traj.gross_income_of_film.f_is_empty(): 83 | print('Nothing there!') 84 | else: 85 | print('I found something!') 86 | 87 | # Ok, let's manually reload the result 88 | traj.f_load_item('gross_income_of_film') 89 | if traj.gross_income_of_film.f_is_empty(): 90 | print('Still empty :-(') 91 | else: 92 | print('George Lucas earned %s%s!' %(str(traj.gross_income_of_film.amount), 93 | traj.gross_income_of_film.currency)) 94 | 95 | # And that's how it works!
If you wish, you can inspect the 96 | # experiments/example_02/HDF5/example_02.hdf5 file to take a look at the tree structure -------------------------------------------------------------------------------- /examples/example_03_trajectory_merging.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os # For using pathnames under Windows and Linux 4 | 5 | from pypet import Environment, cartesian_product 6 | 7 | 8 | # Let's reuse the simple multiplication example 9 | def multiply(traj): 10 | """Sophisticated simulation of multiplication""" 11 | z=traj.x*traj.y 12 | traj.f_add_result('z',z=z, comment='I am the product of two reals!') 13 | 14 | 15 | 16 | # Create 2 environments that handle running 17 | filename = os.path.join('hdf5', 'example_03.hdf5') 18 | env1 = Environment(trajectory='Traj1', 19 | filename=filename, 20 | file_title='Example_03', 21 | add_time=True, # Add the time of trajectory creation to its name 22 | comment='I will be increased!') 23 | 24 | env2 = Environment(trajectory='Traj2', 25 | filename=filename, 26 | file_title='Example_03', log_config=None, # One environment keeping log files 27 | # is enough 28 | add_time=True, 29 | comment = 'I am going to be merged into some other trajectory!') 30 | 31 | # Get the trajectories from the environment 32 | traj1 = env1.trajectory 33 | traj2 = env2.trajectory 34 | 35 | # Add both parameters 36 | traj1.f_add_parameter('x', 1.0, comment='I am the first dimension!') 37 | traj1.f_add_parameter('y', 1.0, comment='I am the second dimension!') 38 | traj2.f_add_parameter('x', 1.0, comment='I am the first dimension!') 39 | traj2.f_add_parameter('y', 1.0, comment='I am the second dimension!') 40 | 41 | # Explore the parameters with a cartesian product for the first trajectory: 42 | traj1.f_explore(cartesian_product({'x':[1.0,2.0,3.0,4.0], 'y':[6.0,7.0,8.0]})) 43 | # Let's explore slightly differently for the second: 44 | traj2.f_explore(cartesian_product({'x':[3.0,4.0,5.0,6.0], 'y':[7.0,8.0,9.0]})) 45 | 46 | 47 | # Run the simulations with all parameter combinations 48 | env1.run(multiply) 49 | env2.run(multiply) 50 | 51 | # Now we merge them together into traj1 52 | # We want to remove duplicate entries 53 | # like the parameter space point x=3.0, y=7.0. 54 | # Several points have been explored by both trajectories and we need them only once. 55 | # Therefore, we set remove_duplicates=True (Note this takes O(N1*N2)!). 56 | # We also want to back up both trajectories, but we let the system choose the filename. 57 | # Accordingly, we choose backup_filename=True instead of providing a filename. 58 | # We want to move the hdf5 nodes from one trajectory to the other. 59 | # Thus we set move_data=True. 60 | # Finally, we want to delete the other trajectory afterwards since we already have a backup. 61 | traj1.f_merge(traj2, 62 | remove_duplicates=True, 63 | backup_filename=True, 64 | move_data=True, 65 | delete_other_trajectory=True) 66 | 67 | # And that's it, now we can take a look at the new trajectory and print all x,y,z triplets. 68 | # But before that we need to load the data we computed during the runs from disk. 69 | # We choose load_parameters=2 and load_results=2 since we want to load all data and not only 70 | # the skeleton 71 | traj1.f_load(load_parameters=2, load_results=2) 72 | 73 | for run_name in traj1.f_get_run_names(): 74 | # We can make the trajectory believe it is a single run. All parameters will 75 | # be treated as they were set in the specific run.
And we can use the `crun` wildcard. 76 | traj1.f_set_crun(run_name) 77 | x=traj1.x 78 | y=traj1.y 79 | # We need to specify the current run, because there exists more than one z value 80 | z=traj1.crun.z 81 | print('%s: x=%f, y=%f, z=%f' % (run_name, x, y, z)) 82 | 83 | # Don't forget to reset your trajectory to the default settings, so it no longer 84 | # believes it is the last run. 85 | traj1.f_restore_default() 86 | 87 | # As you can see, duplicate parameter space points have been removed. 88 | # If you wish, you can take a look at the files and backup files in 89 | # the experiments/example_03/HDF5 directory 90 | 91 | # Finally, disable logging and close log files 92 | env1.disable_logging() -------------------------------------------------------------------------------- /examples/example_04_multiprocessing.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os # For path names being viable under Windows and Linux 4 | import logging 5 | 6 | from pypet import Environment, cartesian_product 7 | from pypet import pypetconstants 8 | 9 | 10 | # Let's reuse the simple multiplication example 11 | def multiply(traj): 12 | """Sophisticated simulation of multiplication""" 13 | z=traj.x*traj.y 14 | traj.f_add_result('z',z=z, comment='I am the product of two reals!') 15 | 16 | 17 | def main(): 18 | """Main function to protect the *entry point* of the program. 19 | 20 | If you want to use multiprocessing under Windows, you need to wrap the 21 | main code that creates an environment into a function. Otherwise 22 | the newly started child processes will re-execute the code and throw 23 | errors (also see https://docs.python.org/2/library/multiprocessing.html#windows). 24 | 25 | """ 26 | 27 | # Create an environment that handles running. 28 | # Let's enable multiprocessing with 4 workers. 29 | filename = os.path.join('hdf5', 'example_04.hdf5') 30 | env = Environment(trajectory='Example_04_MP', 31 | filename=filename, 32 | file_title='Example_04_MP', 33 | log_stdout=True, 34 | comment='Multiprocessing example!', 35 | multiproc=True, 36 | ncores=4, 37 | use_pool=True, # Our runs are inexpensive, so we can get rid of overhead 38 | # by using a pool 39 | freeze_input=True, # We can avoid some 40 | # overhead by freezing the input to the pool 41 | wrap_mode=pypetconstants.WRAP_MODE_QUEUE, 42 | graceful_exit=True, # We want to exit in a data-friendly way 43 | # that saves all results after hitting CTRL+C, try it ;-) 44 | overwrite_file=True) 45 | 46 | # Get the trajectory from the environment 47 | traj = env.trajectory 48 | 49 | # Add both parameters 50 | traj.f_add_parameter('x', 1.0, comment='I am the first dimension!') 51 | traj.f_add_parameter('y', 1.0, comment='I am the second dimension!') 52 | 53 | # Explore the parameters with a cartesian product, but we want to explore a bit more 54 | traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)], 55 | 'y':[float(y) for y in range(20)]})) 56 | 57 | # Run the simulation 58 | env.run(multiply) 59 | 60 | # Finally disable logging and close all log-files 61 | env.disable_logging() 62 | 63 | 64 | if __name__ == '__main__': 65 | # This will execute the main function in case the script is called from the one true 66 | # main process and not from a child process spawned by your environment. 67 | # Necessary for multiprocessing under Windows.
68 | main() -------------------------------------------------------------------------------- /examples/example_08_f_find_idx.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os # For path names being viable under Windows and Linux 4 | 5 | from pypet import Environment, cartesian_product 6 | from pypet import pypetconstants 7 | 8 | 9 | def multiply(traj): 10 | """Sophisticated simulation of multiplication""" 11 | z=traj.x*traj.y 12 | traj.f_add_result('z',z, comment='I am the product of two reals!') 13 | 14 | 15 | 16 | # Create an environment that handles running 17 | filename = os.path.join('hdf5', 'example_08.hdf5') 18 | env = Environment(trajectory='Example08',filename=filename, 19 | file_title='Example08', 20 | overwrite_file=True, 21 | comment='Another example!') 22 | 23 | # Get the trajectory from the environment 24 | traj = env.trajectory 25 | 26 | # Add both parameters 27 | traj.f_add_parameter('x', 1, comment='I am the first dimension!') 28 | traj.f_add_parameter('y', 1, comment='I am the second dimension!') 29 | 30 | # Explore the parameters with a cartesian product: 31 | traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]})) 32 | 33 | # Run the simulation 34 | env.run(multiply) 35 | 36 | # We load all results 37 | traj.f_load(load_results=pypetconstants.LOAD_DATA) 38 | 39 | # And now we want to find some particular results, the ones where x was 2 or y was 8. 40 | # Therefore, we use a lambda function 41 | my_filter_predicate= lambda x,y: x==2 or y==8 42 | 43 | # We can now use this lambda function to search for the run indexes associated with x==2 OR y==8. 44 | # We need a list specifying the names of the parameters and the predicate to do this. 45 | # Note that names need to be in the order as listed in the lambda function, here 'x' and 'y': 46 | idx_iterator = traj.f_find_idx(['x','y'], my_filter_predicate) 47 | 48 | # Now we can print the corresponding results: 49 | print('The run names and results for parameter combinations with x==2 or y==8:') 50 | for idx in idx_iterator: 51 | # We focus on one particular run. This is equivalent to calling `traj.f_set_crun(idx)`.
52 | traj.v_idx=idx 53 | run_name = traj.v_crun 54 | # and print everything nicely 55 | print('%s: x=%d, y=%d, z=%d' %(run_name, traj.x, traj.y, traj.crun.z)) 56 | 57 | # And we do not forget to set everything back to normal 58 | traj.f_restore_default() 59 | 60 | # Finally disable logging and close all log-files 61 | env.disable_logging() -------------------------------------------------------------------------------- /examples/example_10_get_items_from_all_runs.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | from mpl_toolkits.mplot3d import axes3d 4 | import matplotlib.pyplot as plt 5 | import numpy as np 6 | import os # For path names working under Windows and Linux 7 | 8 | from pypet import Environment, cartesian_product 9 | from pypet import pypetconstants 10 | 11 | 12 | def multiply(traj): 13 | """Sophisticated simulation of multiplication""" 14 | z=traj.x * traj.y 15 | traj.f_add_result('z', z, comment='I am the product of two reals!') 16 | 17 | 18 | # Create an environment that handles running 19 | filename = os.path.join('hdf5', 'example_10.hdf5') 20 | env = Environment(trajectory='Example10', filename=filename, 21 | file_title='Example10', 22 | overwrite_file=True, 23 | comment='Another example!') 24 | 25 | # Get the trajectory from the environment 26 | traj = env.trajectory 27 | 28 | # Add both parameters 29 | traj.f_add_parameter('x', 1, comment='I am the first dimension!') 30 | traj.f_add_parameter('y', 1, comment='I am the second dimension!') 31 | 32 | # Explore the parameters with a cartesian product: 33 | x_length = 12 34 | y_length = 12 35 | traj.f_explore(cartesian_product({'x': range(x_length), 'y': range(y_length)})) 36 | 37 | # Run the simulation 38 | env.run(multiply) 39 | 40 | # We load all results 41 | traj.f_load(load_results=pypetconstants.LOAD_DATA) 42 | 43 | # We access the ranges for plotting 44 | xs = traj.f_get('x').f_get_range() 45 | ys = traj.f_get('y').f_get_range() 46 | 47 | # Now we want to directly get all numbers z from all runs 48 | # for plotting. 49 | # We use `fast_access=True` to directly get access to 50 | # the values. 51 | # Moreover, since `f_get_from_runs` returns an ordered dictionary 52 | # `values()` gives us all values already in the correct order of the runs.
53 | zs = list(traj.f_get_from_runs(name='z', fast_access=True).values()) 54 | # We also make sure it's a list (because in Python 3 ``values()`` returns an 55 | # iterator instead of a list) 56 | 57 | # Convert the lists to numpy 2D arrays 58 | x_mesh = np.reshape(np.array(xs), (x_length, y_length)) 59 | y_mesh = np.reshape(np.array(ys), (x_length, y_length)) 60 | z_mesh = np.reshape(np.array(zs), (x_length, y_length)) 61 | 62 | # Make fancy 3D plot 63 | fig=plt.figure() 64 | ax = fig.add_subplot(111, projection='3d') 65 | ax.plot_wireframe(x_mesh, y_mesh, z_mesh, rstride=1, cstride=1) 66 | plt.show() 67 | 68 | # Finally disable logging and close all log-files 69 | env.disable_logging() 70 | -------------------------------------------------------------------------------- /examples/example_12_sharing_data_between_processes.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import multiprocessing as mp 4 | import numpy as np 5 | import os # For path names working under Windows and Linux 6 | 7 | from pypet import Environment, cartesian_product 8 | 9 | 10 | def multiply(traj, result_list): 11 | """Example of a sophisticated simulation that involves multiplying two values. 12 | 13 | This time we will store the value in a shared list and only add the result at the end. 14 | 15 | :param traj: 16 | 17 | Trajectory containing 18 | the parameters in a particular combination; 19 | it also serves as a container for results. 20 | 21 | 22 | """ 23 | z=traj.x*traj.y 24 | result_list[traj.v_idx] = z 25 | 26 | 27 | def main(): 28 | # Create an environment that handles running 29 | filename = os.path.join('hdf5', 'example_12.hdf5') 30 | env = Environment(trajectory='Multiplication', 31 | filename=filename, 32 | file_title='Example_12_Sharing_Data', 33 | overwrite_file=True, 34 | comment='The first example!', 35 | continuable=False, # We have shared data in terms of a multiprocessing list, 36 | # so we CANNOT use the continue feature. 37 | multiproc=True, 38 | ncores=2) 39 | 40 | # The environment has created a trajectory container for us 41 | traj = env.trajectory 42 | 43 | # Add both parameters 44 | traj.f_add_parameter('x', 1, comment='I am the first dimension!') 45 | traj.f_add_parameter('y', 1, comment='I am the second dimension!') 46 | 47 | # Explore the parameters with a cartesian product 48 | traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]})) 49 | 50 | # We want a shared list where we can put all our results in.
We use a manager for this: 51 | result_list = mp.Manager().list() 52 | # Let's make some space for potential results 53 | result_list[:] = [0 for _dummy in range(len(traj))] 54 | 55 | # Run the simulation 56 | env.run(multiply, result_list) 57 | 58 | # Now we want to store the final list as a numpy array 59 | traj.f_add_result('z', np.array(result_list)) 60 | 61 | # Finally let's print the result to see that it worked 62 | print(traj.z) 63 | 64 | # Disable logging and close all log-files 65 | env.disable_logging() 66 | 67 | if __name__ == '__main__': 68 | main() -------------------------------------------------------------------------------- /examples/example_13_post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /examples/example_13_post_processing/analysis.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | import os 4 | 5 | from pypet import Trajectory 6 | import matplotlib.pyplot as plt 7 | 8 | 9 | def main(): 10 | 11 | # This time we don't need an environment since we're just going to look 12 | # at data in the trajectory 13 | traj = Trajectory('FiringRate', add_time=False) 14 | 15 | # Let's load the trajectory from the file 16 | # Only load the parameters, we will load the results on the fly as we need them 17 | filename = os.path.join('hdf5', 'FiringRate.hdf5') 18 | traj.f_load(load_parameters=2, load_derived_parameters=0, load_results=0, 19 | load_other_data=0, filename=filename) 20 | 21 | # We'll simply use auto loading so all data will be loaded when needed. 22 | traj.v_auto_load = True 23 | 24 | rates_frame = traj.res.summary.firing_rates.rates_frame 25 | # Here we load the data automatically on the fly 26 | 27 | plt.figure() 28 | plt.subplot(2,1,1) 29 | # Let's iterate through the columns and plot the different firing rates: 30 | for tau_ref, I_col in rates_frame.iteritems(): 31 | plt.plot(I_col.index, I_col, label='Avg. Rate for tau_ref=%s' % str(tau_ref)) 32 | 33 | # Label the plot 34 | plt.xlabel('I') 35 | plt.ylabel('f[Hz]') 36 | plt.title('Firing as a function of input current `I`') 37 | plt.legend(loc='best') 38 | 39 | # Also let's plot an example run, how about run 13? 40 | example_run = 13 41 | 42 | traj.v_idx = example_run # We make the trajectory behave as a single run container. 43 | # This short statement has two major effects: 44 | # a) all explored parameters are set to the value of run 13, 45 | # b) tree nodes with names other than the current run, aka `run_00000013`, 46 | # are simply ignored; if we use the `$` sign or the `crun` statement, 47 | # these are translated into `run_00000013`. 48 | 49 | # Get the example data 50 | example_I = traj.I 51 | example_tau_ref = traj.tau_ref 52 | example_V = traj.results.neuron.crun.V # Here crun stands for run_00000013 53 | 54 | # We need the time step... 55 | dt = traj.dt 56 | # ...to create an x-axis for the plot 57 | dt_array = [irun * dt for irun in range(len(example_V))] 58 | 59 | # And plot the development of V over time. 60 | # Since this is rather repetitive, we only 61 | # plot the first eighth of it.
62 | plt.subplot(2,1,2) 63 | plt.plot(dt_array, example_V) 64 | plt.xlim((0, dt*len(example_V)/8)) 65 | 66 | # Label the axes 67 | plt.xlabel('t[ms]') 68 | plt.ylabel('V') 69 | plt.title('Example of development of V for I=%s, tau_ref=%s in run %d' % 70 | (str(example_I), str(example_tau_ref), traj.v_idx)) 71 | 72 | # And let's take a look at it 73 | plt.show() 74 | 75 | # Finally revoke the `traj.v_idx=13` statement and set everything back to normal. 76 | # Since our analysis is done here, we could skip that, but it is always a good idea 77 | # to do so. 78 | traj.f_restore_default() 79 | 80 | 81 | if __name__ == '__main__': 82 | main() -------------------------------------------------------------------------------- /examples/example_13_post_processing/pipeline.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | import logging 4 | import os # For path names working under Windows and Linux 5 | 6 | from main import add_parameters, add_exploration, run_neuron, neuron_postproc 7 | from pypet import Environment 8 | 9 | 10 | def mypipeline(traj): 11 | """A pipeline function that defines the entire experiment 12 | 13 | :param traj: 14 | 15 | Container for results and parameters 16 | 17 | :return: 18 | 19 | Two tuples. The first tuple contains the actual run function plus additional 20 | arguments (though we have none). The second tuple contains the 21 | postprocessing function including additional arguments. 22 | 23 | """ 24 | add_parameters(traj) 25 | add_exploration(traj) 26 | return (run_neuron,(),{}), (neuron_postproc,(),{}) 27 | 28 | def main(): 29 | filename = os.path.join('hdf5', 'FiringRate.hdf5') 30 | env = Environment(trajectory='FiringRatePipeline', 31 | comment='Experiment to measure the firing rate ' 32 | 'of a leaky integrate and fire neuron. ' 33 | 'Exploring different input currents, ' 34 | 'as well as refractory periods', 35 | add_time=False, # We don't want to add the current time to the name, 36 | log_stdout=True, 37 | multiproc=True, 38 | ncores=2, # My laptop has 2 cores ;-) 39 | filename=filename, 40 | overwrite_file=True) 41 | 42 | env.pipeline(mypipeline) 43 | 44 | # Finally disable logging and close all log-files 45 | env.disable_logging() 46 | 47 | if __name__ == '__main__': 48 | main() 49 | -------------------------------------------------------------------------------- /examples/example_14_links.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os # To allow file paths working under Windows and Linux 4 | 5 | from pypet import Environment, Result, Parameter 6 | 7 | def multiply(traj): 8 | """Example of a sophisticated simulation that involves multiplying two values. 9 | 10 | :param traj: 11 | 12 | Trajectory containing 13 | the parameters in a particular combination; 14 | it also serves as a container for results.
15 | 16 | """ 17 | z=traj.mylink1*traj.mylink2 # And again, we can now also use the different names 18 | # due to the creation of links 19 | traj.f_add_result('runs.$.z', z, comment='Result of our simulation!') 20 | 21 | 22 | # Create an environment that handles running 23 | filename = os.path.join('hdf5','example_14.hdf5') 24 | env = Environment(trajectory='Multiplication', 25 | filename=filename, 26 | file_title='Example_14_Links', 27 | overwrite_file=True, 28 | comment='How to use links') 29 | 30 | # The environment has created a trajectory container for us 31 | traj = env.trajectory 32 | 33 | # Add both parameters 34 | traj.par.x = Parameter('x', 1, 'I am the first dimension!') 35 | traj.par.y = Parameter('y', 1, 'I am the second dimension!') 36 | 37 | # Explore just two points 38 | traj.f_explore({'x': [3, 4]}) 39 | 40 | # So far everything was as in the first example. However, now we add links: 41 | traj.f_add_link('mylink1', traj.f_get('x')) 42 | # Note the `f_get` here to make sure we get the parameter instance, not the value 1 43 | # This allows us now to access x differently: 44 | print('x=' + str(traj.mylink1)) 45 | # We can also avoid fast access and recover the original parameter 46 | print(str(traj.f_get('mylink1'))) 47 | # And dot notation is also allowed; it creates new groups on the fly 48 | traj.f_add_link('parameters.mynewgroup.mylink2', traj.f_get('y')) 49 | 50 | 51 | 52 | # And, of course, we can also use the links during the runs: 53 | env.run(multiply) 54 | 55 | # Finally disable logging and close all log-files 56 | env.disable_logging() 57 | 58 | -------------------------------------------------------------------------------- /examples/example_15_more_ways_to_add_data.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | from pypet import Trajectory, Result, Parameter 4 | 5 | 6 | traj = Trajectory() 7 | 8 | # There are more ways to add data, 9 | # 1st the standard way: 10 | traj.f_add_parameter('x', 1, comment='I am the first dimension!') 11 | # 2nd by providing a new parameter/result instance, be aware that the data is added where 12 | # you specify it. There are no such things as shortcuts for parameter creation: 13 | traj.parameters.y = Parameter('y', 1, comment='I am the second dimension!') 14 | # 3rd as before, but if our new leaf has NO name it will be renamed accordingly: 15 | traj.parameters.t = Parameter('', 1, comment='Third dimension') 16 | # See: 17 | print('t=' + str(traj.t)) 18 | 19 | # This also works for adding groups on the fly and with the well-known *dot* notation: 20 | traj.parameters.subgroup = Parameter('subgroup.subsubgroup.w', 2) 21 | # See 22 | print('w='+str(traj.par.subgroup.subsubgroup.w)) 23 | 24 | 25 | # Finally, there's one more thing. Using this notation we can also add links.
26 | # Simply use the `=` assignment with objects that already exist in your trajectory: 27 | traj.mylink = traj.f_get('x') 28 | # Now `mylink` links to parameter `x`; fast access works, too: 29 | print('Linking to x gives: ' + str(traj.mylink)) 30 | 31 | 32 | -------------------------------------------------------------------------------- /examples/example_16_multiproc_context.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os 4 | import multiprocessing as mp 5 | import logging 6 | 7 | from pypet import Trajectory, MultiprocContext 8 | 9 | 10 | def manipulate_multiproc_safe(traj): 11 | """ Target function that manipulates the trajectory. 12 | 13 | Stores the current name of the process into the trajectory and 14 | **overwrites** previous settings. 15 | 16 | :param traj: 17 | 18 | Trajectory container with multiprocessing safe storage service 19 | 20 | """ 21 | 22 | # Manipulate the data in the trajectory 23 | traj.last_process_name = mp.current_process().name 24 | # Store the manipulated data 25 | traj.results.f_store(store_data=3) # Overwrites data on disk 26 | # Not recommended, here only for demonstration purposes :-) 27 | 28 | 29 | def main(): 30 | # We don't use an environment so we enable logging manually 31 | logging.basicConfig(level=logging.INFO) 32 | 33 | filename = os.path.join('hdf5','example_16.hdf5') 34 | traj = Trajectory(filename=filename, overwrite_file=True) 35 | 36 | # The result that will be manipulated 37 | traj.f_add_result('last_process_name', 'N/A', 38 | comment='Name of the last process that manipulated the trajectory') 39 | 40 | with MultiprocContext(trajectory=traj, wrap_mode='LOCK') as mc: 41 | # The multiprocessing context manager wraps the storage service of the trajectory 42 | # and passes the wrapped service to the trajectory. 43 | # Also restores the original storage service in the end. 44 | # Moreover, we need to use the `LOCK` wrapping because the locks 45 | # are pickled and sent to the pool for all function executions 46 | 47 | # Start a pool of processes manipulating the trajectory 48 | iterable = (traj for x in range(50)) 49 | pool = mp.Pool(processes=4) 50 | # Pass the trajectory and the function to the pool and execute it 50 times 51 | pool.map_async(manipulate_multiproc_safe, iterable) 52 | pool.close() 53 | # Wait for all processes to join 54 | pool.join() 55 | 56 | # Reload the data from disk and overwrite the existing result in RAM 57 | traj.results.f_load(load_data=3) 58 | # Print the name of the last process the trajectory was manipulated by 59 | print('The last process to manipulate the trajectory was: `%s`' % traj.last_process_name) 60 | 61 | 62 | if __name__ == '__main__': 63 | main() -------------------------------------------------------------------------------- /examples/example_17_wrapping_an_existing_project/.gitignore: -------------------------------------------------------------------------------- 1 | experiments 2 | tmp 3 | logs -------------------------------------------------------------------------------- /examples/example_17_wrapping_an_existing_project/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /examples/example_18_many_runs.py: -------------------------------------------------------------------------------- 1 | """Exploring more than 20000 runs may slow down *pypet*.
2 | 3 | HDF5 has problems handling nodes with more than 10000 children. 4 | To overcome this problem, simply group your runs into buckets or sets 5 | using the `$set` wildcard. 6 | 7 | """ 8 | 9 | __author__ = 'Robert Meyer' 10 | 11 | 12 | import os # To allow file paths working under Windows and Linux 13 | 14 | from pypet import Environment 15 | from pypet.utils.explore import cartesian_product 16 | 17 | def multiply(traj): 18 | """Example of a sophisticated simulation that involves multiplying two values.""" 19 | z = traj.x * traj.y 20 | # Since we perform many runs, we will group results into sets of 1000 each 21 | # using the `$set` wildcard 22 | traj.f_add_result('$set.$.z', z, comment='Result of our simulation ' 23 | 'sorted into buckets of ' 24 | '1000 runs each!') 25 | 26 | def main(): 27 | # Create an environment that handles running 28 | filename = os.path.join('hdf5','example_18.hdf5') 29 | env = Environment(trajectory='Multiplication', 30 | filename=filename, 31 | file_title='Example_18_Many_Runs', 32 | overwrite_file=True, 33 | comment='Contains many runs', 34 | multiproc=True, 35 | use_pool=True, 36 | freeze_input=True, 37 | ncores=2, 38 | wrap_mode='QUEUE') 39 | 40 | # The environment has created a trajectory container for us 41 | traj = env.trajectory 42 | 43 | # Add both parameters 44 | traj.f_add_parameter('x', 1, comment='I am the first dimension!') 45 | traj.f_add_parameter('y', 1, comment='I am the second dimension!') 46 | 47 | # Explore the parameters with a cartesian product, yielding 2500 runs 48 | traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)})) 49 | 50 | # Run the simulation 51 | env.run(multiply) 52 | 53 | # Disable logging 54 | env.disable_logging() 55 | 56 | # Turn auto loading on, since results have not been loaded yet 57 | traj.v_auto_load = True 58 | # Use the `v_idx` functionality 59 | traj.v_idx = 2042 60 | print('The result of run %d is: ' % traj.v_idx) 61 | # Now we can rely on the wildcards 62 | print(traj.res.crunset.crun.z) 63 | traj.v_idx = -1 64 | # Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results 65 | print('The result of run %d is: ' % 2044) 66 | print(traj.res.rts_2044.r_2044.z) 67 | 68 | if __name__ == '__main__': 69 | main() -------------------------------------------------------------------------------- /examples/example_21_scoop_multiprocessing.py: -------------------------------------------------------------------------------- 1 | """Example of how to use SCOOP (http://scoop.readthedocs.org/en/0.7/) with pypet. 2 | 3 | Start the script via ``python -m scoop example_21_scoop_multiprocessing.py``. 4 | 5 | """ 6 | 7 | __author__ = 'Robert Meyer' 8 | 9 | import os # For path names being viable under Windows and Linux 10 | 11 | from pypet import Environment, cartesian_product 12 | from pypet import pypetconstants 13 | 14 | 15 | # Let's reuse the simple multiplication example 16 | def multiply(traj): 17 | """Sophisticated simulation of multiplication""" 18 | z=traj.x*traj.y 19 | traj.f_add_result('z',z=z, comment='I am the product of two reals!') 20 | 21 | 22 | def main(): 23 | """Main function to protect the *entry point* of the program. 24 | 25 | If you want to use multiprocessing with SCOOP, you need to wrap the 26 | main code that creates an environment into a function. Otherwise 27 | the newly started child processes will re-execute the code and throw 28 | errors (also see http://scoop.readthedocs.org/en/latest/usage.html#pitfalls).
29 | 30 | """ 31 | 32 | # Create an environment that handles running. 33 | # Let's enable multiprocessing with scoop: 34 | filename = os.path.join('hdf5', 'example_21.hdf5') 35 | env = Environment(trajectory='Example_21_SCOOP', 36 | filename=filename, 37 | file_title='Example_21_SCOOP', 38 | log_stdout=True, 39 | comment='Multiprocessing example using SCOOP!', 40 | multiproc=True, 41 | freeze_input=True, # We want to save overhead and freeze input 42 | use_scoop=True, # Yes we want SCOOP! 43 | wrap_mode=pypetconstants.WRAP_MODE_LOCAL, # SCOOP only works with 'LOCAL' 44 | # or 'NETLOCK' wrapping 45 | overwrite_file=True) 46 | 47 | # Get the trajectory from the environment 48 | traj = env.trajectory 49 | 50 | # Add both parameters 51 | traj.f_add_parameter('x', 1.0, comment='I am the first dimension!') 52 | traj.f_add_parameter('y', 1.0, comment='I am the second dimension!') 53 | 54 | # Explore the parameters with a cartesian product, but we want to explore a bit more 55 | traj.f_explore(cartesian_product({'x':[float(x) for x in range(20)], 56 | 'y':[float(y) for y in range(20)]})) 57 | # Run the simulation 58 | env.run(multiply) 59 | 60 | # Let's check that all runs are completed! 61 | assert traj.f_is_completed() 62 | 63 | # Finally disable logging and close all log-files 64 | env.disable_logging() 65 | 66 | 67 | if __name__ == '__main__': 68 | # This will execute the main function in case the script is called from the one true 69 | # main process and not from a child process spawned by your environment. 70 | # Necessary for multiprocessing under Windows. 71 | main() -------------------------------------------------------------------------------- /examples/example_22_saga_python/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /examples/example_22_saga_python/merge_trajs.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os 4 | 5 | from pypet import merge_all_in_folder 6 | from the_task import FunctionParameter 7 | 8 | 9 | def main(): 10 | """Simply merge all trajectories in the working directory""" 11 | folder = os.getcwd() 12 | print('Merging all files') 13 | merge_all_in_folder(folder, 14 | delete_other_files=True, # We will only keep one trajectory 15 | dynamic_imports=FunctionParameter, 16 | backup=False) 17 | print('Done') 18 | 19 | 20 | if __name__ == '__main__': 21 | main() 22 | -------------------------------------------------------------------------------- /examples/example_22_saga_python/saga_0.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/examples/example_22_saga_python/saga_0.hdf5 -------------------------------------------------------------------------------- /examples/example_23_brian2_network.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import logging 4 | import os # For path names to be viable under Windows and Linux 5 | 6 | from pypet.environment import Environment 7 | from pypet.brian2.parameter import Brian2Parameter, Brian2MonitorResult 8 | from pypet.utils.explore import cartesian_product 9 | # Don't do this at home: 10 | from brian2 import pF, nS, mV, ms, nA, NeuronGroup, SpikeMonitor, StateMonitor, linspace,\ 11 |
Network 12 | 13 | # We define a function to set all parameters 14 | def add_params(traj): 15 | """Adds all necessary parameters to `traj`.""" 16 | 17 | # We set the Brian2Parameter to be the standard parameter 18 | traj.v_standard_parameter=Brian2Parameter 19 | traj.v_fast_access=True 20 | 21 | # Add parameters we need for our network 22 | traj.f_add_parameter('Net.C',281*pF) 23 | traj.f_add_parameter('Net.gL',30*nS) 24 | traj.f_add_parameter('Net.EL',-70.6*mV) 25 | traj.f_add_parameter('Net.VT',-50.4*mV) 26 | traj.f_add_parameter('Net.DeltaT',2*mV) 27 | traj.f_add_parameter('Net.tauw',40*ms) 28 | traj.f_add_parameter('Net.a',4*nS) 29 | traj.f_add_parameter('Net.b',0.08*nA) 30 | traj.f_add_parameter('Net.I',.8*nA) 31 | traj.f_add_parameter('Net.Vcut','vm > 0*mV') # practical threshold condition 32 | traj.f_add_parameter('Net.N',50) 33 | 34 | eqs=''' 35 | dvm/dt=(gL*(EL-vm)+gL*DeltaT*exp((vm-VT)/DeltaT)+I-w)/C : volt 36 | dw/dt=(a*(vm-EL)-w)/tauw : amp 37 | Vr:volt 38 | ''' 39 | traj.f_add_parameter('Net.eqs', eqs) 40 | traj.f_add_parameter('reset', 'vm=Vr;w+=b') 41 | 42 | # This is our job that we will execute 43 | def run_net(traj): 44 | """Creates and runs a Brian2 network based on the parameters in `traj`.""" 45 | 46 | eqs=traj.eqs 47 | 48 | # Create a namespace dictionary 49 | namespace = traj.Net.f_to_dict(short_names=True, fast_access=True) 50 | # Create the Neuron Group 51 | neuron=NeuronGroup(traj.N, model=eqs, threshold=traj.Vcut, reset=traj.reset, 52 | namespace=namespace) 53 | neuron.vm=traj.EL 54 | neuron.w=traj.a*(neuron.vm-traj.EL) 55 | neuron.Vr=linspace(-48.3*mV,-47.7*mV,traj.N) # bifurcation parameter 56 | 57 | # Run the network initially for 100 milliseconds 58 | print('Initial Run') 59 | net = Network(neuron) 60 | net.run(100*ms, report='text') # we discard the first spikes 61 | 62 | # Create a Spike Monitor 63 | MSpike=SpikeMonitor(neuron) 64 | net.add(MSpike) 65 | # Create a State Monitor for the membrane voltage, record from neurons 1-3 66 | MStateV = StateMonitor(neuron, variables=['vm'], record=[1,2,3]) 67 | net.add(MStateV) 68 | 69 | # Now record for 500 milliseconds 70 | print('Measurement run') 71 | net.run(500*ms,report='text') 72 | 73 | # Add the Brian2 monitors 74 | traj.v_standard_result = Brian2MonitorResult 75 | traj.f_add_result('SpikeMonitor',MSpike) 76 | traj.f_add_result('StateMonitorV', MStateV) 77 | 78 | 79 | def main(): 80 | # Let's be very verbose! 81 | logging.basicConfig(level=logging.INFO) 82 | 83 | 84 | # Create an environment; this time we run without multiprocessing 85 | filename = os.path.join('hdf5', 'example_23.hdf5') 86 | env = Environment(trajectory='Example_23_BRIAN2', 87 | filename=filename, 88 | file_title='Example_23_Brian2', 89 | comment='Go Brian2!', 90 | dynamically_imported_classes=[Brian2MonitorResult, Brian2Parameter]) 91 | 92 | traj = env.trajectory 93 | 94 | # 1st a) add the parameters 95 | add_params(traj) 96 | 97 | # 1st b) prepare, we want to explore the different network sizes and different tauw time scales 98 | traj.f_explore(cartesian_product({traj.f_get('N').v_full_name:[50,60], 99 | traj.f_get('tauw').v_full_name:[30*ms,40*ms]})) 100 | 101 | # 2nd let's run our experiment 102 | env.run(run_net) 103 | 104 | # You can take a look at the results in the hdf5 file if you want!
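    # For instance, a minimal sketch of such an inspection (a hypothetical
    # addition, not part of the original example) that reloads the data and
    # counts the recorded spikes of every run; after loading, the result's
    # `t` entry holds the extracted spike times:
    #
    #     traj.f_load(load_data=2)
    #     for run_name in traj.f_iter_runs():
    #         n_spikes = len(traj.res.runs[run_name].SpikeMonitor.t)
    #         print('%s recorded %d spikes' % (run_name, n_spikes))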
105 | 106 | # Finally disable logging and close all log-files 107 | env.disable_logging() 108 | 109 | 110 | if __name__ == '__main__': 111 | main() 112 | 113 | 114 | 115 | -------------------------------------------------------------------------------- /examples/example_24_large_scale_brian2_simulation/.gitignore: -------------------------------------------------------------------------------- 1 | experiments -------------------------------------------------------------------------------- /examples/example_24_large_scale_brian2_simulation/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /examples/example_24_large_scale_brian2_simulation/plotff.py: -------------------------------------------------------------------------------- 1 | """Script to plot the fano factor graph for a given simulation 2 | stored as a trajectory in an HDF5 file. 3 | 4 | """ 5 | 6 | __author__ = 'Robert Meyer' 7 | 8 | import os 9 | import matplotlib.pyplot as plt 10 | 11 | from pypet import Trajectory, Environment 12 | from pypet.brian2.parameter import Brian2MonitorResult, Brian2Parameter 13 | 14 | 15 | def main(): 16 | 17 | filename = os.path.join('hdf5', 'Clustered_Network.hdf5') 18 | # If we pass a filename to the trajectory a new HDF5StorageService will 19 | # be automatically created 20 | traj = Trajectory(filename=filename, 21 | dynamically_imported_classes=[Brian2MonitorResult, 22 | Brian2Parameter]) 23 | 24 | # Let's create a fake environment to enable logging: 25 | env = Environment(traj, do_single_runs=False) 26 | 27 | 28 | # Load the trajectory, but only load the skeleton of the results 29 | traj.f_load(index=-1, load_parameters=2, load_derived_parameters=2, load_results=1) 30 | 31 | # Find the result instances related to the fano factor 32 | fano_dict = traj.f_get_from_runs('mean_fano_factor', fast_access=False) 33 | 34 | # Load the data of the fano factor results 35 | ffs = fano_dict.values() 36 | traj.f_load_items(ffs) 37 | 38 | # Extract the fano factor value and the R_ee value of each run 39 | ffs_values = [x.f_get() for x in ffs] 40 | Rees = traj.f_get('R_ee').f_get_range() 41 | 42 | # Plot average fano factor as a function of R_ee 43 | plt.plot(Rees, ffs_values) 44 | plt.xlabel('R_ee') 45 | plt.ylabel('Avg. Fano Factor') 46 | plt.show() 47 | 48 | # Finally disable logging and close all log-files 49 | env.disable_logging() 50 | 51 | 52 | if __name__ == '__main__': 53 | main() -------------------------------------------------------------------------------- /examples/example_24_large_scale_brian2_simulation/runscript.py: -------------------------------------------------------------------------------- 1 | """Starting script to run a network simulation of the clustered network 2 | by Litwin-Kumar and Doiron (Nature Neuroscience 2012). 3 | 4 | The network has been implemented using the *pypet* network framework.
5 | 6 | """ 7 | 8 | __author__ = 'Robert Meyer' 9 | 10 | import numpy as np 11 | import os # To allow path names to work under Windows and Linux 12 | import brian2 13 | brian2.prefs.codegen.target = 'numpy' 14 | 15 | from pypet.environment import Environment 16 | from pypet.brian2.network import NetworkManager 17 | 18 | from clusternet import CNMonitorAnalysis, CNNeuronGroup, CNNetworkRunner, CNConnections,\ 19 | CNFanoFactorComputer 20 | 21 | 22 | def main(): 23 | filename = os.path.join('hdf5', 'Clustered_Network.hdf5') 24 | env = Environment(trajectory='Clustered_Network', 25 | add_time=False, 26 | filename=filename, 27 | continuable=False, 28 | lazy_debug=False, 29 | multiproc=True, 30 | ncores=4, 31 | use_pool=False, # We cannot use a pool, our network cannot be pickled 32 | wrap_mode='QUEUE', 33 | overwrite_file=True) 34 | 35 | # Get the trajectory container 36 | traj = env.trajectory 37 | 38 | # We introduce a `meta` parameter that we can use to easily rescale our network 39 | scale = 1.0 # A scale of 1.0 yields the results from the paper 40 | # Be aware that your machine will need a lot of memory then! 41 | traj.f_add_parameter('simulation.scale', scale, 42 | comment='Meta parameter that can scale default settings. ' 43 | 'Rescales the number of neurons and connection strengths, but ' 44 | 'not the cluster size.') 45 | 46 | 47 | # We create a Manager and pass all our components to the Manager. 48 | # Note the order: CNNeuronGroups are scheduled before CNConnections, 49 | # and the Fano Factor computation depends on the CNMonitorAnalysis 50 | clustered_network_manager = NetworkManager(network_runner=CNNetworkRunner(), 51 | component_list=(CNNeuronGroup(), CNConnections()), 52 | analyser_list=(CNMonitorAnalysis(),CNFanoFactorComputer())) 53 | 54 | # Add original parameters (but scaled according to `scale`) 55 | clustered_network_manager.add_parameters(traj) 56 | 57 | # We need `tolist` here since our parameter is a python float and not a 58 | # numpy float.
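    # (`np.arange` returns numpy floats, and pypet only accepts explored
    # values that match the type of the parameter's default value.)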
59 | explore_list = np.arange(1.0, 3.5, 0.4).tolist() 60 | # Explore different values of `R_ee` 61 | traj.f_explore({'R_ee' : explore_list}) 62 | 63 | # Pre-build network components 64 | clustered_network_manager.pre_build(traj) 65 | 66 | # Run the network simulation 67 | traj.f_store() # Let's store the parameters already before the run 68 | env.run(clustered_network_manager.run_network) 69 | 70 | # Finally disable logging and close all log-files 71 | env.disable_logging() 72 | 73 | 74 | if __name__ == '__main__': 75 | main() -------------------------------------------------------------------------------- /pylint/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore pylint statistics 2 | *.html 3 | *.txt -------------------------------------------------------------------------------- /pylint/pylint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Running pylint" 3 | pylint --rcfile=.pylintrc ../pypet -------------------------------------------------------------------------------- /pypet/TODO.txt: -------------------------------------------------------------------------------- 1 | 2 | * Add docu about graceful exit *DONE* 3 | 4 | * Add docu about no clobber *DONE* 5 | 6 | * Add docu about rt2, runtoset *DONE* 7 | 8 | * Add docu about new map feature *DONE* 9 | 10 | * Write more tests for new map feature *DONE* 11 | 12 | * Doc manual runs *DONE* 13 | 14 | * Write tests for manual runs *DONE* 15 | 16 | * Add examples to docu *DONE* 17 | 18 | * Better document examples *DONE* 19 | 20 | * Doc new wrapper *DONE* 21 | 22 | * Test new wrapper *DONE* 23 | 24 | * more test for copying *DONE* 25 | 26 | * Doc new niceness feature *DONE* 27 | 28 | * Doc new scoop feature *DONE* 29 | 30 | * Add gc.collect to queue and pipe feature *DONE* 31 | 32 | * Document new NETLOCK wrap mode *DONE* 33 | 34 | * Create NETQUEUE mode (Mehmet) *DONE* 35 | 36 | * Document and test TimeOutLockerServer *DONE* 37 | 38 | * Add changes about naming *DONE* 39 | 40 | * Doc new naming *DONE* 41 | 42 | * Doc Brian changes *DONE* -------------------------------------------------------------------------------- /pypet/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | __author__ = 'Robert Meyer' 4 | 5 | try: 6 | from ._version import __version__ 7 | except ImportError: 8 | # We're running in a tree that doesn't 9 | # have a _version.py, so we don't know what our version is.
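    # Fall back to a placeholder string so that importing pypet still works.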
10 | __version__ = "unknown" 11 | 12 | 13 | from pypet.environment import Environment, MultiprocContext 14 | from pypet.trajectory import Trajectory, load_trajectory 15 | from pypet.storageservice import HDF5StorageService, LazyStorageService 16 | from pypet.naturalnaming import ParameterGroup, DerivedParameterGroup, ConfigGroup,\ 17 | ResultGroup, NNGroupNode, NNLeafNode, KnowsTrajectory 18 | from pypet.parameter import Parameter, ArrayParameter, SparseParameter,\ 19 | PickleParameter, Result, SparseResult, PickleResult, ObjectTable, BaseParameter, BaseResult 20 | from pypet.pypetexceptions import DataNotInStorageError, NoSuchServiceError,\ 21 | NotUniqueNodeError, ParameterLockedException, PresettingError, TooManyGroupsError,\ 22 | VersionMismatchError, GitDiffError 23 | from pypet.pypetlogging import HasLogger, rename_log_file 24 | from pypet.utils.explore import cartesian_product, find_unique_points 25 | from pypet.utils.hdf5compression import compact_hdf5_file 26 | from pypet.utils.helpful_functions import progressbar, racedirs 27 | from pypet.shareddata import SharedArray, SharedCArray, SharedEArray,\ 28 | SharedVLArray, SharedPandasFrame, SharedTable, SharedResult,\ 29 | StorageContextManager, make_ordinary_result, make_shared_result 30 | from pypet.slots import HasSlots 31 | from pypet.utils.trajectory_utils import merge_all_in_folder 32 | from pypet.utils.decorators import manual_run 33 | from pypet.utils.pypettest import test 34 | 35 | 36 | __all__ = [ 37 | Trajectory.__name__, 38 | Environment.__name__, 39 | MultiprocContext.__name__, 40 | HDF5StorageService.__name__, 41 | LazyStorageService.__name__, 42 | ParameterGroup.__name__, 43 | DerivedParameterGroup.__name__, 44 | ConfigGroup.__name__, 45 | ResultGroup.__name__, 46 | NNGroupNode.__name__, 47 | NNLeafNode.__name__, 48 | BaseParameter.__name__, 49 | Parameter.__name__, 50 | ArrayParameter.__name__, 51 | SparseParameter.__name__, 52 | PickleParameter.__name__, 53 | BaseResult.__name__, 54 | Result.__name__, 55 | SparseResult.__name__, 56 | PickleResult.__name__, 57 | ObjectTable.__name__, 58 | DataNotInStorageError.__name__, 59 | NoSuchServiceError.__name__, 60 | NotUniqueNodeError.__name__, 61 | ParameterLockedException.__name__, 62 | PresettingError.__name__, 63 | TooManyGroupsError.__name__, 64 | VersionMismatchError.__name__, 65 | GitDiffError.__name__, 66 | HasSlots.__name__, 67 | HasLogger.__name__, 68 | rename_log_file.__name__, 69 | cartesian_product.__name__, 70 | load_trajectory.__name__, 71 | compact_hdf5_file.__name__, 72 | KnowsTrajectory.__name__, 73 | StorageContextManager.__name__, 74 | SharedArray.__name__, 75 | SharedCArray.__name__, 76 | SharedEArray.__name__, 77 | SharedVLArray.__name__, 78 | SharedPandasFrame.__name__, 79 | SharedTable.__name__, 80 | SharedResult.__name__, 81 | make_ordinary_result.__name__, 82 | make_shared_result.__name__, 83 | progressbar.__name__, 84 | racedirs.__name__, 85 | find_unique_points.__name__, 86 | merge_all_in_folder.__name__, 87 | manual_run.__name__, 88 | test.__name__ 89 | ] 90 | -------------------------------------------------------------------------------- /pypet/_version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.6.1' 2 | -------------------------------------------------------------------------------- /pypet/brian2/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = ['Henri Bunting', 'Robert Meyer'] 2 | 3 | 4 | from pypet.brian2.parameter 
import Brian2Parameter, Brian2Result, Brian2MonitorResult 5 | from pypet.brian2.network import NetworkManager, NetworkRunner, NetworkComponent, NetworkAnalyser 6 | 7 | 8 | __all__ = [ 9 | Brian2Parameter.__name__, 10 | Brian2Result.__name__, 11 | Brian2MonitorResult.__name__, 12 | NetworkManager.__name__, 13 | NetworkRunner.__name__, 14 | NetworkComponent.__name__, 15 | NetworkAnalyser.__name__ 16 | ] -------------------------------------------------------------------------------- /pypet/logging/debug.ini: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [logger_root] 5 | handlers=file_main,file_error,stream 6 | level=NOTSET 7 | 8 | [formatters] 9 | keys=file,stream 10 | 11 | [formatter_file] 12 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 13 | 14 | [formatter_stream] 15 | format=%(processName)-10s %(name)s %(levelname)-8s %(message)s 16 | 17 | [handlers] 18 | keys=file_main, file_error, stream 19 | 20 | [handler_file_error] 21 | class=FileHandler 22 | level=ERROR 23 | args=('$temp$traj/$env/ERROR.txt',) 24 | formatter=file 25 | 26 | [handler_file_main] 27 | class=FileHandler 28 | args=('$temp$traj/$env/LOG.txt',) 29 | formatter=file 30 | 31 | [handler_stream] 32 | class=StreamHandler 33 | level=INFO 34 | args=() 35 | formatter=stream 36 | 37 | 38 | [multiproc_loggers] 39 | keys=root 40 | 41 | [multiproc_logger_root] 42 | handlers=file_main,file_error,stream 43 | level=NOTSET 44 | 45 | [multiproc_formatters] 46 | keys=file, stream 47 | 48 | [multiproc_formatter_file] 49 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 50 | 51 | [multiproc_handlers] 52 | keys=file_main, file_error, stream 53 | 54 | [multiproc_handler_file_error] 55 | class=FileHandler 56 | level=ERROR 57 | args=('$temp$traj/$env/$run_$host_$proc_ERROR.txt',) 58 | formatter=file 59 | 60 | [multiproc_handler_file_main] 61 | class=FileHandler 62 | args=('$temp$traj/$env/$run_$host_$proc_LOG.txt',) 63 | formatter=file 64 | 65 | [multiproc_handler_stream] 66 | class=StreamHandler 67 | level=INFO 68 | args=() 69 | formatter=stream 70 | 71 | [multiproc_formatter_stream] 72 | format=%(processName)-10s %(name)s %(levelname)-8s %(message)s -------------------------------------------------------------------------------- /pypet/logging/default.ini: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [logger_root] 5 | handlers=file_main,file_error,stream 6 | level=INFO 7 | 8 | [formatters] 9 | keys=file,stream 10 | 11 | [formatter_file] 12 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 13 | 14 | [formatter_stream] 15 | format=%(processName)-10s %(name)s %(levelname)-8s %(message)s 16 | 17 | [handlers] 18 | keys=file_main, file_error, stream 19 | 20 | [handler_file_error] 21 | class=FileHandler 22 | level=ERROR 23 | args=('logs/$traj/$env/ERROR.txt',) 24 | formatter=file 25 | 26 | [handler_file_main] 27 | class=FileHandler 28 | args=('logs/$traj/$env/LOG.txt',) 29 | formatter=file 30 | 31 | [handler_stream] 32 | class=StreamHandler 33 | level=INFO 34 | args=() 35 | formatter=stream 36 | 37 | 38 | [multiproc_loggers] 39 | keys=root 40 | 41 | [multiproc_logger_root] 42 | handlers=file_main,file_error 43 | level=INFO 44 | 45 | [multiproc_formatters] 46 | keys=file 47 | 48 | [multiproc_formatter_file] 49 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 50 | 51 | [multiproc_handlers] 52 | keys=file_main,file_error 53 | 54 | [multiproc_handler_file_error] 55 | 
class=FileHandler 56 | level=ERROR 57 | args=('logs/$traj/$env/$run_$host_$proc_ERROR.txt',) 58 | formatter=file 59 | 60 | [multiproc_handler_file_main] 61 | class=FileHandler 62 | args=('logs/$traj/$env/$run_$host_$proc_LOG.txt',) 63 | formatter=file 64 | -------------------------------------------------------------------------------- /pypet/logging/env_config_test.ini: -------------------------------------------------------------------------------- 1 | ######### Environment ############## 2 | [trajectory] 3 | trajectory='ConfigTest' 4 | add_time=True 5 | comment='' 6 | auto_load=True 7 | v_with_links=True 8 | 9 | [environment] 10 | automatic_storing=True 11 | log_stdout=('STDOUT', 50) 12 | report_progress = (10, 'pypet', 50) 13 | multiproc=True 14 | ncores=2 15 | use_pool=True 16 | cpu_cap=100.0 17 | memory_cap=100.0 18 | swap_cap=100.0 19 | wrap_mode='LOCK' 20 | clean_up_runs=True 21 | immediate_postproc=False 22 | continuable=False 23 | continue_folder=None 24 | delete_continue=True 25 | storage_service='pypet.HDF5StorageService' 26 | do_single_runs=True 27 | lazy_debug=False 28 | 29 | [storage_service] 30 | filename='test_overwrite' 31 | file_title=None 32 | overwrite_file=False 33 | encoding='utf-8' 34 | complevel=4 35 | complib='zlib' 36 | shuffle=False 37 | fletcher32=True 38 | pandas_format='t' 39 | purge_duplicate_comments=False 40 | summary_tables=False 41 | small_overview_tables=False 42 | large_overview_tables=True 43 | results_per_run=1000 44 | derived_parameters_per_run=1000 45 | display_time=50 46 | 47 | 48 | ###### Config and Parameters ###### 49 | [config] 50 | test.testconfig=True, 'This is a test config' 51 | 52 | [parameters] 53 | test.x=42 54 | y=43, 'This is the second variable' 55 | 56 | 57 | ############ Logging ############### 58 | [loggers] 59 | keys=root 60 | 61 | [logger_root] 62 | handlers=file_main,file_error,stream 63 | level=INFO 64 | 65 | [formatters] 66 | keys=file,stream 67 | 68 | [formatter_file] 69 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 70 | 71 | [formatter_stream] 72 | format=%(processName)-10s %(name)s %(levelname)-8s %(message)s 73 | 74 | [handlers] 75 | keys=file_main, file_error, stream 76 | 77 | [handler_file_error] 78 | class=FileHandler 79 | level=ERROR 80 | args=('$temp$traj/$env/ERROR.txt',) 81 | formatter=file 82 | 83 | [handler_file_main] 84 | class=FileHandler 85 | args=('$temp$traj/$env/LOG.txt',) 86 | formatter=file 87 | 88 | [handler_stream] 89 | class=StreamHandler 90 | level=ERROR 91 | args=() 92 | formatter=stream 93 | 94 | 95 | [multiproc_loggers] 96 | keys=root 97 | 98 | [multiproc_logger_root] 99 | handlers=file_main,file_error 100 | level=INFO 101 | 102 | [multiproc_formatters] 103 | keys=file 104 | 105 | [multiproc_formatter_file] 106 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 107 | 108 | [multiproc_handlers] 109 | keys=file_main, file_error 110 | 111 | [multiproc_handler_file_error] 112 | class=FileHandler 113 | level=ERROR 114 | args=('$temp$traj/$env/$run_$host_$proc_ERROR.txt',) 115 | formatter=file 116 | 117 | [multiproc_handler_file_main] 118 | class=FileHandler 119 | args=('$temp$traj/$env/$run_$host_$proc_LOG.txt',) 120 | formatter=file 121 | -------------------------------------------------------------------------------- /pypet/logging/test.ini: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [logger_root] 5 | handlers=file_main,file_error,stream 6 | level=INFO 7 | 8 | [formatters] 9 | keys=file,stream 10 | 11 | 
[formatter_file] 12 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 13 | 14 | [formatter_stream] 15 | format=%(processName)-10s %(asctime)s %(name)s %(levelname)-8s %(message)s 16 | 17 | [handlers] 18 | keys=file_main, file_error, stream 19 | 20 | [handler_file_error] 21 | class=FileHandler 22 | level=ERROR 23 | args=('$temp$traj/$env/ERROR.txt',) 24 | formatter=file 25 | 26 | [handler_file_main] 27 | class=FileHandler 28 | args=('$temp$traj/$env/LOG.txt',) 29 | formatter=file 30 | 31 | [handler_stream] 32 | class=StreamHandler 33 | level=ERROR 34 | args=() 35 | formatter=stream 36 | 37 | 38 | [multiproc_loggers] 39 | keys=root 40 | 41 | [multiproc_logger_root] 42 | handlers=file_main,file_error,stream 43 | level=INFO 44 | 45 | [multiproc_formatters] 46 | keys=file, stream 47 | 48 | [multiproc_formatter_file] 49 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 50 | 51 | [multiproc_handlers] 52 | keys=file_main, file_error, stream 53 | 54 | [multiproc_handler_file_error] 55 | class=FileHandler 56 | level=ERROR 57 | args=('$temp$traj/$env/$run_$host_$proc_ERROR.txt',) 58 | formatter=file 59 | 60 | [multiproc_handler_file_main] 61 | class=FileHandler 62 | args=('$temp$traj/$env/$run_$host_$proc_LOG.txt',) 63 | formatter=file 64 | 65 | [multiproc_handler_stream] 66 | class=StreamHandler 67 | level=ERROR 68 | args=() 69 | formatter=stream 70 | 71 | [multiproc_formatter_stream] 72 | format=%(processName)-10s %(asctime)s %(name)s %(levelname)-8s %(message)s -------------------------------------------------------------------------------- /pypet/pypetexceptions.py: -------------------------------------------------------------------------------- 1 | """Module containing all exceptions""" 2 | 3 | __author__ = 'Robert Meyer' 4 | 5 | 6 | class ParameterLockedException(TypeError): 7 | """Exception raised if someone tries to modify a locked Parameter.""" 8 | pass 9 | 10 | 11 | class VersionMismatchError(TypeError): 12 | """Exception raised if the current version of pypet does not match the version with which 13 | the trajectory was handled.""" 14 | pass 15 | 16 | 17 | class PresettingError(RuntimeError): 18 | """Exception raised if parameter presetting failed. 19 | 20 | Probable cause might be a typo in the parameter name. 21 | 22 | """ 23 | pass 24 | 25 | 26 | class NoSuchServiceError(TypeError): 27 | """Exception raised by the Storage Service if a specific operation is not supported, 28 | i.e. the message is not understood. 29 | 30 | """ 31 | pass 32 | 33 | 34 | class NotUniqueNodeError(AttributeError): 35 | """Exception raised by the Natural Naming if a node can be found more than once.""" 36 | pass 37 | 38 | 39 | class TooManyGroupsError(TypeError): 40 | """Exception raised by natural naming fast search if fast search cannot be applied. 
41 | """ 42 | pass 43 | 44 | 45 | class DataNotInStorageError(IOError): 46 | """Exception raised by the Storage Service if data that is supposed to be loaded cannot 47 | be found on disk.""" 48 | pass 49 | 50 | 51 | class GitDiffError(RuntimeError): 52 | """Exception raised if there are uncommitted changes.""" 53 | pass 54 | -------------------------------------------------------------------------------- /pypet/slots.py: -------------------------------------------------------------------------------- 1 | """Module containing the superclass having slots""" 2 | 3 | __author__ = 'Robert Meyer' 4 | 5 | 6 | def get_all_slots(cls): 7 | """Iterates through a class' (`cls`) MRO to get all slots as a set.""" 8 | slots_iterator = (getattr(c, '__slots__', ()) for c in cls.__mro__) 9 | # `__slots__` might only be a single string, 10 | # so we need to put the strings into a tuple. 11 | slots_converted = ((slots,) if isinstance(slots, str) else slots 12 | for slots in slots_iterator) 13 | all_slots = set() 14 | all_slots.update(*slots_converted) 15 | return all_slots 16 | 17 | 18 | class MetaSlotMachine(type): 19 | """Meta-class that adds the attribute `__all_slots__` to a class. 20 | 21 | `__all_slots__` is a set that contains all unique slots of a class, 22 | including the ones that are inherited from parents. 23 | 24 | """ 25 | def __init__(cls, name, bases, dictionary): 26 | super(MetaSlotMachine, cls).__init__(name, bases, dictionary) 27 | cls.__all_slots__ = get_all_slots(cls) 28 | 29 | 30 | class HasSlots(object, metaclass=MetaSlotMachine): 31 | """Top-class that allows mixing of classes with and without slots. 32 | 33 | Takes care that instances can still be pickled with the lowest 34 | protocol. Moreover, provides a generic `__dir__` method that 35 | lists all slots.
36 | 37 | """ 38 | __slots__ = ('__weakref__',) 39 | 40 | def __getstate__(self): 41 | if hasattr(self, '__dict__'): 42 | # We don't require that all sub-classes also define slots, 43 | # so they may provide a dictionary 44 | statedict = self.__dict__.copy() 45 | else: 46 | statedict = {} 47 | # Get all slots of potential parent classes 48 | for slot in self.__all_slots__: 49 | try: 50 | value = getattr(self, slot) 51 | statedict[slot] = value 52 | except AttributeError: 53 | pass 54 | # Pop slots that cannot or should not be pickled 55 | statedict.pop('__dict__', None) 56 | statedict.pop('__weakref__', None) 57 | return statedict 58 | 59 | def __setstate__(self, state): 60 | """Recalls state for items with slots""" 61 | for key in state: 62 | setattr(self, key, state[key]) 63 | 64 | def __dir__(self): 65 | """Includes all slots in the `dir` method""" 66 | result = set() 67 | result.update(dir(self.__class__), self.__all_slots__) 68 | if hasattr(self, '__dict__'): 69 | result.update(self.__dict__.keys()) 70 | return list(result) 71 | -------------------------------------------------------------------------------- /pypet/tests/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | -------------------------------------------------------------------------------- /pypet/tests/_atworema.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | from pypet.tests.testutils.ioutils import run_suite, discover_tests, TEST_IMPORT_ERROR 4 | 5 | if __name__ == '__main__': 6 | suite = discover_tests(predicate= lambda class_name, test_name, tags: 7 | class_name != TEST_IMPORT_ERROR) 8 | run_suite(remove=False, folder=None, suite=suite) -------------------------------------------------------------------------------- /pypet/tests/all_multi_core_tests.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | 4 | from pypet.tests.testutils.ioutils import run_suite, discover_tests, TEST_IMPORT_ERROR, parse_args 5 | 6 | tests_include=set(('MultiprocNoPoolLockTest', 7 | #'MultiprocNoPoolPipeTest', 8 | 'MultiprocPoolSortQueueTest', 9 | 'MultiprocFrozenPoolSortQueueTest', 10 | 'MultiprocLinkLockTest', 11 | 'CapTest', 12 | # 'MultiprocPoolLockTest', 13 | 'MultiprocStorageLockTest', 14 | 'MultiprocNoPoolQueueLoggingTest', 15 | 'MultiprocFrozenPoolLocalTest', 16 | 'TestMPImmediatePostProcQueue', 17 | 'MultiprocStorageNetlockTest', 18 | 'MultiprocSCOOPSortNetqueueTest')) 19 | 20 | big_suite_1 = discover_tests(lambda class_name, test_name, tags: class_name in tests_include) 21 | 22 | tests_include=set((#'MultiprocNoPoolQueueTest', 23 | 'MultiprocPoolQueueTest', 24 | 'MultiprocFrozenPoolPipeTest', 25 | 'MultiprocPoolSortLockTest', 26 | 'MultiprocPoolSortPipeTest', 27 | 'MultiprocFrozenPoolSortLockTest', 28 | 'MultiprocLinkNoPoolLockTest', 29 | 'TestMPPostProc', 30 | 'ContinueMPPoolTest', 31 | 'MultiprocPoolLockLoggingTest', 32 | 'MultiprocNoPoolSortLocalTest', 33 | 'TestMPImmediatePostProcLocal', 34 | 'MultiprocSCOOPSortLocalTest', 35 | 'MultiprocFrozenSCOOPLocalTest')) 36 | big_suite_2 = discover_tests(lambda class_name, test_name, tags: class_name in tests_include) 37 | 38 | tests_include=set((#'MultiprocFrozenPoolLockTest', 39 | 'MultiprocNoPoolSortQueueTest', 40 | 'MultiprocLinkNoPoolQueueTest', 41 | 'MultiprocPoolSortPipeTest', 42 | 'TestMPImmediatePostProcLock', 43 | #'MultiprocPoolPipeTest', 44 | 
'MultiprocStorageNoPoolLockTest', 45 | 'MultiprocNoPoolLockLoggingTest', 46 | 'MultiprocFrozenSCOOPSortNetlockTest', 47 | #'MultiprocFrozenSCOOPSortLocalTest', 48 | 'MultiprocSCOOPNetqueueTest')) 49 | big_suite_3 = discover_tests(lambda class_name, test_name, tags: class_name in tests_include) 50 | 51 | tests_include=set(('MultiprocFrozenPoolQueueTest', 52 | 'MultiprocFrozenPoolSortPipeTest', 53 | 'MultiprocNoPoolSortLockTest', 54 | 'MultiprocLinkQueueTest', 55 | 'ContinueMPTest', 56 | 'BrianFullNetworkMPTest', 57 | 'MultiprocPoolQueueLoggingTest', 58 | 'MultiprocPoolSortLocalTest', 59 | 'MultiprocLinkLocalTest', 60 | 'TestMPImmediatePostProcPipe', 61 | 'MultiprocSCOOPNetlockTest', 62 | 'MultiprocFrozenSCOOPLocalTest')) 63 | big_suite_4 = discover_tests(lambda class_name, test_name, tags: class_name in tests_include) 64 | 65 | 66 | suite_dict = {'1': big_suite_1, '2': big_suite_2, '3': big_suite_3, '4': big_suite_4} 67 | 68 | 69 | if __name__ == '__main__': 70 | opt_dict = parse_args() 71 | suite = None 72 | if 'suite_no' in opt_dict: 73 | suite_no = opt_dict.pop('suite_no') 74 | suite = suite_dict[suite_no] 75 | 76 | if suite is None: 77 | pred = lambda class_name, test_name, tags: ('multiproc' in tags and 78 | class_name != TEST_IMPORT_ERROR) 79 | suite = discover_tests(pred) 80 | 81 | run_suite(suite=suite, **opt_dict) -------------------------------------------------------------------------------- /pypet/tests/all_single_core_tests.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | 4 | from pypet.tests.testutils.ioutils import run_suite, TEST_IMPORT_ERROR, discover_tests, \ 5 | parse_args 6 | 7 | unit_pred = lambda class_name, test_name, tags: ('unittest' in tags and 8 | 'multiproc' not in tags) 9 | unit_suite = discover_tests(unit_pred) 10 | 11 | exclude_set = set(('hdf5_settings', 'multiproc', 'merge')) 12 | integration_pred = lambda class_name, test_name, tags: ('integration' in tags and 13 | not bool(exclude_set & tags)) 14 | integration_suite = discover_tests(integration_pred) 15 | 16 | include_set = set(('hdf5_settings', 'links', 'merge')) 17 | integration_pred_2 = lambda class_name, test_name, tags: ('integration' in tags and 18 | bool(include_set & tags) and 19 | 'multiproc' not in tags and 20 | 'links' not in tags) 21 | integration_suite_2 = discover_tests(integration_pred_2) 22 | 23 | suite_dict = {'1': unit_suite, '2': integration_suite, '3': integration_suite_2} 24 | 25 | 26 | if __name__ == '__main__': 27 | opt_dict = parse_args() 28 | suite = None 29 | if 'suite_no' in opt_dict: 30 | suite_no = opt_dict.pop('suite_no') 31 | suite = suite_dict[suite_no] 32 | 33 | if suite is None: 34 | pred = lambda class_name, test_name, tags: ('multiproc' not in tags and 35 | class_name != TEST_IMPORT_ERROR) 36 | suite = discover_tests(pred) 37 | 38 | run_suite(suite=suite, **opt_dict) 39 | -------------------------------------------------------------------------------- /pypet/tests/all_tests.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | 4 | from pypet.tests.testutils.ioutils import run_suite, discover_tests, TEST_IMPORT_ERROR, parse_args 5 | 6 | 7 | if __name__ == '__main__': 8 | opt_dict = parse_args() 9 | suite = discover_tests(predicate=lambda class_name, test_name, tags: 10 | class_name != TEST_IMPORT_ERROR) 11 | run_suite(suite=suite, **opt_dict) -------------------------------------------------------------------------------- 
/pypet/tests/coverage_run.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import multiprocessing 4 | # Monkey Patch from here: https://bitbucket.org/ned/coveragepy/issue/117/enable-coverage-measurement-of-code-run-by 5 | def coverage_multiprocessing_process(): # pragma: no cover 6 | try: 7 | import coverage as _coverage 8 | _coverage # referenced only to silence unused-import warnings 9 | except ImportError: 10 | return 11 | 12 | from coverage.collector import Collector 13 | from coverage import coverage 14 | # Detect whether coverage is running in a forked process 15 | if Collector._collectors: 16 | original = multiprocessing.Process._bootstrap 17 | class Process_WithCoverage(multiprocessing.Process): 18 | def _bootstrap(self): 19 | cov = coverage(data_suffix=True, 20 | omit='*/pypet/tests/*,*/shareddata.py'.split(',')) 21 | 22 | cov.start() 23 | try: 24 | return original(self) 25 | finally: 26 | cov.stop() 27 | cov.save() 28 | return Process_WithCoverage 29 | 30 | ProcessCoverage = coverage_multiprocessing_process() 31 | if ProcessCoverage: 32 | multiprocessing.Process = ProcessCoverage 33 | print('Added Monkey-Patch for multiprocessing and code-coverage') 34 | 35 | 36 | import sys 37 | import os 38 | 39 | pypetpath = os.path.abspath(os.getcwd()) 40 | sys.path.append(pypetpath) 41 | print('Appended path `%s`' % pypetpath) 42 | 43 | from pypet.tests.testutils.ioutils import run_suite, discover_tests, TEST_IMPORT_ERROR, parse_args 44 | 45 | 46 | if __name__ == '__main__': 47 | opt_dict = parse_args() 48 | tests_include = set(('TestMPImmediatePostProcLock', 49 | 'MultiprocFrozenPoolSortQueueTest', 50 | 'MultiprocFrozenPoolSortPipeTest', 51 | 'MultiprocLinkNoPoolLockTest', 52 | 'MultiprocLinkNoPoolQueueTest', 53 | 'MultiprocLinkQueueTest', 54 | 'MultiprocPoolSortLocalTest', 55 | 'MultiprocSCOOPSortLocalTest', 56 | 'MultiprocFrozenSCOOPSortNetlockTest', 57 | 'MultiprocFrozenSCOOPSortNetqueueTest', 58 | 'Brain2NetworkTest', 59 | 'BrainNetworkTest', 60 | 'CapTest')) 61 | pred = lambda class_name, test_name, tags: (class_name in tests_include or 62 | 'multiproc' not in tags) 63 | suite = discover_tests(pred) 64 | run_suite(suite=suite, **opt_dict) -------------------------------------------------------------------------------- /pypet/tests/integration/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /pypet/tests/integration/brian2tests/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'henri' 2 | -------------------------------------------------------------------------------- /pypet/tests/integration/brian2tests/another_network_test.py: -------------------------------------------------------------------------------- 1 | __author__ = ['Henri Bunting', 'Robert Meyer'] 2 | 3 | 4 | import numpy as np 5 | import time 6 | import os 7 | 8 | try: 9 | import brian2 10 | from brian2 import NeuronGroup, Synapses, SpikeMonitor, StateMonitor, mV, ms, Network, second, \ 11 | PopulationRateMonitor 12 | from pypet.brian2.parameter import Brian2Parameter, Brian2MonitorResult 13 | from brian2 import prefs 14 | prefs.codegen.target = 'numpy' 15 | except ImportError: 16 | brian2 = None 17 | 18 | from pypet import Environment 19 | from pypet.tests.testutils.data import TrajectoryComparator 20 | from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, get_log_config, \
parse_args, run_suite, unittest 22 | 23 | 24 | def run_network(traj): 25 | """Runs a Brian2 network consisting of 26 | 100 inhibitory IF neurons""" 27 | 28 | eqs = ''' 29 | dv/dt=(v0-v)/(5*ms) : volt (unless refractory) 30 | v0 : volt 31 | ''' 32 | group = NeuronGroup(100, model=eqs, threshold='v>10 * mV', 33 | reset='v = 0*mV', refractory=5*ms) 34 | group.v0 = traj.par.v0 35 | group.v = np.random.rand(100) * 10.0 * mV 36 | 37 | syn = Synapses(group, group, on_pre='v-=1*mV') 38 | syn.connect('i != j', p=0.2) 39 | 40 | spike_monitor = SpikeMonitor(group, variables=['v']) 41 | voltage_monitor = StateMonitor(group, 'v', record=True) 42 | pop_monitor = PopulationRateMonitor(group, name='pop' + str(traj.v_idx)) 43 | 44 | net = Network(group, syn, spike_monitor, voltage_monitor, pop_monitor) 45 | net.run(0.25*second, report='text') 46 | 47 | traj.f_add_result(Brian2MonitorResult, 'spikes', 48 | spike_monitor) 49 | traj.f_add_result(Brian2MonitorResult, 'v', 50 | voltage_monitor) 51 | traj.f_add_result(Brian2MonitorResult, 'pop', 52 | pop_monitor) 53 | 54 | 55 | @unittest.skipIf(brian2 is None, 'Can only be run with brian2!') 56 | class Brian2FullNetworkTest(TrajectoryComparator): 57 | 58 | tags = 'integration', 'brian2', 'parameter', 'network', 'hdf5', 'henri' 59 | 60 | def get_data(self, traj): 61 | traj.f_load(load_data=2) 62 | #plt.subplot(1,2,1) 63 | 64 | #plt.scatter(spikes.t, spikes.i) 65 | #plt.title('Spikes of first run') 66 | #plt.subplot(1,2,2) 67 | for name in traj.f_iter_runs(): 68 | spikes = traj.res.runs[name].spikes 69 | self.assertTrue(len(spikes.t) > 0) 70 | self.assertTrue(len(spikes.i) > 0) 71 | self.assertTrue(len(spikes.v) > 0) 72 | voltage = traj.res.crun.v.v[0, :] 73 | times = traj.res.crun.v.t 74 | self.assertTrue(len(voltage) > 0) 75 | self.assertTrue(len(times) > 0) 76 | pop = traj.res.runs[name].pop 77 | self.assertTrue(len(pop.rate) > 0) 78 | #plt.plot(times, voltage) 79 | #plt.title('Voltage trace of different runs') 80 | #plt.show() 81 | 82 | 83 | def test_net(self): 84 | env = Environment(trajectory='Test_'+repr(time.time()).replace('.','_'), 85 | filename=make_temp_dir(os.path.join( 86 | 'experiments', 87 | 'tests', 88 | 'briantests', 89 | 'HDF5', 90 | 'briantest.hdf5')), 91 | file_title='test', 92 | log_config=get_log_config(), 93 | dynamic_imports=['pypet.brian2.parameter.Brian2Parameter', 94 | Brian2MonitorResult], 95 | multiproc=False) 96 | traj = env.v_traj 97 | traj.f_add_parameter(Brian2Parameter, 'v0', 0.0*mV, 98 | comment='Input bias') 99 | traj.f_explore({'v0': [11*mV, 13*mV, 15*mV]}) 100 | env.f_run(run_network) 101 | self.get_data(traj) 102 | 103 | 104 | 105 | if __name__ == '__main__': 106 | opt_args = parse_args() 107 | run_suite(**opt_args) -------------------------------------------------------------------------------- /pypet/tests/integration/envSCOOPdebug.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | import os 4 | 5 | from pypet import LazyStorageService 6 | 7 | from pypet.tests.integration.environment_scoop_test import EnvironmentTest, pypetconstants, \ 8 | check_nice, unittest 9 | from pypet.tests.integration.environment_test import make_temp_dir, make_trajectory_name, \ 10 | random, Environment, get_log_config, Parameter, create_param_dict, add_params 11 | 12 | import pypet.tests.testutils.ioutils as tu 13 | tu.testParams['log_config'] = 'debug' 14 | tu.prepare_log_config() 15 | 16 | 17 | 18 | 19 | 20 | @unittest.skip 21 | class MultiprocSCOOPNetlockTest(EnvironmentTest): 22
| 23 | tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netlock', 'scoop' 24 | 25 | def compare_trajectories(self,traj1,traj2): 26 | return True 27 | 28 | def setUp(self): 29 | self.set_mode() 30 | self.logfolder = make_temp_dir(os.path.join('experiments', 31 | 'tests', 32 | 'Log')) 33 | 34 | random.seed() 35 | self.trajname = make_trajectory_name(self) 36 | self.filename = make_temp_dir(os.path.join('experiments', 37 | 'tests', 38 | 'HDF5', 39 | 'test%s.hdf5' % self.trajname)) 40 | 41 | env = Environment(trajectory=self.trajname, 42 | storage_service=LazyStorageService, 43 | filename=self.filename, 44 | file_title=self.trajname, 45 | log_stdout=self.log_stdout, 46 | log_config=get_log_config(), 47 | results_per_run=5, 48 | wildcard_functions=self.wildcard_functions, 49 | derived_parameters_per_run=5, 50 | multiproc=self.multiproc, 51 | ncores=self.ncores, 52 | wrap_mode=self.mode, 53 | use_pool=self.use_pool, 54 | gc_interval=self.gc_interval, 55 | freeze_input=self.freeze_input, 56 | fletcher32=self.fletcher32, 57 | complevel=self.complevel, 58 | complib=self.complib, 59 | shuffle=self.shuffle, 60 | pandas_append=self.pandas_append, 61 | pandas_format=self.pandas_format, 62 | encoding=self.encoding, 63 | niceness=self.niceness, 64 | use_scoop=self.use_scoop, 65 | port=self.url) 66 | 67 | traj = env.v_trajectory 68 | 69 | traj.v_standard_parameter=Parameter 70 | 71 | ## Create some parameters 72 | self.param_dict={} 73 | create_param_dict(self.param_dict) 74 | ### Add some parameter: 75 | add_params(traj,self.param_dict) 76 | 77 | #remember the trajectory and the environment 78 | self.traj = traj 79 | self.env = env 80 | 81 | def set_mode(self): 82 | super(MultiprocSCOOPNetlockTest, self).set_mode() 83 | self.mode = pypetconstants.WRAP_MODE_NETLOCK 84 | self.multiproc = True 85 | self.freeze_input = False 86 | self.ncores = 4 87 | self.gc_interval = 3 88 | self.niceness = check_nice(1) 89 | self.use_pool=False 90 | self.use_scoop=True 91 | self.url = None 92 | 93 | @unittest.skip('Does not work with scoop (fully), because scoop uses main frame.') 94 | def test_niceness(self): 95 | pass -------------------------------------------------------------------------------- /pypet/tests/integration/git_check.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import getopt 4 | 5 | try: 6 | import pypet 7 | except ImportError: 8 | # Check if pypet is installed otherwise append /pypet folder 9 | # this is important for travis-ci 10 | path = os.path.abspath('../../../') 11 | print('Adding pypet path:`%s`' % path) 12 | sys.path.append(path) 13 | 14 | 15 | from pypet import Environment 16 | from pypet import cartesian_product, GitDiffError 17 | 18 | 19 | def multiply(traj): 20 | z=traj.x*traj.y 21 | traj.f_add_result('z',z, comment='Im the product of two reals!') 22 | 23 | 24 | def get_opt(): 25 | opt_list, _ = getopt.getopt(sys.argv[1:],'fn') 26 | opt_dict = {} 27 | for opt, arg in opt_list: 28 | if opt == '-f': 29 | opt_dict['fail'] = True 30 | print('I will try to fail on diffs.') 31 | if opt == '-n': 32 | opt_dict['no_fail'] = True 33 | print('I will try to fail on diffs, but there should not be any.') 34 | 35 | return opt_dict 36 | 37 | 38 | def fail_on_diff(): 39 | try: 40 | Environment(trajectory='fail', 41 | filename=os.path.join('fail', 42 | 'HDF5',), 43 | file_title='failing', 44 | git_repository='.', git_message='Im a message!', 45 | git_fail=True) 46 | raise RuntimeError('You should not be here!') 47 | except 
GitDiffError as exc: 48 | print('I expected the GitDiffError: `%s`' % repr(exc)) 49 | 50 | 51 | def main(fail=False): 52 | try: 53 | sumatra_project = '.' 54 | 55 | if fail: 56 | print('There better be not any diffs.') 57 | 58 | # Create an environment that handles running 59 | with Environment(trajectory='Example1_Quick_And_Not_So_Dirty', 60 | filename=os.path.join('experiments', 61 | 'HDF5',), 62 | file_title='Example1_Quick_And_Not_So_Dirty', 63 | comment='The first example!', 64 | complib='blosc', 65 | small_overview_tables=False, 66 | git_repository='.', git_message='Im a message!', 67 | git_fail=fail, 68 | sumatra_project=sumatra_project, sumatra_reason='Testing!') as env: 69 | 70 | # Get the trajectory from the environment 71 | traj = env.v_trajectory 72 | 73 | # Add both parameters 74 | traj.f_add_parameter('x', 1, comment='Im the first dimension!') 75 | traj.f_add_parameter('y', 1, comment='Im the second dimension!') 76 | 77 | # Explore the parameters with a cartesian product: 78 | traj.f_explore(cartesian_product({'x':[1,2,3], 'y':[6,7,8]})) 79 | 80 | # Run the simulation 81 | env.f_run(multiply) 82 | 83 | # Check that git information was added to the trajectory 84 | assert 'config.git.hexsha' in traj 85 | assert 'config.git.committed_date' in traj 86 | assert 'config.git.message' in traj 87 | assert 'config.git.name_rev' in traj 88 | 89 | print("Python git test successful") 90 | 91 | # traj.f_expand({'x':[3,3],'y':[42,43]}) 92 | # 93 | # env.f_run(multiply) 94 | except Exception as exc: 95 | print(repr(exc)) 96 | sys.exit(1) 97 | 98 | 99 | if __name__ == '__main__': 100 | opt_dict = get_opt() 101 | test_fail = opt_dict.get('fail', False) 102 | if test_fail: 103 | fail_on_diff() 104 | test_no_fail = opt_dict.get('no_fail', False) 105 | main(test_no_fail) -------------------------------------------------------------------------------- /pypet/tests/integration/link_multiproc_test.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import pypet.pypetconstants as pypetconstants 4 | from pypet.tests.integration.link_test import LinkEnvironmentTest 5 | from pypet.tests.testutils.ioutils import parse_args, run_suite 6 | try: 7 | import psutil 8 | except ImportError: 9 | psutil = None 10 | 11 | 12 | class MultiprocLinkQueueTest(LinkEnvironmentTest): 13 | 14 | tags = 'integration', 'hdf5', 'environment', 'multiproc', 'queue', 'pool', 'links' 15 | 16 | def set_mode(self): 17 | LinkEnvironmentTest.set_mode(self) 18 | self.mode = pypetconstants.WRAP_MODE_QUEUE 19 | self.multiproc = True 20 | self.log_stdout = True 21 | if psutil is not None: 22 | self.ncores = 0 23 | else: 24 | self.ncores = 3 25 | self.use_pool=True 26 | 27 | 28 | class MultiprocLinkLocalTest(LinkEnvironmentTest): 29 | 30 | tags = 'integration', 'hdf5', 'environment', 'multiproc', 'local', 'pool', 'links' 31 | 32 | def set_mode(self): 33 | LinkEnvironmentTest.set_mode(self) 34 | self.mode = pypetconstants.WRAP_MODE_LOCAL 35 | self.multiproc = True 36 | self.log_stdout = True 37 | self.use_pool=True 38 | 39 | 40 | class MultiprocLinkLockTest(LinkEnvironmentTest): 41 | 42 | tags = 'integration', 'hdf5', 'environment', 'multiproc', 'lock', 'pool', 'links' 43 | 44 | # def test_run(self): 45 | # super(MultiprocLockTest, self).test_run() 46 | 47 | def set_mode(self): 48 | LinkEnvironmentTest.set_mode(self) 49 | self.mode = pypetconstants.WRAP_MODE_LOCK 50 | self.multiproc = True 51 | self.ncores = 4 52 | self.use_pool=True 53 | 54 | 55 | class 
MultiprocLinkNoPoolQueueTest(LinkEnvironmentTest): 56 | 57 | tags = 'integration', 'hdf5', 'environment', 'multiproc', 'queue', 'nopool', 'links' 58 | 59 | def set_mode(self): 60 | LinkEnvironmentTest.set_mode(self) 61 | self.mode = pypetconstants.WRAP_MODE_QUEUE 62 | self.multiproc = True 63 | self.ncores = 3 64 | self.use_pool=False 65 | 66 | 67 | class MultiprocLinkNoPoolLockTest(LinkEnvironmentTest): 68 | 69 | tags = 'integration', 'hdf5', 'environment', 'multiproc', 'lock', 'nopool', 'links' 70 | 71 | def set_mode(self): 72 | LinkEnvironmentTest.set_mode(self) 73 | self.mode = pypetconstants.WRAP_MODE_LOCK 74 | self.multiproc = True 75 | self.ncores = 2 76 | self.use_pool=False 77 | 78 | 79 | if __name__ == '__main__': 80 | opt_args = parse_args() 81 | run_suite(**opt_args) -------------------------------------------------------------------------------- /pypet/tests/integration/logging_multiproc_test.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | 4 | from pypet.tests.integration.logging_test import LoggingTest 5 | from pypet.tests.testutils.ioutils import run_suite, parse_args 6 | 7 | 8 | 9 | class MultiprocNoPoolQueueLoggingTest(LoggingTest): 10 | 11 | tags = 'integration', 'environment', 'logging', 'multiproc', 'nopool', 'queue' 12 | 13 | def set_mode(self): 14 | # import pypet.tests.testutils.ioutils as io 15 | # io.testParams['log_level'] = 40 16 | # io.testParams['remove'] = False 17 | super(MultiprocNoPoolQueueLoggingTest, self).set_mode() 18 | self.mode.multiproc = True 19 | self.mode.wrap_mode = 'QUEUE' 20 | self.mode.ncores = 3 21 | self.mode.use_pool = False 22 | 23 | 24 | class MultiprocPoolLockLoggingTest(LoggingTest): 25 | 26 | tags = 'integration', 'environment', 'logging', 'multiproc', 'pool', 'lock' 27 | 28 | def set_mode(self): 29 | # import pypet.tests.testutils.ioutils as io 30 | # io.testParams['log_level'] = 40 31 | # io.testParams['remove'] = False 32 | super(MultiprocPoolLockLoggingTest, self).set_mode() 33 | self.mode.multiproc = True 34 | self.mode.wrap_mode = 'LOCK' 35 | self.mode.ncores = 2 36 | self.mode.use_pool = True 37 | 38 | 39 | class MultiprocPoolQueueLoggingTest(LoggingTest): 40 | 41 | tags = 'integration', 'environment', 'logging', 'multiproc', 'pool', 'queue' 42 | 43 | def set_mode(self): 44 | # import pypet.tests.testutils.ioutils as io 45 | # io.testParams['log_level'] = 40 46 | # io.testParams['remove'] = False 47 | super(MultiprocPoolQueueLoggingTest, self).set_mode() 48 | self.mode.multiproc = True 49 | self.mode.wrap_mode = 'QUEUE' 50 | self.mode.ncores = 2 51 | self.mode.use_pool = True 52 | 53 | 54 | def test_logfile_old_way_disabling_mp_log(self): 55 | return super(MultiprocPoolQueueLoggingTest, self).test_logfile_old_way_disabling_mp_log() 56 | 57 | 58 | class MultiprocNoPoolLockLoggingTest(LoggingTest): 59 | 60 | tags = 'integration', 'environment', 'logging', 'multiproc', 'nopool', 'lock' 61 | 62 | def set_mode(self): 63 | # import pypet.tests.testutils.ioutils as io 64 | # io.testParams['log_level'] = 40 65 | # io.testParams['remove'] = False 66 | super(MultiprocNoPoolLockLoggingTest, self).set_mode() 67 | self.mode.multiproc = True 68 | self.mode.wrap_mode = 'LOCK' 69 | self.mode.ncores = 4 70 | self.mode.use_pool = False 71 | 72 | 73 | if __name__ == '__main__': 74 | opt_args = parse_args() 75 | run_suite(**opt_args) -------------------------------------------------------------------------------- /pypet/tests/profiling/__init__.py: 
-------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /pypet/tests/profiling/creating_run_table.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import logging 4 | import os 5 | 6 | import numpy as np 7 | import scipy.sparse as spsp 8 | from pycallgraph import PyCallGraph, Config, GlobbingFilter 9 | from pycallgraph.output import GraphvizOutput 10 | from pycallgraph.color import Color 11 | 12 | 13 | class CustomOutput(GraphvizOutput): 14 | def node_color(self, node): 15 | value = float(node.time.fraction) 16 | return Color.hsv(value / 2 + .5, value, 0.9) 17 | 18 | def edge_color(self, edge): 19 | value = float(edge.time.fraction) 20 | return Color.hsv(value / 2 + .5, value, 0.7) 21 | 22 | 23 | from pypet import Environment, Parameter, load_trajectory, cartesian_product, Trajectory 24 | 25 | from pypet.tests.testutils.ioutils import make_temp_dir 26 | from pypet.tests.testutils.data import create_param_dict, add_params, simple_calculations 27 | 28 | filename = None 29 | 30 | 31 | def to_test(traj, length): 32 | for irun in range(length): 33 | traj._add_run_info(irun) 34 | 35 | 36 | def test_load(): 37 | newtraj = load_trajectory(index=-1, filename=filename, load_data=1) 38 | 39 | 40 | if __name__ == '__main__': 41 | if not os.path.isdir('./tmp'): 42 | os.mkdir('tmp') 43 | graphviz = CustomOutput() 44 | graphviz.output_file = './tmp/traj_add_run_info.png' 45 | service_filter = GlobbingFilter(include=['*storageservice.*', '*ptcompat.*', 46 | '*naturalnaming.*', '*parameter.*', 47 | '*trajectory.*']) 48 | # service_filter = GlobbingFilter(include=['*naturalnaming.*', '*trajectory.*']) 49 | 50 | config = Config(groups=True, verbose=True) 51 | config.trace_filter = service_filter 52 | 53 | print('RUN PROFILE') 54 | with PyCallGraph(config=config, output=graphviz): 55 | to_test(Trajectory(), 10000) 56 | print('DONE RUN PROFILE') 57 | -------------------------------------------------------------------------------- /pypet/tests/profiling/profiling.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import logging 4 | import os 5 | 6 | import numpy as np 7 | import scipy.sparse as spsp 8 | from pycallgraph import PyCallGraph, Config, GlobbingFilter 9 | from pycallgraph.output import GraphvizOutput 10 | from pycallgraph.color import Color 11 | 12 | 13 | class CustomOutput(GraphvizOutput): 14 | def node_color(self, node): 15 | value = float(node.time.fraction) 16 | return Color.hsv(value / 2 + .5, value, 0.9) 17 | 18 | def edge_color(self, edge): 19 | value = float(edge.time.fraction) 20 | return Color.hsv(value / 2 + .5, value, 0.7) 21 | 22 | 23 | from pypet import Environment, Parameter, load_trajectory, cartesian_product 24 | 25 | from pypet.tests.testutils.ioutils import make_temp_dir 26 | from pypet.tests.testutils.data import create_param_dict, add_params, simple_calculations 27 | 28 | filename = None 29 | 30 | 31 | def explore(traj): 32 | explored ={'Normal.trial': [0], 33 | 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 34 | 'csr_mat' :[spsp.csr_matrix((2222,22)), spsp.csr_matrix((2222,22))]} 35 | 36 | explored['csr_mat'][0][1,2]=44.0 37 | explored['csr_mat'][1][2,2]=33 38 | 39 | traj.f_explore(cartesian_product(explored)) 40 | 41 | 42 | def test_run(): 43 | 44 | global filename 45 
| 46 | 47 | np.random.seed() 48 | trajname = 'profiling' 49 | filename = make_temp_dir(os.path.join('hdf5', 'test%s.hdf5' % trajname)) 50 | 51 | env = Environment(trajectory=trajname, filename=filename, 52 | file_title=trajname, 53 | log_stdout=False, 54 | results_per_run=5, 55 | derived_parameters_per_run=5, 56 | multiproc=False, 57 | ncores=1, 58 | wrap_mode='LOCK', 59 | use_pool=False, 60 | overwrite_file=True) 61 | 62 | traj = env.v_trajectory 63 | 64 | traj.v_standard_parameter = Parameter 65 | 66 | ## Create some parameters 67 | param_dict = {} 68 | create_param_dict(param_dict) 69 | ### Add some parameters: 70 | add_params(traj, param_dict) 71 | 72 | 73 | 74 | 75 | 76 | traj.f_add_parameter('TEST', 'test_run') 77 | ### Explore 78 | explore(traj) 79 | 80 | ### Make a test run 81 | simple_arg = -13 82 | simple_kwarg = 13.0 83 | env.f_run(simple_calculations, simple_arg, simple_kwarg=simple_kwarg) 84 | 85 | size = os.path.getsize(filename) 86 | size_in_mb = size/1000000. 87 | print('Size is %sMB' % str(size_in_mb)) 88 | 89 | 90 | def test_load(): 91 | newtraj = load_trajectory(index=-1, filename=filename, load_data=1) 92 | 93 | 94 | if __name__ == '__main__': 95 | if not os.path.isdir('./tmp'): 96 | os.mkdir('tmp') 97 | graphviz = CustomOutput() 98 | graphviz.output_file = './tmp/run_profile_traj_slots.png' 99 | # service_filter = GlobbingFilter(include=['*storageservice.*', '*ptcompat.*', 100 | # '*naturalnaming.*', '*parameter.*', 101 | # '*trajectory.*']) 102 | service_filter = GlobbingFilter(include=['*naturalnaming.*', '*trajectory.*']) 103 | 104 | config = Config(groups=True, verbose=True) 105 | config.trace_filter = service_filter 106 | 107 | print('RUN PROFILE') 108 | with PyCallGraph(config=config, output=graphviz): 109 | test_run() 110 | print('DONE RUN PROFILE') 111 | 112 | graphviz = CustomOutput() 113 | graphviz.output_file = './tmp/load_mode_1_profile_traj_slots.png' 114 | 115 | print('LOAD PROFILE') 116 | with PyCallGraph(config=config, output=graphviz): 117 | test_load() 118 | print('DONE LOAD PROFILE') -------------------------------------------------------------------------------- /pypet/tests/profiling/speed_analysis/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /pypet/tests/profiling/speed_analysis/avg_runtima_as_function_of_length.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | 4 | 5 | from pypet import Environment, Trajectory 6 | from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config 7 | import os 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import time 11 | 12 | def job(traj): 13 | traj.f_ares('$set.$', 42, comment='A result') 14 | 15 | 16 | 17 | def get_runtime(length): 18 | filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5') 19 | 20 | with Environment(filename = filename, 21 | log_levels=50, report_progress=(0.0002, 'progress', 50), 22 | overwrite_file=True, purge_duplicate_comments=False, 23 | log_stdout=False, 24 | multiproc=False, ncores=2, use_pool=True, 25 | wrap_mode='PIPE', #freeze_input=True, 26 | summary_tables=False, small_overview_tables=False) as env: 27 | 28 | traj = env.v_traj 29 | 30 | traj.par.f_apar('x', 0, 'parameter') 31 | 32 | traj.f_explore({'x': range(length)}) 33 | 34 | # traj.v_full_copy = False 35 | 36 | max_run =
1000 37 | 38 | for idx in range(len(traj)): 39 | if idx > max_run: 40 | traj.f_get_run_information(idx, copy=False)['completed'] = 1 41 | start = time.time() 42 | env.f_run(job) 43 | end = time.time() 44 | # dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))] 45 | total = end - start 46 | return total/float(min(len(traj), max_run)), total/float(min(len(traj), max_run)) * len(traj) 47 | 48 | def main(): 49 | #lengths = [1000000, 500000, 100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1] 50 | lengths = [100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1] 51 | runtimes = [get_runtime(x) for x in lengths] 52 | avg_runtimes = [x[0] for x in runtimes] 53 | summed_runtime = [x[1] for x in runtimes] 54 | 55 | plt.subplot(2, 1, 1) 56 | plt.semilogx(list(reversed(lengths)), list(reversed(avg_runtimes)), linewidth=2) 57 | plt.xlabel('Runs') 58 | plt.ylabel('t[s]') 59 | plt.title('Average Runtime per single run') 60 | plt.grid() 61 | plt.subplot(2, 1, 2) 62 | plt.loglog(lengths, summed_runtime, linewidth=2) 63 | plt.grid() 64 | plt.xlabel('Runs') 65 | plt.ylabel('t[s]') 66 | plt.title('Total runtime of experiment') 67 | plt.savefig('avg_runtime_as_func_of_length_1000_single_core') 68 | plt.show() 69 | 70 | 71 | 72 | 73 | 74 | 75 | if __name__ == '__main__': 76 | main() -------------------------------------------------------------------------------- /pypet/tests/profiling/speed_analysis/avg_runtima_improved_as_function_of_length.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | 4 | 5 | from pypet import Environment, Trajectory 6 | from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config 7 | import os 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import time 11 | 12 | SIZE = 100 13 | 14 | def job(traj): 15 | traj.f_ares('set_%d.$.result' % int(traj.v_idx / SIZE), 42, comment='A result') 16 | 17 | 18 | 19 | def get_runtime(length): 20 | filename = os.path.join('tmp', 'hdf5', 'many_runs_improved.hdf5') 21 | start = time.time() 22 | with Environment(filename = filename, 23 | log_levels=50, report_progress=(2, 'progress', 50), 24 | overwrite_file=True, purge_duplicate_comments=False, 25 | summary_tables=False, small_overview_tables=False) as env: 26 | 27 | traj = env.v_traj 28 | 29 | traj.par.x = 0, 'parameter' 30 | 31 | traj.f_explore({'x': range(length)}) 32 | 33 | max_run = 1000000000 34 | 35 | for idx in range(len(traj)): 36 | if idx > max_run: 37 | traj.f_get_run_information(idx, copy=False)['completed'] = 1 38 | 39 | env.f_run(job) 40 | end = time.time() 41 | dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))] 42 | total = end - start 43 | return total/float(len(traj)), total 44 | 45 | def main(): 46 | lengths = [5000, 1000, 500, 100, 50, 10, 5, 1] 47 | runtimes = [get_runtime(x) for x in lengths] 48 | avg_runtimes = [x[0] for x in runtimes] 49 | summed_runtime = [x[1] for x in runtimes] 50 | 51 | plt.subplot(2, 1, 1) 52 | plt.semilogx(lengths, avg_runtimes, linewidth=2) 53 | plt.xlabel('Runs') 54 | plt.ylabel('t[s]') 55 | plt.title('Average Runtime per single run') 56 | plt.grid() 57 | plt.subplot(2, 1, 2) 58 | plt.loglog(lengths, summed_runtime, linewidth=2) 59 | plt.grid() 60 | plt.xlabel('Runs') 61 | plt.ylabel('t[s]') 62 | plt.title('Total runtime of experiment') 63 | plt.show() 64 | 65 | 66 | 67 | 68 | 69 | if __name__ == '__main__': 70 | main() --------------------------------------------------------------------------------
/pypet/tests/profiling/speed_analysis/pytables_testing_append.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | from pypet.tests.testutils.ioutils import make_temp_dir 4 | import tables as pt 5 | import tables.parameters 6 | import time 7 | import os 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | 11 | def appendtime(table, length): 12 | 13 | runtimes = [] 14 | for irun in range(length): 15 | start = time.time() 16 | 17 | row = table.row 18 | row['test'] = 'testing' 19 | row.append() 20 | table.flush() 21 | 22 | end = time.time() 23 | 24 | runtime = end-start 25 | runtimes.append(runtime) 26 | return np.mean(runtimes) 27 | 28 | def make_table(hdf5_file, length): 29 | description = {'test' : pt.StringCol(42)} 30 | table = hdf5_file.create_table(where='/', name='t%d' % length, description=description) 31 | return table 32 | 33 | def table_runtime(filename, length): 34 | hdf5_file = pt.open_file(filename, mode='w') 35 | table = make_table(hdf5_file, length) 36 | appendtimes = appendtime(table, length) 37 | hdf5_file.close() 38 | return appendtimes 39 | 40 | def compute_runtime(): 41 | filename = os.path.join(make_temp_dir('tests'), 'iterrow.hdf5') 42 | dirs = os.path.dirname(filename) 43 | if not os.path.isdir(dirs): 44 | os.makedirs(dirs) 45 | if os.path.isfile(filename): 46 | os.remove(filename) 47 | 48 | 49 | lengths = [1, 10, 100, 1000, 10000, 100000, 1000000] 50 | times = [] 51 | for length in lengths: 52 | print('Testing %d' % length) 53 | times.append(table_runtime(filename, length)) 54 | print('Done') 55 | plt.semilogx(lengths, times) 56 | plt.show() 57 | 58 | 59 | def main(): 60 | compute_runtime() 61 | 62 | if __name__ == '__main__': 63 | main() 64 | -------------------------------------------------------------------------------- /pypet/tests/profiling/speed_analysis/pytables_testing_iterrow.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | from pypet.tests.testutils.ioutils import make_temp_dir 4 | import tables as pt 5 | import tables.parameters 6 | import time 7 | import os 8 | import matplotlib.pyplot as plt 9 | 10 | def iterrowtime(table): 11 | start = time.time() 12 | 13 | startit = max(int(len(table) / 2), 0) # start the iteration in the middle of the table 14 | 15 | row_iterator = table.iterrows(startit, startit+1) 16 | while True: 17 | try: 18 | next(row_iterator) 19 | except StopIteration: 20 | break 21 | 22 | end = time.time() 23 | 24 | runtime = end-start 25 | return runtime 26 | 27 | def make_table(hdf5_file, length): 28 | description = {'test' : pt.StringCol(42)} 29 | table = hdf5_file.create_table(where='/', name='t%d' % length, description=description) 30 | data = ['testing' for _ in range(length)] 31 | for string in data: 32 | row = table.row 33 | row['test'] = string 34 | row.append() 35 | table.flush() 36 | return table 37 | 38 | def table_runtime(filename, length): 39 | hdf5_file = pt.open_file(filename, mode='w') 40 | table = make_table(hdf5_file, length) 41 | itertime = iterrowtime(table) 42 | hdf5_file.close() 43 | return itertime 44 | 45 | def compute_runtime(): 46 | filename = os.path.join(make_temp_dir('tests'), 'iterrow.hdf5') 47 | dirs = os.path.dirname(filename) 48 | if not os.path.isdir(dirs): 49 | os.makedirs(dirs) 50 | if os.path.isfile(filename): 51 | os.remove(filename) 52 | 53 | 54 | lengths = [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000] 55 | times = [] 56 | for length in lengths: 57 | print('Testing %d' % length) 58 |
times.append(table_runtime(filename, length)) 59 | print('Done') 60 | plt.semilogx(lengths, times) 61 | plt.show() 62 | 63 | 64 | def main(): 65 | compute_runtime() 66 | 67 | if __name__ == '__main__': 68 | main() 69 | -------------------------------------------------------------------------------- /pypet/tests/profiling/speed_analysis/pytables_testing_many_children.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | from pypet.tests.testutils.ioutils import make_temp_dir 4 | import tables as pt 5 | import tables.parameters 6 | import os 7 | import time 8 | 9 | def create_children_dfs(hdf5_file, group_node, current_children): 10 | if len(current_children) == 0: 11 | return 1 12 | nchildren = current_children[0] 13 | child_count = 0 14 | for irun in range(nchildren): 15 | name = 'child%d' % irun 16 | hdf5_file.create_group(where=group_node, name=name) 17 | child = group_node._f_get_child(name) 18 | child_count += create_children_dfs(hdf5_file, child, current_children[1:]) 19 | return child_count 20 | 21 | def main(): 22 | start = time.time() 23 | filename = os.path.join(make_temp_dir('tmp'), 'children.hdf5') 24 | dirs = os.path.dirname(filename) 25 | if not os.path.isdir(dirs): 26 | os.makedirs(dirs) 27 | if os.path.isfile(filename): 28 | os.remove(filename) 29 | #children_structure=(250000,1,1) 30 | children_structure=(500,500,1) 31 | myfile = pt.open_file(filename, mode='w') 32 | cc = create_children_dfs(myfile, myfile.root, children_structure) 33 | end = time.time() 34 | runtime = end-start 35 | print('\nCreated %d children %s in %f seconds' % (cc, str(children_structure), runtime)) 36 | 37 | 38 | if __name__ == '__main__': 39 | main() -------------------------------------------------------------------------------- /pypet/tests/profiling/speed_analysis/storage_analysis/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /pypet/tests/profiling/speed_analysis/storage_analysis/avg_runtima_as_function_of_length_plot_times.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | 4 | 5 | from pypet import Environment, Trajectory 6 | from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config 7 | import os 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import time 11 | 12 | import numpy as np 13 | import scipy.sparse as spsp 14 | from pycallgraph import PyCallGraph, Config, GlobbingFilter 15 | from pycallgraph.output import GraphvizOutput 16 | from pycallgraph.color import Color 17 | 18 | class CustomOutput(GraphvizOutput): 19 | def node_color(self, node): 20 | value = float(node.time.fraction) 21 | return Color.hsv(value / 2 + .5, value, 0.9) 22 | 23 | def edge_color(self, edge): 24 | value = float(edge.time.fraction) 25 | return Color.hsv(value / 2 + .5, value, 0.7) 26 | 27 | def job(traj): 28 | traj.f_ares('$set.$', 42, comment='A result') 29 | 30 | 31 | 32 | def get_runtime(length): 33 | filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5') 34 | 35 | with Environment(filename = filename, 36 | log_levels=20, report_progress=(0.0000002, 'progress', 50), 37 | overwrite_file=True, purge_duplicate_comments=False, 38 | log_stdout=False, 39 | summary_tables=False, small_overview_tables=False) as env: 40 | 41 | traj = env.v_traj 42 | 43 | traj.par.f_apar('x', 0, 'parameter') 44 | 45 | 
traj.f_explore({'x': range(length)}) 46 | 47 | max_run = 100 48 | 49 | for idx in range(len(traj)): 50 | if idx > max_run: 51 | traj.f_get_run_information(idx, copy=False)['completed'] = 1 52 | traj.f_store() 53 | 54 | if not os.path.isdir('./tmp'): 55 | os.mkdir('tmp') 56 | graphviz = CustomOutput() 57 | graphviz.output_file = './tmp/run_profile_storage_%d.png' % len(traj) 58 | service_filter = GlobbingFilter(include=['*storageservice.*']) 59 | 60 | config = Config(groups=True, verbose=True) 61 | config.trace_filter = service_filter 62 | 63 | 64 | print('RUN PROFILE') 65 | with PyCallGraph(config=config, output=graphviz): 66 | # start = time.time() 67 | # env.f_run(job) 68 | # end = time.time() 69 | for irun in range(100): 70 | traj._make_single_run(irun + len(traj) // 2) # run index must be an integer 71 | # Measure start time 72 | traj._set_start() 73 | traj.f_ares('$set.$', 42, comment='A result') 74 | traj._set_finish() 75 | traj._store_final(store_data=2) 76 | traj._finalize_run() 77 | print('STARTING_to_PLOT') 78 | print('DONE RUN PROFILE') 79 | 80 | 81 | # dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))] 82 | # total = end - start 83 | # return total/float(min(len(traj), max_run)), total/float(min(len(traj), max_run)) * len(traj) 84 | 85 | def main(): 86 | lengths = [1000, 1000000] 87 | runtimes = [get_runtime(x) for x in lengths] 88 | # avg_runtimes = [x[0] for x in runtimes] 89 | # summed_runtime = [x[1] for x in runtimes] 90 | 91 | # plt.subplot(2, 1, 1) 92 | # plt.semilogx(list(reversed(lengths)), list(reversed(avg_runtimes)), linewidth=2) 93 | # plt.xlabel('Runs') 94 | # plt.ylabel('t[s]') 95 | # plt.title('Average Runtime per single run') 96 | # plt.grid() 97 | # plt.subplot(2, 1, 2) 98 | # plt.loglog(lengths, summed_runtime, linewidth=2) 99 | # plt.grid() 100 | # plt.xlabel('Runs') 101 | # plt.ylabel('t[s]') 102 | # plt.title('Total runtime of experiment') 103 | # plt.savefig('avg_runtime_as_func_of_length_100') 104 | # plt.show() 105 | 106 | 107 | 108 | 109 | 110 | 111 | if __name__ == '__main__': 112 | main() -------------------------------------------------------------------------------- /pypet/tests/scoop_run.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | try: 4 | import pypet 5 | except ImportError: 6 | import sys 7 | sys.path.append('/media/data/PYTHON_WORKSPACE/pypet-project') 8 | 9 | 10 | from pypet.tests.testutils.ioutils import discover_tests, parse_args, run_suite 11 | from pypet.tests.integration.environment_scoop_test import scoop_not_functional_check 12 | 13 | scoop_suite = discover_tests(lambda class_name, test_name, tags: 'scoop' in tags) 14 | 15 | 16 | if __name__ == '__main__': 17 | if scoop_not_functional_check(): 18 | raise RuntimeError('Not running in SCOOP mode!') 19 | opt_dict = parse_args() 20 | run_suite(suite=scoop_suite, **opt_dict) -------------------------------------------------------------------------------- /pypet/tests/test_all_nose.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import getopt 4 | import sys 5 | import os 6 | 7 | import unittest 8 | 9 | from pypet.tests.testutils.ioutils import discover_tests, TEST_IMPORT_ERROR 10 | 11 | class NoseTestDummy(unittest.TestCase): 12 | pass 13 | 14 | suite = discover_tests(predicate=lambda class_name, test_name, tags: 15 | class_name != TEST_IMPORT_ERROR) 16 | suite_dict = {} 17 | for case in suite: 18 | class_name = case.__class__.__name__ 19
| globals()[class_name] = case.__class__ 20 | 21 | -------------------------------------------------------------------------------- /pypet/tests/testdata/pypet_v0_1b_6.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/pypet/tests/testdata/pypet_v0_1b_6.hdf5 -------------------------------------------------------------------------------- /pypet/tests/testutils/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /pypet/tests/unittests/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | -------------------------------------------------------------------------------- /pypet/tests/unittests/brian2tests/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Henri Bunting' 2 | -------------------------------------------------------------------------------- /pypet/tests/unittests/brian2tests/module_test.py: -------------------------------------------------------------------------------- 1 | __author__ = 'robert' 2 | 3 | 4 | import sys 5 | import unittest 6 | 7 | try: 8 | import brian2 9 | import pypet.brian2 10 | from pypet.brian2 import * 11 | except ImportError as exc: 12 | #print('Import Error: %s' % str(exc)) 13 | brian2 = None 14 | 15 | from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite 16 | 17 | import inspect 18 | 19 | 20 | @unittest.skipIf(brian2 is None, 'Can only be run with brian2!') 21 | class TestAllBrian2Import(unittest.TestCase): 22 | 23 | tags = 'unittest', 'brian2', 'import' 24 | 25 | def test_import_star(self): 26 | for class_name in pypet.brian2.__all__: 27 | logstr = 'Evaluating %s: %s' % (class_name, repr(eval(class_name))) 28 | get_root_logger().info(logstr) 29 | 30 | def test_if_all_is_complete(self): 31 | for item in pypet.brian2.__dict__.values(): 32 | if inspect.isclass(item) or inspect.isfunction(item): 33 | self.assertTrue(item.__name__ in pypet.brian2.__all__) 34 | 35 | if __name__ == '__main__': 36 | opt_args = parse_args() 37 | run_suite(**opt_args) -------------------------------------------------------------------------------- /pypet/tests/unittests/brian2tests/run_a_brian2_network.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Henri Bunting' 2 | 3 | 4 | try: 5 | import brian2 6 | from brian2 import pF, mV, defaultclock, ms, NeuronGroup, linspace, SpikeMonitor, \ 7 | PopulationRateMonitor, StateMonitor, run, msecond, nS, nA 8 | except ImportError: 9 | brian2 = None 10 | 11 | 12 | def run_network(): 13 | 14 | monitor_dict={} 15 | defaultclock.dt= 0.01*ms 16 | 17 | C=281*pF 18 | gL=30*nS 19 | EL=-70.6*mV 20 | VT=-50.4*mV 21 | DeltaT=2*mV 22 | tauw=40*ms 23 | a=4*nS 24 | b=0.08*nA 25 | I=8*nA 26 | Vcut="vm>2*mV" # practical threshold condition 27 | N=10 28 | 29 | reset = 'vm=Vr;w+=b' 30 | 31 | eqs=""" 32 | dvm/dt=(gL*(EL-vm)+gL*DeltaT*exp((vm-VT)/DeltaT)+I-w)/C : volt 33 | dw/dt=(a*(vm-EL)-w)/tauw : amp 34 | Vr:volt 35 | """ 36 | 37 | neuron=NeuronGroup(N,model=eqs,threshold=Vcut,reset=reset) 38 | neuron.vm=EL 39 | neuron.w=a*(neuron.vm-EL) 40 | neuron.Vr=linspace(-48.3*mV,-47.7*mV,N) # bifurcation parameter 41 | 42 | #run(25*msecond,report='text') # we discard the first
spikes 43 | 44 | MSpike=SpikeMonitor(neuron, variables=['vm']) # record vm at spike times 45 | MPopRate = PopulationRateMonitor(neuron) 46 | 47 | MMultiState = StateMonitor(neuron, ['w','vm'], record=[6,7,8,9]) 48 | 49 | 50 | run(10*msecond,report='text') 51 | 52 | 53 | monitor_dict['SpikeMonitor']=MSpike 54 | monitor_dict['MultiState']=MMultiState 55 | monitor_dict['PopulationRateMonitor']=MPopRate 56 | 57 | return monitor_dict 58 | 59 | 60 | 61 | if __name__ == '__main__': 62 | if brian2 is not None: 63 | run_network() -------------------------------------------------------------------------------- /pypet/tests/unittests/configparse_test.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os 4 | 5 | from pypet.tests.testutils.ioutils import run_suite, make_temp_dir,\ 6 | parse_args, handle_config_file 7 | from pypet.tests.testutils.data import TrajectoryComparator 8 | from pypet import Environment 9 | import pypet 10 | 11 | 12 | class ConfigParseTest(TrajectoryComparator): 13 | 14 | tags = 'configparser', 'unittest' 15 | 16 | def setUp(self): 17 | 18 | pypet_path = os.path.abspath(os.path.dirname(pypet.environment.__file__)) 19 | init_path = os.path.join(pypet_path, 'logging') 20 | self.config_file = os.path.join(init_path, 'env_config_test.ini') 21 | self.parser = handle_config_file(self.config_file) 22 | 23 | 24 | def test_parsing(self): 25 | 26 | filename = make_temp_dir('config_test.hdf5') 27 | env = Environment(filename=filename, config=self.parser) 28 | 29 | traj = env.v_traj 30 | self.assertTrue(traj.v_auto_load) 31 | self.assertEqual(traj.v_storage_service.filename, filename) 32 | 33 | self.assertEqual(traj.x, 42) 34 | self.assertEqual(traj.f_get('y').v_comment, 'This is the second variable') 35 | self.assertTrue(traj.testconfig) 36 | 37 | self.assertTrue(env._logging_manager.log_config is not None) 38 | self.assertTrue(env._logging_manager._sp_config is not None) 39 | 40 | env.f_disable_logging() 41 | 42 | 43 | if __name__ == '__main__': 44 | opt_args = parse_args() 45 | run_suite(**opt_args) -------------------------------------------------------------------------------- /pypet/tests/unittests/module_test.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | 4 | import sys 5 | import unittest 6 | 7 | import pypet 8 | from pypet import * 9 | del test # To not run all tests if this file is executed with nosetests 10 | from pypet.tests.testutils.ioutils import get_root_logger, run_suite, parse_args 11 | 12 | import logging 13 | import inspect 14 | 15 | 16 | class TestAllImport(unittest.TestCase): 17 | 18 | tags = 'unittest', 'import' 19 | 20 | def test_import_star(self): 21 | for class_name in pypet.__all__: 22 | if class_name == 'test': 23 | continue 24 | logstr = 'Evaluating %s: %s' % (class_name, repr(eval(class_name))) 25 | get_root_logger().info(logstr) 26 | 27 | def test_if_all_is_complete(self): 28 | for item in pypet.__dict__.values(): 29 | if inspect.isclass(item) or inspect.isfunction(item): 30 | self.assertTrue(item.__name__ in pypet.__all__) 31 | 32 | 33 | class TestRunningTests(unittest.TestCase): 34 | 35 | tags = 'unittest', 'test', 'meta' 36 | 37 | def test_run_one_test(self): 38 | predicate = lambda class_name, test_name, tags:(test_name == 'test_import_star' and 39 | class_name == 'TestAllImport') 40 | pypet.test(predicate=predicate) 41 | 42 | 43 | if __name__ == '__main__': 44 | opt_args = parse_args() 45 |
run_suite(**opt_args) -------------------------------------------------------------------------------- /pypet/tests/unittests/pypetlogging_test.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import sys 4 | import unittest 5 | 6 | import pickle 7 | 8 | from pypet.pypetlogging import LoggingManager 9 | from pypet.tests.testutils.ioutils import get_log_config, run_suite, parse_args 10 | from pypet.utils.comparisons import nested_equal 11 | 12 | class FakeTraj(object): 13 | def __init__(self): 14 | self.v_environment_name = 'env' 15 | self.v_name = 'traj' 16 | 17 | def f_wildcard(self, card): 18 | return 'Ladida' 19 | 20 | 21 | class LoggingManagerTest(unittest.TestCase): 22 | 23 | tags = 'logging', 'unittest', 'pickle' 24 | 25 | def test_pickling(self): 26 | manager = LoggingManager(log_config=get_log_config(), log_stdout=True) 27 | manager.extract_replacements(FakeTraj()) 28 | manager.check_log_config() 29 | manager.make_logging_handlers_and_tools() 30 | dump = pickle.dumps(manager) 31 | new_manager = pickle.loads(dump) 32 | manager.finalize() 33 | 34 | 35 | if __name__ == '__main__': 36 | opt_args = parse_args() 37 | run_suite(**opt_args) 38 | -------------------------------------------------------------------------------- /pypet/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmokinCaterpillar/pypet/3d454ac65f89e7833baaf89510f73c546e90d8f6/pypet/utils/__init__.py -------------------------------------------------------------------------------- /pypet/utils/configparsing.py: -------------------------------------------------------------------------------- 1 | """Helper module that allows parsing of `.ini` files""" 2 | 3 | __author__ = 'Robert Meyer' 4 | 5 | import functools 6 | import ast 7 | import os 8 | import configparser as cp 9 | 10 | from pypet.pypetlogging import use_simple_logging 11 | 12 | 13 | def parse_config(init_func): 14 | """Decorator wrapping the environment to use a config file""" 15 | @functools.wraps(init_func) 16 | def new_func(env, *args, **kwargs): 17 | config_interpreter = ConfigInterpreter(kwargs) 18 | # Pass the config data to the kwargs 19 | new_kwargs = config_interpreter.interpret() 20 | init_func(env, *args, **new_kwargs) 21 | # Add parameters and config data from the `.ini` file 22 | config_interpreter.add_parameters(env.traj) 23 | return new_func 24 | 25 | 26 | class ConfigInterpreter(object): 27 | """ Helper class that parses an `.ini` file and passes the data to an environment.""" 28 | def __init__(self, kwargs): 29 | self.kwargs = kwargs 30 | self.config_file = kwargs.pop('config', None) 31 | self.parser = None 32 | if self.config_file: 33 | if isinstance(self.config_file, str): 34 | # Check if the config file exists 35 | if not os.path.isfile(self.config_file): 36 | raise ValueError('`%s` does not exist.' % self.config_file) 37 | # If yes, parse it with a config parser 38 | self.parser = cp.ConfigParser() 39 | self.parser.read(self.config_file) 40 | elif isinstance(self.config_file, cp.RawConfigParser): 41 | # Already instantiated parsers are also accepted 42 | self.parser = self.config_file 43 | else: 44 | raise RuntimeError('Your config file/parser format `%s` ' 45 | 'is not understood.'
% str(self.config_file)) 46 | 47 | def _collect_section(self, section): 48 | """Collects all settings within a section""" 49 | kwargs = {} 50 | try: 51 | if self.parser.has_section(section): 52 | options = self.parser.options(section) 53 | for option in options: 54 | str_val = self.parser.get(section, option) 55 | val = ast.literal_eval(str_val) 56 | kwargs[option] = val 57 | return kwargs 58 | except: 59 | raise # You can set a break point here for debugging! 60 | 61 | def _collect_config(self): 62 | """Collects all info from three sections""" 63 | kwargs = {} 64 | sections = ('storage_service', 'trajectory', 'environment') 65 | for section in sections: 66 | kwargs.update(self._collect_section(section)) 67 | return kwargs 68 | 69 | def interpret(self): 70 | """Copies parsed arguments into the kwargs passed to the environment""" 71 | if self.config_file: 72 | new_kwargs = self._collect_config() 73 | for key in new_kwargs: 74 | # Already specified kwargs take precedence over the ini file 75 | if key not in self.kwargs: 76 | self.kwargs[key] = new_kwargs[key] 77 | if not use_simple_logging(self.kwargs) and 'log_config' not in self.kwargs: 78 | self.kwargs['log_config'] = self.config_file 79 | return self.kwargs 80 | 81 | def add_parameters(self, traj): 82 | """Adds parameters and config from the `.ini` file to the trajectory""" 83 | if self.config_file: 84 | parameters = self._collect_section('parameters') 85 | for name in parameters: 86 | value = parameters[name] 87 | if not isinstance(value, tuple): 88 | value = (value,) 89 | traj.f_add_parameter(name, *value) 90 | config = self._collect_section('config') 91 | for name in config: 92 | value = config[name] 93 | if not isinstance(value, tuple): 94 | value = (value,) 95 | traj.f_add_config(name, *value) 96 | -------------------------------------------------------------------------------- /pypet/utils/dynamicimports.py: -------------------------------------------------------------------------------- 1 | """Module that handles dynamic imports of classes. 2 | 3 | This is done in an independent module to avoid cluttered name spaces. 4 | 5 | """ 6 | 7 | __author__ = 'Robert Meyer' 8 | 9 | import importlib 10 | import inspect 11 | 12 | from pypet.naturalnaming import ResultGroup, ParameterGroup, \ 13 | DerivedParameterGroup, ConfigGroup, NNGroupNode, NNLeafNode 14 | from pypet.parameter import BaseParameter, BaseResult, Parameter, Result, ArrayParameter, \ 15 | PickleResult, SparseParameter, SparseResult 16 | from pypet.shareddata import SharedResult 17 | 18 | 19 | def load_class(full_class_string): 20 | """Loads a class from a string naming the module and class name. 21 | 22 | For example: 23 | >>> load_class(full_class_string='pypet.brian2.parameter.Brian2Parameter') 24 | 25 | 26 | """ 27 | 28 | class_data = full_class_string.split(".") 29 | module_path = ".".join(class_data[:-1]) 30 | class_str = class_data[-1] 31 | module = importlib.import_module(module_path) 32 | 33 | # We retrieve the Class from the module 34 | return getattr(module, class_str) 35 | 36 | 37 | def create_class(class_name, dynamic_imports): 38 | """Dynamically creates a class. 39 | 40 | First it is checked whether the class can be created from the already given imports. 41 | If not, the list of dynamically loaded classes is used.
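For example, assuming a custom parameter class is provided as an import string (the names here are purely illustrative): >>> create_class('MyCustomParameter', ['mypackage.mymodule.MyCustomParameter'])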
42 | 43 | """ 44 | try: 45 | new_class = globals()[class_name] 46 | 47 | if not inspect.isclass(new_class): 48 | raise TypeError('Not a class!') 49 | 50 | return new_class 51 | except (KeyError, TypeError): 52 | for dynamic_class in dynamic_imports: 53 | # Dynamic classes can be provided directly as a Class instance, 54 | # for example as `MyCustomParameter`, 55 | # or as a string describing where to import the class from, 56 | # for instance as `'mypackage.mymodule.MyCustomParameter'`. 57 | if inspect.isclass(dynamic_class): 58 | if class_name == dynamic_class.__name__: 59 | return dynamic_class 60 | else: 61 | # The class name is always the last in an import string, 62 | # e.g. `'mypackage.mymodule.MyCustomParameter'` 63 | class_name_to_test = dynamic_class.split('.')[-1] 64 | if class_name == class_name_to_test: 65 | new_class = load_class(dynamic_class) 66 | return new_class 67 | raise ImportError('Could not create the class named `%s`.' % class_name) 68 | 69 | -------------------------------------------------------------------------------- /pypet/utils/explore.py: -------------------------------------------------------------------------------- 1 | """Module containing factory functions for parameter exploration""" 2 | 3 | import logging 4 | import sys 5 | import itertools as itools 6 | from collections import OrderedDict 7 | 8 | 9 | def cartesian_product(parameter_dict, combined_parameters=()): 10 | """ Generates a Cartesian product of the input parameter dictionary. 11 | 12 | For example: 13 | 14 | >>> print cartesian_product({'param1':[1,2,3], 'param2':[42.0, 52.5]}) 15 | {'param1':[1,1,2,2,3,3],'param2': [42.0,52.5,42.0,52.5,42.0,52.5]} 16 | 17 | :param parameter_dict: 18 | 19 | Dictionary containing parameter names as keys and iterables of data to explore. 20 | 21 | :param combined_parameters: 22 | 23 | Tuple of tuples. Defines the order of the parameters and parameters that are 24 | linked together. 25 | If an inner tuple contains only a single item, you can spare the 26 | inner tuple brackets. 27 | 28 | 29 | For example: 30 | 31 | >>> print cartesian_product( {'param1': [42.0, 52.5], 'param2':['a', 'b'], 'param3' : [1,2,3]}, ('param3',('param1', 'param2'))) 32 | {param3':[1,1,2,2,3,3],'param1' : [42.0,52.5,42.0,52.5,42.0,52.5], 'param2':['a','b','a','b','a','b']} 33 | 34 | :returns: Dictionary with cartesian product lists. 35 | 36 | """ 37 | if not combined_parameters: 38 | combined_parameters = list(parameter_dict) 39 | else: 40 | combined_parameters = list(combined_parameters) 41 | 42 | for idx, item in enumerate(combined_parameters): 43 | if isinstance(item, str): 44 | combined_parameters[idx] = (item,) 45 | 46 | iterator_list = [] 47 | for item_tuple in combined_parameters: 48 | inner_iterator_list = [parameter_dict[key] for key in item_tuple] 49 | zipped_iterator = zip(*inner_iterator_list) 50 | iterator_list.append(zipped_iterator) 51 | 52 | result_dict = {} 53 | for key in parameter_dict: 54 | result_dict[key] = [] 55 | 56 | cartesian_iterator = itools.product(*iterator_list) 57 | 58 | for cartesian_tuple in cartesian_iterator: 59 | for idx, item_tuple in enumerate(combined_parameters): 60 | for inneridx, key in enumerate(item_tuple): 61 | result_dict[key].append(cartesian_tuple[idx][inneridx]) 62 | 63 | return result_dict 64 | 65 | 66 | def find_unique_points(explored_parameters): 67 | """Takes a list of explored parameters and finds unique parameter combinations. 68 | 69 | If parameter ranges are hashable operates in O(N), otherwise O(N**2). 
70 | 71 | :param explored_parameters: 72 | 73 | List of **explored** parameters 74 | 75 | :return: 76 | 77 | List of tuples, first entry being the parameter values, second entry a list 78 | containing the run position of the unique combination. 79 | 80 | """ 81 | ranges = [param.f_get_range(copy=False) for param in explored_parameters] 82 | zipped_tuples = list(zip(*ranges)) 83 | try: 84 | unique_elements = OrderedDict() 85 | for idx, val_tuple in enumerate(zipped_tuples): 86 | if val_tuple not in unique_elements: 87 | unique_elements[val_tuple] = [] 88 | unique_elements[val_tuple].append(idx) 89 | return list(unique_elements.items()) 90 | except TypeError: 91 | logger = logging.getLogger('pypet.find_unique') 92 | logger.error('Your parameter entries could not be hashed, ' 93 | 'now I am sorting slowly in O(N**2).') 94 | unique_elements = [] 95 | for idx, val_tuple in enumerate(zipped_tuples): 96 | matches = False 97 | for added_tuple, pos_list in unique_elements: 98 | matches = True 99 | for idx2, val in enumerate(added_tuple): 100 | if not explored_parameters[idx2]._equal_values(val_tuple[idx2], val): 101 | matches = False 102 | break 103 | if matches: 104 | pos_list.append(idx) 105 | break 106 | if not matches: 107 | unique_elements.append((val_tuple, [idx])) 108 | return unique_elements 109 | 110 | 111 | -------------------------------------------------------------------------------- /pypet/utils/gitintegration.py: -------------------------------------------------------------------------------- 1 | """ Module providing the functionality to allow automatic git commits. 2 | 3 | * :func:`~pypet.gitintegration.make_git_commit` performs the git commit 4 | 5 | * :func:`~pypet.gitintegration.add_commit_variables` adds some information about the commit to a 6 | :class:`~pypet.trajectory.Trajectory`. 7 | 8 | 9 | """ 10 | 11 | __author__ = 'Robert Meyer' 12 | 13 | import time 14 | 15 | try: 16 | import git 17 | except ImportError: 18 | git = None 19 | 20 | import pypet.pypetexceptions as pex 21 | 22 | 23 | def add_commit_variables(traj, commit): 24 | """Adds commit information to the trajectory.""" 25 | 26 | git_time_value = time.strftime('%Y_%m_%d_%Hh%Mm%Ss', time.localtime(commit.committed_date)) 27 | 28 | git_short_name = str(commit.hexsha[0:7]) 29 | git_commit_name = 'commit_%s_' % git_short_name 30 | git_commit_name = 'git.' + git_commit_name + git_time_value 31 | 32 | if not traj.f_contains('config.'+git_commit_name, shortcuts=False): 33 | 34 | git_commit_name += '.' 
35 | # Add the hexsha 36 | traj.f_add_config(git_commit_name+'hexsha', commit.hexsha, 37 | comment='SHA-1 hash of commit') 38 | 39 | # Add the description string 40 | traj.f_add_config(git_commit_name+'name_rev', commit.name_rev, 41 | comment='String describing the commit hex sha based on ' 42 | 'the closest Reference') 43 | 44 | # Add unix epoch 45 | traj.f_add_config(git_commit_name+'committed_date', 46 | commit.committed_date, comment='Date of commit as unix epoch seconds') 47 | 48 | # Add commit message 49 | traj.f_add_config(git_commit_name+'message', str(commit.message), 50 | comment='The commit message') 51 | 52 | # # Add commit author 53 | # traj.f_add_config(git_commit_name+'committer', str(commit.committer.name), 54 | # comment='The committer of the commit') 55 | # 56 | # # Add author's email 57 | # traj.f_add_config(git_commit_name+'committer_email', str(commit.committer.email), 58 | # comment='Email of committer') 59 | 60 | 61 | def make_git_commit(environment, git_repository, user_message, git_fail): 62 | """ Makes a commit and returns whether a new commit was triggered and the SHA-1 hash of the commit. 63 | 64 | If `git_fail` is `True`, the program fails instead of triggering a new commit given 65 | uncommitted changes. In that case a `GitDiffError` is raised. 66 | 67 | """ 68 | 69 | # Import GitPython, we do it here to allow also users not having GitPython installed 70 | # to use the normal environment 71 | 72 | # Open the repository 73 | repo = git.Repo(git_repository) 74 | index = repo.index 75 | 76 | traj = environment.v_trajectory 77 | 78 | # Create the commit message and append the trajectory name and comment 79 | if traj.v_comment: 80 | commentstr = ', Comment: `%s`' % traj.v_comment 81 | else: 82 | commentstr = '' 83 | 84 | if user_message: 85 | user_message += ' -- ' 86 | 87 | message = '%sTrajectory: `%s`, Time: `%s`, %s' % \ 88 | (user_message, traj.v_name, traj.v_time, commentstr) 89 | 90 | # Detect changes: 91 | diff = index.diff(None) 92 | 93 | if diff: 94 | if git_fail: 95 | # User requested fail instead of a new commit 96 | raise pex.GitDiffError('Found uncommitted changes!') 97 | # Make the commit 98 | repo.git.add('-u') 99 | commit = index.commit(message) 100 | new_commit = True 101 | 102 | else: 103 | # Take old commit 104 | commit = repo.commit(None) 105 | new_commit = False 106 | 107 | # Add the commit info to the trajectory 108 | add_commit_variables(traj, commit) 109 | 110 | return new_commit, commit.hexsha 111 | -------------------------------------------------------------------------------- /pypet/utils/hdf5compression.py: -------------------------------------------------------------------------------- 1 | """Module to allow hdf5 compression via ptrepack directly within python scripts""" 2 | 3 | __author__ = 'Robert Meyer' 4 | 5 | import os 6 | import subprocess 7 | 8 | from pypet.trajectory import load_trajectory 9 | from pypet import pypetconstants 10 | 11 | 12 | def compact_hdf5_file(filename, name=None, index=None, keep_backup=True): 13 | """Can compress an HDF5 file to reduce file size. 14 | 15 | The properties on how to compress the new file are taken from a given 16 | trajectory in the file. 17 | Simply calls ``ptrepack`` from the command line. 18 | (See also https://pytables.github.io/usersguide/utilities.html#ptrepackdescr) 19 | 20 | Currently only supported under Linux, no guarantee for Windows usage.
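For example, to compact the last trajectory in a file (``'experiment.hdf5'`` is just an illustrative file name): >>> compact_hdf5_file('experiment.hdf5', index=-1, keep_backup=True)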
21 | 22 | :param filename: 23 | 24 | Name of the file to compact 25 | 26 | :param name: 27 | 28 | The name of the trajectory from which the compression properties are taken 29 | 30 | :param index: 31 | 32 | Instead of a name you could also specify an index, i.e -1 for the last trajectory 33 | in the file. 34 | 35 | :param keep_backup: 36 | 37 | If a back up version of the original file should be kept. 38 | The backup file is named as the original but `_backup` is appended to the end. 39 | 40 | :return: 41 | 42 | The return/error code of ptrepack 43 | 44 | """ 45 | if name is None and index is None: 46 | index = -1 47 | 48 | tmp_traj = load_trajectory(name, index, as_new=False, load_all=pypetconstants.LOAD_NOTHING, 49 | force=True, filename=filename) 50 | service = tmp_traj.v_storage_service 51 | complevel = service.complevel 52 | complib = service.complib 53 | shuffle = service.shuffle 54 | fletcher32 = service.fletcher32 55 | 56 | name_wo_ext, ext = os.path.splitext(filename) 57 | tmp_filename = name_wo_ext + '_tmp' + ext 58 | 59 | abs_filename = os.path.abspath(filename) 60 | abs_tmp_filename = os.path.abspath(tmp_filename) 61 | 62 | command = ['ptrepack', '-v', 63 | '--complib', complib, 64 | '--complevel', str(complevel), 65 | '--shuffle', str(int(shuffle)), 66 | '--fletcher32', str(int(fletcher32)), 67 | abs_filename, abs_tmp_filename] 68 | str_command = ' '.join(command) 69 | print('Executing command `%s`' % str_command) 70 | 71 | retcode = subprocess.call(command) 72 | if retcode != 0: 73 | print('#### ERROR: Compacting `%s` failed with errorcode %s! ####' % 74 | (filename, str(retcode))) 75 | else: 76 | print('#### Compacting successful ####') 77 | print('Renaming files') 78 | if keep_backup: 79 | backup_file_name = name_wo_ext + '_backup' + ext 80 | os.rename(filename, backup_file_name) 81 | else: 82 | os.remove(filename) 83 | os.rename(tmp_filename, filename) 84 | print('### Compacting and Renaming finished ####') 85 | 86 | return retcode -------------------------------------------------------------------------------- /pypet/utils/pypettest.py: -------------------------------------------------------------------------------- 1 | """Short module to allow the running of tests via ``pypet.test()``""" 2 | 3 | __author__ = 'Robert Meyer' 4 | 5 | 6 | from pypet.tests.testutils.ioutils import TEST_IMPORT_ERROR, discover_tests, run_suite 7 | 8 | 9 | def test(folder=None, remove=True, predicate=None): 10 | """Runs all pypet tests 11 | 12 | :param folder: 13 | 14 | Temporary folder to put data in, leave `None` for 15 | automatic choice. 16 | 17 | :param remove: 18 | 19 | If temporary data should be removed after the tests. 20 | 21 | :param predicate: 22 | 23 | Predicate to specify subset of tests. Must take three arguments 24 | ``class_name``, ``test_name``, ``tags`` and evaluate to `True` if 25 | a test should be run. Leave `None` for all tests. 
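For example, a sketch that runs only the tests tagged as unit tests: >>> test(predicate=lambda class_name, test_name, tags: 'unittest' in tags)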
26 | 27 | 28 | """ 29 | if predicate is None: 30 | predicate = lambda class_name, test_name, tags: class_name != TEST_IMPORT_ERROR 31 | suite = discover_tests(predicate=predicate) 32 | run_suite(suite=suite, remove=remove, folder=folder) -------------------------------------------------------------------------------- /pypet/utils/siginthandling.py: -------------------------------------------------------------------------------- 1 | """Module that handles KeyboardInterrupt to exit gracefully""" 2 | 3 | __author__ = 'Robert Meyer' 4 | 5 | import signal 6 | import sys 7 | 8 | 9 | class _SigintHandler(object): 10 | 11 | SIGINT = '__SIGINT__' 12 | 13 | def __init__(self): 14 | self.original_handler = signal.getsignal(signal.SIGINT) 15 | self.hit = False # variable to signal if SIGINT has been encountered before 16 | self.started = False 17 | 18 | def start(self): 19 | if not self.started: 20 | signal.signal(signal.SIGINT, self._handle_sigint) 21 | self.started = True 22 | 23 | def finalize(self): 24 | self.hit = False 25 | self.started = False 26 | signal.signal(signal.SIGINT, self.original_handler) 27 | 28 | def _handle_sigint(self, signum, frame): 29 | """Handler of SIGINT 30 | 31 | Does nothing if SIGINT is encountered once but raises a KeyboardInterrupt in case it 32 | is encountered twice. 33 | immediatly. 34 | 35 | """ 36 | if self.hit: 37 | prompt = 'Exiting immediately!' 38 | raise KeyboardInterrupt(prompt) 39 | else: 40 | self.hit = True 41 | prompt = ('\nYou killed the process(es) via `SIGINT` (`CTRL+C`). ' 42 | 'I am trying to exit ' 43 | 'gracefully. Using `SIGINT` (`CTRL+C`) ' 44 | 'again will cause an immediate exit.\n') 45 | sys.stderr.write(prompt) 46 | 47 | sigint_handling = _SigintHandler() 48 | 49 | 50 | -------------------------------------------------------------------------------- /pypet/utils/storagefactory.py: -------------------------------------------------------------------------------- 1 | """ Module to create storage service from given settings. 2 | 3 | Currently only the HDF5StorageService is supported. 4 | But to be extended in the future. 
5 | 6 | """ 7 | 8 | __author__ = 'Robert Meyer' 9 | 10 | import inspect 11 | import os 12 | 13 | from pypet.utils.helpful_functions import get_matching_kwargs 14 | from pypet.storageservice import HDF5StorageService 15 | from pypet.utils.dynamicimports import create_class 16 | 17 | 18 | def _create_storage(storage_service, trajectory=None, **kwargs): 19 | """Creates a service from a constructor and checks which kwargs are not used""" 20 | kwargs_copy = kwargs.copy() 21 | kwargs_copy['trajectory'] = trajectory 22 | matching_kwargs = get_matching_kwargs(storage_service, kwargs_copy) 23 | storage_service = storage_service(**matching_kwargs) 24 | unused_kwargs = set(kwargs.keys()) - set(matching_kwargs.keys()) 25 | return storage_service, unused_kwargs 26 | 27 | 28 | def storage_factory(storage_service, trajectory=None, **kwargs): 29 | """Creates a storage service, to be extended if new storage services are added 30 | 31 | :param storage_service: 32 | 33 | Storage Service instance of constructor or a string pointing to a file 34 | 35 | :param trajectory: 36 | 37 | A trajectory instance 38 | 39 | :param kwargs: 40 | 41 | Arguments passed to the storage service 42 | 43 | :return: 44 | 45 | A storage service and a set of not used keyword arguments from kwargs 46 | 47 | """ 48 | 49 | if 'filename' in kwargs and storage_service is None: 50 | filename = kwargs['filename'] 51 | _, ext = os.path.splitext(filename) 52 | if ext in ('.hdf', '.h4', '.hdf4', '.he2', '.h5', '.hdf5', '.he5'): 53 | storage_service = HDF5StorageService 54 | else: 55 | raise ValueError('Extension `%s` of filename `%s` not understood.' % 56 | (ext, filename)) 57 | elif isinstance(storage_service, str): 58 | class_name = storage_service.split('.')[-1] 59 | storage_service = create_class(class_name, [storage_service, HDF5StorageService]) 60 | 61 | if inspect.isclass(storage_service): 62 | return _create_storage(storage_service, trajectory, **kwargs) 63 | else: 64 | return storage_service, set(kwargs.keys()) 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /pypet/utils/trajectory_utils.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import os 4 | from pypet.trajectory import load_trajectory 5 | 6 | 7 | def merge_all_in_folder(folder, ext='.hdf5', 8 | dynamic_imports=None, 9 | storage_service=None, 10 | force=False, 11 | ignore_data=(), 12 | move_data=False, 13 | delete_other_files=False, 14 | keep_info=True, 15 | keep_other_trajectory_info=True, 16 | merge_config=True, 17 | backup=True): 18 | """Merges all files in a given folder. 19 | 20 | IMPORTANT: Does not check if there are more than 1 trajectory in a file. Always 21 | uses the last trajectory in file and ignores the other ones. 22 | 23 | Trajectories are merged according to the alphabetical order of the files, 24 | i.e. the resulting merged trajectory is found in the first file 25 | (according to lexicographic ordering). 26 | 27 | :param folder: folder (not recursive) where to look for files 28 | :param ext: only files with the given extension are used 29 | :param dynamic_imports: Dynamic imports for loading 30 | :param storage_service: storage service to use, leave `None` to use the default one 31 | :param force: If loading should be forced. 32 | :param delete_other_files: Deletes files of merged trajectories 33 | 34 | All other parameters as in `f_merge_many` of the trajectory. 
35 | 36 | :return: The merged traj 37 | 38 | """ 39 | in_dir = os.listdir(folder) 40 | all_files = [] 41 | # Find all files with matching extension 42 | for file in in_dir: 43 | full_file = os.path.join(folder, file) 44 | if os.path.isfile(full_file): 45 | _, extension = os.path.splitext(full_file) 46 | if extension == ext: 47 | all_files.append(full_file) 48 | all_files = sorted(all_files) 49 | 50 | # Open all trajectories 51 | trajs = [] 52 | for full_file in all_files: 53 | traj = load_trajectory(index=-1, 54 | storage_service=storage_service, 55 | filename=full_file, 56 | load_data=0, 57 | force=force, 58 | dynamic_imports=dynamic_imports) 59 | trajs.append(traj) 60 | 61 | # Merge all trajectories 62 | first_traj = trajs.pop(0) 63 | first_traj.f_merge_many(trajs, 64 | ignore_data=ignore_data, 65 | move_data=move_data, 66 | delete_other_trajectory=False, 67 | keep_info=keep_info, 68 | keep_other_trajectory_info=keep_other_trajectory_info, 69 | merge_config=merge_config, 70 | backup=backup) 71 | 72 | if delete_other_files: 73 | # Delete all but the first file 74 | for file in all_files[1:]: 75 | os.remove(file) 76 | 77 | return first_traj 78 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Robert Meyer' 2 | 3 | import re 4 | try: 5 | from setuptools import setup 6 | except ImportError: 7 | from distutils.core import setup 8 | try: 9 | # Used to convert md to rst for pypi, otherwise not needed 10 | import m2r 11 | except ImportError: 12 | m2r = None 13 | 14 | install_requires=[ 15 | 'tables', 16 | 'pandas', 17 | 'numpy', 18 | 'scipy' 19 | ] 20 | 21 | # For versioning, Version found in pypet._version.py 22 | verstrline = open('pypet/_version.py', "rt").read() 23 | 24 | VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]" 25 | mo = re.search(VSRE, verstrline, re.M) 26 | if mo: 27 | verstr = mo.group(1) 28 | else: 29 | raise RuntimeError('Unable to find version in pypet/_version.py') 30 | 31 | description = ('A toolkit for numerical simulations to allow ' 32 | 'easy parameter exploration and storage of results.') 33 | if m2r is None: 34 | long_description = description 35 | else: 36 | # convert markdown to rst 37 | long_description = m2r.convert(open('README.md').read()) 38 | 39 | setup( 40 | name='pypet', 41 | version=verstr, 42 | packages=['pypet', 43 | 'pypet.brian2', 44 | 'pypet.utils', 45 | 'pypet.tests', 46 | 'pypet.tests.unittests', 47 | 'pypet.tests.integration', 48 | 'pypet.tests.profiling', 49 | 'pypet.tests.testutils', 50 | 'pypet.tests.unittests.brian2tests', 51 | 'pypet.tests.integration.brian2tests', 52 | ], 53 | package_data={'pypet.tests': ['testdata/*.hdf5'], 'pypet': ['logging/*.ini']}, 54 | license='BSD', 55 | author='Robert Meyer', 56 | author_email='robert.meyer@alcemy.tech', 57 | description=description, 58 | long_description=long_description, 59 | url='https://github.com/SmokinCaterpillar/pypet', 60 | install_requires=install_requires, 61 | classifiers=[ 62 | 'Development Status :: 4 - Beta', 63 | 'Programming Language :: Python :: 3.6', 64 | 'Programming Language :: Python :: 3.7', 65 | 'Programming Language :: Python :: 3.8', 66 | 'Intended Audience :: Science/Research', 67 | 'Natural Language :: English', 68 | 'Operating System :: OS Independent', 69 | 'Topic :: Scientific/Engineering', 70 | 'License :: OSI Approved :: BSD License', 71 | 'Topic :: Utilities'], 72 | python_requires='>=3.6', 73 | ) 
--------------------------------------------------------------------------------