├── .coveragerc ├── .github └── workflows │ ├── tests.yml │ └── zoltan-tests.yml ├── .gitignore ├── .readthedocs.yaml ├── CHANGES.rst ├── LICENSE.txt ├── MANIFEST.in ├── Makefile ├── README.rst ├── docker ├── README.md └── base │ └── Dockerfile ├── docs ├── Images │ ├── dam-break-schematic.png │ ├── db3d.png │ ├── ldc-streamlines.png │ ├── local-remote-particles.png │ ├── local-remote.svg │ ├── periodic-domain-ghost-particle-tags.png │ ├── point-partition.png │ ├── pysph-examples-common-steps.graphml │ ├── pysph-examples-common-steps.png │ ├── pysph_viewer.png │ └── rings-collision.png ├── Makefile ├── make.bat ├── requirements.txt ├── source │ ├── conf.py │ ├── contribution │ │ └── how_to_write_docs.rst │ ├── design │ │ ├── equations.rst │ │ ├── images │ │ │ ├── controller.png │ │ │ ├── html_client.png │ │ │ ├── particle-array.png │ │ │ ├── pysph-modules.png │ │ │ └── sph-flowchart.png │ │ ├── iom.rst │ │ ├── overview.rst │ │ ├── solver_interfaces.rst │ │ └── working_with_particles.rst │ ├── examples │ │ ├── flow_past_cylinder.rst │ │ ├── index.rst │ │ ├── sphere_in_vessel.rst │ │ └── taylor_green.rst │ ├── index.rst │ ├── installation.rst │ ├── overview.rst │ ├── reference │ │ ├── application.rst │ │ ├── controller.rst │ │ ├── equations.rst │ │ ├── index.rst │ │ ├── integrator.rst │ │ ├── kernels.rst │ │ ├── nnps.rst │ │ ├── parallel_manager.rst │ │ ├── particle_array.rst │ │ ├── scheme.rst │ │ ├── solver.rst │ │ ├── solver_interfaces.rst │ │ └── tools.rst │ ├── starcluster │ │ └── overview.rst │ ├── tutorial │ │ ├── circular_patch.rst │ │ └── circular_patch_simple.rst │ └── using_pysph.rst └── tutorial │ ├── 1_getting_started.ipynb │ ├── 2_solving_a_problem.ipynb │ ├── 3_simple_post_processing.ipynb │ ├── 4_without_schemes.ipynb │ ├── images │ └── elliptical_drop_ic.png │ └── solutions │ ├── ed.py │ ├── ed0.py │ ├── ed_no_scheme.py │ ├── particles_in_disk.py │ └── plot_pa.py ├── pyproject.toml ├── pysph ├── __init__.py ├── base │ ├── __init__.py │ ├── 
box_sort_nnps.pxd │ ├── box_sort_nnps.pyx │ ├── c_kernels.pyx │ ├── c_kernels.pyx.mako │ ├── cell_indexing_nnps.pxd │ ├── cell_indexing_nnps.pyx │ ├── device_helper.py │ ├── gpu_domain_manager.py │ ├── gpu_helper_functions.mako │ ├── gpu_helper_kernels.py │ ├── gpu_nnps.py │ ├── gpu_nnps_base.pxd │ ├── gpu_nnps_base.pyx │ ├── gpu_nnps_helper.py │ ├── kernels.py │ ├── linalg3.pxd │ ├── linalg3.pyx │ ├── linked_list_nnps.pxd │ ├── linked_list_nnps.pyx │ ├── nnps.py │ ├── nnps_base.pxd │ ├── nnps_base.pyx │ ├── no_omp_threads.pxd │ ├── no_omp_threads.pyx │ ├── octree.pxd │ ├── octree.pyx │ ├── octree_gpu_nnps.pxd │ ├── octree_gpu_nnps.pyx │ ├── octree_nnps.pxd │ ├── octree_nnps.pyx │ ├── omp_threads.pxd │ ├── omp_threads.pyx │ ├── particle_array.pxd │ ├── particle_array.pyx │ ├── point.pxd │ ├── point.pyx │ ├── reduce_array.py │ ├── spatial_hash.h │ ├── spatial_hash_nnps.pxd │ ├── spatial_hash_nnps.pyx │ ├── stratified_hash_nnps.pxd │ ├── stratified_hash_nnps.pyx │ ├── stratified_sfc_gpu_nnps.mako │ ├── stratified_sfc_gpu_nnps.pxd │ ├── stratified_sfc_gpu_nnps.pyx │ ├── stratified_sfc_nnps.pxd │ ├── stratified_sfc_nnps.pyx │ ├── tests │ │ ├── __init__.py │ │ ├── test_device_helper.py │ │ ├── test_domain_manager.py │ │ ├── test_kernel.py │ │ ├── test_linalg3.py │ │ ├── test_neighbor_cache.py │ │ ├── test_nnps.py │ │ ├── test_octree.py │ │ ├── test_particle_array.py │ │ ├── test_periodic_nnps.py │ │ ├── test_reduce_array.py │ │ └── test_utils.py │ ├── tree │ │ ├── __init__.py │ │ ├── helpers.py │ │ ├── point_tree.mako │ │ ├── point_tree.py │ │ ├── tests │ │ │ └── test_point_tree.py │ │ ├── tree.mako │ │ └── tree.py │ ├── utils.py │ ├── z_order.h │ ├── z_order_gpu_nnps.pxd │ ├── z_order_gpu_nnps.pyx │ ├── z_order_gpu_nnps_kernels.py │ ├── z_order_nnps.pxd │ └── z_order_nnps.pyx ├── examples │ ├── __init__.py │ ├── _db_geometry.py │ ├── cavity.py │ ├── couette.py │ ├── cube.py │ ├── dam_break │ │ ├── __init__.py │ │ ├── dam_break_3d_lobovsky.py │ │ ├── db_2d_buchner.py │ 
│ └── db_3d_yeh.py │ ├── dam_break_2d.py │ ├── dam_break_3d.py │ ├── db_exp_data.py │ ├── elliptical_drop.py │ ├── elliptical_drop_no_scheme.py │ ├── elliptical_drop_simple.py │ ├── flow_past_cylinder_2d.py │ ├── fpc_with_packed_cylinder.py │ ├── gas_dynamics │ │ ├── __init__.py │ │ ├── accuracy_test_2d.py │ │ ├── acoustic_wave.py │ │ ├── blastwave.py │ │ ├── cheng_shu_1d.py │ │ ├── hydrostatic_box.py │ │ ├── kelvin_helmholtz_instability.py │ │ ├── ndspmhd-sedov-initial-conditions.npz │ │ ├── noh.py │ │ ├── riemann_2d.py │ │ ├── riemann_2d_config.py │ │ ├── riemann_solver.py │ │ ├── robert.py │ │ ├── sedov.py │ │ ├── shocktube.py │ │ ├── shocktube_setup.py │ │ ├── sjogreen.py │ │ ├── sod_shocktube.py │ │ ├── wallshock.py │ │ ├── wc_blastwave.py │ │ └── wc_exact.hdf5 │ ├── ghia_cavity_data.py │ ├── hydrostatic_tank.py │ ├── lattice_cylinders.py │ ├── periodic_cylinders.py │ ├── poiseuille.py │ ├── rayleigh_taylor.py │ ├── rigid_body │ │ ├── README.rst │ │ ├── __init__.py │ │ ├── bouncing_cube.py │ │ ├── bouncing_cubes.py │ │ ├── cubes_colliding_in_tank.py │ │ ├── dam_break3D_sph.py │ │ ├── simple.py │ │ ├── solid_body_floating_in_tank.py │ │ ├── sph.vtk.gz │ │ ├── sphere_in_vessel_akinci.py │ │ ├── ten_spheres_in_vessel_2d.py │ │ ├── three_cubes_in_vessel_3d.py │ │ └── three_spheres_in_fluid.py │ ├── run.py │ ├── shallow_water │ │ ├── __init__.py │ │ ├── cylindrical_dambreak.py │ │ ├── cylindrical_dambreak_closed_boundary.py │ │ ├── cylindrical_dambreak_particle_split.py │ │ ├── cylindrical_dambreak_sloping_bed.py │ │ ├── files_for_output_comparison │ │ │ ├── __init__.py │ │ │ ├── cyl_dam_closed_boun_t01.csv │ │ │ ├── cyl_dam_closed_boun_t02.csv │ │ │ ├── cyl_dam_closed_boun_t03.csv │ │ │ ├── cyl_dam_split_t01.csv │ │ │ ├── cyl_dam_split_t02.csv │ │ │ ├── cyl_dam_split_t03.csv │ │ │ ├── cyl_dam_t01.csv │ │ │ ├── cyl_dam_t02.csv │ │ │ ├── cyl_dam_t03.csv │ │ │ ├── tsu_experimental.csv │ │ │ ├── tsu_sensor1_vacondio.csv │ │ │ ├── tsu_sensor2_vacondio.csv │ │ │ └── 
tsu_sensor3_vacondio.csv │ │ ├── okushiri_tsunami.py │ │ ├── okushiri_tsunami_input_files │ │ │ ├── __init__.py │ │ │ ├── tsunami_bed.txt.bz2 │ │ │ └── tsunami_obc.txt │ │ ├── particle_split_in_a_square_domain.py │ │ ├── rectangular_channel_flow.py │ │ ├── rectangular_dambreak.py │ │ ├── rectangular_dambreak_particle_split.py │ │ ├── rectangular_dambreak_particle_split_and_merge.py │ │ ├── rectangular_dambreak_sloping_bed.py │ │ ├── rectangular_dambreak_wetbed_1d.py │ │ ├── still_water_over_a_hump_1d.py │ │ ├── still_water_over_a_parabolic_surface_1d.py │ │ ├── still_water_over_a_step_1d.py │ │ └── thacker_basin.py │ ├── sloshing │ │ ├── __init__.py │ │ ├── sloshing_tank_pitch.py │ │ └── st_bouscasse.py │ ├── sloshing_tank_horizontal.py │ ├── solid_mech │ │ ├── __init__.py │ │ ├── impact.py │ │ ├── impact3d.py │ │ ├── oscillating_plate.py │ │ ├── rings.py │ │ └── taylor_bar.py │ ├── spheric │ │ ├── __init__.py │ │ └── moving_square.py │ ├── sphysics │ │ ├── INDAT.gz │ │ ├── IPART.gz │ │ ├── __init__.py │ │ ├── beach_geometry.py │ │ ├── case1.py │ │ ├── case2.py │ │ ├── case3.py │ │ ├── case4.py │ │ ├── case5.py │ │ ├── case6.py │ │ ├── case7.py │ │ ├── case8.py │ │ ├── dam_break.py │ │ ├── dambreak_sphysics.py │ │ └── periodic_rigidbody.py │ ├── st_exp_data.py │ ├── surface_tension │ │ ├── __init__.py │ │ ├── capillary_wave.py │ │ ├── circular_droplet.py │ │ ├── equilibrium_rod.py │ │ ├── equilibrium_rod_hex.py │ │ ├── interface_instability.py │ │ ├── khi_sy11.py │ │ ├── khi_tvf.py │ │ ├── oscillating_rod.py │ │ └── square_droplet.py │ ├── taylor_green.py │ ├── tests │ │ ├── __init__.py │ │ ├── test_examples.py │ │ └── test_riemann_solver.py │ ├── tg_with_packed_particles.py │ ├── trivial_inlet_outlet.py │ └── two_blocks.py ├── parallel │ ├── __init__.py │ ├── parallel_manager.pxd │ ├── parallel_manager.pyx │ └── tests │ │ ├── __init__.py │ │ ├── cavity.py │ │ ├── check_dump_load.py │ │ ├── elliptical_drop.py │ │ ├── example_test_case.py │ │ ├── lb_exchange.py │ │ 
├── reduce_array.py │ │ ├── remote_exchange.py │ │ ├── simple_reduction.py │ │ ├── summation_density.py │ │ ├── test_openmp.py │ │ ├── test_parallel.py │ │ └── test_parallel_run.py ├── solver │ ├── __init__.py │ ├── application.py │ ├── controller.py │ ├── output.py │ ├── solver.py │ ├── solver_interfaces.py │ ├── tests │ │ ├── __init__.py │ │ ├── test_application.py │ │ ├── test_solver.py │ │ └── test_solver_utils.py │ ├── tools.py │ ├── utils.py │ └── vtk_output.py ├── sph │ ├── __init__.py │ ├── acceleration_eval.py │ ├── acceleration_eval_cython.mako │ ├── acceleration_eval_cython_helper.py │ ├── acceleration_eval_gpu.mako │ ├── acceleration_eval_gpu_helper.py │ ├── acceleration_nnps_helper.py │ ├── basic_equations.py │ ├── bc │ │ ├── __init__.py │ │ ├── characteristic │ │ │ ├── __init__.py │ │ │ ├── inlet.py │ │ │ ├── outlet.py │ │ │ └── simple_inlet_outlet.py │ │ ├── donothing │ │ │ ├── __init__.py │ │ │ ├── inlet.py │ │ │ ├── outlet.py │ │ │ └── simple_inlet_outlet.py │ │ ├── hybrid │ │ │ ├── __init__.py │ │ │ ├── inlet.py │ │ │ ├── outlet.py │ │ │ └── simple_inlet_outlet.py │ │ ├── inlet_outlet_manager.py │ │ ├── interpolate.py │ │ ├── interpolate.py.mako │ │ ├── mirror │ │ │ ├── __init__.py │ │ │ ├── inlet.py │ │ │ ├── outlet.py │ │ │ └── simple_inlet_outlet.py │ │ ├── mod_donothing │ │ │ ├── __init__.py │ │ │ ├── inlet.py │ │ │ ├── outlet.py │ │ │ └── simple_inlet_outlet.py │ │ └── tests │ │ │ ├── __init__.py │ │ │ └── test_simple_inlet_outlet.py │ ├── boundary_equations.py │ ├── equation.py │ ├── gas_dynamics │ │ ├── __init__.py │ │ ├── basic.py │ │ ├── boundary_equations.py │ │ ├── gsph.py │ │ ├── magma2.py │ │ ├── psph.py │ │ ├── riemann_solver.py │ │ └── tsph.py │ ├── iisph.py │ ├── integrator.py │ ├── integrator_cython.mako │ ├── integrator_cython_helper.py │ ├── integrator_gpu_helper.py │ ├── integrator_step.py │ ├── isph │ │ ├── __init__.py │ │ ├── isph.py │ │ ├── sisph.py │ │ └── wall_normal.py │ ├── misc │ │ ├── __init__.py │ │ └── advection.py │ 
├── rigid_body.py │ ├── scheme.py │ ├── solid_mech │ │ ├── __init__.py │ │ ├── basic.py │ │ └── hvi.py │ ├── sph_compiler.py │ ├── surface_tension.py │ ├── swe │ │ ├── __init__.py │ │ └── basic.py │ ├── tests │ │ ├── __init__.py │ │ ├── test_acceleration_eval.py │ │ ├── test_acceleration_eval_cython_helper.py │ │ ├── test_equations.py │ │ ├── test_integrator.py │ │ ├── test_integrator_cython_helper.py │ │ ├── test_integrator_step.py │ │ ├── test_kernel_corrections.py │ │ ├── test_linalg.py │ │ ├── test_multi_group_integrator.py │ │ ├── test_riemann_solver.py │ │ └── test_scheme.py │ └── wc │ │ ├── __init__.py │ │ ├── basic.py │ │ ├── crksph.py │ │ ├── density_correction.py │ │ ├── edac.py │ │ ├── gtvf.py │ │ ├── kernel_correction.py │ │ ├── linalg.py │ │ ├── parshikov.py │ │ ├── pcisph.py │ │ ├── shift.py │ │ ├── transport_velocity.py │ │ ├── viscosity.py │ │ └── zhanghuadams.py └── tools │ ├── __init__.py │ ├── binder.py │ ├── cli.py │ ├── cull.py │ ├── dump_xdmf.py │ ├── fortranfile.py │ ├── geometry.py │ ├── geometry_utils.py │ ├── gmsh.py │ ├── interpolator.py │ ├── ipy_viewer.py │ ├── manage_cache.py │ ├── mayavi_viewer.py │ ├── mesh_tools.pyx │ ├── ndspmhd.py │ ├── packer.py │ ├── particle_packing.py │ ├── pprocess.py │ ├── pysph_to_vtk.py │ ├── read_mesh.py │ ├── run_parallel_script.py │ ├── sph_evaluator.py │ ├── sphysics.py │ ├── tests │ ├── __init__.py │ ├── test_dump_xdmf.py │ ├── test_geometry.py │ ├── test_interpolator.py │ ├── test_mesh_tools.py │ └── test_sph_evaluator.py │ ├── uniform_distribution.py │ └── xdmf_template.mako ├── requirements-test.txt ├── requirements.txt ├── setup.cfg ├── setup.py ├── starcluster ├── ami.sh ├── sc_pysph.py └── xenial_base.sh └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | plugins = Cython.Coverage 3 | source = pysph 4 | omit = 5 | */tests/* 6 | */examples/* 7 | branch = True 8 | parallel = True 9 | 
-------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | pull_request: 5 | schedule: 6 | # Run test at 0400 UTC on Saturday. 7 | - cron: '0 4 * * 6' 8 | # Run test at 0400 UTC on day 1 of the month to create auto-generated 9 | # code afresh and cache it. 10 | - cron: '0 4 1 * *' # Ref https://crontab.guru/#0_4_1_*_* 11 | 12 | 13 | jobs: 14 | tests: 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | os: [ubuntu-latest, macos-latest, windows-latest] 19 | python-version: [3.11, 3.12] 20 | 21 | runs-on: ${{ matrix.os }} 22 | defaults: 23 | run: 24 | shell: bash -l {0} 25 | 26 | steps: 27 | - uses: actions/checkout@v4 28 | - name: Set up Python ${{ matrix.python-version }} 29 | uses: conda-incubator/setup-miniconda@v3 30 | with: 31 | auto-update-conda: true 32 | python-version: ${{ matrix.python-version }} 33 | channels: defaults, conda-forge 34 | channel-priority: flexible 35 | - name: Install dependencies on Linux/MacOS 36 | run: | 37 | conda info 38 | conda install pocl pyopencl 39 | python -c 'import pyopencl as cl' 40 | if: ${{ runner.os != 'Windows' }} 41 | - name: Setup compyle config on MacOS to use openmp enabled clang from homebrew 42 | run: | 43 | brew install libomp 44 | mkdir -p ~/.compyle 45 | touch ~/.compyle/config.py 46 | echo "import os" >> ~/.compyle/config.py 47 | echo "os.environ['CC'] = '$(brew --prefix llvm@15)/bin/clang'" >> ~/.compyle/config.py 48 | echo "os.environ['CXX'] = '$(brew --prefix llvm@15)/bin/clang++'" >> ~/.compyle/config.py 49 | export CPPFLAGS="-I$(brew --prefix libomp)/include -I$(brew --prefix llvm@15)/include -Xclang -fopenmp" 50 | export LDFLAGS="-L$(brew --prefix libomp)/lib -L$(brew --prefix llvm@15)/lib -lomp" 51 | python -c "import os; OMP_CFLAGS=os.environ.get('CPPFLAGS').split(' '); print(f'{OMP_CFLAGS=}')" >> ~/.compyle/config.py 52 | python -c "import 
os; OMP_LINK=os.environ.get('LDFLAGS').split(' '); print(f'{OMP_LINK=}')" >> ~/.compyle/config.py 53 | cat ~/.compyle/config.py 54 | if: ${{ runner.os == 'macOS' }} 55 | - name: Install dependencies 56 | run: | 57 | conda info 58 | conda install numpy cython h5py 59 | python -m pip install --upgrade pip setuptools wheel 60 | python -m pip install https://github.com/pypr/cyarray/zipball/main 61 | python -m pip install https://github.com/pypr/compyle/zipball/main 62 | python -m pip install -r requirements.txt -r requirements-test.txt 63 | python -m pip install --no-build-isolation -v -e . 64 | python -m pip list 65 | # Cache auto-generated code. Cache key changes every month. 66 | # Thanks https://stackoverflow.com/a/60942437 67 | - name: Get month to use as cache key. 68 | id: month 69 | run: echo "month=$(date +'%m')" >> $GITHUB_OUTPUT 70 | - name: Deal with auto-generated code cache 71 | uses: actions/cache@v4 72 | with: 73 | path: | 74 | ~/.pysph 75 | ~/.compyle 76 | key: ${{ runner.os }}-${{ steps.month.outputs.month }}-${{ matrix.python-version }} 77 | - name: Run tests 78 | run: | 79 | python -m pytest -v -m 'not slow or slow' 80 | -------------------------------------------------------------------------------- /.github/workflows/zoltan-tests.yml: -------------------------------------------------------------------------------- 1 | name: ZOLTAN/MPI Tests 2 | 3 | on: 4 | pull_request: 5 | schedule: 6 | # Run test at 0400 UTC on day 1 of every month to create auto-generated 7 | # code afresh and cache it. 
8 | - cron: '0 4 1 * *' # Ref https://crontab.guru/#0_4_1_*_* 9 | 10 | jobs: 11 | tests: 12 | strategy: 13 | matrix: 14 | os: [ubuntu-latest] 15 | python-version: [3.11] 16 | 17 | env: 18 | USE_TRILINOS: 1 19 | ZOLTAN_INCLUDE: /usr/include/trilinos 20 | ZOLTAN_LIBRARY: /usr/lib/x86_64-linux-gnu 21 | 22 | runs-on: ${{ matrix.os }} 23 | 24 | defaults: 25 | run: 26 | shell: bash -l {0} 27 | 28 | steps: 29 | - uses: actions/checkout@v4 30 | - name: Install Linux packages ZOLTAN support 31 | run: | 32 | sudo apt-get update 33 | sudo apt-get install -y openmpi-bin libopenmpi-dev libtrilinos-zoltan-dev 34 | - name: Set up Python ${{ matrix.python-version }} 35 | uses: conda-incubator/setup-miniconda@v3 36 | with: 37 | auto-update-conda: true 38 | python-version: ${{ matrix.python-version }} 39 | channels: conda-forge 40 | - name: Install dependencies 41 | run: | 42 | conda info 43 | conda install -c conda-forge numpy cython 44 | python -m pip install --upgrade pip setuptools wheel 45 | python -m pip install "mpi4py<4.0" 46 | python -m pip install https://github.com/pypr/cyarray/zipball/main 47 | python -m pip install https://github.com/pypr/compyle/zipball/main 48 | python -m pip install --no-build-isolation https://github.com/pypr/pyzoltan/zipball/main 49 | python -m pip install -r requirements.txt 50 | python -m pip install -r requirements-test.txt 51 | python -m pip install --no-build-isolation -v -e . 52 | # Cache auto-generated code. Cache key changes every month. 53 | # Thanks https://stackoverflow.com/a/60942437 54 | - name: Get month to use as cache key. 
55 | id: month 56 | run: echo "month=$(date +'%m')" >> $GITHUB_OUTPUT 57 | - name: Deal with auto-generated code cache 58 | uses: actions/cache@v4 59 | with: 60 | path: | 61 | ~/.pysph 62 | ~/.compyle 63 | key: zoltan-${{ runner.os }}-${{ steps.month.outputs.month }}-${{ matrix.python-version }} 64 | - name: Run tests 65 | run: | 66 | python -m pytest -v -m 'slow or parallel' 67 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.o 3 | *.c 4 | *.cpp 5 | *~ 6 | *.so 7 | *.so.dSYM 8 | *.orig 9 | *.npz 10 | *.log 11 | *.npz 12 | *.pyd 13 | *.pdf 14 | test.pyx 15 | PySPH.egg-info/ 16 | build/ 17 | dist/ 18 | .tox/ 19 | .pytest_cache/ 20 | *.out 21 | *_output 22 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file for Sphinx projects 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version and other tools you might need 8 | build: 9 | os: ubuntu-24.04 10 | tools: 11 | python: "3.12" 12 | # You can also specify other tool versions: 13 | # nodejs: "20" 14 | # rust: "1.70" 15 | # golang: "1.20" 16 | 17 | # Build documentation in the "docs/" directory with Sphinx 18 | sphinx: 19 | configuration: docs/source/conf.py 20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs 21 | # builder: "dirhtml" 22 | # Fail on all warnings to avoid broken references 23 | # fail_on_warning: true 24 | 25 | # Optionally build your docs in additional formats such as PDF and ePub 26 | # formats: 27 | # - pdf 28 | # - epub 29 | 30 | # Optional but recommended, declare the Python requirements required 31 | # to build your documentation 32 | # 
See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 33 | python: 34 | install: 35 | - requirements: docs/requirements.txt -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Unless otherwise specified by LICENSE.txt files in individual 2 | directories, all code is 3 | 4 | Copyright (c) 2009-2015, the PySPH developers 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are 9 | met: 10 | 11 | 1. Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution. 17 | 3. Neither the name of the copyright holder nor the names of its contributors 18 | may be used to endorse or promote products derived from this software 19 | without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include MANIFEST.in Makefile *.bat *.py *.rst *.sh *.txt *.yml *.toml 2 | recursive-include docs *.* 3 | recursive-include pysph *.pxd *.pyx *.mako *.txt.gz *.h 4 | recursive-exclude pysph *.cpp 5 | recursive-include pysph/examples *.py *.gz ndspmhd*.npz *.rst 6 | recursive-include pysph/examples *.txt.bz2 *.csv *.txt 7 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ROOT = $(shell pwd) 2 | MAKEFILE = $(ROOT)/Makefile 3 | SRC = $(ROOT) 4 | PKG1 = $(SRC)/pysph 5 | SUBPKG1 = base sph sph/solid_mech parallel tools 6 | DIRS := $(foreach dir,$(SUBPKG1),$(PKG1)/$(dir)) 7 | 8 | # this is used for cython files on recursive call to make 9 | PYX = $(wildcard *.pyx) 10 | 11 | MPI4PY_INCL = $(shell python -c "import mpi4py; print mpi4py.get_include()") 12 | 13 | # the default target to make 14 | all : build 15 | 16 | .PHONY : $(DIRS) bench build 17 | 18 | build : 19 | python setup.py build_ext --inplace 20 | 21 | $(DIRS) : 22 | cd $@; python $(ROOT)/pyzoltan/core/generator.py 23 | $(MAKE) -f $(MAKEFILE) -C $@ cythoncpp ROOT=$(ROOT) 24 | 25 | %.c : %.pyx 26 | python `which cython` -I$(SRC) -I$(MPI4PY_INCL) $< 27 | 28 | %.cpp : %.pyx 
29 | python `which cython` --cplus -I$(SRC) -I$(MPI4PY_INCL) $< 30 | 31 | %.html : %.pyx 32 | python `which cython` -I$(SRC) -I$(MPI4PY_INCL) -a $< 33 | 34 | cython : $(PYX:.pyx=.c) 35 | 36 | cythoncpp : $(PYX:.pyx=.cpp) 37 | 38 | _annotate : $(PYX:.pyx=.html) 39 | 40 | annotate : 41 | for f in $(DIRS); do $(MAKE) -f $(MAKEFILE) -C $${f} _annotate ROOT=$(ROOT); done 42 | 43 | clean : 44 | python setup.py clean 45 | -for dir in $(DIRS); do rm -f $$dir/*.c; done 46 | -for dir in $(DIRS); do rm -f $$dir/*.cpp; done 47 | 48 | cleanall : clean 49 | -for dir in $(DIRS); do rm -f $$dir/*.so; done 50 | # -rm $(patsubst %.pyx,%.c,$(wildcard $(PKG)/*/*.pyx)) 51 | 52 | test : 53 | python `which pytest` -m 'not slow' pysph 54 | 55 | testall : 56 | python `which pytest` pysph 57 | 58 | epydoc : 59 | python cython-epydoc.py --config epydoc.cfg pysph 60 | 61 | doc : 62 | cd docs; make html 63 | 64 | develop : 65 | python setup.py develop 66 | 67 | install : 68 | python setup.py install 69 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | ## Docker related files 2 | 3 | The docker files are available at https://hub.docker.com/u/pysph/ 4 | 5 | The `base` sub-directory contains a Dockerfile that is used to make the base 6 | image that can be easily used to test PySPH on both Python-2.7 and Python-3.5. 7 | This is the base image for any other PySPH related docker images. 8 | 9 | The base image only contains the necessary packages so as to run *all* the 10 | tests. It therefore include all the dependencies like mpi4py, Zoltan, Mayavi, 11 | and h5py so as to exercise all the tests. 12 | 13 | If you update the Dockerfile build a new image using: 14 | 15 | $ cd base 16 | $ docker build -t pysph/base:v3 . 
17 | 18 | 19 | Push it to dockerhub (if you have the permissions) and tag it as latest: 20 | 21 | $ docker push pysph/base:v3 22 | $ docker tag pysph/base:v3 pysph/base:latest 23 | $ docker push pysph/base:latest 24 | -------------------------------------------------------------------------------- /docker/base/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | MAINTAINER Prabhu Ramachandran 3 | 4 | # Install the necessary packages 5 | RUN apt-get update && \ 6 | apt-get install -y \ 7 | build-essential \ 8 | cython \ 9 | cython3 \ 10 | g++ \ 11 | git \ 12 | ipython \ 13 | ipython3 \ 14 | libgomp1 \ 15 | libopenmpi-dev \ 16 | libtrilinos-zoltan-dev \ 17 | mayavi2 \ 18 | python \ 19 | python-dev \ 20 | python-execnet \ 21 | python-h5py \ 22 | python-mako \ 23 | python-matplotlib \ 24 | python-mock \ 25 | python-mpi4py \ 26 | python-nose \ 27 | python-numpy \ 28 | python-pip \ 29 | python-psutil \ 30 | python-qt4 \ 31 | python-setuptools \ 32 | python-unittest2 \ 33 | python3 \ 34 | python3-h5py \ 35 | python3-mako \ 36 | python3-matplotlib \ 37 | python3-mpi4py \ 38 | python3-nose \ 39 | python3-numpy \ 40 | python3-pip \ 41 | python3-psutil \ 42 | sudo \ 43 | tox \ 44 | vim \ 45 | wget \ 46 | && rm -rf /var/lib/apt/lists/* 47 | 48 | # Sudo and the new user are needed as one should not run mpiexec as root. 
49 | RUN adduser --disabled-password --gecos '' pysph && \ 50 | adduser pysph sudo && \ 51 | echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 52 | 53 | ENV HOME=/home/pysph \ 54 | ZOLTAN_INCLUDE=/usr/include/trilinos \ 55 | ZOLTAN_LIBRARY=/usr/lib/x86_64-linux-gnu \ 56 | USE_TRILINOS=1 57 | 58 | USER pysph 59 | VOLUME /home/pysph 60 | WORKDIR /home/pysph 61 | -------------------------------------------------------------------------------- /docs/Images/dam-break-schematic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/dam-break-schematic.png -------------------------------------------------------------------------------- /docs/Images/db3d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/db3d.png -------------------------------------------------------------------------------- /docs/Images/ldc-streamlines.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/ldc-streamlines.png -------------------------------------------------------------------------------- /docs/Images/local-remote-particles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/local-remote-particles.png -------------------------------------------------------------------------------- /docs/Images/periodic-domain-ghost-particle-tags.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/periodic-domain-ghost-particle-tags.png 
-------------------------------------------------------------------------------- /docs/Images/point-partition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/point-partition.png -------------------------------------------------------------------------------- /docs/Images/pysph-examples-common-steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/pysph-examples-common-steps.png -------------------------------------------------------------------------------- /docs/Images/pysph_viewer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/pysph_viewer.png -------------------------------------------------------------------------------- /docs/Images/rings-collision.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/Images/rings-collision.png -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx-rtd-theme -------------------------------------------------------------------------------- /docs/source/contribution/how_to_write_docs.rst: -------------------------------------------------------------------------------- 1 | .. 
_how_to_write_docs: 2 | 3 | Contribute to docs 4 | ================== 5 | 6 | How to build the docs locally 7 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 | 9 | To build the docs, clone the repository:: 10 | 11 | $ git clone https://github.com/pypr/pysph 12 | 13 | Make sure to work in an ``pysph`` environment. I will proceed with the further 14 | instructions assuming that the repository is cloned in home directory. Change to 15 | the ``docs`` directory and run ``make html``. :: 16 | 17 | $ cd ~/pysph/docs/ 18 | $ make html 19 | 20 | 21 | Possible error one might get is:: 22 | 23 | $ sphinx-build: Command not found 24 | 25 | Which means you don't a have `sphinx-build` in your system. To install across 26 | the system do:: 27 | 28 | $ sudo apt-get install python3-sphinx 29 | 30 | 31 | or to install in an environment locally do:: 32 | 33 | $ pip install sphinx 34 | 35 | run ``make html`` again. The documentation is built locally at 36 | ``~/pysph/docs/build/html`` directory. Open ```index.html`` file by running :: 37 | 38 | $ cd ~/pysph/docs/build/html 39 | $ xdg-open index.html 40 | 41 | 42 | 43 | How to add the documentation 44 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 45 | 46 | As a starting point one can add documentation to one of the examples in 47 | ``~/pysph/pysph/examples`` folder. There is a dedicated 48 | ``~/pysph/docs/source/examples`` directory to add documentation to examples. 49 | Choose an example to write documentation for, :: 50 | 51 | $ cd ~/pysph/docs/source/examples 52 | $ touch your_example.rst 53 | 54 | We will write all the documentation in ``rst`` file format. The ``index.rst`` 55 | file in the examples directory should know about our newly created file, add a 56 | reference next to the last written example.:: 57 | 58 | * :ref:`Some_example`: 59 | * :ref:`Other_example`: 60 | * :ref:`taylor_green`: the Taylor-Green Vortex problem in 2D. 61 | * :ref:`sphere_in_vessel`: A sphere floating in a hydrostatic tank example. 
62 | * :ref:`your_example_file`: Description of the example. 63 | 64 | and at the top of the example file add the reference, for example in 65 | ``your_example_file.rst``, you should add,:: 66 | 67 | .. _your_example_file 68 | 69 | 70 | That's it, add the documentation and send a pull request. 71 | -------------------------------------------------------------------------------- /docs/source/design/images/controller.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/source/design/images/controller.png -------------------------------------------------------------------------------- /docs/source/design/images/html_client.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/source/design/images/html_client.png -------------------------------------------------------------------------------- /docs/source/design/images/particle-array.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/source/design/images/particle-array.png -------------------------------------------------------------------------------- /docs/source/design/images/pysph-modules.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/source/design/images/pysph-modules.png -------------------------------------------------------------------------------- /docs/source/design/images/sph-flowchart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/source/design/images/sph-flowchart.png 
-------------------------------------------------------------------------------- /docs/source/examples/index.rst: -------------------------------------------------------------------------------- 1 | .. _example_gallery: 2 | 3 | Gallery of PySPH examples 4 | =========================== 5 | 6 | In the following, several PySPH examples are documented. These serve to 7 | illustrate various features of PySPH and show one may use PySPH to solve a 8 | variety of problems. 9 | 10 | .. toctree:: 11 | :hidden: 12 | 13 | taylor_green.rst 14 | sphere_in_vessel.rst 15 | flow_past_cylinder.rst 16 | 17 | 18 | * :ref:`taylor_green`: the Taylor-Green Vortex problem in 2D. 19 | * :ref:`sphere_in_vessel`: A sphere floating in a hydrostatic tank example. 20 | * :ref:`flow_past_cylinder`: Flow past a circular cylinder in 2D. 21 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. PySPH documentation master file, created by 2 | sphinx-quickstart on Mon Mar 31 01:01:41 2014. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to the PySPH documentation! 7 | ==================================== 8 | 9 | PySPH is an open source framework for Smoothed Particle Hydrodynamics (SPH) 10 | simulations. Users can implement an SPH formulation in pure Python_ and still 11 | obtain excellent performance. PySPH can make use of multiple cores via OpenMP 12 | or be run seamlessly in parallel using MPI. 13 | 14 | Here are some videos of simulations made with PySPH. 15 | 16 | .. raw:: html 17 | 18 |
19 | 23 |
24 | 25 | 26 | PySPH is hosted on `github `_. Please see 27 | the `github `_ site for development 28 | details. 29 | 30 | .. _Python: http://www.python.org 31 | 32 | 33 | ********** 34 | Overview 35 | ********** 36 | 37 | .. toctree:: 38 | :maxdepth: 2 39 | 40 | overview.rst 41 | 42 | ********************************* 43 | Installation and getting started 44 | ********************************* 45 | 46 | .. toctree:: 47 | :maxdepth: 2 48 | 49 | installation.rst 50 | tutorial/circular_patch_simple.rst 51 | tutorial/circular_patch.rst 52 | 53 | 54 | *************************** 55 | The framework and library 56 | *************************** 57 | 58 | .. toctree:: 59 | :maxdepth: 2 60 | 61 | design/overview.rst 62 | design/equations.rst 63 | design/iom.rst 64 | starcluster/overview 65 | using_pysph.rst 66 | contribution/how_to_write_docs.rst 67 | 68 | ************************** 69 | Gallery of PySPH examples 70 | ************************** 71 | 72 | .. toctree:: 73 | :maxdepth: 2 74 | 75 | examples/index.rst 76 | 77 | ************************ 78 | Reference documentation 79 | ************************ 80 | 81 | Autogenerated from doc strings using sphinx's autodoc feature. 82 | 83 | .. toctree:: 84 | :maxdepth: 2 85 | 86 | reference/index 87 | design/solver_interfaces 88 | 89 | 90 | ================== 91 | Indices and tables 92 | ================== 93 | 94 | * :ref:`genindex` 95 | * :ref:`modindex` 96 | * :ref:`search` 97 | -------------------------------------------------------------------------------- /docs/source/reference/application.rst: -------------------------------------------------------------------------------- 1 | Module application 2 | ================== 3 | 4 | .. 
automodule:: pysph.solver.application 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /docs/source/reference/controller.rst: -------------------------------------------------------------------------------- 1 | Module controller 2 | ================= 3 | 4 | .. automodule:: pysph.solver.controller 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /docs/source/reference/index.rst: -------------------------------------------------------------------------------- 1 | PySPH Reference Documentation 2 | ============================= 3 | 4 | Autogenerated from doc strings using sphinx’s autodoc feature. 5 | 6 | .. toctree:: 7 | :maxdepth: 3 8 | 9 | application 10 | controller 11 | equations 12 | integrator 13 | kernels 14 | nnps 15 | parallel_manager 16 | particle_array 17 | scheme 18 | solver 19 | solver_interfaces 20 | tools -------------------------------------------------------------------------------- /docs/source/reference/integrator.rst: -------------------------------------------------------------------------------- 1 | Integrator related modules 2 | =========================== 3 | 4 | .. automodule:: pysph.sph.integrator 5 | :members: 6 | :undoc-members: 7 | 8 | .. automodule:: pysph.sph.integrator_step 9 | :members: 10 | :undoc-members: 11 | 12 | .. autoclass:: pysph.sph.gas_dynamics.magma2.TVDRK2Integrator 13 | :special-members: 14 | 15 | .. autoclass:: pysph.sph.gas_dynamics.magma2.TVDRK2IntegratorWithRecycling 16 | :special-members: 17 | 18 | .. autoclass:: pysph.sph.gas_dynamics.magma2.TVDRK2Step 19 | :special-members: 20 | -------------------------------------------------------------------------------- /docs/source/reference/kernels.rst: -------------------------------------------------------------------------------- 1 | SPH Kernels 2 | ============ 3 | 4 | .. 
automodule:: pysph.base.kernels 5 | :members: 6 | :undoc-members: 7 | -------------------------------------------------------------------------------- /docs/source/reference/nnps.rst: -------------------------------------------------------------------------------- 1 | Module nnps: Nearest Neighbor Particle Search 2 | ============================================== 3 | 4 | .. automodule:: pysph.base.nnps 5 | :members: 6 | 7 | .. automodule:: pysph.base.nnps_base 8 | :members: 9 | 10 | .. automodule:: pysph.base.linked_list_nnps 11 | :members: 12 | 13 | .. automodule:: pysph.base.box_sort_nnps 14 | :members: 15 | 16 | .. automodule:: pysph.base.spatial_hash_nnps 17 | :members: 18 | -------------------------------------------------------------------------------- /docs/source/reference/parallel_manager.rst: -------------------------------------------------------------------------------- 1 | ======================== 2 | Module parallel_manager 3 | ======================== 4 | 5 | .. automodule:: pysph.parallel.parallel_manager 6 | :members: 7 | -------------------------------------------------------------------------------- /docs/source/reference/particle_array.rst: -------------------------------------------------------------------------------- 1 | Module particle_array 2 | ===================== 3 | 4 | The ``ParticleArray`` class itself is documented as below. 5 | 6 | .. automodule:: pysph.base.particle_array 7 | :members: 8 | 9 | Convenience functions to create particle arrays 10 | ----------------------------------------------- 11 | 12 | There are several convenience functions that provide a particle array with a 13 | requisite set of particle properties that are documented below. 14 | 15 | .. 
automodule:: pysph.base.utils 16 | :members: 17 | 18 | -------------------------------------------------------------------------------- /docs/source/reference/scheme.rst: -------------------------------------------------------------------------------- 1 | Module scheme 2 | ============== 3 | 4 | .. automodule:: pysph.sph.scheme 5 | :members: 6 | :undoc-members: 7 | 8 | .. autoclass:: pysph.sph.gas_dynamics.magma2.MAGMA2Scheme 9 | :special-members: -------------------------------------------------------------------------------- /docs/source/reference/solver.rst: -------------------------------------------------------------------------------- 1 | Module solver 2 | ============= 3 | 4 | .. automodule:: pysph.solver.solver 5 | :members: 6 | 7 | Module solver tools 8 | ==================== 9 | 10 | .. automodule:: pysph.solver.tools 11 | :members: 12 | 13 | Module boundary conditions 14 | =========================== 15 | 16 | .. automodule:: pysph.sph.bc.inlet_outlet_manager 17 | :members: 18 | :undoc-members: -------------------------------------------------------------------------------- /docs/source/reference/solver_interfaces.rst: -------------------------------------------------------------------------------- 1 | Module solver_interfaces 2 | ======================== 3 | 4 | .. automodule:: pysph.solver.solver_interfaces 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /docs/source/reference/tools.rst: -------------------------------------------------------------------------------- 1 | .. py:currentmodule:: pysph.tools 2 | 3 | Miscellaneous Tools for PySPH 4 | ============================== 5 | 6 | .. contents:: 7 | :local: 8 | :depth: 1 9 | 10 | Input/Output of data files 11 | --------------------------- 12 | 13 | The following functions are handy functions when processing output generated by 14 | PySPH or to generate new files. 15 | 16 | .. autofunction:: pysph.solver.utils.dump 17 | 18 | .. 
autofunction:: pysph.solver.utils.get_files 19 | 20 | .. autofunction:: pysph.solver.utils.load 21 | 22 | .. autofunction:: pysph.solver.utils.load_and_concatenate 23 | 24 | Dump XDMF 25 | ~~~~~~~~~~~~~ 26 | .. automodule:: pysph.tools.dump_xdmf 27 | :members: 28 | :undoc-members: 29 | 30 | Interpolator 31 | ------------ 32 | 33 | This module provides a convenient class called 34 | :py:class:`interpolator.Interpolator` which can be used to interpolate any 35 | scalar values from the points onto either a mesh or a collection of other 36 | points. SPH interpolation is performed with a simple Shepard filtering. 37 | 38 | .. automodule:: pysph.tools.interpolator 39 | :members: 40 | :undoc-members: 41 | 42 | 43 | SPH Evaluator 44 | ------------- 45 | 46 | This module provides a class that allows one to evaluate a set of equations on 47 | a collection of particle arrays. This is very handy for non-trivial 48 | post-processing that needs to be quick. 49 | 50 | .. automodule:: pysph.tools.sph_evaluator 51 | :members: 52 | :undoc-members: 53 | 54 | 55 | GMsh input/output 56 | ------------------ 57 | 58 | 59 | .. automodule:: pysph.tools.gmsh 60 | :members: 61 | :undoc-members: 62 | 63 | Mayavi Viewer 64 | ------------- 65 | 66 | .. automodule:: pysph.tools.mayavi_viewer 67 | :members: 68 | :undoc-members: 69 | 70 | Mesh Converter 71 | -------------- 72 | 73 | The following functions can be used to convert a mesh file supported by 74 | `meshio `_ to a set of surface points. 75 | 76 | .. autofunction:: pysph.tools.read_mesh.mesh2points 77 | 78 | Particle Packer 79 | --------------- 80 | 81 | The following functions can be used to create a domain with particle packed 82 | around a solid surface in both 2D and 3D. 83 | 84 | .. autofunction:: pysph.tools.geometry.get_packed_periodic_packed_particles 85 | .. autofunction:: pysph.tools.geometry.get_packed_2d_particles_from_surface_coordinates 86 | .. 
autofunction:: pysph.tools.geometry.get_packed_2d_particles_from_surface_file 87 | .. autofunction:: pysph.tools.geometry.get_packed_3d_particles_from_surface_file 88 | .. autofunction:: pysph.tools.geometry.create_fluid_around_packing -------------------------------------------------------------------------------- /docs/tutorial/images/elliptical_drop_ic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/docs/tutorial/images/elliptical_drop_ic.png -------------------------------------------------------------------------------- /docs/tutorial/solutions/ed.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pysph.base.utils import get_particle_array 3 | from pysph.solver.application import Application 4 | from pysph.sph.scheme import WCSPHScheme 5 | 6 | 7 | class EllipticalDrop(Application): 8 | def create_particles(self): 9 | dx = 0.025 10 | x, y = np.mgrid[-1.05:1.05:dx, -1.05:1.05:dx] 11 | mask = x*x + y*y < 1 12 | x = x[mask] 13 | y = y[mask] 14 | rho = 1.0 15 | h = 1.3*dx 16 | m = rho*dx*dx 17 | pa = get_particle_array( 18 | name='fluid', x=x, y=y, u=-100*x, v=100*y, rho=rho, 19 | m=m, h=h 20 | ) 21 | self.scheme.setup_properties([pa]) 22 | return [pa] 23 | 24 | def create_scheme(self): 25 | s = WCSPHScheme( 26 | ['fluid'], [], dim=2, rho0=1.0, c0=1400, 27 | h0=1.3*0.025, hdx=1.3, gamma=7.0, alpha=0.1, beta=0.0 28 | ) 29 | dt = 5e-6 30 | tf = 0.0076 31 | s.configure_solver( 32 | dt=dt, tf=tf, 33 | ) 34 | return s 35 | 36 | 37 | if __name__ == '__main__': 38 | app = EllipticalDrop(fname='ed') 39 | app.run() 40 | -------------------------------------------------------------------------------- /docs/tutorial/solutions/ed0.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pysph.base.utils import get_particle_array 3 
| from pysph.solver.application import Application 4 | 5 | 6 | class EllipticalDrop(Application): 7 | def create_particles(self): 8 | dx = 0.025 9 | x, y = np.mgrid[-1.05:1.05:dx, -1.05:1.05:dx] 10 | mask = x*x + y*y < 1 11 | x = x[mask] 12 | y = y[mask] 13 | rho = 1.0 14 | h = 1.3*dx 15 | m = rho*dx*dx 16 | pa = get_particle_array( 17 | name='fluid', x=x, y=y, u=-100*x, v=100*y, rho=rho, 18 | m=m, h=h 19 | ) 20 | return [pa] 21 | 22 | 23 | if __name__ == '__main__': 24 | app = EllipticalDrop(fname='ed') 25 | app.run() 26 | -------------------------------------------------------------------------------- /docs/tutorial/solutions/ed_no_scheme.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pysph.base.utils import get_particle_array_wcsph 3 | from pysph.base.kernels import CubicSpline 4 | from pysph.solver.application import Application 5 | from pysph.sph.equation import Group 6 | from pysph.sph.basic_equations import XSPHCorrection, ContinuityEquation 7 | from pysph.sph.wc.basic import TaitEOS, MomentumEquation 8 | from pysph.solver.solver import Solver 9 | from pysph.sph.integrator import PECIntegrator 10 | from pysph.sph.integrator_step import WCSPHStep 11 | 12 | 13 | class EllipticalDrop(Application): 14 | def create_particles(self): 15 | dx = 0.025 16 | x, y = np.mgrid[-1.05:1.05:dx, -1.05:1.05:dx] 17 | mask = x*x + y*y < 1 18 | x = x[mask] 19 | y = y[mask] 20 | rho = 1.0 21 | h = 1.3*dx 22 | m = rho*dx*dx 23 | pa = get_particle_array_wcsph( 24 | name='fluid', x=x, y=y, u=-100*x, v=100*y, rho=rho, 25 | m=m, h=h 26 | ) 27 | return [pa] 28 | 29 | def create_equations(self): 30 | equations = [ 31 | Group( 32 | equations=[ 33 | TaitEOS(dest='fluid', sources=None, rho0=1.0, 34 | c0=1400, gamma=7.0), 35 | ], 36 | real=False 37 | ), 38 | 39 | Group( 40 | equations=[ 41 | ContinuityEquation(dest='fluid', sources=['fluid']), 42 | 43 | MomentumEquation(dest='fluid', sources=['fluid'], 44 | alpha=0.1, 
beta=0.0, c0=1400), 45 | 46 | XSPHCorrection(dest='fluid', sources=['fluid']), 47 | ] 48 | ), 49 | ] 50 | return equations 51 | 52 | def create_solver(self): 53 | kernel = CubicSpline(dim=2) 54 | integrator = PECIntegrator(fluid=WCSPHStep()) 55 | 56 | dt = 5e-6 57 | tf = 0.0076 58 | solver = Solver( 59 | kernel=kernel, dim=2, integrator=integrator, 60 | dt=dt, tf=tf 61 | ) 62 | return solver 63 | 64 | 65 | if __name__ == '__main__': 66 | app = EllipticalDrop(fname='ed_no_scheme') 67 | app.run() 68 | -------------------------------------------------------------------------------- /docs/tutorial/solutions/particles_in_disk.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pysph.base.utils import get_particle_array_wcsph 3 | x, y = np.mgrid[-1:1:50j, -1:1:50j] 4 | mask = x*x + y*y < 1.0 5 | pa = get_particle_array_wcsph(name='fluid', x=x[mask], y=y[mask]) 6 | plt.scatter(pa.x, pa.y, marker='.') -------------------------------------------------------------------------------- /docs/tutorial/solutions/plot_pa.py: -------------------------------------------------------------------------------- 1 | from pysph.solver.utils import load 2 | data = load('ed_output/ed_1000.hdf5') 3 | f = data['arrays']['fluid'] 4 | plt.axis('equal') 5 | plt.scatter(f.x, f.y, c=f.p, marker='.') -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "Beaker", 4 | "Cython>=0.20", 5 | "compyle>=0.8", 6 | "cyarray", 7 | "mako", 8 | "numpy>=2.0,<3", 9 | "pytools", 10 | "setuptools>=42.0.0", 11 | "wheel>=0.29.0", 12 | ] -------------------------------------------------------------------------------- /pysph/__init__.py: -------------------------------------------------------------------------------- 1 | # See PEP 440 for more on suitable version numbers. 
2 | __version__ = '1.0b3.dev0' 3 | 4 | # Utility functions to determine if Zoltan/MPI are available. 5 | _has_zoltan = None 6 | _has_opencl = None 7 | _has_mpi = None 8 | _in_parallel = None 9 | 10 | 11 | try: 12 | from pyzoltan import has_mpi # noqa: 402 13 | except ImportError: 14 | def has_mpi(): 15 | global _has_mpi 16 | if _has_mpi is None: 17 | try: 18 | import mpi4py # noqa: 401 19 | except ImportError: 20 | _has_mpi = False 21 | else: 22 | mpi4py.rc.initialize = False 23 | mpi4py.rc.finalize = True 24 | return _has_mpi 25 | 26 | 27 | def has_opencl(): 28 | """Return True if pyopencl is available. 29 | """ 30 | global _has_opencl 31 | if _has_opencl is None: 32 | _has_opencl = True 33 | try: 34 | import pyopencl # noqa: 401 35 | except ImportError: 36 | _has_opencl = False 37 | return _has_opencl 38 | 39 | 40 | def has_zoltan(): 41 | """Return True if zoltan is available. 42 | """ 43 | global _has_zoltan 44 | if _has_zoltan is None: 45 | _has_zoltan = True 46 | try: 47 | from pyzoltan.core import zoltan # noqa: 401 48 | except ImportError: 49 | _has_zoltan = False 50 | return _has_zoltan 51 | 52 | 53 | def in_parallel(): 54 | """Return true if we're running with MPI and Zoltan support 55 | """ 56 | global _in_parallel 57 | if _in_parallel is None: 58 | _in_parallel = has_mpi() and has_zoltan() 59 | 60 | return _in_parallel 61 | 62 | 63 | # Utility function to determine the possible output files 64 | _has_h5py = None 65 | _has_pyvisfile = None 66 | _has_tvtk = None 67 | 68 | 69 | def has_h5py(): 70 | """Return True if h5py is available. 71 | """ 72 | global _has_h5py 73 | if _has_h5py is None: 74 | _has_h5py = True 75 | try: 76 | import h5py # noqa: 401 77 | except ImportError: 78 | _has_h5py = False 79 | return _has_h5py 80 | 81 | 82 | def has_tvtk(): 83 | """Return True if tvtk is available. 
84 | """ 85 | global _has_tvtk 86 | if _has_tvtk is None: 87 | _has_tvtk = True 88 | try: 89 | import tvtk # noqa: 401 90 | except ImportError: 91 | _has_tvtk = False 92 | return _has_tvtk 93 | 94 | 95 | def has_pyvisfile(): 96 | """Return True if pyvisfile is available. 97 | """ 98 | global _has_pyvisfile 99 | if _has_pyvisfile is None: 100 | _has_pyvisfile = True 101 | try: 102 | import pyvisfile # noqa: 401 103 | except ImportError: 104 | _has_pyvisfile = False 105 | return _has_pyvisfile 106 | -------------------------------------------------------------------------------- /pysph/base/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/base/__init__.py -------------------------------------------------------------------------------- /pysph/base/box_sort_nnps.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from libcpp.map cimport map 6 | 7 | from .nnps_base cimport * 8 | from .linked_list_nnps cimport * 9 | 10 | # NNPS using the original gridding algorithm 11 | cdef class DictBoxSortNNPS(NNPS): 12 | cdef public dict cells # lookup table for the cells 13 | cdef list _cell_keys 14 | 15 | cpdef get_nearest_particles_no_cache(self, int src_index, int dst_index, 16 | size_t d_idx, UIntArray nbrs, bint prealloc) 17 | 18 | cpdef _refresh(self) 19 | 20 | cpdef _bin(self, int pa_index, UIntArray indices) 21 | 22 | # NNPS using the linked list approach 23 | cdef class BoxSortNNPS(LinkedListNNPS): 24 | ############################################################################ 25 | # Data Attributes 26 | ############################################################################ 27 | cdef public map[long, int] cell_to_index # Maps cell ID to 
an index 28 | -------------------------------------------------------------------------------- /pysph/base/c_kernels.pyx.mako: -------------------------------------------------------------------------------- 1 | # cython: embedsignature=True, language_level=3 2 | # distutils: language=c++ 3 | <% 4 | from compyle.api import CythonGenerator, use_config 5 | from kernels import ( 6 | CubicSpline, WendlandQuintic, Gaussian, QuinticSpline, SuperGaussian, 7 | WendlandQuinticC4, WendlandQuinticC6, WendlandQuinticC2_1D, 8 | WendlandQuinticC4_1D, WendlandQuinticC6_1D 9 | ) 10 | CLASSES = ( 11 | CubicSpline, WendlandQuintic, Gaussian, QuinticSpline, SuperGaussian, 12 | WendlandQuinticC4, WendlandQuinticC6, WendlandQuinticC2_1D, 13 | WendlandQuinticC4_1D, WendlandQuinticC6_1D 14 | ) 15 | with use_config(use_openmp=True): 16 | generator = CythonGenerator(python_methods=True) 17 | %> 18 | 19 | from libc.math cimport * 20 | import numpy as np 21 | 22 | % for cls in CLASSES: 23 | <% 24 | generator.parse(cls()) 25 | classname = cls.__name__ 26 | %> 27 | ${generator.get_code()} 28 | 29 | cdef class ${classname}Wrapper: 30 | """Reasonably high-performance convenience wrapper for Kernels. 
31 | """ 32 | 33 | cdef public ${classname} kern 34 | cdef double[3] xij, grad 35 | cdef public double radius_scale 36 | cdef public double fac 37 | 38 | def __init__(self, kern): 39 | self.kern = kern 40 | self.radius_scale = kern.radius_scale 41 | self.fac = kern.fac 42 | 43 | cpdef double kernel(self, double xi, double yi, double zi, double xj, double yj, double zj, double h): 44 | cdef double* xij = self.xij 45 | xij[0] = xi-xj 46 | xij[1] = yi-yj 47 | xij[2] = zi-zj 48 | cdef double rij = sqrt(xij[0]*xij[0] + xij[1]*xij[1] +xij[2]*xij[2]) 49 | return self.kern.kernel(xij, rij, h) 50 | 51 | cpdef gradient(self, double xi, double yi, double zi, double xj, double yj, double zj, double h): 52 | cdef double* xij = self.xij 53 | xij[0] = xi-xj 54 | xij[1] = yi-yj 55 | xij[2] = zi-zj 56 | cdef double rij = sqrt(xij[0]*xij[0] + xij[1]*xij[1] +xij[2]*xij[2]) 57 | cdef double* grad = self.grad 58 | self.kern.gradient(xij, rij, h, grad) 59 | return grad[0], grad[1], grad[2] 60 | 61 | % endfor 62 | -------------------------------------------------------------------------------- /pysph/base/cell_indexing_nnps.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from libcpp.map cimport map 6 | from libcpp.pair cimport pair 7 | 8 | from .nnps_base cimport * 9 | 10 | ctypedef unsigned int u_int 11 | ctypedef map[u_int, pair[u_int, u_int]] key_to_idx_t 12 | 13 | cdef extern from 'math.h': 14 | double log(double) nogil 15 | double log2(double) nogil 16 | 17 | cdef class CellIndexingNNPS(NNPS): 18 | ############################################################################ 19 | # Data Attributes 20 | ############################################################################ 21 | cdef u_int** keys 22 | cdef u_int* current_keys 23 | 24 | cdef key_to_idx_t** key_indices 25 | cdef 
key_to_idx_t* current_indices 26 | 27 | cdef u_int* I 28 | cdef u_int J 29 | cdef u_int K 30 | 31 | cdef double radius_scale2 32 | cdef NNPSParticleArrayWrapper dst, src 33 | 34 | ########################################################################## 35 | # Member functions 36 | ########################################################################## 37 | 38 | cdef inline u_int _get_key(self, u_int n, u_int i, u_int j, 39 | u_int k, int pa_index) noexcept nogil 40 | 41 | cdef inline int _get_id(self, u_int key, int pa_index) noexcept nogil 42 | 43 | cdef inline int _get_x(self, u_int key, int pa_index) noexcept nogil 44 | 45 | cdef inline int _get_y(self, u_int key, int pa_index) noexcept nogil 46 | 47 | cdef inline int _get_z(self, u_int key, int pa_index) noexcept nogil 48 | 49 | cdef inline int _neighbor_boxes(self, int i, int j, int k, 50 | int* x, int* y, int* z) noexcept nogil 51 | 52 | cpdef set_context(self, int src_index, int dst_index) 53 | 54 | cdef void find_nearest_neighbors(self, size_t d_idx, UIntArray nbrs) noexcept nogil 55 | 56 | cpdef get_nearest_particles_no_cache(self, int src_index, int dst_index, 57 | size_t d_idx, UIntArray nbrs, bint prealloc) 58 | 59 | cpdef get_spatially_ordered_indices(self, int pa_index, LongArray indices) 60 | 61 | cdef void fill_array(self, NNPSParticleArrayWrapper pa_wrapper, int pa_index, 62 | UIntArray indices, u_int* current_keys, key_to_idx_t* current_indices) noexcept nogil 63 | 64 | cpdef _refresh(self) 65 | 66 | cpdef _bin(self, int pa_index, UIntArray indices) 67 | -------------------------------------------------------------------------------- /pysph/base/gpu_nnps.py: -------------------------------------------------------------------------------- 1 | from pysph.base.gpu_nnps_base import GPUNeighborCache, GPUNNPS, BruteForceNNPS 2 | from pysph.base.z_order_gpu_nnps import ZOrderGPUNNPS 3 | from pysph.base.stratified_sfc_gpu_nnps import StratifiedSFCGPUNNPS 4 | from pysph.base.gpu_domain_manager import 
GPUDomainManager 5 | from pysph.base.octree_gpu_nnps import OctreeGPUNNPS 6 | -------------------------------------------------------------------------------- /pysph/base/gpu_nnps_helper.py: -------------------------------------------------------------------------------- 1 | from mako.template import Template 2 | from mako.lookup import TemplateLookup 3 | import os 4 | import sys 5 | 6 | from compyle.opencl import get_context, profile_kernel, SimpleKernel 7 | 8 | 9 | def get_simple_kernel(kernel_name, args, src, wgs, preamble=""): 10 | ctx = get_context() 11 | knl = SimpleKernel( 12 | ctx, args, src, wgs, 13 | kernel_name, preamble=preamble 14 | ) 15 | 16 | return profile_kernel(knl, kernel_name, backend='opencl') 17 | 18 | 19 | def get_elwise_kernel(kernel_name, args, src, preamble=""): 20 | ctx = get_context() 21 | from pyopencl.elementwise import ElementwiseKernel 22 | knl = ElementwiseKernel( 23 | ctx, args, src, 24 | kernel_name, preamble=preamble 25 | ) 26 | return profile_kernel(knl, kernel_name, backend='opencl') 27 | 28 | 29 | class GPUNNPSHelper(object): 30 | def __init__(self, tpl_filename, backend=None, use_double=False, 31 | c_type=None): 32 | """ 33 | 34 | Parameters 35 | ---------- 36 | tpl_filename 37 | filename of source template 38 | backend 39 | backend to use for helper 40 | use_double: 41 | Use double precision floating point data types 42 | c_type: 43 | c_type to use. 
Overrides use_double 44 | """ 45 | 46 | self.src_tpl = Template( 47 | filename=os.path.join( 48 | os.path.dirname(os.path.realpath(__file__)), 49 | tpl_filename), 50 | ) 51 | self.data_t = "double" if use_double else "float" 52 | 53 | if c_type is not None: 54 | self.data_t = c_type 55 | 56 | helper_tpl = Template( 57 | filename=os.path.join( 58 | os.path.dirname(os.path.realpath(__file__)), 59 | "gpu_helper_functions.mako"), 60 | ) 61 | 62 | helper_preamble = helper_tpl.get_def("get_helpers").render( 63 | data_t=self.data_t 64 | ) 65 | preamble = self.src_tpl.get_def("preamble").render( 66 | data_t=self.data_t 67 | ) 68 | self.preamble = "\n".join([helper_preamble, preamble]) 69 | self.cache = {} 70 | self.backend = backend 71 | 72 | def _get_code(self, kernel_name, **kwargs): 73 | arguments = self.src_tpl.get_def("%s_args" % kernel_name).render( 74 | data_t=self.data_t, **kwargs) 75 | 76 | src = self.src_tpl.get_def("%s_src" % kernel_name).render( 77 | data_t=self.data_t, **kwargs) 78 | 79 | return arguments, src 80 | 81 | def get_kernel(self, kernel_name, **kwargs): 82 | key = kernel_name, tuple(kwargs.items()) 83 | wgs = kwargs.get('wgs', None) 84 | 85 | if key in self.cache: 86 | return self.cache[key] 87 | else: 88 | args, src = self._get_code(kernel_name, **kwargs) 89 | 90 | if wgs is None: 91 | knl = get_elwise_kernel(kernel_name, args, src, 92 | preamble=self.preamble) 93 | else: 94 | knl = get_simple_kernel(kernel_name, args, src, wgs, 95 | preamble=self.preamble) 96 | 97 | self.cache[key] = knl 98 | return knl 99 | -------------------------------------------------------------------------------- /pysph/base/linalg3.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | """Routines for eigen decomposition of symmetric 3x3 matrices. 
6 | """ 7 | cdef double det(double [3][3]a) noexcept nogil 8 | cdef void get_eigenvalues(double [3][3]a, double *result) noexcept nogil 9 | cdef void eigen_decomposition(double [3][3]A, double [3][3]V, double *d) noexcept nogil 10 | cdef void transform(double [3][3]A, double [3][3]P, double [3][3]res) noexcept nogil 11 | cdef void transform_diag(double *A, double [3][3]P, double [3][3]res) noexcept nogil 12 | cdef void transform_diag_inv(double *A, double [3][3]P, double [3][3]res) nogil 13 | 14 | cdef void get_eigenvalvec(double [3][3]A, double *R, double *e) 15 | -------------------------------------------------------------------------------- /pysph/base/linked_list_nnps.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from libcpp.map cimport map 6 | from libcpp.vector cimport vector 7 | 8 | from .nnps_base cimport * 9 | 10 | # NNPS using the linked list approach 11 | cdef class LinkedListNNPS(NNPS): 12 | ############################################################################ 13 | # Data Attributes 14 | ############################################################################ 15 | cdef public IntArray ncells_per_dim # number of cells in each direction 16 | cdef public int ncells_tot # total number of cells 17 | cdef public bint fixed_h # Constant cell sizes 18 | cdef public list heads # Head arrays for the cells 19 | cdef public list nexts # Next arrays for the particles 20 | 21 | cdef NNPSParticleArrayWrapper src, dst # Current source and destination. 22 | cdef UIntArray next, head # Current next and head arrays. 
23 | 24 | cpdef long _count_occupied_cells(self, long n_cells) except -1 25 | cpdef long _get_number_of_cells(self) except -1 26 | cdef long _get_flattened_cell_index(self, cPoint pnt, double cell_size) 27 | cdef long _get_valid_cell_index(self, int cid_x, int cid_y, int cid_z, 28 | int* ncells_per_dim, int dim, int n_cells) noexcept nogil 29 | cdef void find_nearest_neighbors(self, size_t d_idx, UIntArray nbrs) noexcept nogil 30 | -------------------------------------------------------------------------------- /pysph/base/nnps.py: -------------------------------------------------------------------------------- 1 | from pysph.base.nnps_base import get_number_of_threads, py_flatten, \ 2 | py_unflatten, py_get_valid_cell_index 3 | from pysph.base.nnps_base import NNPSParticleArrayWrapper, CPUDomainManager, \ 4 | DomainManager, Cell, NeighborCache, NNPSBase, NNPS 5 | from pysph.base.linked_list_nnps import LinkedListNNPS 6 | from pysph.base.box_sort_nnps import BoxSortNNPS, DictBoxSortNNPS 7 | from pysph.base.spatial_hash_nnps import SpatialHashNNPS, \ 8 | ExtendedSpatialHashNNPS 9 | from pysph.base.cell_indexing_nnps import CellIndexingNNPS 10 | from pysph.base.z_order_nnps import ZOrderNNPS, ExtendedZOrderNNPS 11 | from pysph.base.stratified_hash_nnps import StratifiedHashNNPS 12 | from pysph.base.stratified_sfc_nnps import StratifiedSFCNNPS 13 | from pysph.base.octree_nnps import OctreeNNPS, CompressedOctreeNNPS 14 | -------------------------------------------------------------------------------- /pysph/base/no_omp_threads.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | cpdef int get_number_of_threads() 6 | cpdef set_number_of_threads(int) -------------------------------------------------------------------------------- /pysph/base/no_omp_threads.pyx: 
-------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | cpdef int get_number_of_threads(): 6 | return 1 7 | 8 | cpdef set_number_of_threads(int n): 9 | print("OpenMP not available, cannot set number of threads.") 10 | 11 | -------------------------------------------------------------------------------- /pysph/base/octree_gpu_nnps.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from pysph.base.gpu_nnps_base cimport * 6 | 7 | 8 | cdef class OctreeGPUNNPS(GPUNNPS): 9 | cdef NNPSParticleArrayWrapper src, dst # Current source and destination. 10 | 11 | cdef public list pids 12 | cdef public list pid_keys 13 | cdef public list cids 14 | cdef public list cid_to_idx 15 | cdef public list max_cid 16 | cdef public object dst_to_src 17 | cdef object overflow_cid_to_idx 18 | cdef object curr_cid 19 | cdef object max_cid_src 20 | 21 | cdef object helper 22 | cdef object radix_sort 23 | cdef object make_vec 24 | 25 | cdef public bint allow_sort 26 | cdef bint dst_src 27 | 28 | cdef public object neighbor_cid_counts 29 | cdef public object neighbor_cids 30 | cdef public list octrees 31 | cdef public bint use_elementwise 32 | cdef public bint use_partitions 33 | cdef public object leaf_size 34 | 35 | cpdef _bin(self, int pa_index) 36 | 37 | cpdef _refresh(self) 38 | 39 | cdef void find_neighbor_lengths(self, nbr_lengths) 40 | cdef void find_nearest_neighbors_gpu(self, nbrs, start_indices) 41 | 42 | cpdef get_kernel_args(self, c_type) 43 | -------------------------------------------------------------------------------- /pysph/base/octree_nnps.pxd: 
-------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from .nnps_base cimport * 6 | from .octree cimport Octree, CompressedOctree, cOctreeNode 7 | 8 | from libcpp.vector cimport vector 9 | cimport cython 10 | 11 | ctypedef unsigned int u_int 12 | 13 | cdef extern from 'math.h': 14 | int abs(int) nogil 15 | double ceil(double) nogil 16 | double floor(double) nogil 17 | double fabs(double) nogil 18 | double fmax(double, double) nogil 19 | double fmin(double, double) nogil 20 | 21 | cdef class OctreeNNPS(NNPS): 22 | ########################################################################## 23 | # Data Attributes 24 | ########################################################################## 25 | cdef list tree 26 | cdef cOctreeNode* current_tree 27 | cdef u_int* current_pids 28 | 29 | cdef double radius_scale2 30 | cdef NNPSParticleArrayWrapper dst, src 31 | cdef int leaf_max_particles 32 | 33 | ########################################################################## 34 | # Member functions 35 | ########################################################################## 36 | 37 | cpdef get_depth(self, int pa_index) 38 | 39 | cdef void find_nearest_neighbors(self, size_t d_idx, UIntArray nbrs) noexcept nogil 40 | 41 | cpdef set_context(self, int src_index, int dst_index) 42 | 43 | cdef void _get_neighbors(self, double q_x, double q_y, double q_z, double q_h, 44 | double* src_x_ptr, double* src_y_ptr, double* src_z_ptr, double* src_h_ptr, 45 | UIntArray nbrs, cOctreeNode* node) noexcept nogil 46 | 47 | cpdef get_spatially_ordered_indices(self, int pa_index, LongArray indices) 48 | 49 | cpdef _refresh(self) 50 | 51 | cpdef _bin(self, int pa_index, UIntArray indices) 52 | 53 | cdef class CompressedOctreeNNPS(OctreeNNPS): 54 | 
########################################################################## 55 | # Member functions 56 | ########################################################################## 57 | cpdef set_context(self, int src_index, int dst_index) 58 | 59 | cpdef _refresh(self) 60 | 61 | cpdef _bin(self, int pa_index, UIntArray indices) 62 | -------------------------------------------------------------------------------- /pysph/base/omp_threads.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | cpdef int get_number_of_threads() 6 | cpdef set_number_of_threads(int) -------------------------------------------------------------------------------- /pysph/base/omp_threads.pyx: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from cython.parallel import parallel, prange 6 | cimport openmp 7 | 8 | 9 | cpdef int get_number_of_threads(): 10 | cdef int i, n 11 | with nogil, parallel(): 12 | for i in prange(1): 13 | n = openmp.omp_get_num_threads() 14 | return n 15 | 16 | cpdef set_number_of_threads(int n): 17 | openmp.omp_set_num_threads(n) -------------------------------------------------------------------------------- /pysph/base/reduce_array.py: -------------------------------------------------------------------------------- 1 | """Functions to reduce array data in serial or parallel. 2 | """ 3 | 4 | import numpy as np 5 | 6 | from cyarray.carray import BaseArray 7 | 8 | 9 | def _check_operation(op): 10 | """Raise an exception if the wrong operation is given. 
11 | """ 12 | valid_ops = ('sum', 'max', 'min', 'prod') 13 | msg = "Unsupported operation %s, must be one of %s."%(op, valid_ops) 14 | if op not in valid_ops: 15 | raise RuntimeError(msg) 16 | 17 | def _get_npy_array(array_or_carray): 18 | """Return a numpy array from given carray or numpy array. 19 | """ 20 | if isinstance(array_or_carray, BaseArray): 21 | return array_or_carray.get_npy_array() 22 | else: 23 | return array_or_carray 24 | 25 | def serial_reduce_array(array, op='sum'): 26 | """Reduce an array given an array and a suitable reduction operation. 27 | 28 | Currently, only 'sum', 'max', 'min' and 'prod' are supported. 29 | 30 | **Parameters** 31 | 32 | - array: numpy.ndarray: Any numpy array (1D). 33 | - op: str: reduction operation, one of ('sum', 'prod', 'min', 'max') 34 | 35 | """ 36 | _check_operation(op) 37 | ops = {'sum': np.sum, 'prod': np.prod, 38 | 'max': np.max, 'min': np.min} 39 | np_array = _get_npy_array(array) 40 | return ops[op](np_array) 41 | 42 | 43 | def dummy_reduce_array(array, op='sum'): 44 | """Simply returns the array for the serial case. 45 | """ 46 | return _get_npy_array(array) 47 | 48 | def mpi_reduce_array(array, op='sum'): 49 | """Reduce an array given an array and a suitable reduction operation. 50 | 51 | Currently, only 'sum', 'max', 'min' and 'prod' are supported. 52 | 53 | **Parameters** 54 | 55 | - array: numpy.ndarray: Any numpy array (1D). 56 | - op: str: reduction operation, one of ('sum', 'prod', 'min', 'max') 57 | 58 | """ 59 | np_array = _get_npy_array(array) 60 | from mpi4py import MPI 61 | ops = {'sum': MPI.SUM, 'prod': MPI.PROD, 62 | 'max': MPI.MAX, 'min': MPI.MIN} 63 | return MPI.COMM_WORLD.allreduce(np_array, op=ops[op]) 64 | 65 | # This is just to keep syntax highlighters happy in editors while writing 66 | # equations. 
67 | parallel_reduce_array = mpi_reduce_array 68 | -------------------------------------------------------------------------------- /pysph/base/spatial_hash_nnps.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from libcpp.vector cimport vector 6 | 7 | from .nnps_base cimport * 8 | 9 | #Imports for SpatialHashNNPS 10 | cdef extern from "spatial_hash.h": 11 | cdef cppclass HashEntry: 12 | double h_max 13 | 14 | vector[unsigned int] *get_indices() noexcept nogil 15 | 16 | cdef cppclass HashTable: 17 | HashTable(long long int) except + nogil 18 | void add(int, int, int, int, double) noexcept nogil 19 | HashEntry* get(int, int, int) noexcept nogil 20 | 21 | # NNPS using Spatial Hashing algorithm 22 | cdef class SpatialHashNNPS(NNPS): 23 | ############################################################################ 24 | # Data Attributes 25 | ############################################################################ 26 | cdef long long int table_size # Size of hashtable 27 | cdef double radius_scale2 28 | 29 | cdef HashTable** hashtable 30 | cdef HashTable* current_hash 31 | 32 | cdef NNPSParticleArrayWrapper dst, src 33 | 34 | ########################################################################## 35 | # Member functions 36 | ########################################################################## 37 | 38 | cpdef set_context(self, int src_index, int dst_index) 39 | 40 | cdef void find_nearest_neighbors(self, size_t d_idx, UIntArray nbrs) noexcept nogil 41 | 42 | cdef inline void _add_to_hashtable(self, int hash_id, unsigned int pid, double h, 43 | int i, int j, int k) noexcept nogil 44 | 45 | cdef inline int _neighbor_boxes(self, int i, int j, int k, 46 | int* x, int* y, int* z) noexcept nogil 47 | 48 | cpdef _refresh(self) 49 | 50 | cpdef _bin(self, int pa_index, 
UIntArray indices) 51 | 52 | # NNPS using Extended Spatial Hashing algorithm 53 | cdef class ExtendedSpatialHashNNPS(NNPS): 54 | ############################################################################ 55 | # Data Attributes 56 | ############################################################################ 57 | cdef long long int table_size # Size of hashtable 58 | cdef double radius_scale2 59 | 60 | cdef HashTable** hashtable 61 | cdef HashTable* current_hash 62 | 63 | cdef NNPSParticleArrayWrapper dst, src 64 | 65 | cdef int H 66 | cdef double h_sub 67 | cdef bint approximate 68 | 69 | ########################################################################## 70 | # Member functions 71 | ########################################################################## 72 | 73 | cpdef set_context(self, int src_index, int dst_index) 74 | 75 | cdef void find_nearest_neighbors(self, size_t d_idx, UIntArray nbrs) noexcept nogil 76 | 77 | cdef inline int _h_mask_approx(self, int* x, int* y, int* z) noexcept nogil 78 | 79 | cdef inline int _h_mask_exact(self, int* x, int* y, int* z) noexcept nogil 80 | 81 | cdef int _neighbor_boxes(self, int i, int j, int k, 82 | int* x, int* y, int* z, double h) noexcept nogil 83 | 84 | cdef inline void _add_to_hashtable(self, int hash_id, unsigned int pid, double h, 85 | int i, int j, int k) noexcept nogil 86 | 87 | cpdef _refresh(self) 88 | 89 | cpdef _bin(self, int pa_index, UIntArray indices) 90 | -------------------------------------------------------------------------------- /pysph/base/stratified_hash_nnps.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from libcpp.vector cimport vector 6 | 7 | from .nnps_base cimport * 8 | 9 | ctypedef unsigned int u_int 10 | 11 | cdef extern from 'math.h': 12 | int abs(int) nogil 13 | double 
ceil(double) nogil 14 | double floor(double) nogil 15 | double fabs(double) nogil 16 | double fmax(double, double) nogil 17 | double fmin(double, double) nogil 18 | 19 | #Imports for SpatialHashNNPS 20 | cdef extern from "spatial_hash.h": 21 | cdef cppclass HashEntry: 22 | double h_max 23 | 24 | vector[unsigned int] *get_indices() noexcept nogil 25 | 26 | cdef cppclass HashTable: 27 | long long int table_size 28 | 29 | HashTable(long long int) except + nogil 30 | void add(int, int, int, int, double) noexcept nogil 31 | HashEntry* get(int, int, int) noexcept nogil 32 | int number_of_particles() noexcept nogil 33 | 34 | cdef class StratifiedHashNNPS(NNPS): 35 | ############################################################################ 36 | # Data Attributes 37 | ############################################################################ 38 | cdef long long int table_size # Size of hashtable 39 | cdef double radius_scale2 40 | 41 | cdef public int num_levels 42 | cdef public int H 43 | 44 | cdef double interval_size 45 | 46 | cdef HashTable*** hashtable 47 | cdef HashTable** current_hash 48 | 49 | cdef double** cell_sizes 50 | cdef double* current_cells 51 | 52 | cdef NNPSParticleArrayWrapper dst, src 53 | 54 | ########################################################################## 55 | # Member functions 56 | ########################################################################## 57 | 58 | cpdef set_context(self, int src_index, int dst_index) 59 | 60 | cpdef int count_particles(self, int interval) 61 | 62 | cpdef double get_binning_size(self, int interval) 63 | 64 | cdef void find_nearest_neighbors(self, size_t d_idx, UIntArray nbrs) noexcept nogil 65 | 66 | cdef inline int _h_mask_exact(self, int* x, int* y, int* z, int H) noexcept nogil 67 | 68 | cdef inline int _neighbor_boxes(self, int i, int j, int k, 69 | int* x, int* y, int* z, int H) noexcept nogil 70 | 71 | cdef inline int _get_hash_id(self, double h) noexcept nogil 72 | 73 | cdef inline void 
_set_h_max(self, double* current_cells, double* src_h_ptr, 74 | int num_particles) noexcept nogil 75 | 76 | cdef inline double _get_h_max(self, double* current_cells, int hash_id) noexcept nogil 77 | 78 | cpdef _refresh(self) 79 | 80 | cpdef _bin(self, int pa_index, UIntArray indices) 81 | -------------------------------------------------------------------------------- /pysph/base/stratified_sfc_gpu_nnps.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from libcpp.vector cimport vector 6 | from libcpp.map cimport map 7 | from libcpp.pair cimport pair 8 | 9 | from pysph.base.gpu_nnps_base cimport * 10 | 11 | ctypedef unsigned int u_int 12 | ctypedef map[u_int, pair[u_int, u_int]] key_to_idx_t 13 | ctypedef vector[u_int] u_int_vector_t 14 | 15 | cdef extern from 'math.h': 16 | int abs(int) nogil 17 | double ceil(double) nogil 18 | double floor(double) nogil 19 | double fabs(double) nogil 20 | double fmax(double, double) nogil 21 | double fmin(double, double) nogil 22 | 23 | cdef extern from 'math.h': 24 | double log(double) nogil 25 | double log2(double) nogil 26 | 27 | cdef class StratifiedSFCGPUNNPS(GPUNNPS): 28 | cdef NNPSParticleArrayWrapper src, dst # Current source and destination. 
29 | 30 | cdef public list pids 31 | cdef public list pid_keys 32 | cdef public list start_idx_levels 33 | cdef public list num_particles_levels 34 | cdef public int max_num_bits 35 | cdef int num_levels 36 | cdef double interval_size 37 | cdef double eps 38 | 39 | cdef object helper 40 | 41 | cdef bint _sorted 42 | 43 | cpdef get_spatially_ordered_indices(self, int pa_index) 44 | 45 | cpdef _bin(self, int pa_index) 46 | 47 | cpdef _refresh(self) 48 | 49 | cdef void find_neighbor_lengths(self, nbr_lengths) 50 | 51 | cdef void find_nearest_neighbors_gpu(self, nbrs, start_indices) 52 | -------------------------------------------------------------------------------- /pysph/base/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/base/tests/__init__.py -------------------------------------------------------------------------------- /pysph/base/tests/test_reduce_array.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from unittest import TestCase, main 3 | 4 | from pysph.base.reduce_array import serial_reduce_array, dummy_reduce_array 5 | 6 | 7 | class TestSerialReduceArray(TestCase): 8 | def test_reduce_sum_works(self): 9 | x = np.linspace(0, 10, 100) 10 | expect = np.sum(x) 11 | result = serial_reduce_array(x, 'sum') 12 | self.assertAlmostEqual(result, expect) 13 | 14 | def test_reduce_prod_works(self): 15 | x = np.linspace(0, 10, 100) 16 | expect = np.prod(x) 17 | result = serial_reduce_array(x, 'prod') 18 | self.assertAlmostEqual(result, expect) 19 | 20 | def test_reduce_max_works(self): 21 | x = np.linspace(0, 10, 100) 22 | expect = np.max(x) 23 | result = serial_reduce_array(x, 'max') 24 | self.assertAlmostEqual(result, expect) 25 | 26 | def test_reduce_min_works(self): 27 | x = np.linspace(0, 10, 100) 28 | expect = np.min(x) 29 | result = serial_reduce_array(x, 
'min') 30 | self.assertAlmostEqual(result, expect) 31 | 32 | def test_reduce_raises_error_for_wrong_op(self): 33 | x = np.linspace(0, 10, 100) 34 | self.assertRaises(RuntimeError, serial_reduce_array, x, 'foo') 35 | 36 | def test_dummy_reduce_array_does_nothing(self): 37 | x = np.array([1.0, 2.0]) 38 | expect = x 39 | result = dummy_reduce_array(x, 'min') 40 | self.assertTrue(np.all(result == expect)) 41 | 42 | 43 | if __name__ == '__main__': 44 | main() 45 | -------------------------------------------------------------------------------- /pysph/base/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase, main 2 | 3 | from ..utils import is_overloaded_method 4 | 5 | 6 | class TestUtils(TestCase): 7 | def test_is_overloaded_method_works_for_simple_overloads(self): 8 | # Given 9 | class A(object): 10 | def f(self): pass 11 | 12 | class B(A): 13 | pass 14 | 15 | # When/Then 16 | b = B() 17 | self.assertFalse(is_overloaded_method(b.f)) 18 | 19 | class C(A): 20 | def f(self): pass 21 | 22 | # When/Then 23 | c = C() 24 | self.assertTrue(is_overloaded_method(c.f)) 25 | 26 | def test_is_overloaded_method_works_for_parent_overloads(self): 27 | # Given 28 | class A(object): 29 | def f(self): pass 30 | 31 | class B(A): 32 | def f(self): pass 33 | 34 | class C(B): 35 | pass 36 | 37 | # When/Then 38 | c = C() 39 | self.assertTrue(is_overloaded_method(c.f)) 40 | 41 | 42 | if __name__ == '__main__': 43 | main() 44 | -------------------------------------------------------------------------------- /pysph/base/tree/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/base/tree/__init__.py -------------------------------------------------------------------------------- /pysph/base/tree/tree.mako: 
-------------------------------------------------------------------------------- 1 | //CL// 2 | <%def name="preamble(data_t)" cached="False"> 3 | char eye_index(ulong sfc, ulong mask, char rshift) { 4 | return ((sfc & mask) >> rshift); 5 | } 6 | 7 | 8 | <%def name="reorder_particles_args(k, data_vars, data_var_ctypes, const_vars, 9 | const_var_ctypes, index_code)" cached="False"> 10 | int *pids, int *cids, char *seg_flag, 11 | uint2 *pbounds, int *offsets, uint${k} *octant_vector, 12 | int *pids_next, int *cids_next, 13 | % for var, ctype in zip(data_vars, data_var_ctypes): 14 | ${ctype} *${var}, 15 | % endfor 16 | % for var, ctype in zip(data_vars, data_var_ctypes): 17 | ${ctype} *${var}_next, 18 | % endfor 19 | % for var, ctype in zip(const_vars, const_var_ctypes): 20 | ${ctype} ${var}, 21 | % endfor 22 | uint csum_nodes_prev 23 | 24 | 25 | <%def name="reorder_particles_src(k, data_vars, data_var_ctypes, const_vars, 26 | const_var_ctypes, index_code)" cached="False"> 27 | int curr_cid = cids[i] - csum_nodes_prev; 28 | if (curr_cid < 0 || offsets[curr_cid] == -1) { 29 | cids_next[i] = cids[i]; 30 | pids_next[i] = pids[i]; 31 | 32 | % for var in data_vars: 33 | ${var}_next[i] = ${var}[i]; 34 | % endfor 35 | } else { 36 | uint2 pbound_here = pbounds[curr_cid]; 37 | char octant = (${index_code}); 38 | 39 | global uint *octv = (global uint *)(octant_vector + i); 40 | int sum = octv[octant]; 41 | sum -= (octant == 0) ? 0 : octv[octant - 1]; 42 | octv = (global uint *)(octant_vector + pbound_here.s1 - 1); 43 | sum += (octant == 0) ? 
0 : octv[octant - 1]; 44 | 45 | uint new_index = pbound_here.s0 + sum - 1; 46 | 47 | pids_next[new_index] = pids[i]; 48 | cids_next[new_index] = offsets[curr_cid] + octant; 49 | 50 | % for var in data_vars: 51 | ${var}_next[new_index] = ${var}[i]; 52 | % endfor 53 | } 54 | 55 | 56 | <%def name="append_layer_args()" cached="False"> 57 | int *offsets_next, uint2 *pbounds_next, 58 | int *offsets, uint2 *pbounds, 59 | int curr_offset, char is_last_level 60 | 61 | 62 | <%def name="append_layer_src()" cached="False"> 63 | pbounds[curr_offset + i] = pbounds_next[i]; 64 | offsets[curr_offset + i] = is_last_level ? -1 : offsets_next[i]; 65 | 66 | 67 | <%def name="set_node_data_args(k)", cached="False"> 68 | int *offsets_prev, uint2 *pbounds_prev, 69 | int *offsets, uint2 *pbounds, 70 | char *seg_flag, uint${k} *octant_vector, 71 | uint csum_nodes, uint N 72 | 73 | 74 | <%def name="set_node_data_src(k)", cached="False"> 75 | uint2 pbound_here = pbounds_prev[i]; 76 | int child_offset = offsets_prev[i]; 77 | if (child_offset == -1) { 78 | PYOPENCL_ELWISE_CONTINUE; 79 | } 80 | child_offset -= csum_nodes; 81 | 82 | uint${k} octv = octant_vector[pbound_here.s1 - 1]; 83 | 84 | % for i in range(k): 85 | % if i == 0: 86 | pbounds[child_offset] = (uint2)(pbound_here.s0, pbound_here.s0 + octv.s0); 87 | % else: 88 | pbounds[child_offset + ${i}] = (uint2)(pbound_here.s0 + octv.s${i - 1}, 89 | pbound_here.s0 + octv.s${i}); 90 | if (pbound_here.s0 + octv.s${i - 1} < N) 91 | seg_flag[pbound_here.s0 + octv.s${i - 1}] = 1; 92 | % endif 93 | % endfor 94 | 95 | -------------------------------------------------------------------------------- /pysph/base/z_order.h: -------------------------------------------------------------------------------- 1 | #ifndef Z_ORDER_H 2 | #define Z_ORDER_H 3 | #include 4 | #include 5 | #include 6 | 7 | #ifdef _WIN32 8 | typedef unsigned int uint32_t; 9 | typedef unsigned long long uint64_t; 10 | #else 11 | #include 12 | #endif 13 | 14 | using namespace std; 15 | 
16 | inline void find_cell_id(double x, double y, double z, double h, 17 | int &c_x, int &c_y, int &c_z) 18 | { 19 | c_x = floor(x/h); 20 | c_y = floor(y/h); 21 | c_z = floor(z/h); 22 | } 23 | 24 | inline uint64_t get_key(uint64_t i, uint64_t j, uint64_t k) 25 | { 26 | 27 | i = (i | (i << 32)) & 0x1f00000000ffff; 28 | i = (i | (i << 16)) & 0x1f0000ff0000ff; 29 | i = (i | (i << 8)) & 0x100f00f00f00f00f; 30 | i = (i | (i << 4)) & 0x10c30c30c30c30c3; 31 | i = (i | (i << 2)) & 0x1249249249249249; 32 | 33 | j = (j | (j << 32)) & 0x1f00000000ffff; 34 | j = (j | (j << 16)) & 0x1f0000ff0000ff; 35 | j = (j | (j << 8)) & 0x100f00f00f00f00f; 36 | j = (j | (j << 4)) & 0x10c30c30c30c30c3; 37 | j = (j | (j << 2)) & 0x1249249249249249; 38 | 39 | k = (k | (k << 32)) & 0x1f00000000ffff; 40 | k = (k | (k << 16)) & 0x1f0000ff0000ff; 41 | k = (k | (k << 8)) & 0x100f00f00f00f00f; 42 | k = (k | (k << 4)) & 0x10c30c30c30c30c3; 43 | k = (k | (k << 2)) & 0x1249249249249249; 44 | 45 | return (i | (j << 1) | (k << 2)); 46 | } 47 | 48 | class CompareSortWrapper 49 | { 50 | private: 51 | uint32_t* current_pids; 52 | uint64_t* current_keys; 53 | int length; 54 | public: 55 | CompareSortWrapper() 56 | { 57 | this->current_pids = NULL; 58 | this->current_keys = NULL; 59 | this->length = 0; 60 | } 61 | 62 | CompareSortWrapper(uint32_t* current_pids, uint64_t* current_keys, 63 | int length) 64 | { 65 | this->current_pids = current_pids; 66 | this->current_keys = current_keys; 67 | this->length = length; 68 | } 69 | 70 | struct CompareFunctionWrapper 71 | { 72 | CompareSortWrapper* data; 73 | 74 | CompareFunctionWrapper(CompareSortWrapper* data) 75 | { 76 | this->data = data; 77 | } 78 | 79 | inline bool operator()(const int &a, const int &b) 80 | { 81 | return this->data->current_keys[a] < this->data->current_keys[b]; 82 | } 83 | }; 84 | 85 | inline void compare_sort() 86 | { 87 | sort(this->current_pids, this->current_pids + this->length, 88 | CompareFunctionWrapper(this)); 89 | 90 | 
sort(this->current_keys, this->current_keys + this->length); 91 | } 92 | }; 93 | 94 | #endif 95 | 96 | -------------------------------------------------------------------------------- /pysph/base/z_order_gpu_nnps.pxd: -------------------------------------------------------------------------------- 1 | # cython: language_level=3, embedsignature=True 2 | # distutils: language=c++ 3 | # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION 4 | 5 | from libcpp.map cimport map 6 | from libcpp.pair cimport pair 7 | 8 | from pysph.base.gpu_nnps_base cimport * 9 | 10 | ctypedef unsigned int u_int 11 | ctypedef map[u_int, pair[u_int, u_int]] key_to_idx_t 12 | 13 | cdef extern from "math.h": 14 | double log2(double) nogil 15 | 16 | cdef extern from "z_order.h": 17 | ctypedef unsigned long long uint64_t 18 | ctypedef unsigned int uint32_t 19 | 20 | cdef class ZOrderGPUNNPS(GPUNNPS): 21 | cdef NNPSParticleArrayWrapper src, dst # Current source and destination. 22 | 23 | cdef public list pids 24 | cdef public list pid_keys 25 | cdef public list cids 26 | cdef public list cid_to_idx 27 | cdef public list max_cid 28 | cdef public list cell_lengths 29 | cdef public list cell_start_indices 30 | cdef public object dst_to_src 31 | cdef object overflow_cid_to_idx 32 | cdef object curr_cid 33 | cdef object max_cid_src 34 | cdef object allocator 35 | 36 | cdef object helper 37 | cdef object radix_sort 38 | cdef object make_vec 39 | 40 | cdef public bint sorted 41 | cdef bint dst_src 42 | 43 | cdef object z_order_nbrs 44 | cdef object z_order_nbr_lengths 45 | 46 | #cpdef get_spatially_ordered_indices(self, int pa_index) 47 | 48 | cpdef _bin(self, int pa_index) 49 | 50 | cpdef _refresh(self) 51 | 52 | cdef void find_neighbor_lengths(self, nbr_lengths) 53 | 54 | cdef void find_nearest_neighbors_gpu(self, nbrs, start_indices) 55 | -------------------------------------------------------------------------------- /pysph/examples/__init__.py: 
"""A very simple example to help benchmark PySPH. (2 minutes)

The example creates a cube shaped block of water falling in free-space under
the influence of gravity while solving the incompressible, inviscid flow
equations. Only 5 time steps are solved but with a million particles. It is
easy to change the number of particles by simply passing the command line
argument --np to a desired number::

    $ pysph run cube --np 2e6

To check the performance of PySPH using OpenMP one could try the following::

    $ pysph run cube --disable-output

    $ pysph run cube --disable-output --openmp

"""

import numpy

from pysph.base.kernels import CubicSpline
from pysph.base.utils import get_particle_array_wcsph
from pysph.solver.application import Application
from pysph.sph.scheme import WCSPHScheme

# Reference density of the fluid.
rho0 = 1000.0


class Cube(Application):
    def add_user_options(self, group):
        # type=float so users can write "--np 2e6"; converted where used.
        group.add_argument(
            "--np", action="store", type=float, dest="np", default=int(1e5),
            help="Number of particles in the cube (1e5 by default)."
        )

    def consume_user_options(self):
        """Derive the particle spacing from the requested particle count."""
        self.hdx = 1.5
        # Unit cube, so spacing is the cube root of the inverse count.
        self.dx = 1.0/pow(self.options.np, 1.0/3.0)

    def configure_scheme(self):
        """Configure the scheme and solver once options are known."""
        self.scheme.configure(h0=self.hdx*self.dx, hdx=self.hdx)
        kernel = CubicSpline(dim=3)
        dt = 1e-4
        tf = 5e-4  # only 5 steps -- this is a benchmark, not a simulation
        self.scheme.configure_solver(kernel=kernel, tf=tf, dt=dt)

    def create_scheme(self):
        co = 10.0
        s = WCSPHScheme(
            ['fluid'], [], dim=3, rho0=rho0, c0=co,
            h0=0.1, hdx=1.5, gz=-9.81, gamma=7.0,
            alpha=0.5, beta=0.0
        )
        return s

    def create_particles(self):
        """Create a unit cube of fluid particles with uniform spacing."""
        dx = self.dx
        hdx = self.hdx
        xmin, xmax = 0.0, 1.0
        ymin, ymax = 0.0, 1.0
        zmin, zmax = 0.0, 1.0
        x, y, z = numpy.mgrid[xmin:xmax:dx, ymin:ymax:dx, zmin:zmax:dx]
        x = x.ravel()
        y = y.ravel()
        z = z.ravel()

        # Set up particle properties: smoothing length and mass from the
        # spacing and reference density.
        h0 = hdx * dx

        volume = dx**3
        m0 = rho0 * volume

        fluid = get_particle_array_wcsph(name='fluid', x=x, y=y, z=z)
        fluid.m[:] = m0
        fluid.h[:] = h0

        fluid.rho[:] = rho0

        print("Number of particles:", x.size)
        fluid.set_lb_props(list(fluid.properties.keys()))

        if fluid.gpu:
            fluid.gpu.push()

        return [fluid]


if __name__ == '__main__':
    app = Cube()
    app.run()
"""Evolution of a circular patch of incompressible fluid. (30 seconds)

This is the simplest implementation using existing schemes.

See J. J. Monaghan "Simulating Free Surface Flows with SPH", JCP, 1994, 100, pp
399 - 406
"""
from __future__ import print_function

from numpy import ones_like, mgrid, sqrt

from pysph.base.utils import get_particle_array
from pysph.solver.application import Application
from pysph.sph.scheme import WCSPHScheme


class EllipticalDrop(Application):
    def initialize(self):
        # Physical and numerical parameters used by the scheme and the
        # particle setup.
        self.co = 1400.0   # speed of sound
        self.ro = 1.0      # reference density
        self.hdx = 1.3     # smoothing length / particle spacing ratio
        self.dx = 0.025    # particle spacing
        self.alpha = 0.1   # artificial viscosity parameter

    def create_scheme(self):
        """Create the WCSPH scheme with a configured solver."""
        # Use self.alpha here so the parameter defined in initialize()
        # actually takes effect (previously a literal 0.1 shadowed it).
        s = WCSPHScheme(
            ['fluid'], [], dim=2, rho0=self.ro, c0=self.co,
            h0=self.dx*self.hdx, hdx=self.hdx, gamma=7.0,
            alpha=self.alpha, beta=0.0
        )
        dt = 5e-6
        tf = 0.0076
        s.configure_solver(dt=dt, tf=tf)
        return s

    def create_particles(self):
        """Create the circular patch of fluid."""
        dx = self.dx
        hdx = self.hdx
        ro = self.ro
        name = 'fluid'
        x, y = mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]
        x = x.ravel()
        y = y.ravel()

        m = ones_like(x)*dx*dx*ro
        h = ones_like(x)*hdx*dx
        rho = ones_like(x) * ro
        # Initial velocity field: u = -100 x, v = 100 y, which deforms the
        # disk into an ellipse.
        u = -100*x
        v = 100*y

        # Indices of particles that fall outside the unit circle.
        indices = [
            i for i in range(len(x))
            if sqrt(x[i]*x[i] + y[i]*y[i]) - 1 > 1e-10
        ]

        pa = get_particle_array(x=x, y=y, m=m, rho=rho, h=h, u=u, v=v,
                                name=name)
        pa.remove_particles(indices)

        print("Elliptical drop :: %d particles"
              % (pa.get_number_of_particles()))

        self.scheme.setup_properties([pa])
        return [pa]


if __name__ == '__main__':
    app = EllipticalDrop()
    app.run()
from pysph.examples.flow_past_cylinder_2d import WindTunnel
from pysph.base.utils import get_particle_array
from math import sin, cos, pi
import numpy as np
import tempfile
import os

# fluid mechanical/numerical parameters
rho = 1000
umax = 1.0
c0 = 10 * umax
p0 = rho * c0 * c0
use_coords = True

# Creating the file and coordinates of the cylinder surface for
# demonstration.  A context manager guarantees the file is closed even if
# a write fails.
xc, yc = [], []
cyl_file = os.path.join(tempfile.gettempdir(), 'cylinder.txt')
print(cyl_file)
with open(cyl_file, 'w') as fp:
    for i in range(0, 100):
        _x = cos(2 * pi * i / 100) + 5.0
        _y = sin(2 * pi * i / 100)
        xc.append(_x)
        yc.append(_y)
        fp.write('%.3f %.3f\n' % (_x, _y))


class FPCWithPackedCylinder(WindTunnel):
    def _get_packed_points(self):
        '''Pack particles around the cylinder surface.

        returns
        xs, ys, zs, xf, yf, zf
        '''
        from pysph.tools.geometry import (
            get_packed_2d_particles_from_surface_coordinates,
            get_packed_2d_particles_from_surface_file)
        folder = self.output_dir
        dx = self.dx
        if use_coords:
            return get_packed_2d_particles_from_surface_coordinates(
                self.add_user_options, folder, dx, x=np.array(xc),
                y=np.array(yc), shift=True)
        else:
            return get_packed_2d_particles_from_surface_file(
                self.add_user_options, folder, dx, filename=cyl_file,
                shift=True)

    def _create_solid(self):
        """Create the solid cylinder particle array from the packed points."""
        xs, ys, zs, xf, yf, zf = self._get_packed_points()
        dx = self.dx
        h0 = self.h
        volume = dx*dx
        solid = get_particle_array(
            name='solid', x=xs-dx/2, y=ys,
            m=volume*rho, rho=rho, h=h0, V=1.0/volume)
        return solid

    def _create_fluid(self):
        """Create the fluid particle array surrounding the packing."""
        from pysph.tools.geometry import create_fluid_around_packing
        xs, ys, zs, xf, yf, zf = self._get_packed_points()
        dx = self.dx
        h0 = self.h
        volume = dx*dx
        L = self.Lt
        B = self.Wt * 2.0

        fluid = create_fluid_around_packing(
            dx, xf-dx/2, yf, L, B, m=volume*rho, rho=rho, h=h0, V=1.0/volume,
            u=umax, p=0.0, uhat=umax)

        return fluid

    def create_particles(self):
        """Assemble all particle arrays (fluid, boundaries and ghosts)."""
        fluid = self._create_fluid()
        solid = self._create_solid()
        outlet = self._create_outlet()
        inlet = self._create_inlet()
        wall = self._create_wall()

        ghost_inlet = self.iom.create_ghost(inlet, inlet=True)
        ghost_outlet = self.iom.create_ghost(outlet, inlet=False)

        particles = [fluid, inlet, outlet, solid, wall]
        if ghost_inlet:
            particles.append(ghost_inlet)
        if ghost_outlet:
            particles.append(ghost_outlet)

        self.scheme.setup_properties(particles)
        self._set_wall_normal(wall)

        if self.io_method == 'hybrid':
            fluid.uag[:] = 1.0
            fluid.uta[:] = 1.0
            outlet.uta[:] = 1.0

        return particles


if __name__ == '__main__':
    app = FPCWithPackedCylinder()
    app.run()
    app.post_process(app.info_filename)
"""Sedov point explosion problem. (7 minutes)

Particles are distributed on concentric circles about the origin with
increasing number of particles with increasing radius. A unit charge
is distributed about the center which gives the initial pressure
disturbance.

"""
# NumPy and standard library imports
import os.path
import numpy

# PySPH base and carray imports
from pysph.base.utils import get_particle_array as gpa
from pysph.solver.application import Application
from pysph.sph.scheme import GasDScheme, SchemeChooser
from pysph.sph.gas_dynamics.psph import PSPHScheme
from pysph.sph.gas_dynamics.tsph import TSPHScheme
from pysph.sph.gas_dynamics.magma2 import MAGMA2Scheme

# Numerical constants
dim = 2
gamma = 5.0/3.0
gamma1 = gamma - 1.0

# solution parameters
dt = 1e-4
tf = 0.1

# scheme constants
alpha1 = 10.0
alpha2 = 1.0
beta = 2.0
kernel_factor = 1.2


class SedovPointExplosion(Application):
    def create_particles(self):
        """Load the ndspmhd initial conditions and build the fluid array."""
        fpath = os.path.join(
            os.path.dirname(__file__), 'ndspmhd-sedov-initial-conditions.npz'
        )
        data = numpy.load(fpath)
        # The tiny 1e-9 offset keeps the specific internal energy strictly
        # positive everywhere.
        fluid = gpa(
            name='fluid', x=data['x'], y=data['y'], rho=data['rho'],
            p=data['p'], e=data['e'] + 1e-9, h=data['h'], m=data['m']
        )
        self.scheme.setup_properties([fluid])

        # Initial smoothing length proportional to the particle volume.
        fluid.h[:] = kernel_factor * (fluid.m/fluid.rho)**(1./dim)

        print("Sedov's point explosion with %d particles"
              % (fluid.get_number_of_particles()))

        return [fluid, ]

    def create_scheme(self):
        """Offer MPM (default), PSPH, TSPH and MAGMA2 via a SchemeChooser."""
        mpm = GasDScheme(
            fluids=['fluid'], solids=[], dim=dim, gamma=gamma,
            kernel_factor=kernel_factor, alpha1=alpha1, alpha2=alpha2,
            beta=beta, adaptive_h_scheme="mpm",
            update_alpha1=True, update_alpha2=True
        )
        psph = PSPHScheme(
            fluids=['fluid'], solids=[], dim=dim, gamma=gamma,
            hfact=kernel_factor
        )
        tsph = TSPHScheme(
            fluids=['fluid'], solids=[], dim=dim, gamma=gamma,
            hfact=kernel_factor
        )
        # Reconstruction does not work with this initial condition and
        # initial distribution combination.
        magma2 = MAGMA2Scheme(
            fluids=['fluid'], solids=[], dim=dim, gamma=gamma,
            ndes=40, reconstruction_order=0
        )
        return SchemeChooser(
            default='mpm', mpm=mpm, psph=psph, tsph=tsph, magma2=magma2
        )

    def configure_scheme(self):
        """Use a fixed time step; dump output every 25 steps."""
        self.scheme.configure_solver(
            dt=dt, tf=tf, adaptive_timestep=False, pfreq=25
        )


if __name__ == '__main__':
    app = SedovPointExplosion()
    app.run()
11 | 12 | - the formulation and parameters used for the rigid body collision is not 13 | tested if it conserves energy and works correctly in all cases. The choice 14 | of parameters is currently ad-hoc. 15 | 16 | - the rigid-fluid coupling should also be looked at a bit more carefully with 17 | proper comparisons to well-known results. 18 | 19 | Right now, it looks pretty and is a reasonable start. 20 | -------------------------------------------------------------------------------- /pysph/examples/rigid_body/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/rigid_body/__init__.py -------------------------------------------------------------------------------- /pysph/examples/rigid_body/bouncing_cube.py: -------------------------------------------------------------------------------- 1 | """A cube bouncing inside a box. (5 seconds) 2 | 3 | This is used to test the rigid body equations. 
4 | """ 5 | 6 | import numpy as np 7 | 8 | from pysph.base.kernels import CubicSpline 9 | from pysph.base.utils import get_particle_array_rigid_body 10 | from pysph.sph.equation import Group 11 | 12 | from pysph.sph.integrator import EPECIntegrator 13 | 14 | from pysph.solver.application import Application 15 | from pysph.solver.solver import Solver 16 | from pysph.sph.rigid_body import (BodyForce, RigidBodyCollision, 17 | RigidBodyMoments, RigidBodyMotion, 18 | RK2StepRigidBody) 19 | 20 | dim = 3 21 | 22 | dt = 5e-3 23 | tf = 5.0 24 | gz = -9.81 25 | 26 | hdx = 1.0 27 | dx = dy = 0.02 28 | rho0 = 10.0 29 | 30 | 31 | class BouncingCube(Application): 32 | def create_particles(self): 33 | nx, ny, nz = 10, 10, 10 34 | dx = 1.0 / (nx - 1) 35 | x, y, z = np.mgrid[0:1:nx * 1j, 0:1:ny * 1j, 0:1:nz * 1j] 36 | x = x.flat 37 | y = y.flat 38 | z = (z - 1).flat 39 | m = np.ones_like(x) * dx * dx * rho0 40 | h = np.ones_like(x) * hdx * dx 41 | # radius of each sphere constituting in cube 42 | rad_s = np.ones_like(x) * dx 43 | body = get_particle_array_rigid_body(name='body', x=x, y=y, z=z, h=h, 44 | m=m, rad_s=rad_s) 45 | 46 | body.vc[0] = -5.0 47 | body.vc[2] = -5.0 48 | 49 | # Create the tank. 
50 | nx, ny, nz = 40, 40, 40 51 | dx = 1.0 / (nx - 1) 52 | xmin, xmax, ymin, ymax, zmin, zmax = -2, 2, -2, 2, -2, 2 53 | x, y, z = np.mgrid[xmin:xmax:nx * 1j, ymin:ymax:ny * 1j, zmin:zmax:nz * 54 | 1j] 55 | interior = ((x < 1.8) & (x > -1.8)) & ((y < 1.8) & (y > -1.8)) & ( 56 | (z > -1.8) & (z <= 2)) 57 | tank = np.logical_not(interior) 58 | x = x[tank].flat 59 | y = y[tank].flat 60 | z = z[tank].flat 61 | m = np.ones_like(x) * dx * dx * rho0 62 | h = np.ones_like(x) * hdx * dx 63 | 64 | # radius of each sphere constituting in cube 65 | rad_s = np.ones_like(x) * dx 66 | tank = get_particle_array_rigid_body(name='tank', x=x, y=y, z=z, h=h, 67 | m=m, rad_s=rad_s) 68 | tank.total_mass[0] = np.sum(m) 69 | 70 | return [body, tank] 71 | 72 | def create_solver(self): 73 | kernel = CubicSpline(dim=dim) 74 | 75 | integrator = EPECIntegrator(body=RK2StepRigidBody()) 76 | 77 | solver = Solver(kernel=kernel, dim=dim, integrator=integrator, dt=dt, 78 | tf=tf, adaptive_timestep=False) 79 | solver.set_print_freq(10) 80 | return solver 81 | 82 | def create_equations(self): 83 | equations = [ 84 | Group(equations=[ 85 | BodyForce(dest='body', sources=None, gz=gz), 86 | RigidBodyCollision(dest='body', sources=['tank'], kn=1e4, en=1) 87 | ]), 88 | Group(equations=[RigidBodyMoments(dest='body', sources=None)]), 89 | Group(equations=[RigidBodyMotion(dest='body', sources=None)]), 90 | ] 91 | return equations 92 | 93 | 94 | if __name__ == '__main__': 95 | app = BouncingCube() 96 | app.run() 97 | -------------------------------------------------------------------------------- /pysph/examples/rigid_body/simple.py: -------------------------------------------------------------------------------- 1 | """Very simple rigid body motion. (5 seconds) 2 | 3 | This is used to test the rigid body equations. 
4 | """ 5 | 6 | import numpy as np 7 | 8 | from pysph.base.kernels import CubicSpline 9 | from pysph.base.utils import get_particle_array_rigid_body 10 | from pysph.sph.equation import Group 11 | 12 | from pysph.sph.integrator import EPECIntegrator 13 | 14 | from pysph.solver.application import Application 15 | from pysph.solver.solver import Solver 16 | from pysph.sph.rigid_body import RigidBodyMoments, RigidBodyMotion, RK2StepRigidBody 17 | 18 | dim = 3 19 | 20 | dt = 1e-3 21 | tf = 2.5 22 | 23 | hdx = 1.0 24 | rho0 = 10.0 25 | 26 | 27 | class SimpleRigidMotion(Application): 28 | def create_particles(self): 29 | nx, ny, nz = 10, 10, 10 30 | dx = 1.0/(nx-1) 31 | x, y, z = np.mgrid[0:1:nx*1j, 0:1:ny*1j, 0:1:nz*1j] 32 | x = x.flat 33 | y = y.flat 34 | z = z.flat 35 | m = np.ones_like(x)*dx*dx*rho0 36 | h = np.ones_like(x)*hdx*dx 37 | body = get_particle_array_rigid_body( 38 | name='body', x=x, y=y, z=z, h=h, m=m, 39 | ) 40 | 41 | body.omega[0] = 5.0 42 | body.omega[1] = 5.0 43 | body.vc[0] = 1.0 44 | body.vc[1] = 1.0 45 | 46 | return [body] 47 | 48 | def create_solver(self): 49 | kernel = CubicSpline(dim=dim) 50 | integrator = EPECIntegrator(body=RK2StepRigidBody()) 51 | solver = Solver(kernel=kernel, dim=dim, integrator=integrator, 52 | dt=dt, tf=tf, adaptive_timestep=False) 53 | solver.set_print_freq(10) 54 | return solver 55 | 56 | def create_equations(self): 57 | equations = [ 58 | Group(equations=[RigidBodyMoments(dest='body', sources=None)]), 59 | Group(equations=[RigidBodyMotion(dest='body', sources=None)]), 60 | ] 61 | return equations 62 | 63 | if __name__ == '__main__': 64 | app = SimpleRigidMotion() 65 | app.run() 66 | -------------------------------------------------------------------------------- /pysph/examples/rigid_body/sph.vtk.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/rigid_body/sph.vtk.gz 
-------------------------------------------------------------------------------- /pysph/examples/shallow_water/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/shallow_water/__init__.py -------------------------------------------------------------------------------- /pysph/examples/shallow_water/files_for_output_comparison/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/shallow_water/files_for_output_comparison/__init__.py -------------------------------------------------------------------------------- /pysph/examples/shallow_water/files_for_output_comparison/cyl_dam_closed_boun_t01.csv: -------------------------------------------------------------------------------- 1 | 4.3939393939393945, 3.115384615384615 2 | 4.494949494949497, 3.4038461538461533 3 | 4.646464646464647, 3.6730769230769234 4 | 4.797979797979799, 4.019230769230768 5 | 5, 4.519230769230768 6 | 5.151515151515152, 5 7 | 5.252525252525253, 5.6923076923076925 8 | 5.454545454545455, 6.442307692307692 9 | 5.555555555555555, 7.249999999999999 10 | 5.757575757575758, 8 11 | 5.909090909090908, 8.673076923076923 12 | 6.060606060606062, 9.23076923076923 13 | 6.212121212121213, 9.634615384615385 14 | 6.313131313131313, 9.903846153846153 15 | 6.515151515151516, 10 16 | 8.484848484848486, 10.01923076923077 17 | 7.727272727272728, 10.01923076923077 18 | 7.070707070707071, 10.01923076923077 19 | 9.242424242424244, 10.038461538461538 20 | 9.999999999999998, 10.038461538461538 21 | 10.707070707070704, 10.01923076923077 22 | 11.21212121212121, 10.038461538461538 23 | 11.919191919191919, 10.038461538461538 24 | 12.727272727272725, 10.01923076923077 25 | 13.383838383838382, 10.01923076923077 26 | 13.989898989898988, 
10.01923076923077 27 | 14.595959595959593, 9.98076923076923 28 | 15.202020202020202, 9.98076923076923 29 | 15.707070707070704, 10 30 | 16.313131313131308, 10.01923076923077 31 | 17.17171717171717, 10.01923076923077 32 | 18.030303030303024, 10.01923076923077 33 | 19.04040404040404, 10.01923076923077 34 | 19.848484848484844, 10.01923076923077 35 | 20.252525252525253, 10.01923076923077 36 | 20.95959595959596, 10.01923076923077 37 | 21.565656565656568, 10.038461538461538 38 | 22.17171717171717, 10.038461538461538 39 | 22.626262626262623, 10.01923076923077 40 | 23.33333333333333, 10.01923076923077 41 | 23.636363636363633, 9.923076923076923 42 | 23.737373737373737, 9.692307692307692 43 | 23.93939393939393, 9.26923076923077 44 | 24.090909090909086, 8.692307692307693 45 | 24.19191919191919, 8 46 | 24.39393939393939, 7.249999999999999 47 | 24.494949494949488, 6.442307692307692 48 | 24.646464646464644, 5.730769230769231 49 | 24.797979797979792, 5.038461538461538 50 | 24.999999999999993, 4.5 51 | 25.101010101010097, 4.038461538461538 52 | 25.252525252525245, 3.6923076923076916 53 | 25.454545454545446, 3.4038461538461533 54 | -------------------------------------------------------------------------------- /pysph/examples/shallow_water/files_for_output_comparison/cyl_dam_closed_boun_t02.csv: -------------------------------------------------------------------------------- 1 | 1.0215053763440878, 1.485187623436472 2 | 1.3440860215053778, 1.5859117840684682 3 | 1.6666666666666679, 1.7274522712310727 4 | 1.8279569892473138, 1.8288347597103378 5 | 2.204301075268818, 1.9293394777265753 6 | 2.473118279569894, 2.0302830809743266 7 | 2.741935483870968, 2.1516348474873848 8 | 3.06451612903226, 2.313583497915298 9 | 3.655913978494624, 2.658108404652184 10 | 3.333333333333334, 2.4553434276936574 11 | 3.9247311827957, 2.861092824226466 12 | 4.354838709677422, 3.185867895545316 13 | 4.516129032258066, 3.368883037085803 14 | 4.78494623655914, 3.612683783190697 15 | 5, 3.8362958086460406 16 | 
5.32258064516129, 4.202326091727015 17 | 5.591397849462368, 4.548167654158439 18 | 5.806451612903228, 4.8330041694096995 19 | 6.075268817204302, 5.219662058371737 20 | 6.451612903225806, 5.789554531490016 21 | 6.720430107526884, 6.237436910247972 22 | 7.096774193548388, 6.807329383366251 23 | 7.365591397849466, 7.193987272328288 24 | 7.634408602150538, 7.580645161290324 25 | 8.010752688172044, 8.089313144612685 26 | 8.333333333333336, 8.618608733816108 27 | 8.70967741935484, 9.168093043669082 28 | 9.086021505376344, 9.656352863726138 29 | 9.35483870967742, 9.879745446565725 30 | 9.838709677419352, 9.979811279350452 31 | 12.096774193548388, 9.950186526223394 32 | 10.913978494623656, 9.975422427035332 33 | 12.956989247311828, 9.9466754443713 34 | 13.817204301075268, 9.983980689049815 35 | 14.408602150537636, 9.920342330480581 36 | 14.946236559139784, 9.918147904323021 37 | 15.591397849462364, 9.915514592933949 38 | 16.344086021505376, 9.953258722843978 39 | 17.258064516129032, 9.949528198376127 40 | 18.333333333333336, 9.945139346061008 41 | 19.193548387096776, 9.941628264208912 42 | 19.838709677419352, 9.93899495281984 43 | 20.43010752688172, 9.936581084046525 44 | 20.806451612903228, 9.690147026552559 45 | 21.18279569892473, 9.280447662936146 46 | 21.559139784946236, 8.707482993197281 47 | 21.989247311827956, 8.073074391046745 48 | 22.41935483870968, 7.418257625630901 49 | 22.741935483870964, 6.927145051569019 50 | 23.118279569892472, 6.456221198156687 51 | 23.333333333333332, 6.1084046521834585 52 | 23.655913978494628, 5.617292078121575 53 | 23.9247311827957, 5.187623436471366 54 | 24.35483870967742, 4.553214834320828 55 | 24.999999999999996, 3.734254992319512 56 | 25.64516129032258, 3.0989686197059516 57 | 26.129032258064516, 2.7500548606539432 58 | 26.666666666666668, 2.360105332455566 59 | 27.311827956989244, 2.0105332455562923 60 | 28.11827956989247, 1.7419354838709733 61 | 28.60215053763441, 1.5358788676761073 62 | 28.9247311827957, 1.4121132323897356 63 | 
27.68817204301075, 1.886548167654162 64 | 26.39784946236559, 2.5448760149221012 65 | 26.989247311827956, 2.175115207373276 66 | -------------------------------------------------------------------------------- /pysph/examples/shallow_water/files_for_output_comparison/cyl_dam_closed_boun_t03.csv: -------------------------------------------------------------------------------- 1 | 0.10752688172043001, 6.488918147904323 2 | 0.2688172043010777, 6.386219003730525 3 | 0.5376344086021518, 6.283080974325215 4 | 0.9139784946236578, 6.281544876014923 5 | 1.5053763440860237, 6.258722843976301 6 | 2.204301075268818, 6.215053763440861 7 | 2.9032258064516157, 6.150976519640115 8 | 3.81720430107527, 6.00438885231512 9 | 4.408602150537636, 5.920342330480581 10 | 5.053763440860216, 5.774851876234365 11 | 5.376344086021508, 5.528637261356156 12 | 5.483870967741938, 5.262892253675664 13 | 5.591397849462368, 5.058371735791091 14 | 5.913978494623658, 5.016238753565943 15 | 5.967741935483872, 4.730305025235902 16 | 5.967741935483872, 4.4037744129910035 17 | 6.075268817204302, 3.9543559359227576 18 | 6.075268817204302, 3.5257845073513288 19 | 6.182795698924732, 3.1375905200789997 20 | 6.182795698924732, 2.811059907834103 21 | 6.236559139784948, 2.606758832565287 22 | 6.344086021505378, 2.25938117182357 23 | 6.451612903225806, 1.8711871845512427 24 | 6.612903225806452, 1.6868553873162178 25 | 6.989247311827958, 1.583278472679396 26 | 7.419354838709678, 1.5407066052227343 27 | 8.333333333333336, 1.5573842440201915 28 | 9.247311827956992, 1.55365371955234 29 | 9.892473118279572, 1.551020408163268 30 | 11.021505376344088, 1.5464121132323925 31 | 12.419354838709676, 1.5407066052227378 32 | 13.440860215053764, 1.5365371955233744 33 | 14.301075268817208, 1.5330261136712782 34 | 15.698924731182796, 1.5273206056616235 35 | 16.7741935483871, 1.5229317533465032 36 | 17.849462365591396, 1.5185429010313847 37 | 19.24731182795699, 1.5128373930217283 38 | 20.43010752688172, 1.5080096554750977 39 | 
21.720430107526884, 1.502743032696955 40 | 22.473118279569892, 1.4996708360763709 41 | 23.118279569892472, 1.4970375246872987 42 | 23.49462365591398, 1.6995830590300667 43 | 23.548387096774192, 1.9034452490673708 44 | 23.655913978494628, 2.3315777924072876 45 | 23.817204301075268, 2.779899056396756 46 | 23.870967741935484, 3.0041694096993687 47 | 23.870967741935484, 3.371516348474879 48 | 23.9247311827957, 3.799868334430551 49 | 23.978494623655916, 4.330261136712753 50 | 24.086021505376348, 4.615536537195528 51 | 24.086021505376348, 4.840026333113894 52 | 24.086021505376348, 4.962475312705733 53 | 24.40860215053763, 5.001974983541809 54 | 24.46236559139785, 5.2466535001097245 55 | 24.569892473118276, 5.450296247531274 56 | 24.677419354838715, 5.633530831687518 57 | 25.107526881720435, 5.774632433618612 58 | 25.537634408602155, 5.834101382488484 59 | 26.129032258064516, 5.9133201667763915 60 | 26.559139784946236, 5.993197278911569 61 | 27.365591397849467, 6.071538292736453 62 | 27.95698924731182, 6.10994075049375 63 | 28.387096774193548, 6.128593372833008 64 | 29.35483870967742, 6.145051569014707 65 | 28.817204301075268, 6.126837831906961 66 | 29.677419354838708, 6.245775729646701 67 | -------------------------------------------------------------------------------- /pysph/examples/shallow_water/files_for_output_comparison/cyl_dam_split_t01.csv: -------------------------------------------------------------------------------- 1 | 853.1746031746031, 0.09523809523809668 2 | 865.0793650793651, 0.3174603174603199 3 | 873.015873015873, 0.4444444444444464 4 | 880.952380952381, 0.6031746031746046 5 | 892.8571428571429, 0.8571428571428577 6 | 900.7936507936508, 1.0476190476190492 7 | 908.7301587301586, 1.2063492063492074 8 | 916.6666666666667, 1.3968253968253972 9 | 924.6031746031747, 1.5873015873015888 10 | 936.5079365079364, 1.9365079365079385 11 | 948.4126984126983, 2.3492063492063515 12 | 960.3174603174602, 2.730158730158731 13 | 968.2539682539682, 3.2063492063492056 14 | 
980.1587301587301, 3.650793650793652 15 | 996.0317460317458, 4.1269841269841265 16 | 1007.936507936508, 4.730158730158729 17 | 1023.8095238095239, 5.428571428571427 18 | 1039.6825396825398, 6.158730158730157 19 | 1055.5555555555557, 6.95238095238095 20 | 1063.4920634920634, 7.460317460317458 21 | 1075.396825396825, 7.904761904761902 22 | 1083.3333333333333, 8.41269841269841 23 | 1099.2063492063494, 8.98412698412698 24 | 1115.0793650793648, 9.555555555555554 25 | 1123.015873015873, 9.746031746031743 26 | 1134.9206349206347, 9.904761904761902 27 | 1150.7936507936508, 9.999999999999996 28 | 1166.6666666666667, 10.031746031746028 29 | 1301.5873015873015, 9.999999999999996 30 | 1250, 9.999999999999996 31 | 1214.2857142857142, 9.999999999999996 32 | 1190.4761904761904, 9.999999999999996 33 | 1376.9841269841265, 9.999999999999996 34 | 1432.5396825396822, 9.999999999999996 35 | 1515.8730158730157, 9.999999999999996 36 | 1623.015873015873, 9.999999999999996 37 | 1710.31746031746, 9.999999999999996 38 | 1789.6825396825395, 9.999999999999996 39 | 1845.2380952380947, 9.999999999999996 40 | 1873.015873015873, 9.809523809523807 41 | 1892.8571428571424, 9.36507936507936 42 | 1904.7619047619044, 8.98412698412698 43 | 1920.6349206349203, 8.476190476190474 44 | 1932.5396825396822, 7.809523809523807 45 | 1944.4444444444441, 7.238095238095236 46 | 1952.380952380952, 6.634920634920633 47 | 1964.285714285714, 6.158730158730157 48 | 1972.222222222222, 5.682539682539681 49 | 1980.15873015873, 5.206349206349206 50 | 1992.0634920634918, 4.825396825396824 51 | 2003.9682539682537, 4.222222222222221 52 | 2015.8730158730157, 3.777777777777777 53 | 2031.7460317460316, 3.3015873015873023 54 | 2051.587301587301, 2.317460317460318 55 | 2059.523809523809, 2.0952380952380967 56 | 2067.460317460317, 1.8730158730158735 57 | 2075.396825396825, 1.5873015873015888 58 | 2087.3015873015875, 1.3968253968253972 59 | 2099.206349206349, 1.1111111111111125 60 | 2115.079365079365, 0.7301587301587329 61 | 
2126.9841269841263, 0.5079365079365097 62 | 2134.9206349206343, 0.38095238095238315 63 | 1567.4603174603174, 9.999999999999996 64 | 1666.6666666666663, 9.999999999999996 65 | 1749.9999999999998, 9.999999999999996 66 | 1817.4603174603174, 9.999999999999996 67 | 1472.222222222222, 9.999999999999996 68 | 1345.2380952380952, 9.999999999999996 69 | 2039.682539682539, 2.8888888888888893 70 | -------------------------------------------------------------------------------- /pysph/examples/shallow_water/files_for_output_comparison/cyl_dam_t01.csv: -------------------------------------------------------------------------------- 1 | -0.8178368121442123, 0.1269230769230767 2 | -0.7457305502846299, 0.14615384615384586 3 | -0.6698292220113851, 0.1846153846153844 4 | -0.620493358633776, 0.23461538461538445 5 | -0.5635673624288424, 0.30384615384615377 6 | -0.4724857685009485, 0.43846153846153824 7 | -0.40417457305502835, 0.5423076923076923 8 | -0.3624288425047437, 0.6269230769230768 9 | -0.31688804554079697, 0.7076923076923076 10 | -0.27134724857685, 0.8 11 | -0.2220113851992409, 0.8884615384615384 12 | -0.16129032258064524, 0.976923076923077 13 | -0.06641366223908918, 0.9923076923076923 14 | 0.039848197343453684, 0.9961538461538463 15 | 0.1461100569259961, 0.9807692307692308 16 | 0.21442125237191645, 0.8846153846153846 17 | 0.2675521821631879, 0.7846153846153847 18 | 0.31688804554079697, 0.6961538461538461 19 | 0.36622390891840606, 0.6038461538461537 20 | 0.41555977229601515, 0.5115384615384614 21 | 0.46489563567362424, 0.4423076923076923 22 | 0.5332068311195446, 0.3346153846153844 23 | 0.586337760910816, 0.2615384615384614 24 | 0.6470588235294117, 0.1961538461538459 25 | 0.7077798861480078, 0.15769230769230758 26 | 0.7609108159392788, 0.14230769230769202 27 | 0.806451612903226, 0.1269230769230767 28 | -------------------------------------------------------------------------------- /pysph/examples/shallow_water/files_for_output_comparison/cyl_dam_t02.csv: 
-------------------------------------------------------------------------------- 1 | -1.2003929273084482, 0.052980132450331174 2 | -1.1493123772102163, 0.060927152317880706 3 | -1.0785854616895876, 0.06887417218543046 4 | -1.0039292730844795, 0.07682119205298021 5 | -0.9371316306483302, 0.09668874172185449 6 | -0.8546168958742635, 0.1205298013245033 7 | -0.7563850687622791, 0.1523178807947021 8 | -0.6620825147347744, 0.19999999999999996 9 | -0.5677799607072695, 0.2516556291390728 10 | -0.4499017681728881, 0.31523178807947017 11 | -0.3673870333988214, 0.3668874172185431 12 | -0.2730844793713165, 0.4264900662251656 13 | -0.18271119842829098, 0.4701986754966888 14 | -0.10019646365422408, 0.47814569536423845 15 | -0.0216110019646365, 0.47814569536423845 16 | 0.06876227897838882, 0.47814569536423845 17 | 0.1512770137524555, 0.47814569536423845 18 | 0.23772102161100195, 0.4543046357615894 19 | 0.2927308447937129, 0.4264900662251656 20 | 0.387033398821218, 0.3668874172185431 21 | 0.473477406679764, 0.31523178807947017 22 | 0.5638506876227893, 0.2556291390728477 23 | 0.6227897838899801, 0.21986754966887412 24 | 0.6777996070726915, 0.19999999999999996 25 | 0.7406679764243611, 0.16821192052980138 26 | 0.8074656188605109, 0.14437086092715234 27 | 0.8899803536345776, 0.11655629139072854 28 | 0.9764243614931236, 0.0927152317880795 29 | 1.0589390962671903, 0.07284768211920545 30 | 1.1375245579567776, 0.0649006622516557 31 | 1.2200392927308448, 0.05695364238410594 32 | -------------------------------------------------------------------------------- /pysph/examples/shallow_water/files_for_output_comparison/cyl_dam_t03.csv: -------------------------------------------------------------------------------- 1 | -1.5295159244813474, 0.040506329113924266 2 | -1.4426072599363495, 0.048101265822785066 3 | -1.3632600320643198, 0.055696202531645644 4 | -1.295254959201742, 0.06329113924050644 5 | -1.2083702232538103, 0.07468354430379764 6 | -1.125266205642363, 0.08607594936708862 7 | 
-1.053480414443301, 0.09367088607594942 8 | -0.9590820990165346, 0.11265822784810142 9 | -0.8760498671962862, 0.1354430379746837 10 | -0.8194108779402263, 0.1468354430379748 11 | -0.7476490153382307, 0.158227848101266 12 | -0.6947907444186545, 0.1696202531645571 13 | -0.6456653346414298, 0.1734177215189875 14 | -0.5965877820583376, 0.18481012658227858 15 | -0.49453231557033805, 0.18860759493670898 16 | -0.37357325739991865, 0.19240506329113927 17 | -0.22612524227704545, 0.19240506329113927 18 | -0.13160728386494713, 0.19240506329113927 19 | -0.0559689885382022, 0.18860759493670898 20 | 0.038501112679763416, 0.19620253164556967 21 | 0.25778277619583156, 0.19620253164556967 22 | 0.33339714292551004, 0.19620253164556967 23 | 0.42791510133760835, 0.19620253164556967 24 | 0.537603790289775, 0.18860759493670898 25 | 0.6359981814266227, 0.1734177215189875 26 | 0.7419540092364383, 0.158227848101266 27 | 0.8176880189514486, 0.1392405063291141 28 | 0.8972027470029429, 0.120253164556962 29 | 1.006963221746309, 0.10126582278481022 30 | 1.12804192290206, 0.08607594936708862 31 | 1.2037280754229378, 0.07468354430379764 32 | 1.2907563829532671, 0.06329113924050644 33 | 1.3853461271565648, 0.051898734177215244 34 | 1.479935871359862, 0.040506329113924266 35 | -0.3093010456796921, 0.19240506329113927 36 | -0.437821540523079, 0.18860759493670898 37 | 0.13301907109186195, 0.19620253164556967 38 | -------------------------------------------------------------------------------- /pysph/examples/shallow_water/okushiri_tsunami_input_files/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/shallow_water/okushiri_tsunami_input_files/__init__.py -------------------------------------------------------------------------------- /pysph/examples/shallow_water/okushiri_tsunami_input_files/tsunami_bed.txt.bz2: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/shallow_water/okushiri_tsunami_input_files/tsunami_bed.txt.bz2 -------------------------------------------------------------------------------- /pysph/examples/sloshing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/sloshing/__init__.py -------------------------------------------------------------------------------- /pysph/examples/solid_mech/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/solid_mech/__init__.py -------------------------------------------------------------------------------- /pysph/examples/solid_mech/rings.py: -------------------------------------------------------------------------------- 1 | """Colliding Elastic Balls. 
class Rings(Application):
    """Collision of two elastic rings (solid mechanics example).

    Two identical annular rings are placed side by side and given equal
    and opposite velocities so that they collide head on.
    """

    def initialize(self):
        """Set material constants, discretization and solver parameters."""
        # Material properties: Young's modulus, Poisson's ratio and the
        # reference density.
        self.E = 1e7
        self.nu = 0.3975
        self.rho0 = 1.0

        # Particle spacing and smoothing length (h = hdx * dx).
        self.dx = 0.0005
        self.hdx = 1.5
        self.h = self.hdx * self.dx

        # Ring geometry: inner and outer radii.
        self.ri = 0.03
        self.ro = 0.04

        # Offset of each ring's centre from the origin.
        self.spacing = 0.041

        self.dt = 1e-8
        self.tf = 5e-5

    def create_particles(self):
        """Create the single particle array holding both rings."""
        spacing = self.spacing  # spacing = 2*5cm

        # Fill a square with a Cartesian lattice and keep only points
        # inside the annulus ri <= r < ro.
        x, y = numpy.mgrid[-self.ro:self.ro:self.dx, -self.ro:self.ro:self.dx]
        x = x.ravel()
        y = y.ravel()

        d = (x * x + y * y)
        ro = self.ro
        ri = self.ri
        keep = numpy.flatnonzero((ri * ri <= d) * (d < ro * ro))
        x = x[keep]
        y = y[keep]

        # Duplicate the annulus: one ring on each side of the origin.
        x = numpy.concatenate([x - spacing, x + spacing])
        y = numpy.concatenate([y, y])

        dx = self.dx
        hdx = self.hdx
        m = numpy.ones_like(x) * dx * dx
        h = numpy.ones_like(x) * hdx * dx
        rho = numpy.ones_like(x)

        # create the particle array
        kernel = CubicSpline(dim=2)
        # Kernel value at the particle spacing; presumably used by the
        # artificial-stress correction in the elastic dynamics equations
        # (TODO confirm against get_particle_array_elastic_dynamics).
        self.wdeltap = kernel.kernel(rij=dx, h=self.h)
        pa = get_particle_array_elastic_dynamics(
            name="solid", x=x + spacing, y=y, m=m,
            rho=rho, h=h, constants=dict(
                wdeltap=self.wdeltap, n=4, rho_ref=self.rho0,
                E=self.E, nu=self.nu))

        # Fixed typo in the status message: "Ellastic" -> "Elastic".
        print('Elastic Collision with %d particles' % (x.size))
        print("Shear modulus G = %g, Young's modulus = %g, Poisson's ratio =%g"
              % (pa.G, pa.E, pa.nu))

        # Equal and opposite speeds (a fraction u_f of the sound speed)
        # so the rings move towards each other.
        u_f = 0.059
        pa.u = pa.cs * u_f * (2 * (x < 0) - 1)

        return [pa]

    def create_scheme(self):
        """Create the elastic-solids scheme and configure its solver."""
        s = ElasticSolidsScheme(elastic_solids=['solid'], solids=[],
                                dim=2)
        s.configure_solver(dt=self.dt, tf=self.tf, pfreq=500)
        return s
num_layers=3): 7 | """ 8 | Generates a beach like geometry which is commonly used for simulations 9 | related to SPHysics. 10 | 11 | Parameters 12 | ---------- 13 | dx : Spacing between the particles 14 | l : Total length of the beach 15 | h : Height of the wall used at the beach position 16 | flat_l : Length of the flat part 17 | angle : Angle of the inclined part 18 | num_layers : number of layers 19 | 20 | Returns 21 | ------- 22 | x : 1d numpy array with x coordinates of the beach 23 | y : 1d numpy array with y coordinates of the beach 24 | x4 : 1d numpy array with x coordinates of the obstacle 25 | y4 : 1d numpy array with y coordinates of the obstacle 26 | """ 27 | 28 | theta = np.pi * angle / 180.0 29 | x1, y1 = get_2d_wall(dx, np.array([(flat_l + dx) / 2.0, 0.]), flat_l, 30 | num_layers, False) 31 | x2 = np.arange(flat_l - l, 0.0, dx * np.cos(theta)) 32 | h2 = (l - flat_l) * np.tan(theta) 33 | y2_layer = x2 * np.tan(-theta) 34 | x2 = np.tile(x2, num_layers) 35 | y2 = [] 36 | for i in range(num_layers): 37 | y2.append(y2_layer - i * dx) 38 | y2 = np.ravel(np.array(y2)) 39 | y3 = np.arange(h2 + dx, h + h2, dx) 40 | x3_layer = np.ones_like(y3) * (flat_l - l) 41 | y3 = np.tile(y3, num_layers) 42 | x3 = [] 43 | for i in range(num_layers): 44 | x3.append(x3_layer - i * dx) 45 | x3 = np.ravel(np.array(x3)) 46 | x = np.concatenate([x1, x2, x3]) 47 | y = np.concatenate([y1, y2, y3]) 48 | y4 = np.arange(dx, 2.0 * h, dx) 49 | x4_layer = np.ones_like(y4) * flat_l 50 | y4 = np.tile(y4, num_layers) 51 | x4 = [] 52 | for i in range(num_layers): 53 | x4.append(x4_layer + i * dx) 54 | x4 = np.ravel(np.array(x4)) 55 | return x, y, x4, y4 56 | -------------------------------------------------------------------------------- /pysph/examples/sphysics/periodic_rigidbody.py: -------------------------------------------------------------------------------- 1 | from pysph.sph.equation import Equation 2 | import numpy as np 3 | 4 | 5 | class GroupParticles(Equation): 6 | 7 | def 
__init__(self, dest, sources=None, xmin=0.0, xmax=0.0, ymin=0.0, 8 | ymax=0.0, zmin=0.0, zmax=0.0, periodic_in_x=False, 9 | periodic_in_y=False, periodic_in_z=False): 10 | self.periodic_in_x = periodic_in_x 11 | self.periodic_in_y = periodic_in_y 12 | self.periodic_in_z = periodic_in_z 13 | self.xlen = abs(xmax - xmin) 14 | self.xmin = xmin 15 | self.xmax = xmax 16 | self.ylen = abs(ymax - ymin) 17 | self.ymin = ymin 18 | self.ymax = ymax 19 | self.zlen = abs(zmax - zmin) 20 | self.zmin = zmin 21 | self.zmax = zmax 22 | 23 | super(GroupParticles, self).__init__(dest, sources) 24 | 25 | def loop(self, d_idx, d_cm, d_body_id, d_x, d_y, d_z): 26 | b = declare('int') 27 | b = d_body_id[d_idx] 28 | if self.periodic_in_x: 29 | if (abs(d_x[d_idx] - d_cm[3 * b]) > (self.xlen / 2.0)): 30 | if (d_cm[3 * b] > self.xmin + self.xlen / 2.0): 31 | d_x[d_idx] += self.xlen 32 | else: 33 | d_x[d_idx] -= self.xlen 34 | if self.periodic_in_y: 35 | if (abs(d_y[d_idx] - d_cm[3 * b + 1]) > (self.ylen / 2.0)): 36 | if (d_cm[3 * b + 1] > self.ymin + self.ylen / 2.0): 37 | d_y[d_idx] += self.ylen 38 | else: 39 | d_y[d_idx] -= self.ylen 40 | if self.periodic_in_z: 41 | if (abs(d_z[d_idx] - d_cm[3 * b + 2]) > (self.zlen / 2.0)): 42 | if (d_cm[3 * b + 2] > self.zmin + self.zlen / 2.0): 43 | d_z[d_idx] += self.zlen 44 | else: 45 | d_z[d_idx] -= self.zlen 46 | -------------------------------------------------------------------------------- /pysph/examples/surface_tension/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/surface_tension/__init__.py -------------------------------------------------------------------------------- /pysph/examples/tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/examples/tests/__init__.py -------------------------------------------------------------------------------- /pysph/examples/tests/test_examples.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import shutil 4 | import subprocess 5 | import sys 6 | 7 | from pytest import mark 8 | 9 | from pysph.examples import run 10 | 11 | 12 | def check_output(*args, **kw): 13 | """Simple hack to support Python 2.6 which does not have 14 | subprocess.check_output. 15 | """ 16 | if not hasattr(subprocess, 'check_output'): 17 | subprocess.call(*args, **kw) 18 | else: 19 | subprocess.check_output(*args, **kw) 20 | 21 | 22 | def print_safe(string_or_bytes): 23 | if type(string_or_bytes) is bytes: 24 | print(string_or_bytes.decode('utf-8')) 25 | else: 26 | print(string_or_bytes) 27 | 28 | 29 | _orig_ets_toolkit = None 30 | def setup_module(): 31 | # Set the ETS_TOOLKIT to null to avoid errors when importing TVTK. 32 | global _orig_ets_toolkit 33 | var = 'ETS_TOOLKIT' 34 | _orig_ets_toolkit = os.environ.get(var) 35 | os.environ[var] = 'null' 36 | 37 | 38 | def teardown_module(): 39 | var = 'ETS_TOOLKIT' 40 | if _orig_ets_toolkit is None: 41 | del os.environ[var] 42 | else: 43 | os.environ[var] = _orig_ets_toolkit 44 | 45 | 46 | def run_example(module): 47 | """This simply runs the example to make sure that the example executes 48 | correctly. It wipes out the generated output directory. 
49 | """ 50 | out_dir = tempfile.mkdtemp() 51 | cmd = [sys.executable, "-m", module, "--max-steps", "1", 52 | "--disable-output", "-q", "-d", out_dir] 53 | env_vars = dict(os.environ) 54 | env_vars['ETS_TOOLKIT'] = 'null' 55 | try: 56 | check_output(cmd, env=env_vars) 57 | except subprocess.CalledProcessError as e: 58 | print_safe(e.stdout) 59 | print_safe(e.stderr) 60 | raise 61 | finally: 62 | shutil.rmtree(out_dir) 63 | 64 | 65 | def _has_tvtk(): 66 | try: 67 | from tvtk.api import tvtk 68 | except (ImportError, SystemExit): 69 | return False 70 | else: 71 | return True 72 | 73 | 74 | def _find_examples(): 75 | examples = [] 76 | for module, doc in run.get_all_examples(): 77 | if module == 'pysph.examples.rigid_body.dam_break3D_sph' and \ 78 | not _has_tvtk(): 79 | continue 80 | examples.append(module) 81 | return examples 82 | 83 | 84 | @mark.slow 85 | @mark.parametrize("module", _find_examples()) 86 | def test_example_should_run(module): 87 | run_example(module) 88 | -------------------------------------------------------------------------------- /pysph/examples/tg_with_packed_particles.py: -------------------------------------------------------------------------------- 1 | from numpy import pi, sin, cos, exp 2 | 3 | from pysph.examples.taylor_green import TaylorGreen, exact_solution 4 | from pysph.base.utils import get_particle_array 5 | 6 | # domain and constants 7 | L = 1.0 8 | U = 1.0 9 | rho0 = 1.0 10 | c0 = 10 * U 11 | p0 = c0**2 * rho0 12 | 13 | 14 | class TGPacked(TaylorGreen): 15 | def _get_packed_points(self): 16 | ''' 17 | returns 18 | xs, ys, zs, xf, yf, zf 19 | ''' 20 | from pysph.tools.geometry import get_packed_periodic_packed_particles 21 | folder = self.output_dir 22 | dx = self.dx 23 | return get_packed_periodic_packed_particles( 24 | self.add_user_options, folder, dx, L=L, B=L) 25 | 26 | def create_fluid(self): 27 | # create the particles 28 | xs, ys, zs, xf, yf, zf = self._get_packed_points() 29 | x, y = xf, yf 30 | if self.options.init is 
not None: 31 | fname = self.options.init 32 | from pysph.solver.utils import load 33 | data = load(fname) 34 | _f = data['arrays']['fluid'] 35 | x, y = _f.x.copy(), _f.y.copy() 36 | 37 | # Initialize 38 | dx = self.dx 39 | m = self.volume * rho0 40 | h = self.hdx * dx 41 | re = self.options.re 42 | b = -8.0 * pi * pi / re 43 | u0, v0, p0 = exact_solution(U=U, b=b, t=0, x=x, y=y) 44 | color0 = cos(2 * pi * x) * cos(4 * pi * y) 45 | 46 | # create the arrays 47 | fluid = get_particle_array( 48 | name='fluid', x=x, y=y, m=m, h=h, u=u0, v=v0, rho=rho0, p=p0, 49 | color=color0) 50 | return fluid 51 | 52 | 53 | if __name__ == '__main__': 54 | app = TGPacked() 55 | app.run() 56 | app.post_process(app.info_filename) 57 | -------------------------------------------------------------------------------- /pysph/examples/two_blocks.py: -------------------------------------------------------------------------------- 1 | """Two square blocks of water colliding with each other. (20 seconds) 2 | """ 3 | 4 | import numpy 5 | from pysph.examples._db_geometry import create_2D_filled_region 6 | from pysph.base.utils import get_particle_array 7 | from pysph.sph.iisph import IISPHScheme 8 | from pysph.solver.application import Application 9 | 10 | dx = 0.025 11 | hdx = 1.0 12 | rho0 = 1000 13 | 14 | 15 | class TwoBlocks(Application): 16 | def create_particles(self): 17 | x1, y1 = create_2D_filled_region(-1, 0, 0, 1, dx) 18 | x2, y2 = create_2D_filled_region(0.5, 0, 1.5, 1, dx) 19 | 20 | x = numpy.concatenate((x1, x2)) 21 | y = numpy.concatenate((y1, y2)) 22 | u1 = numpy.ones_like(x1) 23 | u2 = -numpy.ones_like(x2) 24 | u = numpy.concatenate((u1, u2)) 25 | 26 | rho = numpy.ones_like(u)*rho0 27 | h = numpy.ones_like(u)*hdx*dx 28 | m = numpy.ones_like(u)*dx*dx*rho0 29 | 30 | fluid = get_particle_array( 31 | name='fluid', x=x, y=y, u=u, rho=rho, m=m, h=h 32 | ) 33 | self.scheme.setup_properties([fluid]) 34 | return [fluid] 35 | 36 | def create_scheme(self): 37 | s = 
IISPHScheme(fluids=['fluid'], solids=[], dim=2, rho0=rho0) 38 | return s 39 | 40 | def configure_scheme(self): 41 | dt = 2e-3 42 | tf = 1.0 43 | self.scheme.configure_solver( 44 | dt=dt, tf=tf, adaptive_timestep=False, pfreq=10 45 | ) 46 | 47 | 48 | if __name__ == '__main__': 49 | app = TwoBlocks() 50 | app.run() 51 | -------------------------------------------------------------------------------- /pysph/parallel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/parallel/__init__.py -------------------------------------------------------------------------------- /pysph/parallel/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/parallel/tests/__init__.py -------------------------------------------------------------------------------- /pysph/parallel/tests/cavity.py: -------------------------------------------------------------------------------- 1 | # We do this as we are not interested in the post-processing step. 2 | if __name__ == '__main__': 3 | from pysph.examples.cavity import LidDrivenCavity 4 | app = LidDrivenCavity() 5 | app.run() 6 | -------------------------------------------------------------------------------- /pysph/parallel/tests/check_dump_load.py: -------------------------------------------------------------------------------- 1 | """Test if dumping and loading files works in parallel correctly. 
2 | """ 3 | 4 | import mpi4py.MPI as mpi 5 | import numpy as np 6 | from os.path import join 7 | import shutil 8 | from tempfile import mkdtemp 9 | 10 | from pysph.base.particle_array import ParticleArray 11 | from pysph.solver.utils import dump, load 12 | 13 | 14 | def assert_lists_same(l1, l2): 15 | expect = list(sorted(l1)) 16 | result = list(sorted(l2)) 17 | assert expect == result, "Expected %s, got %s" % (expect, result) 18 | 19 | 20 | def main(): 21 | comm = mpi.COMM_WORLD 22 | rank = comm.Get_rank() 23 | size = comm.Get_size() 24 | 25 | root = mkdtemp() 26 | filename = join(root, 'test.npz') 27 | 28 | x = np.ones(5, dtype=float)*rank 29 | pa = ParticleArray(name='fluid', constants={'c1': 0.0, 'c2': [0.0, 0.0]}, 30 | x=x) 31 | 32 | try: 33 | dump(filename, [pa], {}, mpi_comm=comm) 34 | if rank == 0: 35 | data = load(filename) 36 | pa1 = data["arrays"]["fluid"] 37 | 38 | assert_lists_same(pa.properties.keys(), pa1.properties.keys()) 39 | assert_lists_same(pa.constants.keys(), pa1.constants.keys()) 40 | 41 | expect = np.ones(5*size) 42 | for i in range(size): 43 | expect[5*i:5*(i+1)] = i 44 | 45 | assert np.allclose(pa1.x, expect, atol=1e-14), \ 46 | "Expected %s, got %s" % (expect, pa1.x) 47 | finally: 48 | shutil.rmtree(root) 49 | 50 | 51 | if __name__ == '__main__': 52 | main() 53 | -------------------------------------------------------------------------------- /pysph/parallel/tests/elliptical_drop.py: -------------------------------------------------------------------------------- 1 | # We do this as we are not interested in the post-processing step. 2 | if __name__ == '__main__': 3 | from pysph.examples.elliptical_drop import EllipticalDrop 4 | app = EllipticalDrop() 5 | app.run() 6 | -------------------------------------------------------------------------------- /pysph/parallel/tests/reduce_array.py: -------------------------------------------------------------------------------- 1 | """Test if the mpi_reduce_array function works correctly. 
2 | """ 3 | 4 | import mpi4py.MPI as mpi 5 | import numpy as np 6 | 7 | from pysph.base.reduce_array import serial_reduce_array, mpi_reduce_array 8 | 9 | 10 | def main(): 11 | comm = mpi.COMM_WORLD 12 | rank = comm.Get_rank() 13 | size = comm.Get_size() 14 | n = 5 15 | data = np.ones(n)*(rank + 1) 16 | 17 | full_data = [] 18 | for i in range(size): 19 | full_data = np.concatenate([full_data, np.ones(n)*(i+1)]) 20 | 21 | for op in ('sum', 'prod', 'min', 'max'): 22 | serial_data = serial_reduce_array(data, op) 23 | result = mpi_reduce_array(serial_data, op) 24 | expect = getattr(np, op)(full_data) 25 | msg = "For op %s: Expected %s, got %s" % (op, expect, result) 26 | assert expect == result, msg 27 | 28 | 29 | if __name__ == '__main__': 30 | main() 31 | -------------------------------------------------------------------------------- /pysph/parallel/tests/simple_reduction.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pysph.base.kernels import CubicSpline 4 | from pysph.base.particle_array import ParticleArray 5 | 6 | from pysph.sph.equation import Equation 7 | from pysph.sph.integrator_step import IntegratorStep 8 | 9 | from pysph.sph.integrator import EulerIntegrator 10 | from pysph.solver.application import Application 11 | from pysph.solver.solver import Solver 12 | from pysph.base.reduce_array import parallel_reduce_array, serial_reduce_array 13 | 14 | 15 | class TotalMass(Equation): 16 | def reduce(self, dst, t, dt): 17 | m = serial_reduce_array(dst.m, op='sum') 18 | dst.total_mass[0] = parallel_reduce_array(m, op='sum') 19 | 20 | 21 | class DummyStepper(IntegratorStep): 22 | def initialize(self): 23 | pass 24 | 25 | def stage1(self): 26 | pass 27 | 28 | 29 | class MyApp(Application): 30 | def create_particles(self): 31 | x = np.linspace(0, 10, 10) 32 | m = np.ones_like(x) 33 | y = np.zeros_like(x) 34 | z = np.zeros_like(x) 35 | h = np.ones_like(x) * 0.2 36 | 37 | fluid = 
ParticleArray(name='fluid', x=x, y=y, z=z, m=m, h=h) 38 | fluid.add_constant('total_mass', 0.0) 39 | return [fluid] 40 | 41 | def create_solver(self): 42 | dim = 1 43 | # Create the kernel 44 | kernel = CubicSpline(dim=dim) 45 | 46 | # Create the integrator. 47 | integrator = EulerIntegrator(fluid=DummyStepper()) 48 | 49 | solver = Solver(kernel=kernel, dim=dim, integrator=integrator) 50 | solver.set_time_step(0.1) 51 | solver.set_final_time(0.1) 52 | # There is no need to write any output as the test below 53 | # computes the total mass. 54 | solver.set_disable_output(True) 55 | return solver 56 | 57 | def create_equations(self): 58 | return [TotalMass(dest='fluid', sources=['fluid'])] 59 | 60 | 61 | def main(): 62 | # Create the application. 63 | app = MyApp() 64 | app.run() 65 | 66 | fluid = app.particles[0] 67 | err = fluid.total_mass[0] - 10.0 68 | assert abs(err) < 1e-16, "Error: %s" % err 69 | 70 | 71 | if __name__ == '__main__': 72 | main() 73 | -------------------------------------------------------------------------------- /pysph/parallel/tests/test_openmp.py: -------------------------------------------------------------------------------- 1 | """ Module to run the example files and report their success/failure results 2 | 3 | Add a function to the ExampleTest class corresponding to an example script to 4 | be tested. 
5 | This is done till better strategy for parallel testing is implemented 6 | 7 | """ 8 | 9 | from pytest import mark 10 | from .example_test_case import ExampleTestCase, get_example_script 11 | from pysph.base.nnps import get_number_of_threads 12 | 13 | 14 | @mark.skipif(get_number_of_threads() == 1, reason= "N_threads=1; OpenMP does not seem available.") 15 | class TestOpenMPExamples(ExampleTestCase): 16 | 17 | @mark.slow 18 | def test_3Ddam_break_example(self): 19 | dt = 2e-5; tf = 13*dt 20 | serial_kwargs = dict( 21 | timestep=dt, tf=tf, pfreq=100, test=None 22 | ) 23 | extra_parallel_kwargs = dict(openmp=None) 24 | # Note that we set nprocs=1 here since we do not want 25 | # to run this with mpirun. 26 | self.run_example( 27 | get_example_script('sphysics/dambreak_sphysics.py'), 28 | nprocs=1, atol=1e-14, 29 | serial_kwargs=serial_kwargs, 30 | extra_parallel_kwargs=extra_parallel_kwargs 31 | ) 32 | 33 | @mark.slow 34 | def test_elliptical_drop_example(self): 35 | tf = 0.0076*0.25 36 | serial_kwargs = dict(kernel='CubicSpline', tf=tf) 37 | extra_parallel_kwargs = dict(openmp=None) 38 | # Note that we set nprocs=1 here since we do not want 39 | # to run this with mpirun. 40 | self.run_example( 41 | 'elliptical_drop.py', nprocs=1, atol=1e-14, 42 | serial_kwargs=serial_kwargs, 43 | extra_parallel_kwargs=extra_parallel_kwargs 44 | ) 45 | 46 | def test_ldcavity_example(self): 47 | dt=1e-4; tf=200*dt 48 | serial_kwargs = dict(timestep=dt, tf=tf, pfreq=500) 49 | extra_parallel_kwargs = dict(openmp=None) 50 | # Note that we set nprocs=1 here since we do not want 51 | # to run this with mpirun. 
52 | self.run_example( 53 | 'cavity.py', nprocs=1, atol=1e-14, 54 | serial_kwargs=serial_kwargs, 55 | extra_parallel_kwargs=extra_parallel_kwargs 56 | ) 57 | -------------------------------------------------------------------------------- /pysph/parallel/tests/test_parallel.py: -------------------------------------------------------------------------------- 1 | """Tests for the PySPH parallel module""" 2 | 3 | import shutil 4 | import tempfile 5 | import unittest 6 | 7 | import numpy as np 8 | from pytest import mark, importorskip 9 | from pysph.tools import run_parallel_script 10 | 11 | path = run_parallel_script.get_directory(__file__) 12 | 13 | 14 | class ParticleArrayTestCase(unittest.TestCase): 15 | @classmethod 16 | def setUpClass(cls): 17 | importorskip("pysph.parallel.parallel_manager") 18 | 19 | def test_get_strided_indices(self): 20 | # Given 21 | from pysph.parallel.parallel_manager import get_strided_indices 22 | 23 | indices = np.array([1, 5, 3]) 24 | 25 | # When 26 | idx = get_strided_indices(indices, 1) 27 | # Then 28 | np.testing.assert_array_equal(idx, indices) 29 | 30 | # When 31 | idx = get_strided_indices(indices, 2) 32 | # Then 33 | np.testing.assert_array_equal( 34 | idx, [2, 3, 10, 11, 6, 7] 35 | ) 36 | 37 | # When 38 | idx = get_strided_indices(indices, 3) 39 | # Then 40 | np.testing.assert_array_equal( 41 | idx, [3, 4, 5, 15, 16, 17, 9, 10, 11] 42 | ) 43 | 44 | 45 | class ParticleArrayExchangeTestCase(unittest.TestCase): 46 | @classmethod 47 | def setUpClass(cls): 48 | importorskip("mpi4py.MPI") 49 | importorskip("pyzoltan.core.zoltan") 50 | 51 | @mark.parallel 52 | def test_lb_exchange(self): 53 | run_parallel_script.run(filename='lb_exchange.py', nprocs=4, path=path) 54 | 55 | @mark.parallel 56 | def test_remote_exchange(self): 57 | run_parallel_script.run( 58 | filename='remote_exchange.py', nprocs=4, path=path 59 | ) 60 | 61 | 62 | class SummationDensityTestCase(unittest.TestCase): 63 | @classmethod 64 | def setUpClass(cls): 65 | 
importorskip("mpi4py.MPI") 66 | importorskip("pyzoltan.core.zoltan") 67 | 68 | @mark.slow 69 | @mark.parallel 70 | def test_summation_density(self): 71 | run_parallel_script.run( 72 | filename='summation_density.py', nprocs=4, path=path 73 | ) 74 | 75 | 76 | class MPIReduceArrayTestCase(unittest.TestCase): 77 | @classmethod 78 | def setUpClass(cls): 79 | importorskip("mpi4py.MPI") 80 | importorskip("pyzoltan.core.zoltan") 81 | 82 | def setUp(self): 83 | self.root = tempfile.mkdtemp() 84 | 85 | def tearDown(self): 86 | shutil.rmtree(self.root) 87 | 88 | @mark.parallel 89 | def test_mpi_reduce_array(self): 90 | run_parallel_script.run( 91 | filename='reduce_array.py', nprocs=4, path=path 92 | ) 93 | 94 | @mark.parallel 95 | def test_parallel_reduce(self): 96 | args = ['--directory=%s' % self.root] 97 | run_parallel_script.run( 98 | filename='simple_reduction.py', args=args, nprocs=4, path=path, 99 | timeout=60.0 100 | ) 101 | 102 | 103 | class DumpLoadTestCase(unittest.TestCase): 104 | @classmethod 105 | def setUpClass(cls): 106 | importorskip("mpi4py.MPI") 107 | importorskip("pyzoltan.core.zoltan") 108 | 109 | @mark.parallel 110 | def test_dump_and_load_work_in_parallel(self): 111 | run_parallel_script.run( 112 | filename='check_dump_load.py', nprocs=4, path=path 113 | ) 114 | 115 | 116 | if __name__ == '__main__': 117 | unittest.main() 118 | -------------------------------------------------------------------------------- /pysph/parallel/tests/test_parallel_run.py: -------------------------------------------------------------------------------- 1 | """ Module to run the example files and report their success/failure results 2 | 3 | Add a function to the ExampleTest class corresponding to an example script to 4 | be tested. 
5 | This is done till better strategy for parallel testing is implemented 6 | 7 | """ 8 | 9 | from pytest import mark, importorskip 10 | 11 | from pysph.tools import run_parallel_script 12 | from pysph.parallel.tests.example_test_case import ExampleTestCase, get_example_script 13 | 14 | 15 | class ParallelTests(ExampleTestCase): 16 | 17 | @classmethod 18 | def setUpClass(cls): 19 | importorskip("mpi4py.MPI") 20 | importorskip("pyzoltan.core.zoltan") 21 | 22 | @mark.slow 23 | @mark.parallel 24 | def test_3Ddam_break_example(self): 25 | serial_kwargs = dict( 26 | max_steps=50, pfreq=200, sort_gids=None, test=None 27 | ) 28 | extra_parallel_kwargs = dict(ghost_layers=1, lb_freq=5) 29 | self.run_example( 30 | get_example_script('sphysics/dambreak_sphysics.py'), 31 | nprocs=4, atol=1e-12, 32 | serial_kwargs=serial_kwargs, 33 | extra_parallel_kwargs=extra_parallel_kwargs 34 | ) 35 | 36 | @mark.slow 37 | @mark.parallel 38 | def test_dam_break_example(self): 39 | serial_kwargs = dict( 40 | max_steps=25, pfreq=200, sort_gids=None, dx=0.04, 41 | no_adaptive_timestep=None, n_damp=0 42 | ) 43 | extra_parallel_kwargs = dict(ghost_layers=1, lb_freq=5) 44 | self.run_example( 45 | get_example_script('dam_break_3d.py'), 46 | nprocs=4, timeout=180, atol=1e-12, 47 | serial_kwargs=serial_kwargs, 48 | extra_parallel_kwargs=extra_parallel_kwargs 49 | ) 50 | 51 | @mark.slow 52 | @mark.parallel 53 | def test_elliptical_drop_example(self): 54 | serial_kwargs = dict(sort_gids=None, kernel='CubicSpline', tf=0.0038) 55 | extra_parallel_kwargs = dict(ghost_layers=1, lb_freq=5) 56 | self.run_example( 57 | 'elliptical_drop.py', nprocs=2, atol=1e-11, 58 | serial_kwargs=serial_kwargs, 59 | extra_parallel_kwargs=extra_parallel_kwargs 60 | ) 61 | 62 | @mark.parallel 63 | def test_ldcavity_example(self): 64 | max_steps = 150 65 | serial_kwargs = dict(max_steps=max_steps, pfreq=500, sort_gids=None) 66 | extra_parallel_kwargs = dict(ghost_layers=2, lb_freq=5) 67 | self.run_example( 68 | 'cavity.py', 
nprocs=4, atol=1e-14, serial_kwargs=serial_kwargs, 69 | extra_parallel_kwargs=extra_parallel_kwargs 70 | ) 71 | 72 | if __name__ == '__main__': 73 | import unittest 74 | unittest.main() 75 | -------------------------------------------------------------------------------- /pysph/solver/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/solver/__init__.py -------------------------------------------------------------------------------- /pysph/solver/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/solver/tests/__init__.py -------------------------------------------------------------------------------- /pysph/sph/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/sph/__init__.py -------------------------------------------------------------------------------- /pysph/sph/bc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/sph/bc/__init__.py -------------------------------------------------------------------------------- /pysph/sph/bc/characteristic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/sph/bc/characteristic/__init__.py -------------------------------------------------------------------------------- /pysph/sph/bc/characteristic/inlet.py: -------------------------------------------------------------------------------- 1 | """ 2 | Inlet boundary 3 | """ 4 | from 
pysph.sph.bc.inlet_outlet_manager import InletBase 5 | 6 | 7 | class Inlet(InletBase): 8 | pass 9 | -------------------------------------------------------------------------------- /pysph/sph/bc/characteristic/outlet.py: -------------------------------------------------------------------------------- 1 | """ 2 | Outlet boundary 3 | """ 4 | from pysph.sph.bc.inlet_outlet_manager import OutletBase 5 | 6 | 7 | class Outlet(OutletBase): 8 | pass 9 | -------------------------------------------------------------------------------- /pysph/sph/bc/donothing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/sph/bc/donothing/__init__.py -------------------------------------------------------------------------------- /pysph/sph/bc/donothing/inlet.py: -------------------------------------------------------------------------------- 1 | """ 2 | Inlet boundary 3 | """ 4 | from pysph.sph.bc.inlet_outlet_manager import InletBase 5 | 6 | 7 | class Inlet(InletBase): 8 | pass 9 | -------------------------------------------------------------------------------- /pysph/sph/bc/donothing/outlet.py: -------------------------------------------------------------------------------- 1 | """ 2 | Outlet boundary 3 | """ 4 | from pysph.sph.bc.inlet_outlet_manager import OutletBase 5 | 6 | 7 | class Outlet(OutletBase): 8 | pass 9 | -------------------------------------------------------------------------------- /pysph/sph/bc/hybrid/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/sph/bc/hybrid/__init__.py -------------------------------------------------------------------------------- /pysph/sph/bc/hybrid/inlet.py: -------------------------------------------------------------------------------- 1 | """ 2 | Inlet boundary 3 | """ 4 
| import numpy as np 5 | from pysph.sph.bc.inlet_outlet_manager import InletBase 6 | 7 | 8 | class Inlet(InletBase): 9 | def update(self, time, dt, stage): 10 | dest_pa = self.dest_pa 11 | inlet_pa = self.inlet_pa 12 | ghost_pa = self.ghost_pa 13 | 14 | dest_pa.uref[0] = 0.5 * (inlet_pa.uref[0] + dest_pa.uref[0]) 15 | 16 | if not self._init: 17 | self.initialize() 18 | self._init = True 19 | if stage in self.active_stages: 20 | 21 | self.io_eval = self._create_io_eval() 22 | self.io_eval.update() 23 | self.io_eval.evaluate() 24 | 25 | io_id = inlet_pa.ioid 26 | cond = (io_id == 0) 27 | all_idx = np.where(cond)[0] 28 | inlet_pa.extract_particles(all_idx, dest_pa) 29 | 30 | # moving the moved particles back to the array beginning. 31 | inlet_pa.x[all_idx] += self.length * self.xn 32 | inlet_pa.y[all_idx] += self.length * self.yn 33 | inlet_pa.z[all_idx] += self.length * self.zn 34 | 35 | if ghost_pa: 36 | ghost_pa.x[all_idx] -= self.length * self.xn 37 | ghost_pa.y[all_idx] -= self.length * self.yn 38 | ghost_pa.z[all_idx] -= self.length * self.zn 39 | 40 | if self.callback is not None: 41 | self.callback(dest_pa, inlet_pa) 42 | -------------------------------------------------------------------------------- /pysph/sph/bc/hybrid/outlet.py: -------------------------------------------------------------------------------- 1 | """ 2 | Outlet boundary 3 | """ 4 | 5 | from pysph.sph.bc.inlet_outlet_manager import OutletBase 6 | 7 | 8 | class Outlet(OutletBase): 9 | pass 10 | -------------------------------------------------------------------------------- /pysph/sph/bc/mirror/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/sph/bc/mirror/__init__.py -------------------------------------------------------------------------------- /pysph/sph/bc/mirror/inlet.py: -------------------------------------------------------------------------------- 
class Outlet(OutletBase):
    """Mirror outlet boundary.

    Particles leaving the fluid through the outlet plane are moved into
    the outlet array; an optional ghost array holds their reflections
    about the outlet plane with the normal velocity component negated.
    """

    def _get_ghost_xyz(self, x, y, z):
        """Reflect the points ``(x, y, z)`` about the outlet plane.

        The plane passes through ``(self.x, self.y, self.z)`` with unit
        normal ``(self.xn, self.yn, self.zn)``: ``disp`` is the signed
        distance of each point from the plane and the mirror image is
        obtained by subtracting twice that displacement along the normal.
        """
        xij = x - self.x
        yij = y - self.y
        zij = z - self.z

        disp = xij * self.xn + yij * self.yn + zij * self.zn
        x = x - 2 * disp * self.xn
        y = y - 2 * disp * self.yn
        z = z - 2 * disp * self.zn

        return x, y, z

    def update(self, time, dt, stage):
        """Exchange particles between fluid, outlet and ghost arrays.

        Only runs for integrator stages in ``self.active_stages``.
        """
        if not self._init:
            self.initialize()
            self._init = True
        if stage in self.active_stages:
            props_to_copy = self.props_to_copy
            outlet_pa = self.outlet_pa
            source_pa = self.source_pa
            ghost_pa = self.ghost_pa

            # Classify particles (writes the ``ioid`` property).
            self.io_eval = self._create_io_eval()
            self.io_eval.update()
            self.io_eval.evaluate()

            # adding particles to the destination array.
            # assumes ioid == 1 flags fluid particles that have entered
            # the outlet region -- TODO confirm against OutletBase.
            io_id = source_pa.ioid
            cond = (io_id == 1)
            all_idx = np.where(cond)[0]
            pa_add = source_pa.extract_particles(
                all_idx, props=props_to_copy)
            outlet_pa.add_particles(**pa_add.get_property_arrays())

            if ghost_pa:
                if len(all_idx) > 0:
                    # Mirror the new outlet particles back across the
                    # plane and flip u so the ghosts counter the outflow.
                    x, y, z = self._get_ghost_xyz(
                        pa_add.x, pa_add.y, pa_add.z)
                    pa_add.x = x
                    pa_add.y = y
                    pa_add.z = z
                    pa_add.u = -1. * pa_add.u
                    ghost_pa.add_particles(**pa_add.get_property_arrays())
            source_pa.remove_particles(all_idx)

            # assumes ioid == 2 flags particles that have left through
            # the far end of the outlet -- delete them (and their ghosts,
            # which share indices with the outlet particles).
            io_id = outlet_pa.ioid
            cond = (io_id == 2)
            all_idx = np.where(cond)[0]
            outlet_pa.remove_particles(all_idx)
            if ghost_pa:
                ghost_pa.remove_particles(all_idx)

            if self.callback is not None:
                # Optional user hook run after the particle exchange.
                self.callback(source_pa, outlet_pa)
def wendland_quintic(rij=1.0, h=1.0):
    """Evaluate the (unnormalized) Wendland quintic kernel.

    Returns ``(1 + 2.5*q + 2*q**2) * (2 - q)**5`` for ``q = rij/h``
    when ``q < 2`` and ``0.0`` outside the kernel support.
    """
    q = rij / h
    if q >= 2.0:
        return 0.0
    tail = 2.0 - q
    poly = 1.0 + 2.5 * q + 2.0 * q * q
    return poly * tail * tail * tail * tail * tail
class WallBoundary(Equation):
    """Set wall-particle properties from a kernel-weighted (Shepard)
    average over neighbouring fluid particles.

    ``initialize`` zeroes the accumulators, ``loop`` accumulates
    kernel-weighted sums, and ``post_loop`` normalizes by the total
    weight ``wij``.  Velocity components are accumulated with a negative
    sign -- presumably to mirror the fluid velocity at the wall for a
    no-penetration condition; confirm against the gas-dynamics scheme.
    """

    def initialize(self, d_idx, d_p, d_rho, d_e, d_m, d_cs, d_div, d_h,
                   d_htmp, d_h0, d_u, d_v, d_w, d_wij):
        # Reset all accumulated wall properties; h falls back to the
        # reference smoothing length h0 until the average overwrites it.
        d_p[d_idx] = 0.0
        d_u[d_idx] = 0.0
        d_v[d_idx] = 0.0
        d_w[d_idx] = 0.0
        d_m[d_idx] = 0.0
        d_rho[d_idx] = 0.0
        d_e[d_idx] = 0.0
        d_cs[d_idx] = 0.0
        d_div[d_idx] = 0.0
        d_wij[d_idx] = 0.0
        d_h[d_idx] = d_h0[d_idx]
        d_htmp[d_idx] = 0.0

    def loop(self, d_idx, s_idx, d_p, d_rho, d_e, d_m, d_cs, d_div, d_h, d_u,
             d_v, d_w, d_wij, d_htmp, s_p, s_rho, s_e, s_m, s_cs, s_h, s_div,
             s_u, s_v, s_w, WI):
        # Accumulate kernel-weighted sums over source (fluid) neighbours.
        # WI is the kernel value for this pair.
        d_wij[d_idx] += WI
        d_p[d_idx] += s_p[s_idx]*WI
        # Velocity contributions enter with opposite sign (see class doc).
        d_u[d_idx] -= s_u[s_idx]*WI
        d_v[d_idx] -= s_v[s_idx]*WI
        d_w[d_idx] -= s_w[s_idx]*WI
        d_m[d_idx] += s_m[s_idx]*WI
        d_rho[d_idx] += s_rho[s_idx]*WI
        d_e[d_idx] += s_e[s_idx]*WI
        d_cs[d_idx] += s_cs[s_idx]*WI
        d_div[d_idx] += s_div[s_idx]*WI
        # Smoothing length is accumulated into htmp so that d_h keeps its
        # h0 fallback when the particle has no neighbours.
        d_htmp[d_idx] += s_h[s_idx]*WI

    def post_loop(self, d_idx, d_p, d_rho, d_e, d_m, d_cs, d_div, d_h, d_u,
                  d_v, d_w, d_wij, d_htmp):
        # Normalize by the total kernel weight; the 1e-30 guard leaves
        # neighbourless wall particles at their initialized values.
        if (d_wij[d_idx]>1e-30):
            d_p[d_idx] = d_p[d_idx]/d_wij[d_idx]
            d_u[d_idx] = d_u[d_idx]/d_wij[d_idx]
            d_v[d_idx] = d_v[d_idx]/d_wij[d_idx]
            d_w[d_idx] = d_w[d_idx]/d_wij[d_idx]
            d_m[d_idx] = d_m[d_idx]/d_wij[d_idx]
            d_rho[d_idx] = d_rho[d_idx]/d_wij[d_idx]
            d_e[d_idx] = d_e[d_idx]/d_wij[d_idx]
            d_cs[d_idx] = d_cs[d_idx]/d_wij[d_idx]
            d_div[d_idx] = d_div[d_idx]/d_wij[d_idx]
            d_h[d_idx] = d_htmp[d_idx] /d_wij[d_idx]
class VonMisesPlasticity2D(Equation):
    """Scale the deviatoric stress back onto the yield surface (2D).

    When the stress invariant ``J`` exceeds ``(2/3) * flow_stress**2``
    every stored deviatoric component is multiplied by
    ``sqrt(2/3) * flow_stress / sqrt(J)``; otherwise the stress is left
    unchanged (the scale is 1).

    Parameters
    ----------
    dest, sources : as for any :class:`Equation`.
    flow_stress : float
        The yield (flow) stress of the material.
    """

    def __init__(self, dest, sources, flow_stress):
        self.flow_stress2 = float(flow_stress*flow_stress)
        self.factor = sqrt(2.0/3.0)*flow_stress
        super(VonMisesPlasticity2D, self).__init__(dest, sources)

    def loop(self, d_idx, d_s00, d_s01, d_s02, d_s11, d_s12, d_s22):
        s00a = d_s00[d_idx]
        s01a = d_s01[d_idx]
        s11a = d_s11[d_idx]

        # 2D stress invariant.  The tensor is symmetric (s10 == s01), so
        # the original cross term s01*s10 reduces to s01**2; only the
        # in-plane components enter J here.
        J = s00a*s00a + 2.0*s01a*s01a + s11a*s11a

        scale = 1.0
        if (J > 2.0/3.0 * self.flow_stress2):
            # Radial return: bring the stress back to the yield surface.
            scale = self.factor/sqrt(J)

        # Scale and store all deviatoric components (the out-of-plane
        # ones are scaled too, exactly as in the original formulation).
        d_s00[d_idx] = scale * s00a
        d_s01[d_idx] = scale * s01a
        d_s02[d_idx] = scale * d_s02[d_idx]

        d_s11[d_idx] = scale * s11a
        d_s12[d_idx] = scale * d_s12[d_idx]

        d_s22[d_idx] = scale * d_s22[d_idx]
class MieGruneisenEOS(Equation):
    """Mie-Gruneisen equation of state.

    Computes pressure from density and specific internal energy using a
    Hugoniot reference curve with a polynomial in the compression ratio.
    """

    def __init__(self, dest, sources, gamma, r0, c0, S):

        self.gamma = float(gamma)   # Gruneisen gamma
        self.r0 = float(r0)         # reference density
        self.c0 = float(c0)         # unshocked sound speed
        self.S = float(S)           # linear Hugoniot slope coefficient

        # Polynomial Hugoniot coefficients derived from r0, c0 and S.
        self.a0 = a0 = float(r0 * c0 * c0)
        self.b0 = a0 * ( 1 + 2.0*(S - 1.0) )
        # NOTE(review): this OVERWRITES self.c0 -- the unshocked sound
        # speed stored just above -- with the cubic Hugoniot coefficient.
        # The loop below only uses the coefficient meaning, but any other
        # reader of self.c0 gets the wrong value; consider renaming this
        # attribute (e.g. self.d0) after checking for external users.
        self.c0 = a0 * ( 2*(S - 1.0) + 3*(S - 1.0)*(S - 1.0) )

        super(MieGruneisenEOS, self).__init__(dest, sources)

    def loop(self, d_idx, d_p, d_rho, d_e):
        rhoa = d_rho[d_idx]
        ea = d_e[d_idx]

        gamma = self.gamma
        # Compression measure: ratio > 0 means compressed, < 0 expanded.
        ratio = rhoa/self.r0 - 1.0
        ratio2 = ratio * ratio

        # Hugoniot reference pressure: linear term always, the quadratic
        # and cubic corrections only in compression.
        PH = self.a0 * ratio
        if ratio > 0:
            PH = PH + ratio2 * (self.b0 + self.c0*ratio)

        d_p[d_idx] = (1. - 0.5*gamma*ratio) * PH + rhoa * ea * gamma
def _check_shock_tube(solver, **approx_kw):
    """Run *solver* on the Sod shock-tube problem and check the star
    state ``(p*, u*)`` against reference values.

    ``approx_kw`` is forwarded to :func:`pytest.approx` so callers can
    loosen the tolerance for approximate solvers.
    """
    # Given
    # Shock tube: the classic Sod initial data -- high-pressure dense
    # gas on the left, low-pressure light gas on the right, both at rest.
    gamma = 1.4
    rhol, pl, ul = 1.0, 1.0, 0.0
    rhor, pr, ur = 0.125, 0.1, 0.0

    result = [0.0, 0.0]
    # When -- the solver writes (p*, u*) into ``result`` in place.
    solver(rhol, rhor, pl, pr, ul, ur, gamma, niter=20,
           tol=1e-6, result=result)

    # Then -- reference star-state values, presumably from the exact
    # Riemann solution (cf. Toro) -- confirm provenance.
    assert result == approx((0.30313, 0.92745), **approx_kw)
approx((46.0950, -6.19633), **approx_kw) 72 | 73 | 74 | def test_exact_riemann(): 75 | solver = R.exact 76 | 77 | _check_shock_tube(solver, rel=1e-4) 78 | _check_blastwave(solver, rel=1e-3) 79 | _check_sjogreen(solver, abs=1e-4) 80 | _check_woodward_collela(solver, rel=1e-4) 81 | 82 | 83 | def test_van_leer(): 84 | solver = R.van_leer 85 | _check_shock_tube(solver, rel=1e-3) 86 | _check_blastwave(solver, rel=1e-2) 87 | _check_sjogreen(solver, abs=1e-2) 88 | _check_woodward_collela(solver, rel=1e-2) 89 | 90 | 91 | def test_ducowicz(): 92 | solver = R.ducowicz 93 | _check_shock_tube(solver, rel=0.2) 94 | _check_blastwave(solver, rel=0.4) 95 | _check_sjogreen(solver, abs=1e-2) 96 | _check_woodward_collela(solver, rel=0.4) 97 | 98 | 99 | # Most other solvers seem rather poor in comparison. 100 | @pytest.mark.parametrize("solver", solvers) 101 | def test_all_solver_api(solver): 102 | if solver.__name__ in ['roe', 'hllc']: 103 | rel = 2.0 104 | else: 105 | rel = 1.0 106 | _check_shock_tube(solver, rel=rel) 107 | -------------------------------------------------------------------------------- /pysph/sph/tests/test_scheme.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 3 | from pysph.sph.scheme import SchemeChooser, WCSPHScheme 4 | from pysph.sph.wc.edac import EDACScheme 5 | 6 | 7 | def test_scheme_chooser_does_not_clobber_default(): 8 | 9 | # When 10 | wcsph = WCSPHScheme( 11 | ['f'], ['b'], dim=2, rho0=1.0, c0=10.0, 12 | h0=0.1, hdx=1.3, alpha=0.2, beta=0.1, 13 | ) 14 | edac = EDACScheme( 15 | fluids=['f'], solids=['b'], dim=2, c0=10.0, nu=0.001, 16 | rho0=1.0, h=0.1, alpha=0.0, pb=0.0 17 | ) 18 | s = SchemeChooser(default='wcsph', wcsph=wcsph, edac=edac) 19 | p = ArgumentParser(conflict_handler="resolve") 20 | s.add_user_options(p) 21 | opts = p.parse_args([]) 22 | 23 | # When 24 | s.consume_user_options(opts) 25 | 26 | # Then 27 | assert s.scheme.alpha == 0.2 28 | assert s.scheme.beta == 
class Continuity(Equation):
    """Continuity equation with an acoustic Riemann-solver closure
    (Parshikov-style SPH).

    NOTE(review): ``sqrt`` is used without a visible import -- pysph
    injects math functions into transpiled equation code; confirm this
    module is only used through the equation framework.
    """

    def initialize(self, d_idx, d_arho):
        # Reset the density-rate accumulator for this particle.
        d_arho[d_idx] = 0.0

    def loop(self, d_idx, s_idx, s_m, d_u, d_v, d_w, s_u, s_v, s_w, d_cs, s_cs,
             d_rho, d_arho, s_rho, d_p, s_p, DWIJ, RIJ, XIJ):
        # Left (destination) and right (source) states of the pairwise
        # Riemann problem along the line joining the two particles.
        rl = d_rho[d_idx]
        rr = s_rho[s_idx]
        pl = d_p[d_idx]
        pr = s_p[s_idx]
        cl = d_cs[d_idx]
        cr = s_cs[s_idx]
        uxl = d_u[d_idx]
        uyl = d_v[d_idx]
        uzl = d_w[d_idx]
        uxr = s_u[s_idx]
        uyr = s_v[s_idx]
        uzr = s_w[s_idx]

        # Project the velocities onto the inter-particle direction; the
        # guard avoids dividing by RIJ when the particles coincide.
        if RIJ >= 1.0e-16:
            ul = -(uxl * XIJ[0] + uyl * XIJ[1] + uzl * XIJ[2]) / RIJ
            ur = -(uxr * XIJ[0] + uyr * XIJ[1] + uzr * XIJ[2]) / RIJ
        else:
            ul = 0.0
            ur = 0.0
        # Acoustic-approximation star velocity of the Riemann problem.
        u_star = (ul * rl * cl + ur * rr * cr + pl - pr) / (rl * cl + rr * cr)
        # Magnitude of the kernel gradient.
        dwdr = sqrt(DWIJ[0] * DWIJ[0] + DWIJ[1] * DWIJ[1] + DWIJ[2] * DWIJ[2])

        d_arho[d_idx] += 2.0 * s_m[s_idx] * dwdr * (ul - u_star) * rl / rr
self.gy 48 | d_aw[d_idx] = self.gz 49 | 50 | def loop(self, d_idx, s_idx, s_m, d_u, d_v, d_w, s_u, s_v, s_w, d_cs, s_cs, 51 | d_rho, s_rho, d_p, s_p, d_au, d_av, d_aw, RIJ, XIJ, DWIJ): 52 | rl = d_rho[d_idx] 53 | rr = s_rho[s_idx] 54 | pl = d_p[d_idx] 55 | pr = s_p[s_idx] 56 | cl = d_cs[d_idx] 57 | cr = s_cs[s_idx] 58 | uxl = d_u[d_idx] 59 | uyl = d_v[d_idx] 60 | uzl = d_w[d_idx] 61 | uxr = s_u[s_idx] 62 | uyr = s_v[s_idx] 63 | uzr = s_w[s_idx] 64 | m = s_m[s_idx] 65 | 66 | if RIJ >= 1.0e-16: 67 | ul = -(uxl * XIJ[0] + uyl * XIJ[1] + uzl * XIJ[2]) / RIJ 68 | ur = -(uxr * XIJ[0] + uyr * XIJ[1] + uzr * XIJ[2]) / RIJ 69 | else: 70 | ul = 0.0 71 | ur = 0.0 72 | p_star = pl * rr * cr + pr * cl * rl - rl * rr * cl * cr * (ur - ul) 73 | p_star /= (rl * cl + rr * cr) 74 | factor = -2.0 * m * p_star / (rl * rr) 75 | 76 | d_au[d_idx] += factor * DWIJ[0] 77 | d_av[d_idx] += factor * DWIJ[1] 78 | d_aw[d_idx] += factor * DWIJ[2] 79 | -------------------------------------------------------------------------------- /pysph/tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pypr/pysph/07761bf9d7242d671dfcda5037d250e410f51aa5/pysph/tools/__init__.py -------------------------------------------------------------------------------- /pysph/tools/cli.py: -------------------------------------------------------------------------------- 1 | """Convenience script for running various PySPH related tasks. 
def run_viewer(args):
    """Forward to the Mayavi viewer's CLI entry point."""
    from pysph.tools.mayavi_viewer import main
    main(args)


def run_examples(args):
    """Forward to the example-runner CLI entry point."""
    from pysph.examples.run import main
    main(args)


def output_vtk(args):
    """Forward to the VTK output dumper."""
    from pysph.solver.vtk_output import main
    main(args)


def dump_xdmf(args):
    """Forward to the XDMF dumper."""
    from pysph.tools.dump_xdmf import main
    main(args)


def _has_pysph_dir():
    """Return True if the CWD contains a ``pysph`` package directory.

    NOTE(review): not called anywhere in this module -- possibly dead or
    used by external tooling; confirm before removing.
    """
    init_py = join('pysph', '__init__.py')
    init_pyc = join('pysph', '__init__.pyc')
    return exists(init_py) or exists(init_pyc)


def run_tests(args):
    """Run the whole PySPH test suite via pytest, forwarding extra args."""
    argv = ['--pyargs', 'pysph'] + args
    from pytest import cmdline
    cmdline.main(args=argv)


def make_binder(args):
    """Forward to the mybinder.org directory builder."""
    from pysph.tools.binder import main
    main(args)


def cull_files(args):
    """Forward to the output-file culling tool."""
    from pysph.tools.cull import main
    main(args)


def manage_cache(args):
    """Forward to the cache-management tool."""
    from pysph.tools.manage_cache import main
    main(args)


def main():
    """Top-level ``pysph`` command: dispatch to a sub-command.

    Each sub-command is registered with ``add_help=False`` and its
    handler stored via ``set_defaults(func=...)``; unknown arguments are
    passed through to the handler so sub-tools own their full CLI.
    """
    parser = ArgumentParser(description=__doc__, add_help=False)
    parser.add_argument(
        "-h", "--help", action="store_true", default=False, dest="help",
        help="show this help message and exit"
    )
    subparsers = parser.add_subparsers(help='sub-command help')

    viewer = subparsers.add_parser(
        'view', help='View output files generated by PySPH',
        add_help=False
    )
    viewer.set_defaults(func=run_viewer)

    runner = subparsers.add_parser(
        'run', help='Run PySPH examples',
        add_help=False
    )
    runner.set_defaults(func=run_examples)

    vtk_out = subparsers.add_parser(
        'dump_vtk', help='Dump VTK Output',
        add_help=False
    )
    vtk_out.set_defaults(func=output_vtk)

    xdmf_out = subparsers.add_parser(
        'dump_xdmf', help='Generate XDMF',
        add_help=False
    )
    xdmf_out.set_defaults(func=dump_xdmf)

    tests = subparsers.add_parser(
        'test', help='Run entire PySPH test-suite',
        add_help=False
    )
    tests.set_defaults(func=run_tests)

    binder = subparsers.add_parser(
        'binder',
        help='Make a mybinder.org compatible directory for upload to a ' +
             'host repo',
        add_help=False
    )
    binder.set_defaults(func=make_binder)

    cull = subparsers.add_parser(
        'cull',
        help='Cull files in a given directory by a specified culling_factor',
        add_help=False
    )
    cull.set_defaults(func=cull_files)

    cache = subparsers.add_parser(
        'cache',
        help='Show cache directories or clear them',
        add_help=False
    )
    cache.set_defaults(func=manage_cache)

    # With no arguments (or an explicit help flag) print usage and exit
    # before parse_known_args can dispatch.
    if (len(sys.argv) == 1 or (len(sys.argv) > 1 and
                               sys.argv[1] in ['-h', '--help'])):
        parser.print_help()
        sys.exit()

    args, extra = parser.parse_known_args()
    # Hand the unparsed remainder to the selected sub-command.
    args.func(extra)


if __name__ == '__main__':
    main()
def cull(src_path, c):
    """Delete output files under ``src_path``, sparing one in every ``c``.

    Every simulation directory found below ``src_path`` is processed;
    within each, the files at indices 0, c, 2c, ... (in the order
    returned by ``get_files``) are kept and every other file is
    permanently removed.  Prints the directory size before and after.

    Parameters
    ----------
    src_path : str
        Directory containing the output directories/files to cull.
    c : int
        Culling factor: one in every ``c`` files is spared.
    """
    src_path = os.path.abspath(src_path)
    sim_paths_list = find_sim_dirs(src_path)

    initial_size = find_dir_size(src_path)

    for path in sim_paths_list:
        files = get_files(path)
        # Keep every c-th file; delete the rest.  Equivalent to the old
        # set-difference construction but ordered and O(n) with clear
        # intent (also drops the ambiguous local name ``l``).
        for i, fname in enumerate(files):
            if i % c != 0:
                os.remove(fname)

    final_size = find_dir_size(src_path)

    print("Initial size of the directory was: " + str(initial_size) + " bytes")
    print("Final size of the directory is: " + str(final_size) + " bytes")
def create_2D_tank(x1, y1, x2, y2, dx):
    """Generate an open rectangular tank in 2D.

    Particles are placed on the left wall, the bottom (corner points
    excluded, since the walls already cover them) and the right wall, in
    that order.

    Parameters
    ----------
    x1, y1, x2, y2 : float
        Coordinates of the rectangle's opposite corners.
    dx : float
        Particle spacing.

    Returns
    -------
    x, y : numpy.ndarray
        Coordinates of the tank particles.
    """
    # left wall
    yl = numpy.arange(y1, y2 + dx/2, dx)
    xl = numpy.ones_like(yl) * x1

    # right wall
    yr = numpy.arange(y1, y2 + dx/2, dx)
    xr = numpy.ones_like(yr) * x2

    # bottom, skipping the corner points that lie on the walls
    xb = numpy.arange(x1 + dx, x2 - dx + dx/2, dx)
    yb = numpy.ones_like(xb) * y1

    # Assemble in the order: left wall, bottom, right wall.
    x = numpy.concatenate((xl, xb, xr))
    y = numpy.concatenate((yl, yb, yr))
    return x, y
def create_3D_filled_region(x1, y1, z1, x2, y2, z2, dx):
    """Return coordinates filling the box ``[x1, x2] x [y1, y2] x [z1, z2]``
    with a regular lattice of spacing ``dx``.

    Returns three flat arrays ``x, y, z`` of equal length.
    """
    xs = numpy.arange(x1, x2 + dx/2, dx)
    ys = numpy.arange(y1, y2 + dx/2, dx)
    zs = numpy.arange(z1, z2 + dx/2, dx)
    # 'ij' indexing reproduces numpy.mgrid's axis ordering exactly.
    gx, gy, gz = numpy.meshgrid(xs, ys, zs, indexing='ij')
    return gx.ravel(), gy.ravel(), gz.ravel()
-------------------------------------------------------------------------------- /pysph/tools/manage_cache.py: -------------------------------------------------------------------------------- 1 | """Manage the PySPH and Compyle cache directories. 2 | 3 | These directories contain the generated sources and extension modules and can 4 | get quite big. The command allows you to see the path and size of these cache 5 | directories and also clear them out if they are too big. 6 | 7 | """ 8 | import argparse 9 | from pathlib import Path 10 | import shutil 11 | import sys 12 | 13 | 14 | def _get_cache_dirs(): 15 | home = Path('~').expanduser() 16 | cc = home / '.compyle' / 'source' 17 | pc = home / '.pysph' / 'source' 18 | return (cc, pc) 19 | 20 | 21 | def _find_size(pth): 22 | return sum(f.stat().st_size for f in pth.glob('**/*') if f.is_file()) 23 | 24 | 25 | def show_cache(): 26 | cc, pc = _get_cache_dirs() 27 | print("PySPH cache directories are at:") 28 | GB = 2**30 29 | print("{} {:<.3g} GB".format(str(cc), _find_size(cc)/GB)) 30 | print("{} {:<.3g} GB".format(str(pc), _find_size(pc)/GB)) 31 | 32 | 33 | def clear_cache(): 34 | cc, pc = _get_cache_dirs() 35 | print("Clearing cache in\n", cc, "\n", pc) 36 | confirm = input('Are you sure? (y/N) ') 37 | if confirm in ['y', 'Y']: 38 | if cc.exists(): 39 | shutil.rmtree(cc) 40 | if pc.exists(): 41 | shutil.rmtree(pc) 42 | 43 | 44 | def main(argv=None): 45 | parser = argparse.ArgumentParser( 46 | prog='cache', description=__doc__, add_help=False 47 | ) 48 | 49 | parser.add_argument( 50 | "-h", 51 | "--help", 52 | action="store_true", 53 | default=False, 54 | dest="help", 55 | help="show this help message and exit" 56 | ) 57 | 58 | parser.add_argument( 59 | "-c", "--clear", 60 | action="store_true", 61 | default=False, 62 | help="Delete all the files in the cache directory." 
"""Utility functions to read Daniel Price's NDSPMHD solution files"""

import struct
from pysph.base.utils import get_particle_array_gasd as gpa

from .fortranfile import FortranFile


def ndspmhd2pysph(fname, dim=2, read_type=False):
    """Read output data file from NDSPMHD.

    Parameters
    ----------
    fname : str
        NDSPMHD data filename.
    dim : int
        Problem dimension.
    read_type : bint
        Flag to read the `type` property for particles.

    Returns the ParticleArray representation of the data that can be
    used in PySPH.

    Notes
    -----
    The records are read strictly in the order NDSPMHD writes them;
    several fields below are read only to advance the file pointer and
    their raw bytes are never unpacked or used.
    """
    f = FortranFile(fname)

    # get the header length (bytes per Fortran record marker)
    header_length = f._header_length
    endian = f.ENDIAN

    # get the length of the record to be read
    length = f._read_check()

    # now read the individual entries:

    # current time : double
    t = f._read_exactly(8)
    t = struct.unpack(endian+"1d", t)[0]

    # number of particles and number printed : int
    npart = f._read_exactly(4)
    nprint = f._read_exactly(4)

    npart = struct.unpack(endian+"1i", npart)[0]
    nprint = struct.unpack(endian+"1i", nprint)[0]

    # gamma and hfact : double
    gamma = f._read_exactly(8)
    hfact = f._read_exactly(8)

    gamma = struct.unpack(endian+"1d", gamma)[0]
    hfact = struct.unpack(endian+"1d", hfact)[0]

    # ndim, ndimV : int (read but not unpacked; advances the pointer)
    ndim = f._read_exactly(4)
    ndimV = f._read_exactly(4)

    # ncolumns, iformat, ibound : int (only ncolumns is unpacked)
    nc = f._read_exactly(4)
    ifmt = f._read_exactly(4)
    ib1 = f._read_exactly(4)
    ib2 = f._read_exactly(4)

    nc = struct.unpack(endian+"1i", nc)[0]

    # xmin, xmax : double (read but not unpacked; advances the pointer)
    xmin1 = f._read_exactly(8)
    xmin2 = f._read_exactly(8)
    xmax1 = f._read_exactly(8)
    xmax2 = f._read_exactly(8)

    # n : int -- length of the geometry-type string that follows
    n = f._read_exactly(4)
    n = struct.unpack(endian+"1i", n)[0]

    # geometry type
    geom = f._read_exactly(n)

    # end reading this header
    f._read_check()

    # Now go on to the arrays. Remember, there are 16 entries
    # corresponding to the columns

    # positions and velocities
    x = f.readReals(prec="d")
    y = f.readReals(prec="d")
    u = f.readReals(prec="d")
    v = f.readReals(prec="d")
    w = f.readReals(prec="d")

    # smoothing length, density, thermal energy and mass
    h = f.readReals(prec="d")
    rho = f.readReals(prec="d")
    e = f.readReals(prec="d")
    m = f.readReals(prec="d")

    # artificial-viscosity parameters (read to advance; unused below)
    alpha1 = f.readReals(prec="d")
    alpha2 = f.readReals(prec="d")

    # pressure, density rate and grad-h terms
    p = f.readReals(prec="d")
    drhobdtbrho = f.readReals("d")
    gradh = f.readReals("d")

    # accelerations
    au = f.readReals("d")
    av = f.readReals("d")
    aw = f.readReals("d")

    # By default, NDSPMHD does not output the type array. You need to
    # add this to the output routine if you want it.
    # NOTE(review): the value read here is bound to ``type`` but is not
    # attached to the returned particle array -- confirm intent.
    if read_type:
        type = f.readInts(prec="i")

    # now create the particle array (drhobdtbrho is stored as ``div``)
    pa = gpa(name='fluid', x=x, y=y, m=m, h=h, rho=rho, e=e, p=p,
             u=u, v=v, w=w, au=au, av=av, aw=aw, div=drhobdtbrho)

    return pa
class Mesh:
    """Thin wrapper over a ``meshio`` mesh exposing the connectivity,
    coordinates and facet normals needed for surface point generation.

    Only triangle meshes are supported.
    """

    def __init__(self, file_name, file_type=None):
        """Read the mesh, optionally forcing the meshio file type."""
        if file_type is None:
            self.mesh = meshio.read(file_name)
        else:
            self.mesh = meshio.read(file_name, file_type)

        # Connectivity accumulator: one row of 3 vertex indices per
        # triangle.
        self.cells = np.array([], dtype=int).reshape(0, 3)

    def extract_connectivity_info(self):
        """Concatenate the connectivity of all cell blocks and return it."""
        for block in self.mesh.cells:
            self.cells = np.concatenate((self.cells, block.data))

        return self.cells

    def extract_coordinates(self):
        """Return (and store) the x, y, z coordinates of all mesh points."""
        x, y, z = self.mesh.points.T
        self.x, self.y, self.z = x, y, z

        return x, y, z

    def compute_normals(self):
        """Compute the unit normal of each triangle via the cross
        product of two edge vectors.
        """
        n = self.cells.shape[0]
        points = self.mesh.points

        # pts has shape (n_triangles, 3 vertices, 3 coords)
        pts = points[self.cells]
        a = pts[:, 1] - pts[:, 0]
        b = pts[:, 2] - pts[:, 0]

        normals = np.cross(a, b)
        mag = np.linalg.norm(normals, axis=1)
        mag.shape = (n, 1)
        # NOTE(review): a degenerate (zero-area) triangle makes mag 0
        # and divides by zero here.
        self.normals = normals/mag

        return self.normals


def mesh2points(file_name, dx, file_format=None, uniform=False):
    '''
    Generates points with a spacing dx to describe the surface of the
    input mesh file.

    Supported file formats: Refer to https://github.com/nschloe/meshio

    Only works with triangle meshes.

    Parameters
    ----------

    file_name : string
        Mesh file name
    dx : float
        Required spacing between generated particles
    file_format : str
        Mesh file format
    uniform : bool
        If True generates points on a grid of spacing dx

    Returns
    -------
    xf, yf, zf : ndarray
        1d numpy arrays with x, y, z coordinates of covered surface
    '''
    # BUG FIX: file_format was documented but never forwarded to the
    # reader, so an explicit format was silently ignored.
    mesh = Mesh(file_name, file_format)
    cells = mesh.extract_connectivity_info()
    x, y, z = mesh.extract_coordinates()

    if not uniform:
        xf, yf, zf = surface_points(x, y, z, cells, dx)
    else:
        # BUG FIX: was ``file_format is 'stl'`` -- identity comparison
        # with a string literal is unreliable (and a SyntaxWarning on
        # modern Python); use equality.
        if file_format == 'stl':
            # STL readers provide per-facet normals directly.
            normals = mesh.mesh.cell_data['facet_normals'][0]
        else:
            normals = mesh.compute_normals()

        xf, yf, zf = surf_points_uniform(x, y, z, cells, normals, dx, dx)

    return xf, yf, zf


def get_directory(file):
    """Return the absolute directory containing *file*."""
    return dirname(abspath(file))


def kill_process(process):
    """Timer callback: kill *process* when the timeout expires."""
    print('*'*70)
    print('KILLING PROCESS ON TIMEOUT')
    print('*'*70)
    process.kill()


def run(filename, args=None, nprocs=2, timeout=30.0, path=None):
    """Run a python script with MPI or in serial (if nprocs=1). Kill process
    if it takes longer than the specified timeout.

    Parameters
    ----------
    filename - filename of python script to run under mpi.
    args - List of arguments to pass to script.
    nprocs - number of processes to run (1 => serial non-mpi run).
    timeout - time in seconds to wait for the script to finish running,
        else raise a RuntimeError exception.
    path - the path under which the script is located
        Defaults to the location of this file (__file__), not curdir.

    Returns
    -------
    (returncode, stdout, stderr) of the completed process.
    """
    if args is None:
        args = []
    # BUG FIX: the docstring promises that ``path`` defaults to the
    # directory of this file, but ``join(None, filename)`` raised a
    # TypeError when path was omitted.
    if path is None:
        path = get_directory(__file__)
    file_path = abspath(join(path, filename))
    cmd = [sys.executable, file_path] + args
    if nprocs > 1:
        cmd = ['mpiexec', '-n', str(nprocs), '--oversubscribe'] + cmd

    print('running test:', cmd)

    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    # Watchdog: kill the child if it outlives the timeout.
    timer = Timer(timeout, kill_process, [process])
    timer.start()
    out, err = process.communicate()
    timer.cancel()
    retcode = process.returncode
    if retcode:
        msg = 'test ' + filename + ' failed with returncode ' + str(retcode)
        print(out.decode('utf-8'))
        print(err.decode('utf-8'))
        print('#'*80)
        print(msg)
        print('#'*80)
        raise RuntimeError(msg)
    return retcode, out, err
    def evaluate(self, t=0.0, dt=0.1):
        """Evaluate the SPH equations; dummy t and dt values can
        be passed.

        Parameters
        ----------
        t: float
            Time passed through to the acceleration evaluator.
        dt: float
            Timestep passed through to the acceleration evaluator.
        """
        self.func_eval.compute(t, dt)

    def update(self, update_domain=True):
        """Update the NNPS when particles have moved.

        If the update_domain is False, the domain is not updated.

        Use this when the arrays are the same but the particles have themselves
        changed. If the particle arrays themselves change use the
        `update_particle_arrays` method instead.
        """
        if update_domain:
            self.nnps.update_domain()
        self.nnps.update()
def sphysics2pysph(partfile, indat='INDAT', dim=3, vtk=True):
    """Load an SPHysics part file and input data.

    Parameters
    ----------
    partfile : str
        SPHysics part file (e.g. IPART, PART_00032, etc.)

    indat : str
        SPHysics input data file.

    dim : int
        Dimension for SPHysics files.

    vtk : bint
        Flag to dump VTK output.

    Returns
    -------
    list
        ``[fluid, solid]`` WCSPH particle arrays.

    Notes
    -----
    The dimension is very important as the SPHysics particle data is
    different in the 2D and 3D cases.
    """
    data = numpy.loadtxt(partfile)

    # sanity check on the input file and problem dimension: a 9-column
    # part file is the 3D layout.
    ncols = data.shape[-1]
    if (ncols == 9) and (dim == 2):
        raise RuntimeError('Possibly inconsistent dim and SPHysics part file')

    input_data = numpy.loadtxt(indat)

    partbase = basename(partfile)

    # IPART is the initial condition (file number 0); other files are
    # named PART_<fileno>.
    if partbase.startswith('IPART'):
        fileno = 0
    else:
        fileno = int(partbase.split('_')[-1])

    # number of fluid and total number of particles. This is very
    # dangerous and relies on the SPHysics manual (pg. 38)
    dx = float(input_data[21])
    dy = float(input_data[22])
    dz = float(input_data[23])
    h = float(input_data[24])

    np = int(input_data[25])   # total number of particles
    nb = int(input_data[26])   # number of boundary particles
    nbf = int(input_data[27])  # number of fixed boundary particles

    # now load the individual arrays
    if dim == 3:
        x = data[:, 0]; y = data[:, 1]; z = data[:, 2]
        u = data[:, 3]; v = data[:, 4]; w = data[:, 5]

        rho = data[:, 6]; p = data[:, 7]; m = data[:, 8]

    else:
        x = data[:, 0]; z = data[:, 1]
        u = data[:, 2]; w = data[:, 3]

        rho = data[:, 4]; p = data[:, 5]; m = data[:, 6]

        # BUG FIX: 2D part files carry no y/v columns, but the particle
        # arrays below are built with all three components. Without
        # these zero fills, the dim == 2 path raised a NameError.
        y = numpy.zeros_like(x)
        v = numpy.zeros_like(x)

    # smoothing lengths (constant h from the input data file)
    h = numpy.ones_like(x) * h

    # now create the PySPH arrays: the first nb rows are boundary
    # particles, the remainder are fluid.
    fluid = gpa(
        name='fluid', x=x[nb:], y=y[nb:], z=z[nb:], u=u[nb:],
        v=v[nb:], w=w[nb:], rho=rho[nb:], p=p[nb:], m=m[nb:],
        h=h[nb:])

    solid = gpa(
        name='boundary', x=x[:nb], y=y[:nb], z=z[:nb], u=u[:nb],
        v=v[:nb], w=w[:nb], rho=rho[:nb], p=p[:nb], m=m[:nb],
        h=h[:nb])

    # PySPH arrays
    arrays = [fluid, solid]

    # Dump out vtk files for Paraview viewing
    if vtk:
        from .pprocess import PySPH2VTK
        props = ['u', 'v', 'w', 'rho', 'p', 'vmag', 'tag']
        pysph2vtk = PySPH2VTK(arrays, fileno=fileno)

        pysph2vtk.write_vtk('fluid', props)
        pysph2vtk.write_vtk('boundary', props)

    # return the list of arrays
    return arrays
import Path 4 | from tempfile import mkdtemp 5 | 6 | import numpy as np 7 | from pytest import importorskip 8 | 9 | from pysph.base.utils import get_particle_array 10 | from pysph.solver.output import dump 11 | from pysph.tools.dump_xdmf import main as dump_xdmf 12 | 13 | 14 | class TestDumpXDMF(unittest.TestCase): 15 | def test_dump_xdmf(self, npoints=10, random_seed=0): 16 | vtk = importorskip('vtk') 17 | np.random.seed(random_seed) 18 | tmp_dir = mkdtemp() 19 | hdf5file = str(Path(tmp_dir) / 'test.hdf5') 20 | 21 | # Assign rho as random data, make a particle array and dump it as hdf5. 22 | self.rho = np.random.rand(npoints) 23 | pa = get_particle_array(name='fluid', 24 | rho=self.rho, 25 | x=np.arange(npoints), 26 | y=np.zeros(npoints), 27 | z=np.zeros(npoints)) 28 | dump(hdf5file, [pa], {}) 29 | 30 | try: 31 | # Generate XDMF for dumped hdf5 file. 32 | dump_xdmf([hdf5file, '--combine-particle-arrays', '--outdir', tmp_dir]) 33 | 34 | # Retrieve data by reading xdmf file 35 | xdmffile = Path(hdf5file).with_suffix('.xdmf') 36 | reader = vtk.vtkXdmfReader() 37 | reader.SetFileName(xdmffile) 38 | reader.Update() 39 | block = reader.GetOutput().GetBlock(0) 40 | point_data = block.GetPointData() 41 | array_data = {} 42 | for i in range(point_data.GetNumberOfArrays()): 43 | vtk_array = point_data.GetArray(i) 44 | if vtk_array: 45 | array_name = vtk_array.GetName() 46 | array_data[array_name] = np.array(vtk_array) 47 | 48 | # Check if retrieved data and generated data is same. 49 | assert np.allclose(self.rho, array_data['rho'], atol=1e-14), \ 50 | "Expected %s,\n got %s" % (self.rho, array_data['rho']) 51 | 52 | del reader 53 | # Note: Ideally, the reader need not be deleted as reader.Update() 54 | # itself will open the file, read its contents, and then close the 55 | # file. 56 | # Ref. 
class TestSPHEvaluator(unittest.TestCase):
    """Tests for ``pysph.tools.sph_evaluator.SPHEvaluator``."""

    def setUp(self):
        # A 1D line of 10 unit-mass particles on [0, 1] (so dx = 1/9),
        # used as the source array for summation density in every test.
        x = np.linspace(0, 1, 10)
        dx = x[1] - x[0]
        self.dx = dx
        m = np.ones_like(x)
        h = np.ones_like(x)*dx
        self.src = get_particle_array(name='src', x=x, m=m, h=h)
        self.equations = [SummationDensity(dest='dest', sources=['src'])]

    def test_evaluation(self):
        # Given: a single destination particle in the middle of the line.
        xd = [0.5]
        hd = self.src.h[:1]
        dest = get_particle_array(name='dest', x=xd, h=hd)
        sph_eval = SPHEvaluator(
            arrays=[dest, self.src], equations=self.equations, dim=1
        )

        # When.
        sph_eval.evaluate()

        # Then: summation density of unit masses at spacing dx should be
        # close to 1/dx = 9.
        self.assertAlmostEqual(dest.rho[0], 9.0, places=2)

    def test_evaluation_with_domain_manager(self):
        # Given: the destination sits at the periodic boundary, so the
        # full density is only recovered with a periodic domain.
        xd = [0.0]
        hd = self.src.h[:1]
        dest = get_particle_array(name='dest', x=xd, h=hd)
        dx = self.dx
        dm = DomainManager(xmin=-dx/2, xmax=1.0+dx/2, periodic_in_x=True)
        sph_eval = SPHEvaluator(
            arrays=[dest, self.src], equations=self.equations, dim=1,
            domain_manager=dm
        )

        # When.
        sph_eval.evaluate()

        # Then.
        self.assertAlmostEqual(dest.rho[0], 9.0, places=2)

    def test_updating_particle_arrays(self):
        # Given: evaluate once at x = 0.5 to get a reference density.
        xd = [0.5]
        hd = self.src.h[:1]
        dest = get_particle_array(name='dest', x=xd, h=hd)
        sph_eval = SPHEvaluator(
            [dest, self.src], equations=self.equations, dim=1
        )
        sph_eval.evaluate()
        rho0 = dest.rho[0]

        # When: move the particle to the edge and reset the arrays.
        dest.x[0] = 0.0
        sph_eval.update_particle_arrays([dest, self.src])
        sph_eval.evaluate()

        # Then: the density must change (roughly half the neighbors).
        self.assertNotEqual(rho0, dest.rho[0])
        self.assertAlmostEqual(dest.rho[0], 7.0, places=1)
def get_number_density_hcp(dx, dy, kernel, h0):
    """Return the kernel number-density at the center of an HCP lattice.

    A dummy hexagonal-close-packed particle distribution with spacings
    (dx, dy) is generated in the unit square and the kernel sum
    ``sum_i W(x0 - x_i, h0)`` is evaluated at a particle near the
    middle of the lattice.

    Parameters
    ----------
    dx, dy : float
        Reference particle spacings.
    kernel : object
        Kernel with a ``kernel(dwij, rij, h)`` method.
    h0 : float
        Smoothing length used for the kernel sum.

    Returns
    -------
    float
        The kernel (number-density) sum at the reference particle.
    """
    # create a dummy particle distribution with the reference spacings
    dxb2 = 0.5 * dx
    dyb2 = 0.5 * dy

    xstart = 0.25 * dx
    ystart = dyb2

    # create the points
    x, y = numpy.mgrid[xstart:1.0:dx,
                       ystart:1.0:dy]

    # each alternate row is shifted by dxb2
    x[:, ::2] += dxb2

    # the target point: a particle near the middle of the lattice.
    # BUG FIX: use floor division -- ``nrows/2`` is a float under
    # Python 3 and raises a TypeError when used as an index.
    nrows, ncols = x.shape
    x0, y0 = x[nrows//2, ncols//2], y[nrows//2, ncols//2]

    x = x.ravel()
    y = y.ravel()

    # now do a kernel sum over every particle in the lattice
    wij_sum = 0.0
    for i in range(x.size):
        xij = x0 - x[i]
        yij = y0 - y[i]
        zij = 0.0

        rij = numpy.sqrt(xij**2 + yij**2 + zij**2)
        wij_sum += kernel.kernel([xij, yij, zij], rij, h0)

    return wij_sum
/requirements-test.txt: -------------------------------------------------------------------------------- 1 | pytest>=3.0 2 | mock>=1.0 3 | h5py 4 | vtk 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | setuptools>=42.0.0 3 | Cython>=0.20 4 | cyarray 5 | compyle>=0.8 6 | mako 7 | pytools 8 | Beaker 9 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [tool:pytest] 2 | addopts = -m "not slow" 3 | markers = 4 | slow: marks tests as slow (deselect with '-m "not slow"') 5 | parallel: tests that require MPI 6 | -------------------------------------------------------------------------------- /starcluster/ami.sh: -------------------------------------------------------------------------------- 1 | START_TIME=$SECONDS 2 | # Create the AMI 3 | # Create one t2.micro instance loaded with the Ubuntu 16.04 AMI 4 | starcluster start -o -s 1 -n ami-cd0f5cb6 -i t2.micro starcluster_cust 5 | 6 | # Check if master node can be connected to via SSH 7 | # 5 Retries 8 | for try in {1..5} 9 | do 10 | starcluster sshmaster starcluster_cust "exit" > /dev/null 2>&1 && CONN=1 && break 11 | sleep 15 12 | done 13 | 14 | if [ $CONN != 1 ]; then 15 | echo "Unable to connect to starcluster master node" 16 | starcluster terminate -f starcluster_cust && exit 17 | fi 18 | 19 | 20 | # Install packages 21 | starcluster sshmaster starcluster_cust "bash -s" < xenial_base.sh 22 | 23 | # Get instance id of the node 24 | INSTANCE_ID=$(starcluster sshmaster starcluster_cust "wget -q -O - http://169.254.169.254/latest/meta-data/instance-id") 25 | 26 | # Save image 27 | # The AMI ID is saved to AMI_ID 28 | starcluster ebsimage "$INSTANCE_ID" "pysph_ami_$RANDOM" | egrep -o "ami-[0-9a-f]+" | uniq > AMI_ID 29 | starcluster terminate 
class PySPHInstaller(PySPHInstallerBase):
    """StarCluster plugin that installs PySPH on the master node and
    maintains the PYSPH hosts file as nodes are added and removed."""

    def run(self, nodes, master, user, user_shell, volumes):
        """Initial cluster setup: configure profiles on every node,
        install PySPH on the master and write the hosts file."""
        aliases = [n.alias for n in nodes]

        log.info("Configuring PYSPH Profile")
        # Configure all nodes concurrently via the worker pool.
        for node in nodes:
            self.pool.simple_job(self._configure_profile,
                                 (node,))
        self.pool.wait(len(nodes))

        master.ssh.switch_user(self.PYSPH_USER)
        log.info("Creating virtual environment")
        self._create_env(master)
        master.ssh.execute("echo '. ~/pysph_env/bin/activate' > ~/.bashrc")

        log.info("Installing PySPH")
        self._install_pysph(master)

        log.info("Adding nodes to PYSPH hosts file")
        pysph_hosts = master.ssh.remote_file(self.PYSPH_HOSTS, 'w')
        pysph_hosts.write('\n'.join(aliases) + '\n')
        # BUG FIX: the remote file was never closed here (on_add_node
        # already closes it), so the hosts data may not be flushed.
        pysph_hosts.close()

    def on_add_node(self, new_node, nodes, master, user, user_shell, volumes):
        """Configure a newly added node and append it to the hosts file."""
        log.info("Configuring PYSPH Profile")
        self._configure_profile(new_node)

        master.ssh.switch_user(self.PYSPH_USER)
        log.info("Adding %s to PYSPH hosts file" % new_node.alias)
        pysph_hosts = master.ssh.remote_file(self.PYSPH_HOSTS, 'a')
        pysph_hosts.write(new_node.alias + '\n')
        pysph_hosts.close()

    def on_remove_node(self, remove_node, nodes, master,
                       user, user_shell, volumes):
        """Remove a departing node's alias from the hosts file."""
        # BUG FIX: switch_user is a method of the ssh connection (as
        # used in run/on_add_node above); ``master.switch_user`` raised
        # an AttributeError.
        master.ssh.switch_user(self.PYSPH_USER)
        log.info("Removing %s from PYSPH hosts file" % remove_node.alias)
        master.ssh.remove_lines_from_file(self.PYSPH_HOSTS, remove_node.alias)
\ 42 | python3-pip \ 43 | python3-psutil \ 44 | sudo \ 45 | tox \ 46 | vim \ 47 | wget \ 48 | virtualenv \ 49 | && rm -rf /var/lib/apt/lists/* 50 | 51 | # Starcluster seems to look for /etc/init.d/nfs 52 | # Create a symbolic link to point to the right file 53 | ln -s /etc/init.d/nfs-kernel-server /etc/init.d/nfs 54 | ln -s /lib/systemd/system/nfs-kernel-server.service /lib/systemd/system/nfs.service 55 | 56 | # rpcbind is shipped instead of portmap on recent Debian installations 57 | # http://star.mit.edu/cluster/mlarchives/2545.html 58 | echo 'exit 0' > /etc/init.d/portmap 59 | chmod +x /etc/init.d/portmap 60 | 61 | # Download sge 62 | curl -L https://github.com/brunogrande/starcluster-ami-config/blob/master/sge.tar.gz?raw=true | sudo tar -xz -C /opt/ 63 | 64 | # Create users 65 | adduser --disabled-password --gecos '' pysph && \ 66 | adduser pysph sudo && \ 67 | echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 68 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py27, py35, py35-pyzoltan, py36, py37 3 | skip_missing_interpreters = True 4 | 5 | [pytest] 6 | addopts = -m "not slow" 7 | markers = 8 | slow: marks tests as slow (deselect with '-m "not slow"') 9 | parallel: tests that require MPI 10 | 11 | [testenv] 12 | sitepackages = True 13 | install_command = 14 | python -m pip install --no-build-isolation {opts} {packages} 15 | # Change to the .tox dir, this avoids problems with the tests picking up the 16 | # the pysph in the current directory leading to false errors. 
17 | changedir = {toxworkdir} 18 | passenv = CC CXX ZOLTAN* USE_TRILINOS 19 | deps = 20 | -rrequirements.txt 21 | -rrequirements-test.txt 22 | pyzoltan: pyzoltan 23 | commands = python -m pytest -v \ 24 | --junit-xml=pytest-{envname}.xml \ 25 | {posargs} \ 26 | --pyargs pysph 27 | --------------------------------------------------------------------------------