├── .circleci ├── build_and_push_image.sh └── config.yml ├── .dockerignore ├── .github ├── dependabot.yml └── pull_request_template.md ├── .gitignore ├── .gitmodules ├── .jenkins ├── actions │ ├── lint.sh │ ├── run_driver_parallel_regression_tests.sh │ ├── run_physics_parallel_regression_tests.sh │ └── run_physics_regression_tests.sh ├── baroclinic_initialization.sh ├── cache.sh ├── checksum.sh ├── driver_configs │ ├── baroclinic_c192_54ranks.yaml │ ├── baroclinic_c192_6ranks.yaml │ ├── baroclinic_c48_6ranks_dycore_only.yaml │ └── baroclinic_c48_6ranks_dycore_only_serialbox.yaml ├── driver_performance.sh ├── fetch_caches.sh ├── generate_caches.sh ├── install_virtualenv.sh ├── jenkins.sh ├── pace_physics_cache_setup.sh ├── print_performance_number.py ├── run_compare_fortran.sh ├── run_diff_rank.sh └── test_driver.sh ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── CONTRIBUTING.md ├── CONTRIBUTORS.md ├── Dockerfile ├── LICENSE.md ├── Makefile ├── Makefile.data_download ├── README.md ├── changed_from_main.py ├── constraints.txt ├── dependencies.dot ├── dependencies.svg ├── doc_primer_orchestration.md ├── docker ├── Makefile.image_names └── postprocessing.Dockerfile ├── docs ├── Makefile ├── conf.py ├── docker.rst ├── fv3.rst ├── index.rst ├── installation.rst ├── make.bat ├── overview.rst ├── physics │ ├── api.rst │ ├── index.rst │ ├── microphysics.rst │ └── state.rst ├── testing.rst └── util │ ├── api.rst │ ├── communication.rst │ ├── history.rst │ ├── index.rst │ ├── installation.rst │ ├── state.rst │ └── utilities.rst ├── driver ├── Makefile ├── README.md ├── examples │ ├── README.md │ ├── baroclinic_init.py │ ├── configs │ │ ├── README.md │ │ ├── baroclinic_c12.yaml │ │ ├── baroclinic_c12_comm_read.yaml │ │ ├── baroclinic_c12_comm_write.yaml │ │ ├── baroclinic_c12_from_serialbox.yaml │ │ ├── baroclinic_c12_null_comm.yaml │ │ ├── baroclinic_c12_orch_cpu.yaml │ │ ├── baroclinic_c12_read_restart_fortran.yml │ │ ├── baroclinic_c12_write_restart.yaml │ │ ├── baroclinic_c48_6ranks_serialbox_test.yaml │ │ ├── tropical_read_restart_fortran.yml │ │ └── tropicalcyclone_c128.yaml │ ├── create_venv.sh │ ├── notebooks │ │ ├── driver_write_config.yaml │ │ └── serial_debugging.ipynb │ ├── plot_baroclinic_init.py │ ├── plot_cube.py │ ├── plot_output.py │ ├── plot_pcolormesh_cube.py │ ├── run_docker.sh │ ├── stencil_signatures.py │ ├── write_then_read.sh │ └── zarr_to_nc.py ├── pace │ └── driver │ │ ├── __init__.py │ │ ├── comm.py │ │ ├── configs │ │ ├── __init__.py │ │ └── comm.py │ │ ├── diagnostics.py │ │ ├── driver.py │ │ ├── grid.py │ │ ├── initialization.py │ │ ├── performance │ │ ├── __init__.py │ │ ├── collector.py │ │ ├── config.py │ │ └── report.py │ │ ├── registry.py │ │ ├── run.py │ │ ├── safety_checks.py │ │ ├── state.py │ │ └── tools.py ├── setup.py └── tests │ └── mpi │ ├── run_save_and_load_restart.sh │ └── test_restart.py ├── dsl ├── pace │ └── dsl │ │ ├── __init__.py │ │ ├── dace │ │ ├── __init__.py │ │ ├── build.py │ │ ├── dace_config.py │ │ ├── orchestration.py │ │ ├── sdfg_debug_passes.py │ │ ├── sdfg_opt_passes.py │ │ ├── utils.py │ │ └── wrapped_halo_exchange.py │ │ ├── gt4py_utils.py │ │ ├── stencil.py │ │ ├── stencil_config.py │ │ └── typing.py └── setup.py ├── examples ├── Dockerfile ├── Makefile ├── README.md ├── build_scripts │ ├── activate_ppan.sh │ ├── build_gaea.sh │ └── build_ppan.sh └── notebooks │ ├── functions.py │ ├── grid_generation.ipynb │ ├── initial_condition_definition.ipynb │ ├── stencil_definition.ipynb │ └── units_config.py ├── external └── daint_venv │ ├── 
.gitignore │ ├── LICENSE.txt │ ├── README.md │ └── install.sh ├── fv3core ├── .gitignore ├── .jenkins │ ├── actions │ │ ├── get_test_data.sh │ │ ├── run_parallel_regression_tests.sh │ │ ├── run_regression_tests.sh │ │ ├── run_standalone.sh │ │ └── test_action.sh │ └── jenkins.sh ├── FORTRAN_CHANGELOG.md ├── LICENSE.txt ├── Makefile ├── README.md ├── convert_xppm_yppm.sh ├── examples │ └── standalone │ │ ├── benchmarks │ │ ├── README.md │ │ ├── collect_memory_usage_data.py │ │ └── run_on_daint.sh │ │ └── runfile │ │ ├── __init__.py │ │ ├── acoustics.py │ │ ├── compile.py │ │ ├── dynamics.py │ │ └── timing.py ├── pace │ └── fv3core │ │ ├── __init__.py │ │ ├── _config.py │ │ ├── initialization │ │ ├── __init__.py │ │ ├── baroclinic.py │ │ ├── baroclinic_jablonowski_williamson.py │ │ ├── dycore_state.py │ │ ├── geos_wrapper.py │ │ └── tropical_cyclone.py │ │ ├── stencils │ │ ├── __init__.py │ │ ├── a2b_ord4.py │ │ ├── basic_operations.py │ │ ├── c_sw.py │ │ ├── d2a2c_vect.py │ │ ├── d_sw.py │ │ ├── del2cubed.py │ │ ├── delnflux.py │ │ ├── divergence_damping.py │ │ ├── dyn_core.py │ │ ├── fillz.py │ │ ├── fv_dynamics.py │ │ ├── fv_subgridz.py │ │ ├── fvtp2d.py │ │ ├── fxadv.py │ │ ├── map_single.py │ │ ├── mapn_tracer.py │ │ ├── moist_cv.py │ │ ├── neg_adj3.py │ │ ├── nh_p_grad.py │ │ ├── pe_halo.py │ │ ├── pk3_halo.py │ │ ├── ppm.py │ │ ├── ray_fast.py │ │ ├── remap_profile.py │ │ ├── remapping.py │ │ ├── riem_solver3.py │ │ ├── riem_solver_c.py │ │ ├── saturation_adjustment.py │ │ ├── sim1_solver.py │ │ ├── temperature_adjust.py │ │ ├── tracer_2d_1l.py │ │ ├── updatedzc.py │ │ ├── updatedzd.py │ │ ├── xppm.py │ │ ├── xtp_u.py │ │ ├── yppm.py │ │ └── ytp_v.py │ │ ├── testing │ │ ├── __init__.py │ │ ├── map_single.py │ │ ├── translate_dyncore.py │ │ ├── translate_fvdynamics.py │ │ └── validation.py │ │ └── utils │ │ ├── __init__.py │ │ └── functional_validation.py ├── setup.py └── tests │ ├── conftest.py │ ├── mpi │ └── test_doubly_periodic.py │ ├── pytest.ini │ └── savepoint │ ├── __init__.py │ ├── conftest.py │ ├── output │ └── .gitkeep │ ├── test_translate.py │ └── translate │ ├── __init__.py │ ├── overrides │ ├── README.md │ ├── baroclinic.yaml │ └── standard.yaml │ ├── translate_a2b_ord4.py │ ├── translate_c_sw.py │ ├── translate_corners.py │ ├── translate_cubedtolatlon.py │ ├── translate_d2a2c_vect.py │ ├── translate_d_sw.py │ ├── translate_del2cubed.py │ ├── translate_del6vtflux.py │ ├── translate_delnflux.py │ ├── translate_divergencedamping.py │ ├── translate_fillz.py │ ├── translate_fvsubgridz.py │ ├── translate_fvtp2d.py │ ├── translate_fxadv.py │ ├── translate_grid.py │ ├── translate_haloupdate.py │ ├── translate_init_case.py │ ├── translate_last_step.py │ ├── translate_moistcvpluspkz_2d.py │ ├── translate_moistcvpluspt_2d.py │ ├── translate_neg_adj3.py │ ├── translate_nh_p_grad.py │ ├── translate_pe_halo.py │ ├── translate_pk3_halo.py │ ├── translate_pressureadjustedtemperature_nonhydrostatic.py │ ├── translate_qsinit.py │ ├── translate_ray_fast.py │ ├── translate_remapping.py │ ├── translate_riem_solver3.py │ ├── translate_riem_solver_c.py │ ├── translate_satadjust3d.py │ ├── translate_tracer2d1l.py │ ├── translate_updatedzc.py │ ├── translate_updatedzd.py │ ├── translate_xppm.py │ ├── translate_xtp_u.py │ ├── translate_yppm.py │ └── translate_ytp_v.py ├── physics ├── .gitignore ├── Makefile ├── README.md ├── pace │ └── physics │ │ ├── __init__.py │ │ ├── _config.py │ │ ├── functions │ │ ├── __init__.py │ │ └── microphysics_funcs.py │ │ ├── physics_state.py │ │ └── stencils │ │ 
├── __init__.py │ │ ├── get_phi_fv3.py │ │ ├── get_prs_fv3.py │ │ ├── microphysics.py │ │ └── physics.py ├── setup.py ├── tests │ ├── conftest.py │ ├── pytest.ini │ └── savepoint │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── test_translate.py │ │ └── translate │ │ ├── README.md │ │ ├── __init__.py │ │ ├── overrides │ │ ├── README.md │ │ ├── baroclinic.yaml │ │ └── standard.yaml │ │ ├── translate_atmos_phy_statein.py │ │ ├── translate_driver.py │ │ ├── translate_fillgfs.py │ │ ├── translate_fv_update_phys.py │ │ ├── translate_gfs_physics_driver.py │ │ ├── translate_microphysics.py │ │ ├── translate_phifv3.py │ │ ├── translate_prsfv3.py │ │ ├── translate_update_pressure_sfc_winds_phys.py │ │ └── translate_update_tracers_phys.py └── tox.ini ├── pyproject.toml ├── requirements_dev.txt ├── requirements_docs.txt ├── requirements_lint.txt ├── setup.cfg ├── stencils ├── pace │ └── stencils │ │ ├── __init__.py │ │ ├── c2l_ord.py │ │ ├── corners.py │ │ ├── fv_update_phys.py │ │ ├── testing │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── grid.py │ │ ├── parallel_translate.py │ │ ├── savepoint.py │ │ ├── temporaries.py │ │ ├── test_translate.py │ │ ├── translate.py │ │ ├── translate_physics.py │ │ └── translate_update_dwind_phys.py │ │ ├── update_atmos_state.py │ │ └── update_dwind_phys.py └── setup.py ├── tests ├── main │ ├── conftest.py │ ├── driver │ │ ├── __init__.py │ │ ├── test_comm_config.py │ │ ├── test_diagnostics.py │ │ ├── test_diagnostics_config.py │ │ ├── test_docs.py │ │ ├── test_driver.py │ │ ├── test_example_configs.py │ │ ├── test_restart_fortran.py │ │ ├── test_restart_serial.py │ │ └── test_safety_checks.py │ ├── dsl │ │ ├── test_compilation_config.py │ │ ├── test_dace_config.py │ │ ├── test_skip_passes.py │ │ ├── test_stencil.py │ │ ├── test_stencil_config.py │ │ ├── test_stencil_factory.py │ │ └── test_stencil_wrapper.py │ ├── fv3core │ │ ├── test_config.py │ │ ├── test_dycore_call.py │ │ ├── test_grid.py │ │ ├── test_init_from_geos.py │ │ └── test_selective_validation.py │ ├── physics │ │ └── test_integration.py │ └── test_grid_init.py ├── mpi_54rank │ └── test_grid_init.py └── savepoint │ ├── conftest.py │ ├── test_checkpoints.py │ └── thresholds │ └── fv_dynamics.yaml └── util ├── .gitignore ├── .jenkins ├── actions │ └── test.sh ├── cache.sh ├── checksum.sh ├── env ├── jenkins.sh └── test_util.sh ├── HISTORY.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── RELEASE.rst ├── examples └── mpi │ ├── .gitignore │ ├── Makefile │ ├── global_timings.py │ └── zarr_monitor.py ├── external └── gt4py ├── mypy.ini ├── pace └── util │ ├── __init__.py │ ├── _boundary_utils.py │ ├── _capture_stream.py │ ├── _corners.py │ ├── _exceptions.py │ ├── _legacy_restart.py │ ├── _optional_imports.py │ ├── _profiler.py │ ├── _properties.py │ ├── _timing.py │ ├── _xarray.py │ ├── boundary.py │ ├── buffer.py │ ├── caching_comm.py │ ├── checkpointer │ ├── __init__.py │ ├── base.py │ ├── null.py │ ├── snapshots.py │ ├── thresholds.py │ └── validation.py │ ├── comm.py │ ├── communicator.py │ ├── constants.py │ ├── cuda_kernels.py │ ├── decomposition.py │ ├── filesystem.py │ ├── global_config.py │ ├── grid │ ├── __init__.py │ ├── eta.py │ ├── generation.py │ ├── geometry.py │ ├── global_setup.py │ ├── gnomonic.py │ ├── helper.py │ ├── mirror.py │ └── stretch_transformation.py │ ├── halo_data_transformer.py │ ├── halo_updater.py │ ├── initialization │ ├── __init__.py │ ├── allocator.py │ └── sizer.py │ ├── io.py │ ├── local_comm.py │ ├── monitor │ ├── __init__.py │ ├── 
convert.py │ ├── netcdf_monitor.py │ ├── protocol.py │ └── zarr_monitor.py │ ├── mpi.py │ ├── namelist.py │ ├── nudging.py │ ├── null_comm.py │ ├── partitioner.py │ ├── quantity.py │ ├── restart_properties.yml │ ├── rotate.py │ ├── testing │ ├── __init__.py │ ├── comparison.py │ ├── dummy_comm.py │ └── perturbation.py │ ├── time.py │ ├── types.py │ ├── units.py │ └── utils.py ├── requirements.txt ├── setup.cfg ├── setup.py ├── tests ├── checkpointer │ ├── test_snapshot.py │ ├── test_thresholds.py │ └── test_validation.py ├── conftest.py ├── data │ ├── c12_restart │ │ ├── coupler.res │ │ ├── fv_core.res.nc │ │ ├── fv_core.res.tile1.nc │ │ ├── fv_core.res.tile2.nc │ │ ├── fv_core.res.tile3.nc │ │ ├── fv_core.res.tile4.nc │ │ ├── fv_core.res.tile5.nc │ │ ├── fv_core.res.tile6.nc │ │ ├── fv_srf_wnd.res.tile1.nc │ │ ├── fv_srf_wnd.res.tile2.nc │ │ ├── fv_srf_wnd.res.tile3.nc │ │ ├── fv_srf_wnd.res.tile4.nc │ │ ├── fv_srf_wnd.res.tile5.nc │ │ ├── fv_srf_wnd.res.tile6.nc │ │ ├── fv_tracer.res.tile1.nc │ │ ├── fv_tracer.res.tile2.nc │ │ ├── fv_tracer.res.tile3.nc │ │ ├── fv_tracer.res.tile4.nc │ │ ├── fv_tracer.res.tile5.nc │ │ ├── fv_tracer.res.tile6.nc │ │ ├── phy_data.tile1.nc │ │ ├── phy_data.tile2.nc │ │ ├── phy_data.tile3.nc │ │ ├── phy_data.tile4.nc │ │ ├── phy_data.tile5.nc │ │ ├── phy_data.tile6.nc │ │ ├── sfc_data.tile1.nc │ │ ├── sfc_data.tile2.nc │ │ ├── sfc_data.tile3.nc │ │ ├── sfc_data.tile4.nc │ │ ├── sfc_data.tile5.nc │ │ └── sfc_data.tile6.nc │ ├── coupler.res │ ├── coupler_julian.res │ ├── coupler_noleap.res │ └── coupler_thirty_day.res ├── mpi │ ├── mpi_comm.py │ ├── test_mpi_halo_update.py │ └── test_mpi_mock.py ├── quantity │ ├── test_boundary.py │ ├── test_corners.py │ ├── test_deepcopy.py │ ├── test_quantity.py │ ├── test_storage.py │ ├── test_transpose.py │ └── test_view.py ├── test__capture_stream.py ├── test_buffer.py ├── test_caching_comm.py ├── test_cube_scatter_gather.py ├── test_decomposition.py ├── test_dimension_sizer.py ├── test_g2g_communication.py ├── test_get_tile_number.py ├── test_halo_data_transformer.py ├── test_halo_update.py ├── test_halo_update_ranks.py ├── test_legacy_restart.py ├── test_local_comm.py ├── test_netcdf_monitor.py ├── test_nudging.py ├── test_null_comm.py ├── test_partitioner.py ├── test_partitioner_boundaries.py ├── test_rotate.py ├── test_sync_shared_boundary.py ├── test_tile_scatter.py ├── test_tile_scatter_gather.py ├── test_timer.py └── test_zarr_monitor.py └── tox.ini /.circleci/build_and_push_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "branch: $CIRCLE_BRANCH" 4 | echo "tag: $CIRCLE_TAG" 5 | 6 | set -e 7 | set -o pipefail 8 | 9 | if [[ -z "$CIRCLE_SHA1" ]] 10 | then 11 | CIRCLE_SHA1=$(git rev-parse HEAD) 12 | fi 13 | 14 | CACHE_IMAGE="us.gcr.io/vcm-ml/pace:latest" 15 | BUILD_IMAGE="us.gcr.io/vcm-ml/pace:$CIRCLE_SHA1" 16 | 17 | if [[ -z "$GOOGLE_APPLICATION_CREDENTIALS" ]] 18 | then 19 | echo "Google authentication not configured. " 20 | echo "Please set the GOOGLE_APPLICATION_CREDENTIALS environmental variable." 
21 | exit 1 22 | fi 23 | 24 | echo $BUILD_IMAGE 25 | 26 | BUILD_FLAGS=" \ 27 | --secret id=gcp,src=$GOOGLE_APPLICATION_CREDENTIALS \ 28 | --build-arg BUILDKIT_INLINE_CACHE=1 \ 29 | --progress=plain \ 30 | --cache-from $CACHE_IMAGE \ 31 | " 32 | 33 | PACE_IMAGE="$BUILD_IMAGE" DEV=n BUILD_FLAGS="$BUILD_FLAGS" make build 34 | 35 | echo "pushing tagged images $CIRCLE_SHA1" 36 | docker push $BUILD_IMAGE 37 | docker tag $BUILD_IMAGE $CACHE_IMAGE 38 | docker push $CACHE_IMAGE 39 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | rundir/** 2 | src/** 3 | test_data/** 4 | fv3/__pycache__ 5 | fv3/.gt_cache/** 6 | .gt_cache/** 7 | .gt_cache_*/** 8 | fv3core/test_data/** 9 | .git/** 10 | .tox/** 11 | *.zarr/** 12 | .mypy_cache/** 13 | venv/** 14 | Dockerfile 15 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gitsubmodule 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "13:00" 8 | open-pull-requests-limit: 10 9 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | ## Purpose 3 | 4 | Describe the purpose of this PR and the motivation if relevant to understanding. Include links to related issues, bugs or features. 5 | 6 | Remove the sections below which do not apply. 7 | 8 | ## Code changes: 9 | 10 | - Provide a list of relevant code changes and the side effects they have on other code. 11 | 12 | ## Requirements changes: 13 | 14 | - Provide a list of any changes made to requirements, e.g. changes to files requirements*.txt, constraints.txt, setup.py, pyproject.toml, pre-commit-config.yaml and a reason if not included in the Purpose section (e.g. incompatibility, updates, etc) 15 | 16 | ## Infrastructure changes: 17 | 18 | - Provide a list of changes that impact the infrastructure around running the code -- that is, changes to Makefiles, docker files, git submodules, or .jenkins (testing infrastructure changes). If Jenkins plans are also being manually changed, indicate that as well. 19 | 20 | ## Checklist 21 | Before submitting this PR, please make sure: 22 | 23 | - [ ] You have followed the coding standards guidelines established at [Code Review Checklist](https://drive.google.com/file/d/1R0nqOxfYnzaSdoYdt8yjx5J482ETI2Ft/view?usp=sharing). 24 | - [ ] Docstrings and type hints are added to new and updated routines, as appropriate 25 | - [ ] All relevant documentation has been updated or added (e.g. 
README, CONTRIBUTING docs) 26 | - [ ] For each public change and fix in `pace-util`, HISTORY has been updated 27 | - [ ] Unit tests are added or updated for non-stencil code changes 28 | 29 | Additionally, if this PR contains code authored by new contributors: 30 | 31 | - [ ] The names of all the new contributors have been added to CONTRIBUTORS.md 32 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "external/gt4py"] 2 | path = external/gt4py 3 | url = https://github.com/gridtools/gt4py.git 4 | [submodule "buildenv"] 5 | path = buildenv 6 | url = https://github.com/ai2cm/buildenv.git 7 | -------------------------------------------------------------------------------- /.jenkins/actions/lint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | env_name=venv-${BUILD_NUMBER:-0} 4 | python3 -m venv ${env_name} 5 | . ${env_name}/bin/activate 6 | pip install pre-commit 7 | pre-commit run --all-files 8 | deactivate 9 | echo $(date) > aggregate 10 | -------------------------------------------------------------------------------- /.jenkins/actions/run_driver_parallel_regression_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | BACKEND=$1 4 | 5 | export TEST_ARGS="-v -s -rsx --backend=${BACKEND} " 6 | 7 | export CPPFLAGS="${CPPFLAGS} -Wno-unused-but-set-variable" 8 | 9 | if [ ${python_env} == "virtualenv" ]; then 10 | CONTAINER_CMD="" MPIRUN_ARGS="" DEV=n make driver_savepoint_tests_mpi 11 | else 12 | DEV=n make driver_savepoint_tests_mpi 13 | fi 14 | -------------------------------------------------------------------------------- /.jenkins/actions/run_physics_parallel_regression_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | BACKEND=$1 4 | export TEST_ARGS="-v -s -rsx --backend=${BACKEND} " 5 | 6 | if [ ${python_env} == "virtualenv" ]; then 7 | CONTAINER_CMD="" MPIRUN_ARGS="" DEV=n make physics_savepoint_tests_mpi 8 | else 9 | DEV=n make physics_savepoint_tests_mpi 10 | fi 11 | -------------------------------------------------------------------------------- /.jenkins/actions/run_physics_regression_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | BACKEND=$1 4 | XML_REPORT="sequential_test_results.xml" 5 | export TEST_ARGS="-v -s -rsx --backend=${BACKEND} " 6 | 7 | JENKINS_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../" 8 | if [ ${python_env} == "virtualenv" ]; then 9 | export TEST_ARGS="${TEST_ARGS} --junitxml=${JENKINS_DIR}/${XML_REPORT}" 10 | CONTAINER_CMD="srun" DEV=n make physics_savepoint_tests 11 | else 12 | export TEST_ARGS="${TEST_ARGS} --junitxml=/${XML_REPORT}" 13 | DEV=n make physics_savepoint_tests 14 | fi 15 | -------------------------------------------------------------------------------- /.jenkins/cache.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e -x 4 | 5 | cmd=$1 6 | key=$2 7 | paths=( "$@" ) 8 | unset paths[0] # cmd is not a path to be cached 9 | unset paths[1] # key is not a path to be cached 10 | 11 | if [ "$cmd" == "save" ] && [ "${#paths[@]}" == 0 ]; then 12 | echo "when cmd is 'save', at least one path must be given to save" 13 | exit 1 14 
| fi 15 | 16 | if [ "$cmd" != "save" ] && [ "$cmd" != "restore" ]; then 17 | echo "cmd must be one of 'save' or 'restore', got $cmd" 18 | exit 1 19 | fi 20 | 21 | if [ -z $PACE_CACHE_DIR ]; then 22 | cache_dir=~/.cache/pace 23 | else 24 | cache_dir=$PACE_CACHE_DIR 25 | fi 26 | mkdir -p $cache_dir 27 | 28 | key_cache=$cache_dir/$key.tar.gz 29 | target_dir=$(pwd) 30 | 31 | 32 | if [ "$cmd" == "save" ]; then 33 | if [ -f "$key_cache" ]; then 34 | echo "cache for key $key_cache already exists, skipping cache step" 35 | else 36 | tar -czf $key_cache ${paths[*]} 37 | echo "cache stored for key $key" 38 | fi 39 | elif [ "$cmd" == "restore" ]; then 40 | if [ -f "$key_cache" ]; then 41 | tar -xf $key_cache -C $target_dir/ 42 | echo "cache restored for key $key" 43 | else 44 | echo "cache for key $key does not exist, skipping cache restoration" 45 | fi 46 | fi 47 | -------------------------------------------------------------------------------- /.jenkins/checksum.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | echo $(md5sum $@ | md5sum | awk '{print $1;}') 6 | -------------------------------------------------------------------------------- /.jenkins/driver_configs/baroclinic_c192_54ranks.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: dace:gpu 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: true 8 | initialization: 9 | type: baroclinic 10 | diagnostics_config: 11 | path: "output.zarr" 12 | names: 13 | - ua 14 | - va 15 | - pt 16 | output_initial_state: true 17 | performance_config: 18 | collect_performance: false 19 | nx_tile: 192 20 | nz: 79 21 | dt_atmos: 200 22 | minutes: 60 23 | layout: 24 | - 3 25 | - 3 26 | dycore_config: 27 | a_imp: 1.0 28 | beta: 0. 29 | consv_te: 0. 30 | d2_bg: 0. 31 | d2_bg_k1: 0.2 32 | d2_bg_k2: 0.1 33 | d4_bg: 0.15 34 | d_con: 1.0 35 | d_ext: 0.0 36 | dddmp: 0.5 37 | delt_max: 0.002 38 | do_sat_adj: false 39 | do_vort_damp: true 40 | fill: true 41 | hord_dp: 6 42 | hord_mt: 6 43 | hord_tm: 6 44 | hord_tr: 8 45 | hord_vt: 6 46 | hydrostatic: false 47 | k_split: 7 48 | ke_bg: 0. 49 | kord_mt: 9 50 | kord_tm: -9 51 | kord_tr: 9 52 | kord_wz: 9 53 | n_split: 8 54 | nord: 3 55 | nwat: 6 56 | p_fac: 0.05 57 | rf_cutoff: 3000. 58 | rf_fast: true 59 | tau: 10. 60 | vtdm4: 0.06 61 | z_tracer: true 62 | do_qa: true 63 | tau_i2s: 1000. 64 | tau_g2v: 1200. 65 | ql_gen: 0.001 66 | ql_mlt: 0.002 67 | qs_mlt: 0.000001 68 | qi_lim: 1.0 69 | dw_ocean: 0.1 70 | dw_land: 0.15 71 | icloud_f: 0 72 | tau_l2v: 300. 73 | tau_v2l: 90. 
74 | fv_sg_adj: 0 75 | n_sponge: 48 76 | 77 | physics_config: 78 | hydrostatic: false 79 | nwat: 6 80 | do_qa: true 81 | -------------------------------------------------------------------------------- /.jenkins/driver_configs/baroclinic_c192_6ranks.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: dace:gpu 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: true 8 | initialization: 9 | type: baroclinic 10 | diagnostics_config: 11 | path: "output.zarr" 12 | names: 13 | - ua 14 | - va 15 | - pt 16 | output_initial_state: true 17 | performance_config: 18 | collect_performance: true 19 | experiment_name: c192_baroclinic 20 | nx_tile: 192 21 | nz: 79 22 | dt_atmos: 200 23 | minutes: 60 24 | layout: 25 | - 1 26 | - 1 27 | dycore_config: 28 | a_imp: 1.0 29 | beta: 0. 30 | consv_te: 0. 31 | d2_bg: 0. 32 | d2_bg_k1: 0.2 33 | d2_bg_k2: 0.1 34 | d4_bg: 0.15 35 | d_con: 1.0 36 | d_ext: 0.0 37 | dddmp: 0.5 38 | delt_max: 0.002 39 | do_sat_adj: false 40 | do_vort_damp: true 41 | fill: true 42 | hord_dp: 6 43 | hord_mt: 6 44 | hord_tm: 6 45 | hord_tr: 8 46 | hord_vt: 6 47 | hydrostatic: false 48 | k_split: 7 49 | ke_bg: 0. 50 | kord_mt: 9 51 | kord_tm: -9 52 | kord_tr: 9 53 | kord_wz: 9 54 | n_split: 8 55 | nord: 3 56 | nwat: 6 57 | p_fac: 0.05 58 | rf_cutoff: 3000. 59 | rf_fast: true 60 | tau: 10. 61 | vtdm4: 0.06 62 | z_tracer: true 63 | do_qa: true 64 | tau_i2s: 1000. 65 | tau_g2v: 1200. 66 | ql_gen: 0.001 67 | ql_mlt: 0.002 68 | qs_mlt: 0.000001 69 | qi_lim: 1.0 70 | dw_ocean: 0.1 71 | dw_land: 0.15 72 | icloud_f: 0 73 | tau_l2v: 300. 74 | tau_v2l: 90. 75 | fv_sg_adj: 0 76 | n_sponge: 48 77 | 78 | physics_config: 79 | hydrostatic: false 80 | nwat: 6 81 | do_qa: true 82 | -------------------------------------------------------------------------------- /.jenkins/driver_configs/baroclinic_c48_6ranks_dycore_only.yaml: -------------------------------------------------------------------------------- 1 | dycore_only: true 2 | disable_step_physics: true 3 | stencil_config: 4 | compilation_config: 5 | backend: gt:gpu 6 | rebuild: false 7 | validate_args: true 8 | format_source: false 9 | device_sync: false 10 | run_mode: Run 11 | initialization: 12 | type: baroclinic 13 | performance_config: 14 | collect_performance: false 15 | nx_tile: 48 16 | nz: 79 17 | dt_atmos: 225 18 | seconds: 4500 19 | layout: 20 | - 1 21 | - 1 22 | diagnostics_config: 23 | path: "output.zarr" 24 | names: 25 | - ua 26 | - va 27 | - pt 28 | dycore_config: 29 | a_imp: 1.0 30 | beta: 0. 31 | consv_te: 0. 32 | d2_bg: 0. 33 | d2_bg_k1: 0.2 34 | d2_bg_k2: 0.1 35 | d4_bg: 0.15 36 | d_con: 1.0 37 | d_ext: 0.0 38 | dddmp: 0.5 39 | delt_max: 0.002 40 | do_sat_adj: true 41 | do_vort_damp: true 42 | fill: true 43 | hord_dp: 6 44 | hord_mt: 6 45 | hord_tm: 6 46 | hord_tr: 8 47 | hord_vt: 6 48 | hydrostatic: false 49 | k_split: 1 50 | ke_bg: 0. 51 | kord_mt: 9 52 | kord_tm: -9 53 | kord_tr: 9 54 | kord_wz: 9 55 | n_split: 1 56 | nord: 3 57 | nwat: 6 58 | p_fac: 0.05 59 | rf_cutoff: 3000. 60 | rf_fast: true 61 | tau: 10. 62 | vtdm4: 0.06 63 | z_tracer: true 64 | do_qa: true 65 | tau_i2s: 1000. 66 | tau_g2v: 1200. 67 | ql_gen: 0.001 68 | ql_mlt: 0.002 69 | qs_mlt: 0.000001 70 | qi_lim: 1.0 71 | dw_ocean: 0.1 72 | dw_land: 0.15 73 | icloud_f: 0 74 | tau_l2v: 300. 75 | tau_v2l: 90. 
76 | fv_sg_adj: 0 77 | n_sponge: 48 78 | 79 | physics_config: 80 | hydrostatic: false 81 | nwat: 6 82 | do_qa: true 83 | -------------------------------------------------------------------------------- /.jenkins/driver_configs/baroclinic_c48_6ranks_dycore_only_serialbox.yaml: -------------------------------------------------------------------------------- 1 | dycore_only: true 2 | disable_step_physics: true 3 | stencil_config: 4 | compilation_config: 5 | backend: gt:gpu 6 | rebuild: false 7 | validate_args: true 8 | format_source: false 9 | device_sync: false 10 | run_mode: Run 11 | initialization: 12 | type: serialbox 13 | config: 14 | path: /project/s1053/fv3core_serialized_test_data/8.1.1/c48_6ranks_baroclinic/driver 15 | serialized_grid: grid_option 16 | performance_config: 17 | collect_performance: false 18 | nx_tile: 48 19 | nz: 79 20 | dt_atmos: 225 21 | seconds: 4500 22 | layout: 23 | - 1 24 | - 1 25 | diagnostics_config: 26 | path: "output.zarr" 27 | names: 28 | - ua 29 | - va 30 | - pt 31 | dycore_config: 32 | namelist_override: /project/s1053/fv3core_serialized_test_data/8.1.1/c48_6ranks_baroclinic/driver/input.nml 33 | 34 | physics_config: 35 | namelist_override: /project/s1053/fv3core_serialized_test_data/8.1.1/c48_6ranks_baroclinic/driver/input.nml 36 | -------------------------------------------------------------------------------- /.jenkins/driver_performance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Summary: 4 | # Jenkins plan (only working on Piz daint) to run dace orchestration and gather performance numbers. 5 | 6 | ## Syntax: 7 | # .jenkins/action/driver_performance.sh 8 | 9 | JENKINS_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 10 | PACE_DIR=$JENKINS_DIR/../ 11 | export VIRTUALENV=${PACE_DIR}/venv 12 | ${JENKINS_DIR}/install_virtualenv.sh ${VIRTUALENV} 13 | source ${VIRTUALENV}/bin/activate 14 | 15 | BUILDENV_DIR=$PACE_DIR/buildenv 16 | . 
${BUILDENV_DIR}/schedulerTools.sh 17 | 18 | mkdir -p ${PACE_DIR}/test_perf 19 | cd $PACE_DIR/test_perf 20 | cat << EOF > run.daint.slurm 21 | #!/bin/bash 22 | #SBATCH --constraint=gpu 23 | #SBATCH --job-name=c192_pace_driver 24 | #SBATCH --ntasks=6 25 | #SBATCH --ntasks-per-node=1 26 | #SBATCH --cpus-per-task=1 27 | #SBATCH --output=driver.out 28 | #SBATCH --time=00:45:00 29 | #SBATCH --gres=gpu:1 30 | #SBATCH --account=go31 31 | #SBATCH --partition=normal 32 | ######################################################## 33 | set -x 34 | export OMP_NUM_THREADS=12 35 | export FV3_DACEMODE=BuildAndRun 36 | srun python -m pace.driver.run ${JENKINS_DIR}/driver_configs/baroclinic_c192_6ranks.yaml 37 | EOF 38 | launch_job run.daint.slurm 3600 39 | 40 | python ${JENKINS_DIR}/print_performance_number.py 41 | cp *.json driver.out /project/s1053/performance/fv3core_performance/dace_gpu 42 | -------------------------------------------------------------------------------- /.jenkins/fetch_caches.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | BACKEND=$1 3 | EXPNAME=$2 4 | CACHE_TYPE=$3 5 | SANITIZED_BACKEND=`echo $BACKEND | sed 's/:/_/g'` #sanitize the backend from any ':' 6 | CACHE_DIR="/scratch/snx3000/olifu/jenkins/scratch/gt_caches_v1/${CACHE_TYPE}/${EXPNAME}/${SANITIZED_BACKEND}" 7 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 8 | PACE_DIR=$SCRIPT_DIR/../ 9 | 10 | if [ -z "${GT4PY_VERSION}" ]; then 11 | export GT4PY_VERSION=`git submodule status ${PACE_DIR}/external/gt4py | awk '{print $1;}'` 12 | fi 13 | 14 | if ! compgen -G ./.gt_cache* > /dev/null; then 15 | if [ -d ${CACHE_DIR} ]; then 16 | cache_filename=${CACHE_DIR}/${GT4PY_VERSION}.tar.gz 17 | if [ -f "${cache_filename}" ]; then 18 | tar -xzf ${cache_filename} -C . 19 | echo ".gt_cache successfully fetched from ${cache_filename}" 20 | else 21 | echo ".gt_cache not fetched, cache not found at ${cache_filename}" 22 | fi 23 | fi 24 | else 25 | echo "WARNING: $(pwd)/.gt_cache already exists. Will not overwrite directory with caches." 26 | echo "Please remove this directory and try again." 
27 | fi 28 | -------------------------------------------------------------------------------- /.jenkins/generate_caches.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to generate gt caches on Piz Daint 4 | # Syntax: 5 | # .jenkins/generate_caches.sh 6 | 7 | 8 | # stop on all errors and echo commands 9 | set -e -x 10 | 11 | # utility function for error handling 12 | exitError() 13 | { 14 | echo "ERROR $1: $3" 1>&2 15 | echo "ERROR LOCATION=$0" 1>&2 16 | echo "ERROR LINE=$2" 1>&2 17 | exit $1 18 | } 19 | 20 | backend=$1 21 | experiment=$2 22 | cache_type=$3 23 | SANITIZED_BACKEND=`echo $backend | sed 's/:/_/g'` #sanitize the backend from any ':' 24 | CACHE_DIR="/scratch/snx3000/olifu/jenkins/scratch/gt_caches_v1/${cache_type}/${experiment}/${SANITIZED_BACKEND}/" 25 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 26 | PACE_DIR=$SCRIPT_DIR/../ 27 | 28 | if [ -z "${GT4PY_VERSION}" ]; then 29 | export GT4PY_VERSION=`git submodule status ${PACE_DIR}/external/gt4py | awk '{print $1;}'` 30 | echo "GT4PY_VERSION is ${GT4PY_VERSION}" 31 | fi 32 | 33 | CACHE_FILENAME=${CACHE_DIR}/${GT4PY_VERSION}.tar.gz 34 | 35 | test -n "${experiment}" || exitError 1001 ${LINENO} "experiment is not defined" 36 | test -n "${SANITIZED_BACKEND}" || exitError 1002 ${LINENO} "backend is not defined" 37 | 38 | # store cache artifacts (and remove caches afterwards) 39 | echo "Pruning cache to make sure no __pycache__ and *_pyext_BUILD dirs are present" 40 | find .gt_cache* -type d -name \*_pyext_BUILD -prune -exec \rm -rf {} \; 41 | find .gt_cache* -type d -name __pycache__ -prune -exec \rm -rf {} \; 42 | echo "Copying GT4Py cache directories to ${CACHE_DIR}" 43 | mkdir -p ${CACHE_DIR} 44 | tar -czf _tmp .gt_cache* 45 | mv _tmp ${CACHE_FILENAME} 46 | -------------------------------------------------------------------------------- /.jenkins/install_virtualenv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | exitError() 4 | { 5 | echo "ERROR $1: $3" 1>&2 6 | echo "ERROR LOCATION=$0" 1>&2 7 | echo "ERROR LINE=$2" 1>&2 8 | exit $1 9 | } 10 | 11 | 12 | # check a virtualenv path has been provided 13 | test -n "$1" || exitError 1001 ${LINENO} "must pass an argument" 14 | JENKINS_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 15 | PACE_DIR=$JENKINS_DIR/../ 16 | echo "pace path is ${PACE_DIR}" 17 | 18 | if [ "$WHEEL_DIR" != "" ]; then 19 | wheel_command="--find-links=$WHEEL_DIR" 20 | else 21 | wheel_command="" 22 | fi 23 | virtualenv_path=$1 24 | 25 | set -e -x 26 | 27 | workdir=$(pwd) 28 | git submodule update --init ${PACE_DIR}/external/daint_venv 29 | git submodule update --init ${PACE_DIR}/external/gt4py 30 | ${PACE_DIR}/external/daint_venv/install.sh ${virtualenv_path} 31 | source ${virtualenv_path}/bin/activate 32 | 33 | workdir=$(pwd) 34 | cd ${PACE_DIR} 35 | python3 -m pip install $wheel_command -r ${PACE_DIR}/requirements_dev.txt -c ${PACE_DIR}/constraints.txt 36 | # these have to be installed in non-develop mode because the directory they are built from 37 | # gets deleted before the tests run on daint 38 | python3 -m pip install ${PACE_VENV_INSTALL_PREFIX} ${PACE_DIR}/driver ${PACE_VENV_INSTALL_PREFIX} ${PACE_DIR}/dsl ${PACE_VENV_INSTALL_PREFIX} ${PACE_DIR}/fv3core ${PACE_VENV_INSTALL_PREFIX} ${PACE_DIR}/physics ${PACE_VENV_INSTALL_PREFIX} ${PACE_DIR}/stencils ${PACE_VENV_INSTALL_PREFIX} ${PACE_DIR}/util 
${PACE_VENV_INSTALL_PREFIX} ${PACE_DIR}/external/gt4py -c ${PACE_DIR}/constraints.txt 39 | cd ${workdir} 40 | 41 | deactivate 42 | -------------------------------------------------------------------------------- /.jenkins/pace_physics_cache_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 3 | PACE_DIR=$SCRIPT_DIR/../ 4 | if [[ "${NODE_NAME}" == *"daint"* ]] ; then source ~/.bashrc ; fi 5 | set -e 6 | export LONG_EXECUTION=1 7 | .jenkins/jenkins.sh run_physics_regression_tests ${backend} ${experiment} 8 | git -C buildenv reset --hard HEAD 9 | .jenkins/jenkins.sh run_physics_parallel_regression_tests ${backend} ${experiment} 10 | git -C buildenv reset --hard HEAD 11 | .jenkins/jenkins.sh run_driver_parallel_regression_tests ${backend} ${experiment} 12 | cd ${PACE_DIR}/physics 13 | ${PACE_DIR}/.jenkins/generate_caches.sh ${backend} ${experiment} physics 14 | cd ${PACE_DIR} 15 | ${PACE_DIR}/.jenkins/generate_caches.sh ${backend} ${experiment} driver 16 | -------------------------------------------------------------------------------- /.jenkins/print_performance_number.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | import numpy as np 5 | 6 | 7 | for x in os.listdir(): 8 | if x.endswith(".json"): 9 | f = open(x) 10 | data = json.load(f) 11 | for rank in range(6): 12 | print( 13 | f"Rank {rank}, mainloop average time: \ 14 | {np.mean(data['times']['mainloop']['times'][rank][1:])}" 15 | ) 16 | -------------------------------------------------------------------------------- /.jenkins/test_driver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | JENKINS_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | 5 | PACE_IMAGE="driver_image" make -C ${JENKINS_DIR}/.. build 6 | docker run --rm driver_image make -C /pace/driver test test_mpi 7 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-20.04 11 | tools: 12 | python: "3.8" 13 | 14 | # Build documentation in the docs/ directory with Sphinx 15 | sphinx: 16 | configuration: docs/conf.py 17 | 18 | # If using Sphinx, optionally build your docs in additional formats such as PDF 19 | # formats: 20 | # - pdf 21 | 22 | # Optionally declare the Python requirements required to build your docs 23 | python: 24 | install: 25 | - requirements: requirements_docs.txt 26 | -------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | List format (alphabetical order): Surname, Name. Employer/Affiliation 4 | 5 | * Cheeseman, Mark. Vulcan Inc. 6 | * Dahm, Johann. Allen Institute for AI. 7 | * Davis, Eddie. Allen Institute for AI. 8 | * Deconinck, Florian. Allen Institute for AI. 9 | * Elbert, Oliver. Allen Institute for AI. 10 | * Fuhrer, Oliver. Allen Institute for AI. 11 | * George, Rhea. Allen Institute for AI. 
12 | * Harris, Lucas. GFDL. 13 | * Kung, Chris. NASA. 14 | * McGibbon, Jeremy. Allen Institute for AI. 15 | * Niedermayr, Yannick. ETH. 16 | * Savarin, Ajda. University of Washington. 17 | * Wicky, Tobias. Allen Institute for AI. 18 | * Wu, Elynn. Allen Institute for AI. 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8.13-bullseye@sha256:2a01d88a1684e6d7f08030cf5ae73b536926c64076cab197e9e3d9f699255283 2 | 3 | RUN apt-get update && apt-get install -y make \ 4 | software-properties-common \ 5 | libopenmpi3 \ 6 | libopenmpi-dev \ 7 | libboost-all-dev \ 8 | libhdf5-serial-dev \ 9 | netcdf-bin \ 10 | libnetcdf-dev \ 11 | python3 \ 12 | python3-pip 13 | 14 | RUN pip3 install --upgrade setuptools wheel 15 | 16 | COPY constraints.txt /pace/constraints.txt 17 | 18 | RUN pip3 install -r /pace/constraints.txt 19 | 20 | COPY . /pace 21 | 22 | RUN cd /pace && \ 23 | pip3 install -r /pace/requirements_dev.txt -c /pace/constraints.txt 24 | 25 | ENV OMPI_ALLOW_RUN_AS_ROOT=1 26 | ENV OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 27 | -------------------------------------------------------------------------------- /Makefile.data_download: -------------------------------------------------------------------------------- 1 | 2 | REGRESSION_DATA_STORAGE_BUCKET = gs://vcm-fv3gfs-serialized-regression-data 3 | ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) 4 | EXPERIMENT ?=c12_6ranks_standard 5 | FORTRAN_SERIALIZED_DATA_VERSION=8.1.3 6 | TARGET ?=dycore 7 | TEST_DATA_ROOT ?=$(ROOT_DIR)/test_data/ 8 | TEST_DATA_HOST ?=$(TEST_DATA_ROOT)/$(FORTRAN_SERIALIZED_DATA_VERSION)/$(EXPERIMENT)/$(TARGET) 9 | EXPERIMENT_DATA ?=$(TEST_DATA_ROOT)/$(FORTRAN_SERIALIZED_DATA_VERSION)/$(EXPERIMENT) 10 | FTP_SERVER ?=ftp://anonymous:anonymous@ftp.cscs.ch 11 | TEST_DATA_FTP ?=in/put/abc/cosmo/fuo/pace/fv3core/ 12 | 13 | TEST_DATA_TARFILE=dat_files.tar.gz 14 | TEST_DATA_TARPATH=$(TEST_DATA_HOST)/$(TEST_DATA_TARFILE) 15 | 16 | DATA_BUCKET= $(REGRESSION_DATA_STORAGE_BUCKET)/$(FORTRAN_SERIALIZED_DATA_VERSION)/$(EXPERIMENT)/$(TARGET)/ 17 | 18 | sync_test_data: 19 | mkdir -p $(TEST_DATA_HOST) && gsutil -m rsync -r $(DATA_BUCKET) $(TEST_DATA_HOST) 20 | 21 | sync_test_data_from_ftp: 22 | mkdir -p $(TEST_DATA_HOST) && cd $(TEST_DATA_ROOT) && lftp -c "set ftp:list-options -a; open $(FTP_SERVER); cd $(TEST_DATA_FTP); mirror --delete --use-cache --verbose --allow-chown --allow-suid --no-umask --parallel=2 --max-errors=0 . ." 23 | 24 | get_test_data: 25 | if [ -z "${USE_FTP}" ] ; then \ 26 | if [ ! 
-f "$(TEST_DATA_HOST)/input.nml" ] || \ 27 | [ "$$(gsutil cp $(DATA_BUCKET)md5sums.txt -)" != "$$(cat $(TEST_DATA_HOST)/md5sums.txt)" ] ; then \ 28 | rm -rf $(TEST_DATA_HOST) ; \ 29 | $(MAKE) sync_test_data ; \ 30 | $(MAKE) unpack_test_data ; \ 31 | fi ; \ 32 | else \ 33 | $(MAKE) sync_test_data_from_ftp ; \ 34 | $(MAKE) unpack_test_data ; \ 35 | fi 36 | 37 | unpack_test_data: 38 | if [ -f $(TEST_DATA_TARPATH) ]; then \ 39 | cd $(TEST_DATA_HOST) && tar -xf $(TEST_DATA_TARFILE) && \ 40 | rm $(TEST_DATA_TARFILE); \ 41 | fi 42 | 43 | list_test_data_options: 44 | gsutil ls $(REGRESSION_DATA_STORAGE_BUCKET)/$(FORTRAN_SERIALIZED_DATA_VERSION) 45 | -------------------------------------------------------------------------------- /dependencies.dot: -------------------------------------------------------------------------------- 1 | # this dotfile is used as a reference source for project dependencies 2 | # each folder entry must have a "label" equal to its directory name 3 | # 4 | # If you update this file, please re-generate the svg with `make dependencies.svg` 5 | # and commit it to the repository 6 | 7 | digraph { 8 | pace [shape=box] 9 | fv3core [shape=oval, label="fv3core"] 10 | driver [shape=oval, label="driver"] 11 | physics [shape=oval, label="physics"] 12 | stencils [shape=oval, label="stencils"] 13 | util [shape=oval, label="util"] 14 | dsl [shape=oval, label="dsl"] 15 | 16 | pace -> driver 17 | driver -> fv3core 18 | driver -> physics 19 | driver -> util 20 | fv3core -> util 21 | fv3core -> stencils 22 | fv3core -> dsl 23 | physics -> util 24 | physics -> stencils 25 | physics -> dsl 26 | stencils -> util 27 | stencils -> dsl 28 | 29 | } 30 | -------------------------------------------------------------------------------- /docker/Makefile.image_names: -------------------------------------------------------------------------------- 1 | GCR_URL ?= us.gcr.io/vcm-ml 2 | PACE_IMAGE?=$(GCR_URL)/pace 3 | -------------------------------------------------------------------------------- /docker/postprocessing.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | 5 | RUN apt-get update &&\ 6 | apt install -y --no-install-recommends \ 7 | software-properties-common 8 | 9 | RUN apt-get update -y && \ 10 | apt-get install -y --no-install-recommends\ 11 | g++ \ 12 | gcc \ 13 | gfortran \ 14 | libproj-dev \ 15 | proj-data \ 16 | proj-bin \ 17 | libgeos-dev 18 | 19 | RUN apt-get update -y && \ 20 | apt install -y --no-install-recommends \ 21 | git \ 22 | python3-pip \ 23 | python3.10 \ 24 | python3.10-dev &&\ 25 | rm -rf /var/lib/apt/lists/* && \ 26 | apt-get clean 27 | 28 | RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.10 60 29 | 30 | RUN apt-get update -y &&\ 31 | apt install -y --no-install-recommends\ 32 | python3-pip 33 | 34 | RUN python -m pip --no-cache-dir install --upgrade pip && \ 35 | python -m pip --no-cache-dir install setuptools &&\ 36 | python -m pip --no-cache-dir install wheel 37 | 38 | RUN python -m pip --no-cache-dir \ 39 | install \ 40 | numpy \ 41 | matplotlib \ 42 | cython \ 43 | cartopy \ 44 | xarray \ 45 | zarr 46 | 47 | # set up for fv3viz 48 | RUN cd / 49 | RUN git clone https://github.com/ai2cm/fv3net.git 50 | RUN cd fv3net && git checkout 1d168ef 51 | RUN python -m pip install fv3net/external/vcm 52 | ENV PYTHONPATH=/fv3net/external/fv3viz 53 | -------------------------------------------------------------------------------- /docs/Makefile: 
-------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/docker.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ====== 4 | Docker 5 | ====== 6 | 7 | While it is possible to install and build Pace on bare metal, we can ensure all system libraries are installed with the correct versions by using a Docker container to test and develop Pace. 8 | This requires that you have Docker installed (we recommend `Docker Desktop`_ for most users). 9 | You may need to increase the memory allocated to Docker in its settings. 10 | 11 | Before building the Docker image, you will need to update the git submodules so that all dependencies are cloned and at the correct version: 12 | 13 | .. code-block:: console 14 | 15 | $ git submodule update --init --recursive 16 | 17 | Then build the `pace` docker image at the top level: 18 | 19 | .. code-block:: console 20 | 21 | $ make build 22 | 23 | .. _`Docker Desktop`: https://www.docker.com/ 24 | -------------------------------------------------------------------------------- /docs/fv3.rst: -------------------------------------------------------------------------------- 1 | === 2 | FV3 3 | === 4 | 5 | This page will include general historical information about FV3, including external links to docs. 6 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Pace documentation 2 | ================== 3 | 4 | Pace is an implementation of the FV3GFS / SHiELD atmospheric model developed by NOAA/GFDL using the GT4Py domain-specific language in Python. 5 | The model can be run on a laptop using a Python-based backend or on thousands of heterogeneous compute nodes of a large supercomputer. 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | :caption: Contents: 10 | 11 | overview 12 | fv3 13 | installation 14 | docker 15 | testing 16 | util/index 17 | physics/index 18 | 19 | 20 | Indices and tables 21 | ================== 22 | 23 | * :ref:`genindex` 24 | * :ref:`modindex` 25 | * :ref:`search` 26 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Installation 5 | ============ 6 | 7 | Shell scripts to install Pace on specific machines such as Gaea can be found in `examples/build_scripts/`. 8 | 9 | When cloning Pace, you will need to update the repository's submodules as well: 10 | 11 | .. 
code-block:: console 12 | 13 | $ git clone --recursive https://github.com/ai2cm/pace.git 14 | 15 | or if you have already cloned the repository: 16 | 17 | .. code-block:: console 18 | 19 | $ git submodule update --init --recursive 20 | 21 | 22 | Pace requires GCC > 9.2, MPI, and Python 3.8 on your system, and CUDA is required to run with a GPU backend. 23 | You will also need the headers of the boost libraries in your `$PATH` (boost itself does not need to be installed). 24 | If installed outside the standard header locations, gt4py requires that `$BOOST_ROOT` be set: 25 | 26 | .. code-block:: console 27 | 28 | $ cd BOOST/ROOT 29 | $ wget https://boostorg.jfrog.io/artifactory/main/release/1.79.0/source/boost_1_79_0.tar.gz 30 | $ tar -xzf boost_1_79_0.tar.gz 31 | $ mkdir -p boost_1_79_0/include 32 | $ mv boost_1_79_0/boost boost_1_79_0/include/ 33 | $ export BOOST_ROOT=BOOST/ROOT/boost_1_79_0 34 | 35 | 36 | We recommend creating a python `venv` or conda environment specifically for Pace. 37 | 38 | .. code-block:: console 39 | 40 | $ python3 -m venv venv_name 41 | $ source venv_name/bin/activate 42 | 43 | Inside of your pace `venv` or conda environment pip install the Python requirements, GT4Py, and Pace: 44 | 45 | .. code-block:: console 46 | 47 | $ pip3 install -r requirements_dev.txt -c constraints.txt 48 | 49 | There are also separate requirements files which can be installed for linting (`requirements_lint.txt`) and building documentation (`requirements_docs.txt`). 50 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/overview.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Overview 3 | ======== 4 | -------------------------------------------------------------------------------- /docs/physics/api.rst: -------------------------------------------------------------------------------- 1 | API 2 | === 3 | 4 | .. automodule:: pace.physics 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/physics/index.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | pace-physics 3 | ============ 4 | 5 | pace-physics includes the Python implementation of GFS physics built using the GT4Py domain-specific language. 6 | Currently, only GFDL cloud microphysics is integrated into Pace. 
7 | Additional physics schemes (NOAH land surface, GFS sea ice, scale-aware mass-flux shallow convection, hybrid eddy-diffusivity mass-flux PBL and free atmospheric turbulence, and rapid radiative transfer model) have been ported independently and are available in the `physics-standalone`_ repository. 8 | Additional work is required to integrate these schemes. 9 | 10 | .. toctree:: 11 | :maxdepth: 2 12 | :caption: Contents: 13 | 14 | state 15 | api 16 | 17 | Indices and tables 18 | ------------------ 19 | * :ref:`genindex` 20 | * :ref:`modindex` 21 | * :ref:`search` 22 | 23 | .. _physics-standalone: https://github.com/ai2cm/physics_standalone 24 | -------------------------------------------------------------------------------- /docs/physics/microphysics.rst: -------------------------------------------------------------------------------- 1 | .. _microphysics: 2 | 3 | ============ 4 | Microphysics 5 | ============ 6 | 7 | Description 8 | ----------- 9 | The GFDL cloud microphysics scheme has been integrated and tested in Pace. 10 | See the documentation in `CCPP`_ for more details on the microphysics scheme. 11 | 12 | .. image:: gfdl_cloud_mp_diagram.png 13 | :width: 600 14 | :alt: Diagram of the GFDL cloud microphysics scheme 15 | 16 | .. _CCPP: https://dtcenter.ucar.edu/GMTB/v5.0.0/sci_doc/GFDL_cloud.html 17 | -------------------------------------------------------------------------------- /docs/physics/state.rst: -------------------------------------------------------------------------------- 1 | ===== 2 | State 3 | ===== 4 | 5 | Containers 6 | ---------- 7 | Variables used in physics are packaged using a container type called :py:class:`pace.physics.PhysicsState`. 8 | This contains variables copied from the dynamical core for calculating physics tendencies. 9 | It also contains sub-containers for the individual physics schemes. Currently, it only contains :py:class:`pace.physics.MicrophysicsState`. 10 | 11 | You can initialize a zero-filled PhysicsState and MicrophysicsState from other Pace objects as follows: 12 | 13 | .. doctest:: 14 | 15 | >>> from pace.util import ( 16 | ... CubedSphereCommunicator, 17 | ... CubedSpherePartitioner, 18 | ... Quantity, 19 | ... QuantityFactory, 20 | ... SubtileGridSizer, 21 | ... TilePartitioner, 22 | ... NullComm, 23 | ... ) 24 | >>> from pace.physics import PhysicsState 25 | >>> layout = (1, 1) 26 | >>> partitioner = CubedSpherePartitioner(TilePartitioner(layout)) 27 | >>> communicator = CubedSphereCommunicator(NullComm(rank=0, total_ranks=6), partitioner) 28 | >>> sizer = SubtileGridSizer.from_tile_params( 29 | ... nx_tile=12, 30 | ... ny_tile=12, 31 | ... nz=79, 32 | ... n_halo=3, 33 | ... extra_dim_lengths={}, 34 | ... layout=layout, 35 | ... tile_partitioner=partitioner.tile, 36 | ... tile_rank=communicator.tile.rank, 37 | ... ) 38 | 39 | >>> quantity_factory = QuantityFactory.from_backend(sizer=sizer, backend="numpy") 40 | >>> physics_state = PhysicsState.init_zeros( 41 | ... quantity_factory=quantity_factory, active_packages=["microphysics"] 42 | ... ) 43 | >>> microphysics_state = physics_state.microphysics 44 | -------------------------------------------------------------------------------- /docs/util/api.rst: -------------------------------------------------------------------------------- 1 | API 2 | === 3 | 4 | .. 
automodule:: pace.util 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/util/history.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | .. include:: ../HISTORY.md 6 | -------------------------------------------------------------------------------- /docs/util/index.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | pace-util 3 | ========= 4 | 5 | pace-util is a toolkit for building Python weather and climate models. 6 | Its features can seem disjoint, which is by design - you can choose which functionality you want to use and leave the rest. 7 | It is currently used to contain pure Python utilities shared by `fv3gfs-wrapper`_ and `fv3core`_. 8 | As the number of features increases, we may move its functionality into separate packages to reduce the dependency stack. 9 | 10 | Some broad categories of features are: 11 | 12 | - :py:class:`pace.util.Quantity`, the data type used by pace-util described in the section on :ref:`State` 13 | - :ref:`Communication` objects used for MPI 14 | - Utility functions useful in weather and climate models, described in :ref:`Utilities` 15 | 16 | .. toctree:: 17 | :maxdepth: 2 18 | :caption: Contents: 19 | 20 | installation 21 | state 22 | communication 23 | utilities 24 | api 25 | history 26 | 27 | Indices and tables 28 | ================== 29 | * :ref:`genindex` 30 | * :ref:`modindex` 31 | * :ref:`search` 32 | 33 | .. _fv3gfs-wrapper: https://github.com/VulcanClimateModeling/fv3gfs-wrapper 34 | .. _fv3core: https://github.com/VulcanClimateModeling/fv3core 35 | -------------------------------------------------------------------------------- /docs/util/installation.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Installation 5 | ============ 6 | 7 | Stable release 8 | -------------- 9 | 10 | There is no stable release. This is alpha research software: use at your own risk! 11 | 12 | From sources 13 | ------------ 14 | 15 | The sources for pace-util can be downloaded from the `Github repo`_. 16 | To develop pace-util, you can clone the public repository: 17 | 18 | .. code-block:: console 19 | 20 | $ git clone git://github.com/ai2cm/pace 21 | 22 | Once you have a copy of the source, you can install it in develop mode from the Pace top level directory with: 23 | 24 | .. code-block:: console 25 | 26 | $ pip install -r ./pace-util/requirements.txt -c ./constraints.txt -e pace-util 27 | 28 | The `-e` flag will set up the directory so that python uses the local folder including 29 | any modifications, instead of copying the sources to an installation directory. This 30 | is very useful for development. The `-r requirements.txt` will install extra packages 31 | useful for test, lint & other development requirements. 32 | 33 | The `-c ./constraints.txt` is optional, but will ensure the package versions you use are ones we have tested against. 34 | 35 | .. 
_Github repo: https://github.com/VulcanClimateModeling/pace-util 36 | -------------------------------------------------------------------------------- /driver/Makefile: -------------------------------------------------------------------------------- 1 | MPIRUN_CALL ?= mpirun -n 6 2 | 3 | test_mpi: 4 | MPIRUN_CALL="$(MPIRUN_CALL)" pytest tests/mpi 5 | $(MPIRUN_CALL) python3 -m mpi4py -m pace.driver.run examples/configs/baroclinic_c12.yaml 6 | cd examples && MPIRUN_CALL="$(MPIRUN_CALL)" ./write_then_read.sh 7 | 8 | clean: 9 | rm -rf *.json 10 | rm -rf RESTART 11 | rm -rf baroclinic_c12_*.yaml 12 | rm -rf *output.zarr 13 | -------------------------------------------------------------------------------- /driver/README.md: -------------------------------------------------------------------------------- 1 | # pace-driver 2 | 3 | This package provides command-line routines to run the Pace model, and utilities for writing model driver scripts. 4 | 5 | We suggest reading the code in the examples directory, or taking a look at `pace/driver/run.py`, to see how the main entrypoint for this package works. 6 | 7 | # Usage 8 | 9 | Usage examples exist in the examples directory. 10 | The command-line interface may be run serially in certain debugging modes, but usually you will want to run it with an MPI launcher such as `mpirun`. 11 | 12 | ```bash 13 | $ python3 -m pace.driver.run --help 14 | Usage: python -m pace.driver.run [OPTIONS] CONFIG_PATH 15 | 16 | Run the driver. 17 | 18 | CONFIG_PATH is the path to a DriverConfig yaml file. 19 | 20 | Options: 21 | --log-rank INTEGER rank to log from, or all ranks by default, ignored if 22 | running without MPI 23 | --log-level TEXT one of 'debug', 'info', 'warning', 'error', 'critical' 24 | --help Show this message and exit. 25 | ``` 26 | 27 | A DriverConfig YAML file is the YAML representation of a `DriverConfig` object, which is defined in this package's source.
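
The same objects the CLI uses can also be driven from a Python script. Below is a minimal sketch, modeled on `examples/baroclinic_init.py`; the `step_all` call is our assumption for the method that runs the configured number of timesteps, and the config path is just one of the example configs.

```python
import yaml

from pace.driver import Driver, DriverConfig

# Parse the YAML into a DriverConfig, then construct the Driver from it.
with open("examples/configs/baroclinic_c12.yaml") as f:
    driver_config = DriverConfig.from_dict(yaml.safe_load(f))

driver = Driver(config=driver_config)
driver.step_all()  # assumed entrypoint: runs all timesteps set in the config
```

As with the command-line interface, such a script would normally be launched under MPI, e.g. `mpirun -n 6 python3 my_driver_script.py`.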
28 | -------------------------------------------------------------------------------- /driver/examples/baroclinic_init.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 3 | import yaml 4 | 5 | from pace.driver.run import Driver, DriverConfig 6 | 7 | 8 | def parse_args(): 9 | usage = "usage: python %(prog)s config_file" 10 | parser = ArgumentParser(usage=usage) 11 | 12 | parser.add_argument( 13 | "config_file", 14 | type=str, 15 | action="store", 16 | help="which config file to use", 17 | ) 18 | return parser.parse_args() 19 | 20 | 21 | args = parse_args() 22 | with open(args.config_file, "r") as f: 23 | driver_config = DriverConfig.from_dict(yaml.safe_load(f)) 24 | driver = Driver(config=driver_config) 25 | driver.diagnostics.store(time=driver.config.start_time, state=driver.state) 26 | driver.diagnostics.store_grid(grid_data=driver.state.grid_data) 27 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c12.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: numpy 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: false 8 | initialization: 9 | type: baroclinic 10 | performance_config: 11 | collect_performance: true 12 | experiment_name: c12_baroclinic 13 | nx_tile: 12 14 | nz: 79 15 | dt_atmos: 225 16 | minutes: 15 17 | layout: 18 | - 1 19 | - 1 20 | diagnostics_config: 21 | path: output 22 | output_format: netcdf 23 | names: 24 | - u 25 | - v 26 | - ua 27 | - va 28 | - pt 29 | - delp 30 | - qvapor 31 | - qliquid 32 | - qice 33 | - qrain 34 | - qsnow 35 | - qgraupel 36 | z_select: 37 | - level: 65 38 | names: 39 | - pt 40 | dycore_config: 41 | a_imp: 1.0 42 | beta: 0. 43 | consv_te: 0. 44 | d2_bg: 0. 45 | d2_bg_k1: 0.2 46 | d2_bg_k2: 0.1 47 | d4_bg: 0.15 48 | d_con: 1.0 49 | d_ext: 0.0 50 | dddmp: 0.5 51 | delt_max: 0.002 52 | do_sat_adj: true 53 | do_vort_damp: true 54 | fill: true 55 | hord_dp: 6 56 | hord_mt: 6 57 | hord_tm: 6 58 | hord_tr: 8 59 | hord_vt: 6 60 | hydrostatic: false 61 | k_split: 1 62 | ke_bg: 0. 63 | kord_mt: 9 64 | kord_tm: -9 65 | kord_tr: 9 66 | kord_wz: 9 67 | n_split: 1 68 | nord: 3 69 | nwat: 6 70 | p_fac: 0.05 71 | rf_cutoff: 3000. 72 | rf_fast: true 73 | tau: 10. 74 | vtdm4: 0.06 75 | z_tracer: true 76 | do_qa: true 77 | tau_i2s: 1000. 78 | tau_g2v: 1200. 79 | ql_gen: 0.001 80 | ql_mlt: 0.002 81 | qs_mlt: 0.000001 82 | qi_lim: 1.0 83 | dw_ocean: 0.1 84 | dw_land: 0.15 85 | icloud_f: 0 86 | tau_l2v: 300. 87 | tau_v2l: 90. 
88 | fv_sg_adj: 0 89 | n_sponge: 48 90 | 91 | physics_config: 92 | hydrostatic: false 93 | nwat: 6 94 | do_qa: true 95 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c12_comm_read.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: numpy 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: false 8 | initialization: 9 | type: baroclinic 10 | performance_config: 11 | collect_performance: false 12 | experiment_name: c12_baroclinic 13 | comm_config: 14 | type: read 15 | config: 16 | path: comm 17 | rank: 0 18 | nx_tile: 12 19 | nz: 79 20 | dt_atmos: 180 21 | minutes: 6 22 | layout: 23 | - 1 24 | - 1 25 | diagnostics_config: 26 | path: "output.zarr" 27 | names: 28 | - u 29 | - v 30 | - ua 31 | - va 32 | - pt 33 | - delp 34 | - qvapor 35 | - qliquid 36 | - qice 37 | - qrain 38 | - qsnow 39 | - qgraupel 40 | dycore_config: 41 | a_imp: 1.0 42 | beta: 0. 43 | consv_te: 0. 44 | d2_bg: 0. 45 | d2_bg_k1: 0.2 46 | d2_bg_k2: 0.1 47 | d4_bg: 0.15 48 | d_con: 1.0 49 | d_ext: 0.0 50 | dddmp: 0.5 51 | delt_max: 0.002 52 | do_sat_adj: true 53 | do_vort_damp: true 54 | fill: true 55 | hord_dp: 6 56 | hord_mt: 6 57 | hord_tm: 6 58 | hord_tr: 8 59 | hord_vt: 6 60 | hydrostatic: false 61 | k_split: 1 62 | ke_bg: 0. 63 | kord_mt: 9 64 | kord_tm: -9 65 | kord_tr: 9 66 | kord_wz: 9 67 | n_split: 1 68 | nord: 3 69 | nwat: 6 70 | p_fac: 0.05 71 | rf_cutoff: 3000. 72 | rf_fast: true 73 | tau: 10. 74 | vtdm4: 0.06 75 | z_tracer: true 76 | do_qa: true 77 | tau_i2s: 1000. 78 | tau_g2v: 1200. 79 | ql_gen: 0.001 80 | ql_mlt: 0.002 81 | qs_mlt: 0.000001 82 | qi_lim: 1.0 83 | dw_ocean: 0.1 84 | dw_land: 0.15 85 | icloud_f: 0 86 | tau_l2v: 300. 87 | tau_v2l: 90. 88 | fv_sg_adj: 0 89 | n_sponge: 48 90 | 91 | physics_config: 92 | hydrostatic: false 93 | nwat: 6 94 | do_qa: true 95 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c12_comm_write.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: numpy 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: false 8 | initialization: 9 | type: baroclinic 10 | performance_config: 11 | collect_performance: false 12 | experiment_name: c12_baroclinic 13 | comm_config: 14 | type: write 15 | config: 16 | path: comm 17 | ranks: 18 | - 0 19 | nx_tile: 12 20 | nz: 79 21 | dt_atmos: 180 22 | minutes: 6 23 | layout: 24 | - 1 25 | - 1 26 | diagnostics_config: 27 | path: "output.zarr" 28 | names: 29 | - u 30 | - v 31 | - ua 32 | - va 33 | - pt 34 | - delp 35 | - qvapor 36 | - qliquid 37 | - qice 38 | - qrain 39 | - qsnow 40 | - qgraupel 41 | dycore_config: 42 | a_imp: 1.0 43 | beta: 0. 44 | consv_te: 0. 45 | d2_bg: 0. 46 | d2_bg_k1: 0.2 47 | d2_bg_k2: 0.1 48 | d4_bg: 0.15 49 | d_con: 1.0 50 | d_ext: 0.0 51 | dddmp: 0.5 52 | delt_max: 0.002 53 | do_sat_adj: true 54 | do_vort_damp: true 55 | fill: true 56 | hord_dp: 6 57 | hord_mt: 6 58 | hord_tm: 6 59 | hord_tr: 8 60 | hord_vt: 6 61 | hydrostatic: false 62 | k_split: 1 63 | ke_bg: 0. 64 | kord_mt: 9 65 | kord_tm: -9 66 | kord_tr: 9 67 | kord_wz: 9 68 | n_split: 1 69 | nord: 3 70 | nwat: 6 71 | p_fac: 0.05 72 | rf_cutoff: 3000. 73 | rf_fast: true 74 | tau: 10. 75 | vtdm4: 0.06 76 | z_tracer: true 77 | do_qa: true 78 | tau_i2s: 1000. 79 | tau_g2v: 1200. 
80 | ql_gen: 0.001 81 | ql_mlt: 0.002 82 | qs_mlt: 0.000001 83 | qi_lim: 1.0 84 | dw_ocean: 0.1 85 | dw_land: 0.15 86 | icloud_f: 0 87 | tau_l2v: 300. 88 | tau_v2l: 90. 89 | fv_sg_adj: 0 90 | n_sponge: 48 91 | 92 | physics_config: 93 | hydrostatic: false 94 | nwat: 6 95 | do_qa: true 96 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c12_from_serialbox.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: gtc:numpy 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: false 8 | initialization_type: serialbox 9 | initialization_config: 10 | path: /test_data/8.0.0/c12_6ranks_baroclinic_dycore_microphysics/driver 11 | serialized_grid: True 12 | performance_config: 13 | collect_performance: false 14 | nx_tile: 12 15 | nz: 79 16 | dt_atmos: 225 17 | minutes: 4 18 | layout: 19 | - 1 20 | - 1 21 | diagnostics_config: 22 | path: "output.zarr" 23 | names: 24 | - u 25 | - v 26 | - ua 27 | - va 28 | - pt 29 | - delp 30 | - qvapor 31 | - qliquid 32 | - qice 33 | - qrain 34 | - qsnow 35 | - qgraupel 36 | dycore_config: 37 | namelist_override: /test_data/8.0.0/c12_6ranks_baroclinic_dycore_microphysics/driver/input.nml 38 | physics_config: 39 | namelist_override: /test_data/8.0.0/c12_6ranks_baroclinic_dycore_microphysics/driver/input.nml 40 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c12_null_comm.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: numpy 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: false 8 | initialization: 9 | type: baroclinic 10 | performance_config: 11 | collect_performance: false 12 | experiment_name: c12_baroclinic 13 | comm_config: 14 | type: null_comm 15 | config: 16 | rank: 0 17 | total_ranks: 6 18 | nx_tile: 12 19 | nz: 79 20 | dt_atmos: 225 21 | minutes: 60 22 | layout: 23 | - 1 24 | - 1 25 | diagnostics_config: 26 | path: "output.zarr" 27 | names: 28 | - u 29 | - v 30 | - ua 31 | - va 32 | - pt 33 | - delp 34 | - qvapor 35 | - qliquid 36 | - qice 37 | - qrain 38 | - qsnow 39 | - qgraupel 40 | dycore_config: 41 | a_imp: 1.0 42 | beta: 0. 43 | consv_te: 0. 44 | d2_bg: 0. 45 | d2_bg_k1: 0.2 46 | d2_bg_k2: 0.1 47 | d4_bg: 0.15 48 | d_con: 1.0 49 | d_ext: 0.0 50 | dddmp: 0.5 51 | delt_max: 0.002 52 | do_sat_adj: true 53 | do_vort_damp: true 54 | fill: true 55 | hord_dp: 6 56 | hord_mt: 6 57 | hord_tm: 6 58 | hord_tr: 8 59 | hord_vt: 6 60 | hydrostatic: false 61 | k_split: 1 62 | ke_bg: 0. 63 | kord_mt: 9 64 | kord_tm: -9 65 | kord_tr: 9 66 | kord_wz: 9 67 | n_split: 1 68 | nord: 3 69 | nwat: 6 70 | p_fac: 0.05 71 | rf_cutoff: 3000. 72 | rf_fast: true 73 | tau: 10. 74 | vtdm4: 0.06 75 | z_tracer: true 76 | do_qa: true 77 | tau_i2s: 1000. 78 | tau_g2v: 1200. 79 | ql_gen: 0.001 80 | ql_mlt: 0.002 81 | qs_mlt: 0.000001 82 | qi_lim: 1.0 83 | dw_ocean: 0.1 84 | dw_land: 0.15 85 | icloud_f: 0 86 | tau_l2v: 300. 87 | tau_v2l: 90. 
88 | fv_sg_adj: 0 89 | n_sponge: 48 90 | 91 | physics_config: 92 | hydrostatic: false 93 | nwat: 6 94 | do_qa: true 95 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c12_orch_cpu.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: dace:cpu 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: false 8 | initialization: 9 | type: baroclinic 10 | performance_config: 11 | collect_performance: false 12 | nx_tile: 12 13 | nz: 79 14 | dt_atmos: 225 15 | minutes: 5 16 | layout: 17 | - 1 18 | - 1 19 | dycore_config: 20 | a_imp: 1.0 21 | beta: 0. 22 | consv_te: 0. 23 | d2_bg: 0. 24 | d2_bg_k1: 0.2 25 | d2_bg_k2: 0.1 26 | d4_bg: 0.15 27 | d_con: 1.0 28 | d_ext: 0.0 29 | dddmp: 0.5 30 | delt_max: 0.002 31 | do_sat_adj: true 32 | do_vort_damp: true 33 | fill: true 34 | hord_dp: 6 35 | hord_mt: 6 36 | hord_tm: 6 37 | hord_tr: 8 38 | hord_vt: 6 39 | hydrostatic: false 40 | k_split: 1 41 | ke_bg: 0. 42 | kord_mt: 9 43 | kord_tm: -9 44 | kord_tr: 9 45 | kord_wz: 9 46 | n_split: 1 47 | nord: 3 48 | nwat: 6 49 | p_fac: 0.05 50 | rf_cutoff: 3000. 51 | rf_fast: true 52 | tau: 10. 53 | vtdm4: 0.06 54 | z_tracer: true 55 | do_qa: true 56 | tau_i2s: 1000. 57 | tau_g2v: 1200. 58 | ql_gen: 0.001 59 | ql_mlt: 0.002 60 | qs_mlt: 0.000001 61 | qi_lim: 1.0 62 | dw_ocean: 0.1 63 | dw_land: 0.15 64 | icloud_f: 0 65 | tau_l2v: 300. 66 | tau_v2l: 90. 67 | fv_sg_adj: 0 68 | n_sponge: 48 69 | 70 | physics_config: 71 | hydrostatic: false 72 | nwat: 6 73 | do_qa: true 74 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c12_read_restart_fortran.yml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: numpy 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: false 8 | initialization: 9 | config: 10 | path: /home/ajdas/pace/restart_data/1.0 11 | type: fortran_restart 12 | performance_config: 13 | collect_performance: false 14 | experiment_name: c12_baroclinic_restart 15 | grid_config: 16 | type: generated 17 | nx_tile: 12 18 | nz: 79 19 | dt_atmos: 225 20 | minutes: 15 21 | layout: 22 | - 1 23 | - 1 24 | diagnostics_config: 25 | path: "output.zarr" 26 | names: 27 | - u 28 | - v 29 | - ua 30 | - va 31 | - pt 32 | - delp 33 | - qvapor 34 | - qliquid 35 | - qice 36 | - qrain 37 | - qsnow 38 | - qgraupel 39 | dycore_config: 40 | a_imp: 1.0 41 | beta: 0. 42 | consv_te: 0. 43 | d2_bg: 0. 44 | d2_bg_k1: 0.2 45 | d2_bg_k2: 0.1 46 | d4_bg: 0.15 47 | d_con: 1.0 48 | d_ext: 0.0 49 | dddmp: 0.5 50 | delt_max: 0.002 51 | do_sat_adj: true 52 | do_vort_damp: true 53 | fill: true 54 | hord_dp: 6 55 | hord_mt: 6 56 | hord_tm: 6 57 | hord_tr: 8 58 | hord_vt: 6 59 | hydrostatic: false 60 | k_split: 1 61 | ke_bg: 0. 62 | kord_mt: 9 63 | kord_tm: -9 64 | kord_tr: 9 65 | kord_wz: 9 66 | n_split: 1 67 | nord: 3 68 | nwat: 6 69 | p_fac: 0.05 70 | rf_cutoff: 3000. 71 | rf_fast: true 72 | tau: 10. 73 | vtdm4: 0.06 74 | z_tracer: true 75 | do_qa: true 76 | tau_i2s: 1000. 77 | tau_g2v: 1200. 78 | ql_gen: 0.001 79 | ql_mlt: 0.002 80 | qs_mlt: 0.000001 81 | qi_lim: 1.0 82 | dw_ocean: 0.1 83 | dw_land: 0.15 84 | icloud_f: 0 85 | tau_l2v: 300. 86 | tau_v2l: 90. 
87 | fv_sg_adj: 0 88 | n_sponge: 48 89 | 90 | physics_config: 91 | hydrostatic: false 92 | nwat: 6 93 | do_qa: true 94 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c12_write_restart.yaml: -------------------------------------------------------------------------------- 1 | stencil_config: 2 | compilation_config: 3 | backend: numpy 4 | rebuild: false 5 | validate_args: true 6 | format_source: false 7 | device_sync: false 8 | initialization: 9 | type: baroclinic 10 | performance_config: 11 | collect_performance: false 12 | experiment_name: c12_baroclinic 13 | nx_tile: 12 14 | nz: 79 15 | dt_atmos: 225 16 | minutes: 0 17 | seconds: 225 18 | layout: 19 | - 1 20 | - 1 21 | restart_config: 22 | save_restart: true 23 | diagnostics_config: 24 | path: "output.zarr" 25 | names: 26 | - u 27 | - v 28 | - ua 29 | - va 30 | - pt 31 | - delp 32 | - qvapor 33 | - qliquid 34 | - qice 35 | - qrain 36 | - qsnow 37 | - qgraupel 38 | dycore_config: 39 | a_imp: 1.0 40 | beta: 0. 41 | consv_te: 0. 42 | d2_bg: 0. 43 | d2_bg_k1: 0.2 44 | d2_bg_k2: 0.1 45 | d4_bg: 0.15 46 | d_con: 1.0 47 | d_ext: 0.0 48 | dddmp: 0.5 49 | delt_max: 0.002 50 | do_sat_adj: true 51 | do_vort_damp: true 52 | fill: true 53 | hord_dp: 6 54 | hord_mt: 6 55 | hord_tm: 6 56 | hord_tr: 8 57 | hord_vt: 6 58 | hydrostatic: false 59 | k_split: 1 60 | ke_bg: 0. 61 | kord_mt: 9 62 | kord_tm: -9 63 | kord_tr: 9 64 | kord_wz: 9 65 | n_split: 1 66 | nord: 3 67 | nwat: 6 68 | p_fac: 0.05 69 | rf_cutoff: 3000. 70 | rf_fast: true 71 | tau: 10. 72 | vtdm4: 0.06 73 | z_tracer: true 74 | do_qa: true 75 | tau_i2s: 1000. 76 | tau_g2v: 1200. 77 | ql_gen: 0.001 78 | ql_mlt: 0.002 79 | qs_mlt: 0.000001 80 | qi_lim: 1.0 81 | dw_ocean: 0.1 82 | dw_land: 0.15 83 | icloud_f: 0 84 | tau_l2v: 300. 85 | tau_v2l: 90. 86 | fv_sg_adj: 0 87 | n_sponge: 48 88 | 89 | physics_config: 90 | hydrostatic: false 91 | nwat: 6 92 | do_qa: true 93 | -------------------------------------------------------------------------------- /driver/examples/configs/baroclinic_c48_6ranks_serialbox_test.yaml: -------------------------------------------------------------------------------- 1 | dycore_only: true 2 | disable_step_physics: true 3 | stencil_config: 4 | compilation_config: 5 | backend: gt:gpu 6 | rebuild: false 7 | validate_args: true 8 | format_source: false 9 | device_sync: false 10 | run_mode: Run 11 | initialization: 12 | type: serialbox 13 | config: 14 | path: notapath 15 | serialized_grid: true 16 | performance_config: 17 | collect_performance: false 18 | nx_tile: 48 19 | nz: 79 20 | dt_atmos: 225 21 | seconds: 4500 22 | layout: 23 | - 1 24 | - 1 25 | diagnostics_config: 26 | path: "output.zarr" 27 | names: 28 | - ua 29 | - va 30 | - pt 31 | dycore_config: 32 | a_imp: 1.0 33 | beta: 0. 34 | consv_te: 0. 35 | d2_bg: 0. 36 | d2_bg_k1: 0.2 37 | d2_bg_k2: 0.1 38 | d4_bg: 0.15 39 | d_con: 1.0 40 | d_ext: 0.0 41 | dddmp: 0.5 42 | delt_max: 0.002 43 | do_sat_adj: true 44 | do_vort_damp: true 45 | fill: true 46 | hord_dp: 6 47 | hord_mt: 6 48 | hord_tm: 6 49 | hord_tr: 8 50 | hord_vt: 6 51 | hydrostatic: false 52 | k_split: 1 53 | ke_bg: 0. 54 | kord_mt: 9 55 | kord_tm: -9 56 | kord_tr: 9 57 | kord_wz: 9 58 | n_split: 1 59 | nord: 3 60 | nwat: 6 61 | p_fac: 0.05 62 | rf_cutoff: 3000. 63 | rf_fast: true 64 | tau: 10. 65 | vtdm4: 0.06 66 | z_tracer: true 67 | do_qa: true 68 | tau_i2s: 1000. 69 | tau_g2v: 1200. 
70 | ql_gen: 0.001 71 | ql_mlt: 0.002 72 | qs_mlt: 0.000001 73 | qi_lim: 1.0 74 | dw_ocean: 0.1 75 | dw_land: 0.15 76 | icloud_f: 0 77 | tau_l2v: 300. 78 | tau_v2l: 90. 79 | fv_sg_adj: 0 80 | n_sponge: 48 81 | 82 | physics_config: 83 | hydrostatic: false 84 | nwat: 6 85 | do_qa: true 86 | -------------------------------------------------------------------------------- /driver/examples/configs/tropical_read_restart_fortran.yml: -------------------------------------------------------------------------------- 1 | dycore_only: true 2 | disable_step_physics: true 3 | stencil_config: 4 | compilation_config: 5 | backend: numpy 6 | rebuild: false 7 | validate_args: true 8 | format_source: false 9 | device_sync: false 10 | initialization: 11 | config: 12 | path: gs://vcm-ml-public/dsl/initial_conditions/c128_6ranks_tc 13 | type: fortran_restart 14 | grid_config: 15 | config: 16 | stretch_factor: 3.0 17 | lon_target: 172.5 18 | lat_target: 17.5 19 | ks: 0 20 | vertical_grid_from_restart: True 21 | type: generated 22 | performance_config: 23 | collect_performance: false 24 | experiment_name: c128_tropical 25 | nx_tile: 128 26 | nz: 79 27 | dt_atmos: 60 28 | minutes: 20 29 | layout: 30 | - 1 31 | - 1 32 | diagnostics_config: 33 | path: "output.zarr" 34 | names: 35 | - u 36 | - v 37 | - ua 38 | - va 39 | - pt 40 | - delp 41 | - qvapor 42 | output_initial_state: true 43 | dycore_config: 44 | a_imp: 1.0 45 | beta: 0. 46 | consv_te: 0. 47 | d2_bg: 0. 48 | d2_bg_k1: 0.2 49 | d2_bg_k2: 0.15 50 | d4_bg: 0.14 51 | d_con: 1.0 52 | d_ext: 0.0 53 | dddmp: 0.5 54 | delt_max: 0.002 55 | do_sat_adj: false 56 | do_vort_damp: true 57 | fill: false 58 | hord_dp: 6 59 | hord_mt: 6 60 | hord_tm: 6 61 | hord_tr: 8 62 | hord_vt: 6 63 | hydrostatic: false 64 | k_split: 2 65 | ke_bg: 0. 66 | kord_mt: 9 67 | kord_tm: -9 68 | kord_tr: 9 69 | kord_wz: 9 70 | n_split: 3 71 | nord: 3 72 | nwat: 6 73 | p_fac: 0.1 74 | rf_cutoff: 5000. 75 | rf_fast: true 76 | tau: 5. 77 | vtdm4: 0.06 78 | z_tracer: true 79 | do_qa: true 80 | tau_i2s: 1000. 81 | tau_g2v: 900. 82 | ql_gen: 0.001 83 | ql_mlt: 0.001 84 | qs_mlt: 0.000001 85 | qi_lim: 1.0 86 | dw_ocean: 0.1 87 | dw_land: 0.16 88 | icloud_f: 0 89 | tau_l2v: 225. 90 | tau_v2l: 150. 91 | fv_sg_adj: 600 92 | n_sponge: 48 93 | p_ref: 101500. 94 | 95 | physics_config: 96 | hydrostatic: false 97 | nwat: 6 98 | do_qa: true 99 | -------------------------------------------------------------------------------- /driver/examples/configs/tropicalcyclone_c128.yaml: -------------------------------------------------------------------------------- 1 | dycore_only: true 2 | disable_step_physics: true 3 | stencil_config: 4 | compilation_config: 5 | backend: numpy 6 | rebuild: false 7 | validate_args: true 8 | format_source: false 9 | device_sync: false 10 | initialization: 11 | type: tropicalcyclone 12 | performance_config: 13 | performance_mode: true 14 | experiment_name: c128_tropical 15 | grid_config: 16 | config: 17 | stretch_factor: 3.0 18 | lon_target: 172.5 19 | lat_target: 17.5 20 | ks: 0 21 | type: generated 22 | nx_tile: 128 23 | nz: 79 24 | dt_atmos: 60 25 | minutes: 2 26 | layout: 27 | - 1 28 | - 1 29 | diagnostics_config: 30 | path: "output.zarr" 31 | names: 32 | - u 33 | - v 34 | - ua 35 | - va 36 | - pt 37 | - delp 38 | - delz 39 | - qvapor 40 | - ps 41 | output_initial_state: true 42 | dycore_config: 43 | a_imp: 1.0 44 | beta: 0. 45 | consv_te: 0. 46 | d2_bg: 0. 
47 | d2_bg_k1: 0.2 48 | d2_bg_k2: 0.15 49 | d4_bg: 0.14 50 | d_con: 1.0 51 | d_ext: 0.0 52 | dddmp: 0.5 53 | delt_max: 0.002 54 | do_sat_adj: false 55 | do_vort_damp: true 56 | fill: false 57 | hord_dp: 6 58 | hord_mt: 6 59 | hord_tm: 6 60 | hord_tr: 8 61 | hord_vt: 6 62 | hydrostatic: false 63 | k_split: 2 64 | ke_bg: 0. 65 | kord_mt: 9 66 | kord_tm: -9 67 | kord_tr: 9 68 | kord_wz: 9 69 | n_split: 3 70 | nord: 3 71 | nwat: 6 72 | p_fac: 0.1 73 | rf_cutoff: 5000. 74 | rf_fast: true 75 | tau: 5. 76 | vtdm4: 0.06 77 | z_tracer: true 78 | do_qa: true 79 | tau_i2s: 1000. 80 | tau_g2v: 900. 81 | ql_gen: 0.001 82 | ql_mlt: 0.001 83 | qs_mlt: 0.000001 84 | qi_lim: 1.0 85 | dw_ocean: 0.1 86 | dw_land: 0.16 87 | icloud_f: 0 88 | tau_l2v: 225. 89 | tau_v2l: 150. 90 | fv_sg_adj: 600 91 | n_sponge: 48 92 | p_ref: 101500. 93 | 94 | physics_config: 95 | hydrostatic: false 96 | nwat: 6 97 | do_qa: true 98 | -------------------------------------------------------------------------------- /driver/examples/create_venv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Requires git and Python>=3.8 with venv and pip installed 3 | # Requires an MPI library for mpi4py, such as libopenmpi3 and libopenmpi-dev 4 | 5 | set -e -x 6 | 7 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 8 | 9 | python3 -m venv venv 10 | . venv/bin/activate 11 | 12 | rundir=$(pwd) 13 | cd ${SCRIPT_DIR}/../../ 14 | 15 | pip3 install --upgrade setuptools wheel 16 | pip3 install -r requirements_dev.txt -c constraints.txt 17 | 18 | deactivate 19 | cd $rundir 20 | -------------------------------------------------------------------------------- /driver/examples/notebooks/driver_write_config.yaml: -------------------------------------------------------------------------------- 1 | comm_config: 2 | config: 3 | path: comm 4 | ranks: 5 | - 0 6 | type: write 7 | dt_atmos: 225 8 | dycore_config: 9 | a_imp: 1.0 10 | beta: 0.0 11 | consv_te: 0.0 12 | d2_bg: 0.0 13 | d2_bg_k1: 0.2 14 | d2_bg_k2: 0.1 15 | d4_bg: 0.15 16 | d_con: 1.0 17 | d_ext: 0.0 18 | dddmp: 0.5 19 | delt_max: 0.002 20 | do_qa: true 21 | do_sat_adj: true 22 | do_vort_damp: true 23 | dw_land: 0.15 24 | dw_ocean: 0.1 25 | fill: true 26 | fv_sg_adj: 0 27 | hord_dp: 6 28 | hord_mt: 6 29 | hord_tm: 6 30 | hord_tr: 8 31 | hord_vt: 6 32 | hydrostatic: false 33 | icloud_f: 0 34 | k_split: 1 35 | ke_bg: 0.0 36 | kord_mt: 9 37 | kord_tm: -9 38 | kord_tr: 9 39 | kord_wz: 9 40 | n_split: 1 41 | n_sponge: 48 42 | nord: 3 43 | nwat: 6 44 | p_fac: 0.05 45 | qi_lim: 1.0 46 | ql_gen: 0.001 47 | ql_mlt: 0.002 48 | qs_mlt: 1.0e-06 49 | rf_cutoff: 3000.0 50 | rf_fast: true 51 | tau: 10.0 52 | tau_g2v: 1200.0 53 | tau_i2s: 1000.0 54 | tau_l2v: 300.0 55 | tau_v2l: 90.0 56 | vtdm4: 0.06 57 | z_tracer: true 58 | initialization_config: {} 59 | initialization_type: baroclinic 60 | layout: 61 | - 1 62 | - 1 63 | minutes: 60 64 | nx_tile: 12 65 | nz: 79 66 | performance_config: 67 | collect_performance: false 68 | experiment_name: c12_baroclinic 69 | physics_config: 70 | do_qa: true 71 | hydrostatic: false 72 | nwat: 6 73 | stencil_config: 74 | backend: numpy 75 | device_sync: true 76 | format_source: false 77 | rebuild: false 78 | validate_args: true 79 | -------------------------------------------------------------------------------- /driver/examples/plot_baroclinic_init.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 
from datetime import datetime 3 | 4 | import matplotlib.pyplot as plt 5 | import numpy as np 6 | import xarray as xr 7 | import zarr 8 | from cartopy import crs as ccrs 9 | from fv3viz import pcolormesh_cube 10 | 11 | 12 | def parse_args(): 13 | usage = "usage: python %(prog)s config_file" 14 | parser = ArgumentParser(usage=usage) 15 | 16 | parser.add_argument( 17 | "zarr_output", 18 | type=str, 19 | action="store", 20 | help="which zarr output file to use", 21 | ) 22 | 23 | parser.add_argument( 24 | "experiment", 25 | type=str, 26 | action="store", 27 | help="experiment name", 28 | ) 29 | 30 | parser.add_argument( 31 | "variable", 32 | type=str, 33 | action="store", 34 | help="variable name to be plotted", 35 | ) 36 | 37 | parser.add_argument( 38 | "zlevel", 39 | type=int, 40 | action="store", 41 | help="variable zlevel to be plotted", 42 | ) 43 | 44 | return parser.parse_args() 45 | 46 | 47 | args = parse_args() 48 | ds = xr.open_zarr(store=zarr.DirectoryStore(path=args.zarr_output), consolidated=False) 49 | fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.Robinson()}) 50 | lat = ds["lat"].values * 180.0 / np.pi 51 | lon = ds["lon"].values * 180.0 / np.pi 52 | h = pcolormesh_cube( 53 | lat, 54 | lon, 55 | ds[args.variable].isel(time=0, z=args.zlevel).values, 56 | cmap=plt.cm.viridis, 57 | ax=ax, 58 | ) 59 | fig.colorbar(h, ax=ax, location="bottom", label=f"{args.variable}") 60 | title = args.experiment.replace("_", " ") 61 | fig.suptitle(f"{title}: {args.variable}, z={args.zlevel}") 62 | ax.annotate( 63 | "Generated on " + datetime.now().strftime("%m/%d/%y %H:%M:%S"), 64 | xy=(1.0, -0.6), 65 | xycoords="axes fraction", 66 | ha="right", 67 | va="center", 68 | fontsize=8, 69 | ) 70 | plt.tight_layout() 71 | plt.savefig( 72 | f"/work/{args.experiment}_baroclinic_initialization_{args.variable}.png", dpi=150 73 | ) 74 | -------------------------------------------------------------------------------- /driver/examples/plot_cube.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import xarray as xr 4 | import zarr 5 | from cartopy import crs as ccrs 6 | from fv3viz import pcolormesh_cube 7 | 8 | 9 | ds = xr.open_zarr(store=zarr.DirectoryStore(path="output.zarr"), consolidated=False) 10 | fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.Robinson()}) 11 | lat = ds["lat"].values * 180.0 / np.pi 12 | lon = ds["lon"].values * 180.0 / np.pi 13 | h = pcolormesh_cube( 14 | lat, 15 | lon, 16 | ds["ua"].isel(time=5, z=78).values, 17 | cmap=plt.cm.viridis, 18 | ax=ax, 19 | ) 20 | fig.colorbar(h, ax=ax, location="bottom", label="u [m/s]") 21 | plt.tight_layout() 22 | plt.savefig("test.png") 23 | -------------------------------------------------------------------------------- /driver/examples/plot_output.py: -------------------------------------------------------------------------------- 1 | import xarray as xr 2 | import zarr 3 | 4 | 5 | try: 6 | import matplotlib.pyplot as plt 7 | except ModuleNotFoundError: 8 | print( 9 | "matplotlib is not installed, install it first with " 10 | "`pip install matplotlib` or similar" 11 | ) 12 | raise 13 | 14 | ds = xr.open_zarr(store=zarr.DirectoryStore(path="output.zarr"), consolidated=False) 15 | 16 | 17 | fig, ax = plt.subplots(2, 3, figsize=(12, 8)) 18 | ax = ax.flatten() 19 | level = -1 20 | varname = "delp" 21 | for i in range(6): 22 | temperature_anomaly = ( 23 | ds[varname].isel(time=-1, tile=i, z=level).values 24 | - ds[varname].isel(time=1, 
tile=i, z=level).values 25 | ) 26 | im = ax[i].pcolormesh(temperature_anomaly) 27 | ax[i].set_title(f"Tile {i}") 28 | plt.colorbar(im, ax=ax[i]) 29 | fig.suptitle(f"Lowest level {varname} evolution (end - step 1)") 30 | plt.tight_layout() 31 | plt.show() 32 | -------------------------------------------------------------------------------- /driver/examples/run_docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | 5 | docker run -v ${SCRIPT_DIR}/../../:/pace -w /pace python:3.8 bash -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python3-venv python3-dev libopenmpi3 libopenmpi-dev && cd /pace/driver/examples && /pace/driver/examples/create_venv.sh && . venv/bin/activate && mpirun -n 6 --allow-run-as-root --mca btl_vader_single_copy_mechanism none python3 -m pace.driver.run /pace/driver/examples/configs/baroclinic_c12.yaml" 6 | -------------------------------------------------------------------------------- /driver/examples/write_then_read.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This example shows how to use CachingCommWriter to write MPI communication data to disk, and then re-run the model using the data from disk. 3 | 4 | set -e -x 5 | 6 | MPIRUN_CMD=${MPIRUN_CMD:-mpirun -n 6} 7 | 8 | $MPIRUN_CMD python3 -m pace.driver.run configs/baroclinic_c12_comm_write.yaml --log-rank 0 9 | python3 -m pace.driver.run configs/baroclinic_c12_comm_read.yaml 10 | -------------------------------------------------------------------------------- /driver/examples/zarr_to_nc.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import xarray as xr 4 | import zarr 5 | 6 | 7 | if __name__ == "__main__": 8 | parser = argparse.ArgumentParser( 9 | description="Converts zarr directory stores to netcdf" 10 | ) 11 | parser.add_argument("zarr_in", type=str, help="path of zarr to convert") 12 | parser.add_argument("netcdf_out", type=str, help="output netcdf") 13 | args = parser.parse_args() 14 | ds: xr.Dataset = xr.open_zarr(store=zarr.DirectoryStore(args.zarr_in)) 15 | ds.to_netcdf(args.netcdf_out) 16 | -------------------------------------------------------------------------------- /driver/pace/driver/__init__.py: -------------------------------------------------------------------------------- 1 | from .comm import ( 2 | CreatesComm, 3 | CreatesCommSelector, 4 | MPICommConfig, 5 | NullCommConfig, 6 | ReaderCommConfig, 7 | WriterCommConfig, 8 | ) 9 | from .diagnostics import Diagnostics, DiagnosticsConfig 10 | from .driver import Driver, DriverConfig, RestartConfig 11 | from .grid import GeneratedGridConfig, SerialboxGridConfig 12 | from .initialization import BaroclinicInit, PredefinedStateInit, RestartInit 13 | from .performance import PerformanceConfig 14 | from .registry import Registry 15 | from .state import DriverState, TendencyState 16 | 17 | 18 | __version__ = "0.2.0" 19 | -------------------------------------------------------------------------------- /driver/pace/driver/configs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/driver/pace/driver/configs/__init__.py -------------------------------------------------------------------------------- 
/driver/pace/driver/performance/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import PerformanceConfig 2 | -------------------------------------------------------------------------------- /driver/pace/driver/performance/config.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | 3 | import pace.util 4 | from pace.util import NullProfiler, Profiler 5 | 6 | from .collector import ( 7 | AbstractPerformanceCollector, 8 | NullPerformanceCollector, 9 | PerformanceCollector, 10 | ) 11 | 12 | 13 | @dataclasses.dataclass 14 | class PerformanceConfig: 15 | """Configuration for performance statistics collection. 16 | 17 | collect_performance: overall flag turning collection on/off 18 | collect_cProfile: use cProfile for CPU Python profiling 19 | collect_communication: collect halo exchange details 20 | experiment_name: name to be printed in the JSON summary 21 | json_all_rank_threshold: number of ranks above which the full performance 22 | report for all ranks won't be written (rank 0 is always written) 23 | """ 24 | 25 | collect_performance: bool = False 26 | collect_cProfile: bool = False 27 | collect_communication: bool = False 28 | experiment_name: str = "test" 29 | json_all_rank_threshold: int = 1000 30 | 31 | def build(self, comm: pace.util.Comm) -> AbstractPerformanceCollector: 32 | if self.collect_performance: 33 | return PerformanceCollector(experiment_name=self.experiment_name, comm=comm) 34 | else: 35 | return NullPerformanceCollector() 36 | 37 | def build_profiler(self): 38 | if self.collect_cProfile: 39 | return Profiler() 40 | else: 41 | return NullProfiler() 42 | -------------------------------------------------------------------------------- /driver/pace/driver/tools.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import click 4 | 5 | from pace.dsl.dace.utils import ( 6 | kernel_theoretical_timing_from_path, 7 | memory_static_analysis_from_path, 8 | ) 9 | 10 | 11 | # Command-line action names: static memory analysis and theoretical kernel timing of a given SDFG 12 | ACTION_SDFG_MEMORY_STATIC_ANALYSIS = "sdfg_memory_static_analysis" 13 | ACTION_SDFG_KERNEL_THEORETICAL_TIMING = "sdfg_kernel_theoretical_timing" 14 | 15 | 16 | @click.command() 17 | @click.argument( 18 | "action", 19 | required=True, 20 | type=click.Choice( 21 | [ACTION_SDFG_MEMORY_STATIC_ANALYSIS, ACTION_SDFG_KERNEL_THEORETICAL_TIMING] 22 | ), 23 | ) 24 | @click.option( 25 | "--sdfg_path", 26 | required=True, 27 | type=click.STRING, 28 | ) 29 | @click.option("--report_detail", is_flag=True, type=click.BOOL, default=False) 30 | def command_line(action: str, sdfg_path: Optional[str], report_detail: Optional[bool]): 31 | """ 32 | Run tooling. 
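
    Example invocation (the SDFG path below is illustrative, not a shipped artifact):

        python -m pace.driver.tools sdfg_memory_static_analysis --sdfg_path /path/to/program.sdfg --report_detail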
33 | """ 34 | if action == ACTION_SDFG_MEMORY_STATIC_ANALYSIS: 35 | print(memory_static_analysis_from_path(sdfg_path, detail_report=report_detail)) 36 | elif action == ACTION_SDFG_KERNEL_THEORETICAL_TIMING: 37 | print(kernel_theoretical_timing_from_path(sdfg_path)) 38 | 39 | 40 | if __name__ == "__main__": 41 | command_line() 42 | -------------------------------------------------------------------------------- /driver/setup.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from setuptools import find_namespace_packages, setup 4 | 5 | 6 | setup_requirements: List[str] = [] 7 | 8 | requirements = [ 9 | "pace-util", 10 | "pace-fv3core", 11 | "pace-physics", 12 | "pace-stencils", 13 | "dacite", 14 | "pyyaml", 15 | "mpi4py", 16 | "numpy", 17 | "netCDF4", 18 | "xarray", 19 | "zarr", 20 | ] 21 | 22 | test_requirements: List[str] = [] 23 | 24 | 25 | setup( 26 | author="Allen Institute for AI", 27 | author_email="elynnw@allenai.org", 28 | python_requires=">=3.8", 29 | classifiers=[ 30 | "Development Status :: 2 - Pre-Alpha", 31 | "Intended Audience :: Developers", 32 | "License :: OSI Approved :: BSD License", 33 | "Natural Language :: English", 34 | "Programming Language :: Python :: 3", 35 | "Programming Language :: Python :: 3.8", 36 | "Programming Language :: Python :: 3.9", 37 | ], 38 | install_requires=requirements, 39 | setup_requires=setup_requirements, 40 | tests_require=test_requirements, 41 | name="pace-driver", 42 | license="BSD license", 43 | packages=find_namespace_packages(include=["pace.*"]), 44 | include_package_data=True, 45 | url="https://github.com/ai2cm/pace", 46 | version="0.2.0", 47 | zip_safe=False, 48 | ) 49 | -------------------------------------------------------------------------------- /driver/tests/mpi/run_save_and_load_restart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | MPIRUN_CALL=${MPIRUN_CALL:-mpirun -n 6} 4 | cp examples/configs/baroclinic_c12_write_restart.yaml baroclinic_c12_write_restart.yaml 5 | cp examples/configs/baroclinic_c12_write_restart.yaml baroclinic_c12_run_two_steps.yaml 6 | sed -i.bak 's/seconds: 225/seconds: 450/' baroclinic_c12_run_two_steps.yaml 7 | sed -i.bak 's/save_restart: true/save_restart: false/' baroclinic_c12_run_two_steps.yaml 8 | sed -i.bak 's/path: "output.zarr"/path: "run_two_steps_output.zarr"/' baroclinic_c12_run_two_steps.yaml 9 | rm *.bak 10 | $MPIRUN_CALL python -m pace.driver.run baroclinic_c12_write_restart.yaml --log-level=ERROR 11 | $MPIRUN_CALL python -m pace.driver.run RESTART/restart.yaml --log-level=ERROR 12 | $MPIRUN_CALL python -m pace.driver.run baroclinic_c12_run_two_steps.yaml --log-level=ERROR 13 | -------------------------------------------------------------------------------- /dsl/pace/dsl/__init__.py: -------------------------------------------------------------------------------- 1 | import gt4py.cartesian.config 2 | 3 | from pace.util.mpi import MPI 4 | 5 | from . 
import dace 6 | from .dace.dace_config import DaceConfig, DaCeOrchestration 7 | from .dace.orchestration import orchestrate, orchestrate_function 8 | from .stencil import ( 9 | CompilationConfig, 10 | FrozenStencil, 11 | GridIndexing, 12 | StencilConfig, 13 | StencilFactory, 14 | ) 15 | 16 | 17 | if MPI is not None: 18 | import os 19 | 20 | gt4py.cartesian.config.cache_settings["dir_name"] = os.environ.get( 21 | "GT_CACHE_DIR_NAME", f".gt_cache_{MPI.COMM_WORLD.Get_rank():06}" 22 | ) 23 | 24 | __version__ = "0.2.0" 25 | -------------------------------------------------------------------------------- /dsl/pace/dsl/dace/__init__.py: -------------------------------------------------------------------------------- 1 | from pace.dsl.dace.orchestration import orchestrate 2 | -------------------------------------------------------------------------------- /dsl/pace/dsl/dace/sdfg_opt_passes.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import dace 4 | 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def splittable_region_expansion(sdfg: dace.SDFG, verbose: bool = False): 10 | """ 11 | Set certain StencilComputation library nodes to expand to a different 12 | schedule if they contain small splittable regions. 13 | """ 14 | from gt4py.cartesian.gtc.dace.nodes import StencilComputation 15 | 16 | for node, _ in sdfg.all_nodes_recursive(): 17 | if isinstance(node, StencilComputation): 18 | if node.has_splittable_regions() and "corner" in node.label: 19 | node.expansion_specification = [ 20 | "Sections", 21 | "Stages", 22 | "J", 23 | "I", 24 | "K", 25 | ] 26 | if verbose: 27 | logger.info(f"Reordered schedule for {node.label}") 28 | -------------------------------------------------------------------------------- /dsl/pace/dsl/typing.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union, cast 2 | 3 | import gt4py.cartesian.gtscript as gtscript 4 | import numpy as np 5 | 6 | 7 | # A Field 8 | Field = gtscript.Field 9 | """A gt4py field""" 10 | 11 | # Axes 12 | IJK = gtscript.IJK 13 | IJ = gtscript.IJ 14 | IK = gtscript.IK 15 | JK = gtscript.JK 16 | I = gtscript.I # noqa: E741 17 | J = gtscript.J # noqa: E741 18 | K = gtscript.K # noqa: E741 19 | 20 | # Union of valid data types (from gt4py.cartesian.gtscript) 21 | DTypes = Union[bool, np.bool_, int, np.int32, np.int64, float, np.float32, np.float64] 22 | 23 | # Default float and int types 24 | Float = np.float_ 25 | Int = np.int_ 26 | Bool = np.bool_ 27 | 28 | 29 | FloatField = Field[gtscript.IJK, Float] 30 | FloatFieldI = Field[gtscript.I, Float] 31 | FloatFieldJ = Field[gtscript.J, Float] 32 | FloatFieldIJ = Field[gtscript.IJ, Float] 33 | FloatFieldK = Field[gtscript.K, Float] 34 | IntField = Field[gtscript.IJK, Int] 35 | IntFieldIJ = Field[gtscript.IJ, Int] 36 | IntFieldK = Field[gtscript.K, Int] 37 | BoolField = Field[gtscript.IJK, Bool] 38 | 39 | Index3D = Tuple[int, int, int] 40 | 41 | 42 | def cast_to_index3d(val: Tuple[int, ...]) -> Index3D: 43 | if len(val) != 3: 44 | raise ValueError(f"expected 3d index, received {val}") 45 | return cast(Index3D, val) 46 | -------------------------------------------------------------------------------- /dsl/setup.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from setuptools import find_namespace_packages, setup 4 | 5 | 6 | setup_requirements: List[str] = [] 7 | 8 | requirements = ["gt4py", 
"pace-util", "dace"] 9 | 10 | test_requirements: List[str] = [] 11 | 12 | 13 | setup( 14 | author="Allen Institute for AI", 15 | author_email="elynnw@allenai.org", 16 | python_requires=">=3.8", 17 | classifiers=[ 18 | "Development Status :: 2 - Pre-Alpha", 19 | "Intended Audience :: Developers", 20 | "License :: OSI Approved :: BSD License", 21 | "Natural Language :: English", 22 | "Programming Language :: Python :: 3", 23 | "Programming Language :: Python :: 3.8", 24 | "Programming Language :: Python :: 3.9", 25 | ], 26 | install_requires=requirements, 27 | setup_requires=setup_requirements, 28 | tests_require=test_requirements, 29 | name="pace-dsl", 30 | license="BSD license", 31 | packages=find_namespace_packages(include=["pace.*"]), 32 | include_package_data=True, 33 | url="https://github.com/ai2cm/pace", 34 | version="0.2.0", 35 | zip_safe=False, 36 | ) 37 | -------------------------------------------------------------------------------- /examples/Makefile: -------------------------------------------------------------------------------- 1 | IMAGE_NAME ?=gcr.io/vcm-ml/pace_notebook_examples 2 | APP_NAME ?=pace_notebook_examples 3 | PORT ?=8888 4 | BUILD_OPTIONS ?= 5 | RUN_OPTIONS ?= 6 | CMD?= 7 | CWD =$(shell pwd) 8 | 9 | .PHONY: help build build-nc run dev up stop 10 | 11 | help: 12 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 13 | 14 | .DEFAULT_GOAL := help 15 | 16 | build: ## Build the image 17 | docker build $(BUILD_OPTIONS) -t $(IMAGE_NAME) -f Dockerfile .. 18 | 19 | build-nc: ## Build the image without caching 20 | $(MAKE) BUILD_OPTIONS="--no-cache" build 21 | 22 | run: ## Run container 23 | docker run -i -t --rm $(RUN_OPTIONS) -p=$(PORT):$(PORT) --name="$(APP_NAME)" $(IMAGE_NAME) $(CMD) 24 | 25 | dev: ## Run container and mount local directory for development 26 | RUN_OPTIONS="-v $(CWD)/notebooks:/notebooks" $(MAKE) run 27 | 28 | enter: 29 | CMD="/bin/bash" $(MAKE) run 30 | 31 | up: build run ## Build image and run container 32 | 33 | stop: ## Stop and remove running container 34 | docker stop $(APP_NAME) 35 | docker rm $(APP_NAME) 36 | -------------------------------------------------------------------------------- /examples/build_scripts/activate_ppan.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | INSTALL_PREFIX=$1 4 | ENVIRONMENT_NAME=$2 5 | 6 | PACE_DIR=$(pwd)/../.. 
7 | FV3NET_DIR=$INSTALL_PREFIX/fv3net 8 | 9 | module load conda 10 | module load intel_compilers/2021.3.0 11 | 12 | conda activate $ENVIRONMENT_NAME 13 | export PYTHONPATH=$FV3NET_DIR/external/fv3viz:$PACE_DIR/external/gt4py/src 14 | -------------------------------------------------------------------------------- /examples/build_scripts/build_gaea.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Example bash script to install Pace to run bare-metal on Gaea's c4 cluster 4 | 5 | set -e -x 6 | 7 | # module load necessary system software 8 | module rm PrgEnv-intel 9 | module load PrgEnv-gnu 10 | module rm gcc 11 | module load gcc/10.3.0 12 | module load boost/1.72.0 13 | module load python/3.9 14 | 15 | # clone Pace and update submodules 16 | git clone --recursive https://github.com/ai2cm/pace 17 | cd pace 18 | 19 | # create a conda environment for pace 20 | conda create -y --name my_name python=3.8 21 | 22 | # enter the environment and update it 23 | conda activate my_name 24 | pip3 install --upgrade pip setuptools wheel 25 | 26 | # install the Pace dependencies, GT4Py, and Pace 27 | pip3 install -r requirements_dev.txt -c constraints.txt 28 | -------------------------------------------------------------------------------- /examples/build_scripts/build_ppan.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | INSTALL_PREFIX=$1 6 | ENVIRONMENT_NAME=$2 7 | 8 | PACE_DIR=$(pwd)/../.. 9 | FV3NET_DIR=$INSTALL_PREFIX/fv3net 10 | 11 | # module load necessary system software 12 | module load conda 13 | module load intel_compilers/2021.3.0 14 | 15 | export MPICC=$(which mpicc) 16 | 17 | CONDA_PREFIX=$INSTALL_PREFIX/conda 18 | conda config --add pkgs_dirs $CONDA_PREFIX/pkgs 19 | conda config --add envs_dirs $CONDA_PREFIX/envs 20 | 21 | # enter the pace directory 22 | cd $PACE_DIR 23 | 24 | # create a conda environment with cartopy and its dependencies installed 25 | conda create -c conda-forge -y --name $ENVIRONMENT_NAME python=3.8 matplotlib==3.5.2 cartopy==0.18.0 26 | 27 | # enter the environment and update it 28 | conda activate $ENVIRONMENT_NAME 29 | pip3 install --upgrade --no-cache-dir pip setuptools wheel 30 | 31 | # install the Pace dependencies, GT4Py, and Pace 32 | pip3 install --no-cache-dir -r requirements_dev.txt -c constraints.txt 33 | 34 | # clone fv3net 35 | git clone https://github.com/ai2cm/fv3net.git $FV3NET_DIR 36 | 37 | # install jupyter and ipyparallel 38 | pip3 install --no-cache-dir \ 39 | ipyparallel==8.4.1 \ 40 | jupyterlab==3.4.4 \ 41 | jupyterlab_code_formatter==1.5.2 \ 42 | isort==5.10.1 \ 43 | black==22.3.0 44 | 45 | # install vcm 46 | python3 -m pip install $FV3NET_DIR/external/vcm 47 | -------------------------------------------------------------------------------- /examples/notebooks/units_config.py: -------------------------------------------------------------------------------- 1 | units = { 2 | "area": "m2", 3 | "coord-deg": "degrees", 4 | "coord-rad": "radian", 5 | "courant": "", 6 | "dist": "m", 7 | "mass": "kg", 8 | "pressure": "Pa", 9 | "streamfunction": "kg/m/s", 10 | "tracer": "", 11 | "wind": "m/s", 12 | } 13 | -------------------------------------------------------------------------------- /external/daint_venv/LICENSE.txt: -------------------------------------------------------------------------------- 1 | BSD License 2 | 3 | Copyright (c) 2019, Vulcan Technologies LLC 4 | All rights reserved. 
5 | 6 | Redistribution and use in source and binary forms, with or without modification, 7 | are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, this 13 | list of conditions and the following disclaimer in the documentation and/or 14 | other materials provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 | IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 20 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 23 | OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 24 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 25 | OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /external/daint_venv/README.md: -------------------------------------------------------------------------------- 1 | # Standard Python environment on Daint 2 | 3 | This repo contains the definition of the standard Python virtual environment 4 | on Piz Daint. 5 | 6 | Note: This repo is most probably of little general relevance if you are not working on the climate modeling team or on the Piz Daint supercomputer at CSCS in Switzerland. If you happen to want to use this repo for some purpose, it's probably best to reach out to someone on the climate modeling team to get help for achieving what you'd like to achieve. 
7 | -------------------------------------------------------------------------------- /external/daint_venv/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | BUILDENV_DIR=$SCRIPT_DIR/../../buildenv 5 | 6 | VERSION=vcm_1.0 7 | env_file=env.daint.sh 8 | src_dir=$(pwd) 9 | 10 | # module environment 11 | source ${BUILDENV_DIR}/machineEnvironment.sh 12 | source ${BUILDENV_DIR}/${env_file} 13 | 14 | # echo commands and stop on error 15 | set -e 16 | set -x 17 | 18 | dst_dir=${1:-${installdir}/venv/${VERSION}} 19 | wheeldir=${2:-${installdir}/wheeldir} 20 | save_wheel=${3:-false}  # whether to also save built wheels (default: false) 21 | 22 | # delete any pre-existing venv directories 23 | if [ -d ${dst_dir} ] ; then 24 | /bin/rm -rf ${dst_dir} 25 | fi 26 | 27 | # setup virtual env 28 | python3 -m venv ${dst_dir} 29 | source ${dst_dir}/bin/activate 30 | python3 -m pip install --upgrade pip 31 | python3 -m pip install --upgrade wheel 32 | 33 | # installation of standard packages that are backend specific 34 | if [ "$save_wheel" = true ]; then 35 | python3 -m pip wheel --wheel-dir=$wheeldir cupy Cython clang-format 36 | fi 37 | python3 -m pip install --find-links=$wheeldir cupy Cython clang-format 38 | 39 | python3 -m pip install ${installdir}/mpi4py/mpi4py-3.1.0a0-cp38-cp38-linux_x86_64.whl 40 | 41 | # deactivate virtual environment 42 | deactivate 43 | 44 | # echo module environment 45 | echo "Note: this virtual env has been created on `hostname`." 46 | cat ${BUILDENV_DIR}/${env_file} ${dst_dir}/bin/activate > ${dst_dir}/bin/activate~ 47 | mv ${dst_dir}/bin/activate~ ${dst_dir}/bin/activate 48 | 49 | 50 | exit 0 51 | -------------------------------------------------------------------------------- /fv3core/.jenkins/actions/get_test_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | for dataset in c12_6ranks_standard c12_54ranks_standard c128_6ranks_baroclinic ; do 4 | EXPERIMENT=${dataset} make get_test_data 5 | done 6 | -------------------------------------------------------------------------------- /fv3core/.jenkins/actions/run_parallel_regression_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | BACKEND=$1 4 | EXPNAME=$2 5 | export TEST_ARGS="${EXTRA_TEST_ARGS} -v -s -rsx --backend=${BACKEND} " 6 | 7 | # sync the test data 8 | make get_test_data 9 | 10 | export CPPFLAGS="${CPPFLAGS} -Wno-unused-but-set-variable" 11 | 12 | if [ ${python_env} == "virtualenv" ]; then 13 | CONTAINER_CMD="" MPIRUN_ARGS="" DEV=n make savepoint_tests_mpi 14 | TARGET=init CONTAINER_CMD="" MPIRUN_ARGS="" DEV=n make savepoint_tests_mpi 15 | else 16 | DEV=n make savepoint_tests_mpi 17 | TARGET=init DEV=n make savepoint_tests_mpi 18 | fi 19 | export TEST_ARGS="${TEST_ARGS} --compute_grid" 20 | if [ ${python_env} == "virtualenv" ]; then 21 | CONTAINER_CMD="" MPIRUN_ARGS="" DEV=n make savepoint_tests_mpi 22 | TARGET=init CONTAINER_CMD="" MPIRUN_ARGS="" DEV=n make savepoint_tests_mpi 23 | else 24 | DEV=n make savepoint_tests_mpi 25 | TARGET=init DEV=n make savepoint_tests_mpi 26 | fi 27 | -------------------------------------------------------------------------------- /fv3core/.jenkins/actions/run_regression_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | BACKEND=$1 4 | EXPNAME=$2 5 | 
XML_REPORT="sequential_test_results.xml" 6 | export TEST_ARGS="-v -s -rsx --backend=${BACKEND} " 7 | 8 | JENKINS_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../" 9 | 10 | # sync the test data 11 | make get_test_data 12 | 13 | if [ ${python_env} == "virtualenv" ]; then 14 | export TEST_ARGS="${TEST_ARGS} --junitxml=${JENKINS_DIR}/${XML_REPORT}" 15 | export CONTAINER_CMD="srun" 16 | else 17 | export TEST_ARGS="${TEST_ARGS} --junitxml=/.jenkins/${XML_REPORT}" 18 | export VOLUMES="-v ${SCRIPT_DIR}/../:/.jenkins" 19 | fi 20 | 21 | DEV=n make savepoint_tests 22 | -------------------------------------------------------------------------------- /fv3core/.jenkins/actions/test_action.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | echo "${JOB_NAME}-${BUILD_NUMBER}" 4 | echo `pip list` 5 | echo `which python` 6 | -------------------------------------------------------------------------------- /fv3core/FORTRAN_CHANGELOG.md: -------------------------------------------------------------------------------- 1 | Fortran Changelog 2 | ================= 3 | 4 | This document outlines where the source code departs from the patterns or conventions used in the original Fortran. This information is meant to help map between the updated Python source code and the Fortran equivalent. 5 | 6 | d_sw: 7 | - in flux_adjust, "w" is renamed to "q" since the variable refers to any scalar, not just vertical wind 8 | - gx/gy for pt fluxes, fx/fy for delp fluxes have been renamed to pt_x_flux, delp_x_flux and similar for y fluxes 9 | - converted one of the usages of ut to u and dx (ut = u * dx) and similar for vt to v and dy 10 | - the ubke and vbke calculations in Fortran use dt4, which is 0.25 * dt, and dt5, which is 0.5 * dt, in the new code 11 | - in ubke/vbke calculations, renamed ub to ub_contra since it is contravariant wind (similarly for vb) 12 | - in ubke/vbke calculations, renamed ut to uc_contra since it is contravariant wind (similarly for vt) 13 | - renamed one of the usages of ub/vb to vort_x_delta and vort_y_delta, where they hold x and y finite differences of the vort variable 14 | - renamed first usage of ptc to u_contra_dyc and first usage of vort to v_contra_dxc 15 | - in xppm/yppm routines, separated "courant" which was b-wind * dt into dt (timestep) and ub_contra/vb_contra 16 | - a2b_ord4 uses lon for the Fortran code's `grid(:, :, 1)` and lat for `grid(:, :, 2)`, and similarly lon_agrid/lat_agrid for the components of the Fortran code's agrid variable. 17 | -------------------------------------------------------------------------------- /fv3core/examples/standalone/benchmarks/README.md: -------------------------------------------------------------------------------- 1 | # How to get performance numbers 2 | 3 | ## Daint 4 | 5 | ### Arguments 6 | 7 | - timesteps: Number of timesteps to execute (this includes the first one as a warm-up step) 8 | - ranks: Number of ranks to run with 9 | - backend: choice of gt4py backend 10 | - data_path: path to the test data 11 | 12 | ### Constraints 13 | 14 | The data directory is expected to be serialized data (serialized by serialbox). The archive `dat_files.tar.gz` gets unpacked. The serialized data is also expected to have both the `input.nml` as well as the `*.yml` namelists present. 15 | 16 | ### Output 17 | 18 | A `timing.json` file is produced, containing statistics over the ranks for execution time. The first timestep is counted towards `init`, the rest of the timesteps are in `main loop`. 
`Total` is inclusive of the other categories. 19 | 20 | ### Example 21 | 22 | `examples/standalone/benchmarks/run_on_daint.sh 60 6 gtx86` 23 | -------------------------------------------------------------------------------- /fv3core/examples/standalone/runfile/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/fv3core/examples/standalone/runfile/__init__.py -------------------------------------------------------------------------------- /fv3core/pace/fv3core/__init__.py: -------------------------------------------------------------------------------- 1 | from ._config import DynamicalCoreConfig 2 | from .initialization.dycore_state import DycoreState 3 | from .initialization.geos_wrapper import GeosDycoreWrapper 4 | from .stencils.fv_dynamics import DynamicalCore 5 | from .stencils.fv_subgridz import DryConvectiveAdjustment 6 | 7 | 8 | __version__ = "0.2.0" 9 | -------------------------------------------------------------------------------- /fv3core/pace/fv3core/initialization/__init__.py: -------------------------------------------------------------------------------- 1 | from .baroclinic import init_baroclinic_state 2 | from .tropical_cyclone import init_tc_state 3 | -------------------------------------------------------------------------------- /fv3core/pace/fv3core/stencils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/fv3core/pace/fv3core/stencils/__init__.py -------------------------------------------------------------------------------- /fv3core/pace/fv3core/stencils/basic_operations.py: -------------------------------------------------------------------------------- 1 | import gt4py.cartesian.gtscript as gtscript 2 | from gt4py.cartesian.gtscript import PARALLEL, computation, interval 3 | 4 | from pace.dsl.typing import FloatField, FloatFieldIJ 5 | 6 | 7 | def copy_defn(q_in: FloatField, q_out: FloatField): 8 | """Copy q_in to q_out. 
9 | 10 | Args: 11 | q_in: input field 12 | q_out: output field 13 | """ 14 | with computation(PARALLEL), interval(...): 15 | q_out = q_in 16 | 17 | 18 | def adjustmentfactor_stencil_defn(adjustment: FloatFieldIJ, q_out: FloatField): 19 | with computation(PARALLEL), interval(...): 20 | q_out = q_out * adjustment 21 | 22 | 23 | def set_value_defn(q_out: FloatField, value: float): 24 | with computation(PARALLEL), interval(...): 25 | q_out = value 26 | 27 | 28 | def adjust_divide_stencil(adjustment: FloatField, q_out: FloatField): 29 | with computation(PARALLEL), interval(...): 30 | q_out = q_out / adjustment 31 | 32 | 33 | @gtscript.function 34 | def sign(a, b): 35 | asignb = abs(a) 36 | if b > 0: 37 | asignb = asignb 38 | else: 39 | asignb = -asignb 40 | return asignb 41 | 42 | 43 | @gtscript.function 44 | def dim(a, b): 45 | diff = a - b if a - b > 0 else 0 46 | return diff 47 | -------------------------------------------------------------------------------- /fv3core/pace/fv3core/stencils/pe_halo.py: -------------------------------------------------------------------------------- 1 | from gt4py.cartesian.gtscript import FORWARD, computation, horizontal, interval, region 2 | 3 | from pace.dsl.typing import FloatField 4 | 5 | 6 | def edge_pe(pe: FloatField, delp: FloatField, ptop: float): 7 | """ 8 | This corresponds to the pe_halo routine in FV3core, 9 | updating the interface pressure from the pressure differences. 10 | 11 | Args: 12 | pe (out): The pressure on the interfaces of the cell 13 | delp (in): The pressure difference between vertical grid cells 14 | ptop (in): The pressure level at the top of the grid 15 | """ 16 | from __externals__ import local_ie, local_is, local_je, local_js 17 | 18 | with computation(FORWARD): 19 | with interval(0, 1): 20 | with horizontal( 21 | region[local_is - 1, local_js : local_je + 1], 22 | region[local_ie + 1, local_js : local_je + 1], 23 | region[local_is - 1 : local_ie + 2, local_js - 1], 24 | region[local_is - 1 : local_ie + 2, local_je + 1], 25 | ): 26 | pe[0, 0, 0] = ptop 27 | with interval(1, None): 28 | with horizontal( 29 | region[local_is - 1, local_js : local_je + 1], 30 | region[local_ie + 1, local_js : local_je + 1], 31 | region[local_is - 1 : local_ie + 2, local_js - 1], 32 | region[local_is - 1 : local_ie + 2, local_je + 1], 33 | ): 34 | pe[0, 0, 0] = pe[0, 0, -1] + delp[0, 0, -1] 35 | -------------------------------------------------------------------------------- /fv3core/pace/fv3core/stencils/ppm.py: -------------------------------------------------------------------------------- 1 | from gt4py.cartesian import gtscript 2 | 3 | from pace.dsl.typing import FloatField 4 | 5 | 6 | # volume-conserving cubic with 2nd derivative = 0 at end point: 7 | # non-monotonic 8 | c1 = -2.0 / 14.0 9 | c2 = 11.0 / 14.0 10 | c3 = 5.0 / 14.0 11 | 12 | # PPM volume mean form 13 | p1 = 7.0 / 12.0 14 | p2 = -1.0 / 12.0 15 | 16 | s11 = 11.0 / 14.0 17 | s14 = 4.0 / 7.0 18 | s15 = 3.0 / 14.0 19 | 20 | 21 | @gtscript.function 22 | def pert_ppm_standard_constraint_fcn(a0: FloatField, al: FloatField, ar: FloatField): 23 | if al * ar < 0.0: 24 | da1 = al - ar 25 | da2 = da1 ** 2 26 | a6da = 3.0 * (al + ar) * da1 27 | if a6da < -da2: 28 | ar = -2.0 * al 29 | elif a6da > da2: 30 | al = -2.0 * ar 31 | else: 32 | # effect of dm=0 included here 33 | al = 0.0 34 | ar = 0.0 35 | return al, ar 36 | 37 | 38 | @gtscript.function 39 | def pert_ppm_positive_definite_constraint_fcn( 40 | a0: FloatField, al: FloatField, ar: FloatField 41 | ): 42 | if a0 <= 0.0: 43 | al = 0.0 44 | ar = 0.0
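        # otherwise, check whether the parabola's interior minimum dips below
        # zero and, if it does, clip the edge perturbations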
45 | else: 46 | a4 = -3.0 * (ar + al) 47 | da1 = ar - al 48 | if abs(da1) < -a4: 49 | fmin = a0 + 0.25 / a4 * da1 ** 2 + a4 * (1.0 / 12.0) 50 | if fmin < 0.0: 51 | if ar > 0.0 and al > 0.0: 52 | ar = 0.0 53 | al = 0.0 54 | elif da1 > 0.0: 55 | ar = -2.0 * al 56 | else: 57 | al = -2.0 * ar 58 | 59 | return al, ar 60 | -------------------------------------------------------------------------------- /fv3core/pace/fv3core/stencils/temperature_adjust.py: -------------------------------------------------------------------------------- 1 | from gt4py.cartesian.gtscript import PARALLEL, computation, exp, interval, log 2 | 3 | import pace.util.constants as constants 4 | from pace.dsl.typing import FloatField 5 | from pace.fv3core.stencils.basic_operations import sign 6 | 7 | 8 | def apply_diffusive_heating( 9 | delp: FloatField, 10 | delz: FloatField, 11 | cappa: FloatField, 12 | heat_source: FloatField, 13 | pt: FloatField, 14 | delt_time_factor: float, 15 | ): 16 | """ 17 | Adjust air temperature from heating due to vorticity damping. 18 | Heating is limited by delt_max times the length of a timestep, with the 19 | highest levels limited further. 20 | 21 | Args: 22 | delp (in): Pressure thickness of atmosphere layers 23 | delz (in): Vertical thickness of atmosphere layers 24 | cappa (in): R/Cp 25 | heat_source (in): heat source from vorticity damping implied by 26 | energy conservation 27 | pt (inout): Air potential temperature 28 | delt_time_factor (in): scaled time step 29 | """ 30 | with computation(PARALLEL), interval(...): 31 | pkz = exp(cappa / (1.0 - cappa) * log(constants.RDG * delp / delz * pt)) 32 | pkz = (constants.RDG * delp / delz * pt) ** (cappa / (1.0 - cappa)) 33 | dtmp = heat_source / (constants.CV_AIR * delp) 34 | with computation(PARALLEL): 35 | with interval(0, 1): 36 | deltmin = sign(min(delt_time_factor * 0.1, abs(dtmp)), dtmp) 37 | pt = pt + deltmin / pkz 38 | with interval(1, 2): 39 | deltmin = sign(min(delt_time_factor * 0.5, abs(dtmp)), dtmp) 40 | pt = pt + deltmin / pkz 41 | with interval(2, None): 42 | deltmin = sign(min(delt_time_factor, abs(dtmp)), dtmp) 43 | pt = pt + deltmin / pkz 44 | -------------------------------------------------------------------------------- /fv3core/pace/fv3core/testing/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: F401 2 | from .map_single import MapSingleFactory 3 | from .translate_dyncore import TranslateDynCore 4 | from .translate_fvdynamics import TranslateDycoreFortranData2Py, TranslateFVDynamics 5 | from .validation import enable_selective_validation 6 | -------------------------------------------------------------------------------- /fv3core/pace/fv3core/testing/map_single.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Tuple 2 | 3 | import pace.dsl 4 | import pace.util 5 | from pace.fv3core.stencils.map_single import MapSingle 6 | from pace.util import X_INTERFACE_DIM, Y_INTERFACE_DIM, Z_DIM 7 | 8 | 9 | class MapSingleFactory: 10 | _object_pool: Dict[Tuple[int, int, Tuple[str, ...]], MapSingle] = {} 11 | """Pool of MapSingle objects, keyed by their constructor arguments.""" 12 | 13 | def __init__( 14 | self, 15 | stencil_factory: pace.dsl.StencilFactory, 16 | quantity_factory: pace.util.QuantityFactory, 17 | ): 18 | self.stencil_factory = stencil_factory 19 | self.quantity_factory = quantity_factory 20 | 21 | def __call__( 22 | self, 23 | kord: int, 24 | mode: int, 25 | *args, 26 | **kwargs, 27 | ): 28 | key_tuple = (kord, mode, 
(X_INTERFACE_DIM, Y_INTERFACE_DIM, Z_DIM)) 29 | if key_tuple not in self._object_pool: 30 | self._object_pool[key_tuple] = MapSingle( 31 | self.stencil_factory, self.quantity_factory, *key_tuple 32 | ) 33 | return self._object_pool[key_tuple](*args, **kwargs) 34 | -------------------------------------------------------------------------------- /fv3core/pace/fv3core/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/fv3core/pace/fv3core/utils/__init__.py -------------------------------------------------------------------------------- /fv3core/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """The setup script.""" 5 | 6 | from setuptools import find_namespace_packages, setup 7 | 8 | 9 | with open("README.md", encoding="utf-8") as readme_file: 10 | readme = readme_file.read() 11 | 12 | requirements = [ 13 | "f90nml>=1.1.0", 14 | "gt4py", 15 | "numpy", 16 | "pace-util>=0.4.3", 17 | "pace-stencils", 18 | "pace-dsl", 19 | "xarray", 20 | ] 21 | 22 | test_requirements = ["pytest==5.2.2", "pytest-subtests>=0.3.0", "serialbox"] 23 | 24 | setup( 25 | author="The Allen Institute for Artificial Intelligence", 26 | author_email="jeremym@allenai.org", 27 | python_requires=">=3.8", 28 | classifiers=[ 29 | "Development Status :: 2 - Pre-Alpha", 30 | "Intended Audience :: Developers", 31 | "License :: OSI Approved :: BSD License", 32 | "Natural Language :: English", 33 | "Programming Language :: Python :: 3", 34 | "Programming Language :: Python :: 3.8", 35 | "Programming Language :: Python :: 3.9", 36 | ], 37 | description="fv3core is a gt4py-based FV3 dynamical core for atmospheric models", 38 | install_requires=requirements, 39 | extras_require={}, 40 | license="BSD license", 41 | long_description=readme, 42 | include_package_data=True, 43 | keywords="fv3core", 44 | name="pace-fv3core", 45 | packages=find_namespace_packages(include=["pace.*"]), 46 | setup_requires=[], 47 | test_suite="tests", 48 | tests_require=test_requirements, 49 | url="https://github.com/ai2cm/fv3core", 50 | version="0.2.0", 51 | zip_safe=False, 52 | ) 53 | -------------------------------------------------------------------------------- /fv3core/tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture() 5 | def backend(pytestconfig): 6 | backend = pytestconfig.getoption("backend") 7 | return backend 8 | 9 | 10 | def pytest_addoption(parser): 11 | parser.addoption("--backend", action="store", default="numpy") 12 | parser.addoption("--which_modules", action="store") 13 | parser.addoption("--which_rank", action="store") 14 | parser.addoption("--skip_modules", action="store") 15 | parser.addoption("--print_failures", action="store_true") 16 | parser.addoption("--failure_stride", action="store", default=1) 17 | parser.addoption("--data_path", action="store", default="./") 18 | parser.addoption("--threshold_overrides_file", action="store", default=None) 19 | parser.addoption("--compute_grid", action="store_true") 20 | 21 | 22 | def pytest_configure(config): 23 | # register an additional marker 24 | config.addinivalue_line( 25 | "markers", "sequential(name): mark test as running sequentially on ranks" 26 | ) 27 | config.addinivalue_line( 28 | "markers", "parallel(name): mark test as running in parallel across ranks" 
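        # markers are registered here so pytest does not warn that they are unknown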
29 | ) 30 | config.addinivalue_line( 31 | "markers", 32 | "mock_parallel(name): mark test as running in mock parallel across ranks", 33 | ) 34 | -------------------------------------------------------------------------------- /fv3core/tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | ignore:invalid value encountered in true_divide 4 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/fv3core/tests/savepoint/__init__.py -------------------------------------------------------------------------------- /fv3core/tests/savepoint/conftest.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # This magical series of imports is to de-duplicate the conftest.py file 4 | # between the dycore and physics tests. We can avoid this if we refactor the tests 5 | # to all run from one directory 6 | 7 | import pace.fv3core.testing 8 | 9 | 10 | # this must happen before any classes from fv3core are instantiated 11 | pace.fv3core.testing.enable_selective_validation() 12 | 13 | import pace.stencils.testing.conftest 14 | from pace.stencils.testing.conftest import * # noqa: F403,F401 15 | 16 | from . import translate 17 | 18 | 19 | pace.stencils.testing.conftest.translate = translate # type: ignore 20 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/output/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/fv3core/tests/savepoint/output/.gitkeep -------------------------------------------------------------------------------- /fv3core/tests/savepoint/test_translate.py: -------------------------------------------------------------------------------- 1 | from pace.stencils.testing.test_translate import * # noqa: F403,F401 2 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/overrides/README.md: -------------------------------------------------------------------------------- 1 | # Threshold overrides 2 | 3 | `--threshold_overrides_file` takes a yaml file with error thresholds specified for a specific backend and platform configuration. Currently, two types of error overrides are allowed: maximum error and near zero. 4 | 5 | For maximum error, a blanket `max_error` is specified to override the parent class's relative error threshold. 6 | 7 | For the near zero override, `ignore_near_zero_errors` is specified to allow some fields to pass with higher relative error if the absolute error is very small. It is also possible to define a global near zero value for all remaining fields not specified in `ignore_near_zero_errors`. This is done by specifying `all_other_near_zero=<value>`. 8 | 9 | The override yaml file should have one of the following formats: 10 | 11 | ## One near zero value for all variables 12 | 13 | ``` 14 | Stencil_name: 15 |   - backend: <backend> 16 |     max_error: <value> 17 |     near_zero: <value> 18 |     ignore_near_zero_errors: 19 |       - <var1> 20 |       - <var2> 21 |       - ... 22 | ``` 23 | ## Variable specific near zero value 24 | 25 | ``` 26 | Stencil_name: 27 |   - backend: <backend> 28 |     max_error: <value> 29 |     ignore_near_zero_errors: 30 |       <var1>: <near_zero_value> 31 |       <var2>: <near_zero_value> 32 |     ...
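      # for example (variable names and values here are illustrative):
      #   ua: 1e-10
      #   va: 1e-13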
33 | ``` 34 | 35 | ## [optional] Global near zero value for remaining fields 36 | 37 | ``` 38 | Stencil_name: 39 |   - backend: <backend> 40 |     max_error: <value> 41 |     ignore_near_zero_errors: 42 |       <var1>: <near_zero_value> 43 |       <var2>: <near_zero_value> 44 |     all_other_near_zero: <global_value> 45 |     ... 46 | ``` 47 | 48 | where fields other than `<var1>` and `<var2>` will use `<global_value>`. 49 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_d2a2c_vect.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.util 3 | from pace.fv3core.stencils.d2a2c_vect import DGrid2AGrid2CGridVectors 4 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 5 | 6 | 7 | class TranslateD2A2C_Vect(TranslateDycoreFortranData2Py): 8 | def __init__( 9 | self, 10 | grid, 11 | namelist: pace.util.Namelist, 12 | stencil_factory: pace.dsl.StencilFactory, 13 | ): 14 | super().__init__(grid, namelist, stencil_factory) 15 | dord4 = True 16 | self.stencil_factory = stencil_factory 17 | self.namelist = namelist # type: ignore 18 | self.compute_func = DGrid2AGrid2CGridVectors( # type: ignore 19 | self.stencil_factory, 20 | self.grid.quantity_factory, 21 | self.grid.grid_data, 22 | self.grid.nested, 23 | self.namelist.grid_type, 24 | dord4, 25 | ) 26 | self.in_vars["data_vars"] = { 27 | "uc": {}, 28 | "vc": {}, 29 | "u": {}, 30 | "v": {}, 31 | "ua": {}, 32 | "va": {}, 33 | "utc": {}, 34 | "vtc": {}, 35 | } 36 | self.out_vars = { 37 | "uc": grid.x3d_domain_dict(), 38 | "vc": grid.y3d_domain_dict(), 39 | "ua": {}, 40 | "va": {}, 41 | "utc": {}, 42 | "vtc": {}, 43 | } 44 | # TODO: This seems to be needed primarily for the edge_interpolate_4 45 | # methods, can we rejigger the order of operations to make it match to 46 | # more precision?
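        # per-savepoint relative-error threshold, loosened from the default (see TODO above)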
47 | self.max_error = 2e-10 48 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_del2cubed.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict 2 | 3 | import pace.dsl 4 | import pace.util 5 | from pace.fv3core.stencils.del2cubed import HyperdiffusionDamping 6 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 7 | 8 | 9 | class TranslateDel2Cubed(TranslateDycoreFortranData2Py): 10 | def __init__( 11 | self, 12 | grid, 13 | namelist: pace.util.Namelist, 14 | stencil_factory: pace.dsl.StencilFactory, 15 | ): 16 | super().__init__(grid, namelist, stencil_factory) 17 | self.in_vars["data_vars"] = {"qdel": {}} 18 | self.in_vars["parameters"] = ["nmax", "cd"] 19 | self.out_vars: Dict[str, Any] = {"qdel": {}} 20 | self.stencil_factory = stencil_factory 21 | 22 | def compute_from_storage(self, inputs): 23 | hyperdiffusion = HyperdiffusionDamping( 24 | self.stencil_factory, 25 | quantity_factory=self.grid.quantity_factory, 26 | damping_coefficients=self.grid.damping_coefficients, 27 | rarea=self.grid.rarea, 28 | nmax=inputs.pop("nmax"), 29 | ) 30 | hyperdiffusion(**inputs) 31 | return inputs 32 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_del6vtflux.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.fv3core.stencils.delnflux as delnflux 3 | import pace.util 4 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 5 | 6 | 7 | class TranslateDel6VtFlux(TranslateDycoreFortranData2Py): 8 | def __init__( 9 | self, 10 | grid, 11 | namelist: pace.util.Namelist, 12 | stencil_factory: pace.dsl.StencilFactory, 13 | ): 14 | super().__init__(grid, namelist, stencil_factory) 15 | fxstat = grid.x3d_domain_dict() 16 | fxstat.update({"serialname": "fx2"}) 17 | fystat = grid.y3d_domain_dict() 18 | fystat.update({"serialname": "fy2"}) 19 | self.in_vars["data_vars"] = { 20 | "q": {"serialname": "wq"}, 21 | "d2": {"serialname": "wd2"}, 22 | "fx2": grid.x3d_domain_dict(), 23 | "fy2": grid.y3d_domain_dict(), 24 | "damp_c": {"serialname": "damp4"}, 25 | "nord_w": {}, 26 | } 27 | self.in_vars["parameters"] = [] 28 | self.out_vars = { 29 | "fx2": grid.x3d_domain_dict(), 30 | "fy2": grid.y3d_domain_dict(), 31 | "d2": {"serialname": "wd2"}, 32 | "q": {"serialname": "wq"}, 33 | } 34 | self.stencil_factory = stencil_factory 35 | 36 | # use_sg -- 'dx', 'dy', 'rdxc', 'rdyc', 'sin_sg needed 37 | def compute(self, inputs): 38 | self.make_storage_data_input_vars(inputs) 39 | nord_col = self.grid.quantity_factory.zeros( 40 | dims=[pace.util.Z_DIM], units="unknown" 41 | ) 42 | nord_col.data[:] = nord_col.np.asarray(inputs.pop("nord_w")) 43 | self.compute_func = delnflux.DelnFluxNoSG( # type: ignore 44 | self.stencil_factory, 45 | self.grid.damping_coefficients, 46 | self.grid.rarea, 47 | nord_col, 48 | ) 49 | self.compute_func(**inputs) 50 | return self.slice_output(inputs) 51 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_last_step.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.fv3core.stencils.moist_cv as moist_cv 3 | import pace.util 4 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 5 | 6 | 7 | class 
TranslateLastStep(TranslateDycoreFortranData2Py): 8 | def __init__( 9 | self, 10 | grid, 11 | namelist: pace.util.Namelist, 12 | stencil_factory: pace.dsl.StencilFactory, 13 | ): 14 | super().__init__(grid, namelist, stencil_factory) 15 | self.compute_func = stencil_factory.from_origin_domain( # type: ignore 16 | moist_cv.moist_pt_last_step, 17 | origin=self.grid.compute_origin(), 18 | domain=self.grid.domain_shape_compute(add=(0, 0, 1)), 19 | ) 20 | self.in_vars["data_vars"] = { 21 | "qvapor": {}, 22 | "qliquid": {}, 23 | "qice": {}, 24 | "qrain": {}, 25 | "qsnow": {}, 26 | "qgraupel": {}, 27 | "pt": {}, 28 | "pkz": {"istart": grid.is_, "jstart": grid.js}, 29 | "gz": { 30 | "serialname": "gz1d", 31 | "kstart": grid.is_, 32 | "axis": 0, 33 | "full_shape": True, 34 | }, 35 | } 36 | self.in_vars["parameters"] = ["r_vir", "dtmp"] 37 | self.out_vars = { 38 | "gz": { 39 | "serialname": "gz1d", 40 | "istart": grid.is_, 41 | "iend": grid.ie, 42 | "jstart": grid.je, 43 | "jend": grid.je, 44 | "kstart": grid.npz - 1, 45 | "kend": grid.npz - 1, 46 | }, 47 | "pt": {}, 48 | } 49 | self.write_vars = ["gz"] 50 | self.stencil_factory = stencil_factory 51 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_nh_p_grad.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.fv3core.stencils.nh_p_grad as NH_P_Grad 3 | import pace.util 4 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 5 | 6 | 7 | class TranslateNH_P_Grad(TranslateDycoreFortranData2Py): 8 | max_error = 5e-10 9 | 10 | def __init__( 11 | self, 12 | grid, 13 | namelist: pace.util.Namelist, 14 | stencil_factory: pace.dsl.StencilFactory, 15 | ): 16 | super().__init__(grid, namelist, stencil_factory) 17 | self.in_vars["data_vars"] = { 18 | "u": {}, 19 | "v": {}, 20 | "pp": {}, 21 | "gz": {}, 22 | "pk3": {}, 23 | "delp": {}, 24 | } 25 | self.in_vars["parameters"] = ["dt", "ptop", "akap"] 26 | self.out_vars = { 27 | "u": grid.y3d_domain_dict(), 28 | "v": grid.x3d_domain_dict(), 29 | "pp": {"kend": grid.npz + 1}, 30 | "gz": {"kend": grid.npz + 1}, 31 | "pk3": {"kend": grid.npz + 1}, 32 | "delp": {}, 33 | } 34 | self.stencil_factory = stencil_factory 35 | self.namelist = namelist # type: ignore 36 | 37 | def compute(self, inputs): 38 | self.compute_func = NH_P_Grad.NonHydrostaticPressureGradient( # type: ignore 39 | self.stencil_factory, 40 | self.grid.quantity_factory, 41 | grid_data=self.grid.grid_data, 42 | grid_type=self.namelist.grid_type, 43 | ) 44 | self.make_storage_data_input_vars(inputs) 45 | self.compute_func(**inputs) 46 | return self.slice_output(inputs) 47 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_pe_halo.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.util 3 | from pace.fv3core.stencils import pe_halo 4 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 5 | 6 | 7 | class PE_Halo_Wrapper: 8 | def __init__(self, stencil_factory) -> None: 9 | ax_offsets_pe = stencil_factory.grid_indexing.axis_offsets( 10 | stencil_factory.grid_indexing.origin_full(), 11 | stencil_factory.grid_indexing.domain_full(add=(0, 0, 1)), 12 | ) 13 | self._stencil = stencil_factory.from_origin_domain( 14 | pe_halo.edge_pe, 15 | origin=stencil_factory.grid_indexing.origin_full(), 16 | domain=stencil_factory.grid_indexing.domain_full(add=(0, 0, 
1)), 17 | externals={**ax_offsets_pe}, 18 | skip_passes=("PruneKCacheFills",), 19 | ) 20 | 21 | def __call__(self, pe, delp, ptop): 22 | self._stencil(pe, delp, ptop) 23 | 24 | 25 | class TranslatePE_Halo(TranslateDycoreFortranData2Py): 26 | def __init__( 27 | self, 28 | grid, 29 | namelist: pace.util.Namelist, 30 | stencil_factory: pace.dsl.StencilFactory, 31 | ): 32 | 33 | super().__init__(grid, namelist, stencil_factory) 34 | self.in_vars["data_vars"] = { 35 | "pe": { 36 | "istart": grid.is_ - 1, 37 | "iend": grid.ie + 1, 38 | "jstart": grid.js - 1, 39 | "jend": grid.je + 1, 40 | "kend": grid.npz + 1, 41 | "kaxis": 1, 42 | }, 43 | "delp": {}, 44 | } 45 | self.in_vars["parameters"] = ["ptop"] 46 | self.out_vars = {"pe": self.in_vars["data_vars"]["pe"]} 47 | self.stencil_factory = stencil_factory 48 | stencil_class = PE_Halo_Wrapper(self.stencil_factory) 49 | self.compute_func = stencil_class # type: ignore 50 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_pk3_halo.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.util 3 | from pace.fv3core.stencils.pk3_halo import PK3Halo 4 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 5 | 6 | 7 | class TranslatePK3_Halo(TranslateDycoreFortranData2Py): 8 | def __init__( 9 | self, 10 | grid, 11 | namelist: pace.util.Namelist, 12 | stencil_factory: pace.dsl.StencilFactory, 13 | ): 14 | super().__init__(grid, namelist, stencil_factory) 15 | self.stencil_factory = stencil_factory 16 | self.compute_func = PK3Halo( # type: ignore 17 | self.stencil_factory, self.grid.quantity_factory 18 | ) 19 | self.in_vars["data_vars"] = {"pk3": {}, "delp": {}} 20 | self.in_vars["parameters"] = ["akap", "ptop"] 21 | self.out_vars = {"pk3": {"kend": grid.npz + 1}} 22 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_pressureadjustedtemperature_nonhydrostatic.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.fv3core 3 | import pace.util 4 | from pace.fv3core.stencils import temperature_adjust 5 | from pace.fv3core.stencils.dyn_core import get_nk_heat_dissipation 6 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 7 | 8 | 9 | class TranslatePressureAdjustedTemperature_NonHydrostatic( 10 | TranslateDycoreFortranData2Py 11 | ): 12 | def __init__( 13 | self, 14 | grid, 15 | namelist: pace.util.Namelist, 16 | stencil_factory: pace.dsl.StencilFactory, 17 | ): 18 | super().__init__(grid, namelist, stencil_factory) 19 | dycore_config = pace.fv3core.DynamicalCoreConfig.from_namelist(namelist) 20 | self.namelist = dycore_config 21 | n_adj = get_nk_heat_dissipation( 22 | config=dycore_config.d_grid_shallow_water, 23 | npz=grid.grid_indexing.domain[2], 24 | ) 25 | self.compute_func = stencil_factory.from_origin_domain( # type: ignore 26 | temperature_adjust.apply_diffusive_heating, 27 | origin=stencil_factory.grid_indexing.origin_compute(), 28 | domain=stencil_factory.grid_indexing.restrict_vertical( 29 | nk=n_adj 30 | ).domain_compute(), 31 | ) 32 | self.in_vars["data_vars"] = { 33 | "cappa": {}, 34 | "delp": {}, 35 | "delz": {}, 36 | "pt": {}, 37 | "heat_source": {"serialname": "heat_source_dyn"}, 38 | } 39 | self.in_vars["parameters"] = ["bdt"] 40 | self.out_vars = {"pt": {}} 41 | self.stencil_factory = stencil_factory 42 | 43 | def 
compute_from_storage(self, inputs): 44 | inputs["delt_time_factor"] = abs(inputs["bdt"] * self.namelist.delt_max) 45 | del inputs["bdt"] 46 | self.compute_func(**inputs) 47 | return inputs 48 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_qsinit.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import pace.dsl 4 | import pace.dsl.gt4py_utils as utils 5 | import pace.fv3core.stencils.saturation_adjustment as satadjust 6 | import pace.util 7 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 8 | 9 | 10 | class TranslateQSInit(TranslateDycoreFortranData2Py): 11 | def __init__( 12 | self, 13 | grid, 14 | namelist: pace.util.Namelist, 15 | stencil_factory: pace.dsl.StencilFactory, 16 | ): 17 | super().__init__(grid, namelist, stencil_factory) 18 | self.in_vars["data_vars"] = { 19 | "table": {}, 20 | "table2": {}, 21 | "tablew": {}, 22 | "des2": {}, 23 | "desw": {}, 24 | } 25 | self.out_vars = self.in_vars["data_vars"] 26 | self.maxshape = (1, 1, satadjust.QS_LENGTH) 27 | self.write_vars = list(self.in_vars["data_vars"].keys()) 28 | self.stencil_factory = stencil_factory 29 | self._compute_q_tables_stencil = self.stencil_factory.from_origin_domain( 30 | satadjust.compute_q_tables, origin=(0, 0, 0), domain=self.maxshape 31 | ) 32 | self.max_error = 1e-12 33 | 34 | def compute(self, inputs): 35 | self.make_storage_data_input_vars(inputs) 36 | index = np.arange(satadjust.QS_LENGTH) 37 | inputs["index"] = utils.make_storage_data( 38 | index, 39 | self.maxshape, 40 | origin=(0, 0, 0), 41 | read_only=False, 42 | backend=self.stencil_factory.backend, 43 | ) 44 | self._compute_q_tables_stencil(**inputs) 45 | utils.device_sync(backend=self.stencil_factory.backend) 46 | for k, v in inputs.items(): 47 | if v.shape == self.maxshape: 48 | inputs[k] = np.squeeze(v) 49 | return inputs 50 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_ray_fast.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.util 3 | from pace.fv3core.stencils.ray_fast import RayleighDamping 4 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 5 | 6 | 7 | class TranslateRay_Fast(TranslateDycoreFortranData2Py): 8 | def __init__( 9 | self, 10 | grid, 11 | namelist: pace.util.Namelist, 12 | stencil_factory: pace.dsl.StencilFactory, 13 | ): 14 | super().__init__(grid, namelist, stencil_factory) 15 | self.compute_func = RayleighDamping( # type: ignore 16 | stencil_factory, 17 | namelist.rf_cutoff, 18 | namelist.tau, 19 | namelist.hydrostatic, 20 | ) 21 | self.in_vars["data_vars"] = { 22 | "u": grid.y3d_domain_dict(), 23 | "v": grid.x3d_domain_dict(), 24 | "w": {}, 25 | "dp": {}, 26 | "pfull": {}, 27 | } 28 | self.in_vars["parameters"] = ["dt", "ptop"] 29 | self.out_vars = { 30 | "u": grid.y3d_domain_dict(), 31 | "v": grid.x3d_domain_dict(), 32 | "w": {}, 33 | } 34 | self.stencil_factory = stencil_factory 35 | -------------------------------------------------------------------------------- /fv3core/tests/savepoint/translate/translate_riem_solver_c.py: -------------------------------------------------------------------------------- 1 | import pace.dsl 2 | import pace.util 3 | from pace.fv3core.stencils.riem_solver_c import NonhydrostaticVerticalSolverCGrid 4 | from pace.fv3core.testing import TranslateDycoreFortranData2Py 5 | 6 | 
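# exercises the C-grid nonhydrostatic vertical solver against serialized Fortran savepoint data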
7 | class TranslateRiem_Solver_C(TranslateDycoreFortranData2Py): 8 | def __init__( 9 | self, 10 | grid, 11 | namelist: pace.util.Namelist, 12 | stencil_factory: pace.dsl.StencilFactory, 13 | ): 14 | super().__init__(grid, namelist, stencil_factory) 15 | self.compute_func = NonhydrostaticVerticalSolverCGrid( # type: ignore 16 | stencil_factory, 17 | quantity_factory=self.grid.quantity_factory, 18 | p_fac=namelist.p_fac, 19 | ) 20 | self.in_vars["data_vars"] = { 21 | "cappa": {}, 22 | "hs": {}, 23 | "w3": {}, 24 | "ptc": {}, 25 | "q_con": {}, 26 | "delpc": {}, 27 | "gz": {}, 28 | "pef": {}, 29 | "ws": {}, 30 | } 31 | self.in_vars["parameters"] = ["dt2", "ptop"] 32 | self.out_vars = {"pef": {"kend": grid.npz}, "gz": {"kend": grid.npz}} 33 | self.max_error = 5e-14 34 | self.stencil_factory = stencil_factory 35 | -------------------------------------------------------------------------------- /physics/README.md: -------------------------------------------------------------------------------- 1 | # pace-physics 2 | A Python implementation of the FV3 GFS physics, built using the GT4Py domain-specific language. 3 | 4 | ## Description 5 | pace-physics is under active development. Currently, the pace-level Docker environment should be used for development. 6 | 7 | ## QuickStart 8 | 9 | Install the requirements with: 10 | ``` 11 | pip install -r ./physics/requirements.txt 12 | ``` 13 | A standard `setup.py` installation script is provided, so the package can be installed with `pip`. 14 | If you plan to modify the source files, install in editable mode with the following command: 15 | ``` 16 | pip install -e ./physics 17 | ``` 18 | -------------------------------------------------------------------------------- /physics/pace/physics/__init__.py: -------------------------------------------------------------------------------- 1 | from ._config import PhysicsConfig 2 | from .physics_state import PhysicsState 3 | from .stencils.microphysics import Microphysics 4 | from .stencils.physics import Physics 5 | 6 | 7 | __all__ = list(key for key in locals().keys() if not key.startswith("_")) 8 | __version__ = "0.2.0" 9 | -------------------------------------------------------------------------------- /physics/pace/physics/functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/physics/pace/physics/functions/__init__.py -------------------------------------------------------------------------------- /physics/pace/physics/stencils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/physics/pace/physics/stencils/__init__.py -------------------------------------------------------------------------------- /physics/pace/physics/stencils/get_phi_fv3.py: -------------------------------------------------------------------------------- 1 | from gt4py.cartesian.gtscript import BACKWARD, PARALLEL, computation, interval 2 | 3 | from pace.dsl.typing import FloatField 4 | from pace.util.constants import ZVIR 5 | 6 | 7 | def get_phi_fv3( 8 | gt0: FloatField, 9 | gq0: FloatField, 10 | del_gz: FloatField, 11 | phii: FloatField, 12 | phil: FloatField, 13 | ): 14 | with computation(PARALLEL), interval(0, -1): 15 | del_gz = del_gz[0, 0, 0] * gt0[0, 0, 0] * (1.0 + ZVIR * max(0.0, gq0[0, 0, 0])) 16 | 17 | with computation(BACKWARD): 18 | with interval(-1, None): 19 | phii = 0.0 20 | with 
interval(-2, -1): 21 | phil = 0.5 * (phii[0, 0, 1] + phii[0, 0, 1] + del_gz[0, 0, 0]) 22 | phii = phii[0, 0, 1] + del_gz[0, 0, 0] 23 | with interval(0, -2): 24 | phil = 0.5 * (phii[0, 0, 1] + phii[0, 0, 1] + del_gz[0, 0, 0]) 25 | phii = phii[0, 0, 1] + del_gz[0, 0, 0] 26 | -------------------------------------------------------------------------------- /physics/pace/physics/stencils/get_prs_fv3.py: -------------------------------------------------------------------------------- 1 | from gt4py.cartesian.gtscript import PARALLEL, computation, interval 2 | 3 | from pace.dsl.typing import FloatField 4 | from pace.util.constants import ZVIR 5 | 6 | 7 | def get_prs_fv3( 8 | phii: FloatField, 9 | prsi: FloatField, 10 | tgrs: FloatField, 11 | qgrs: FloatField, 12 | del_: FloatField, 13 | del_gz: FloatField, 14 | ): 15 | # Passing with integration, but zero padding is different from fortran for del_gz 16 | with computation(PARALLEL), interval(0, -1): 17 | del_ = prsi[0, 0, 1] - prsi[0, 0, 0] 18 | del_gz = (phii[0, 0, 0] - phii[0, 0, 1]) / ( 19 | tgrs[0, 0, 0] * (1.0 + ZVIR * max(0.0, qgrs[0, 0, 0])) 20 | ) 21 | -------------------------------------------------------------------------------- /physics/setup.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from setuptools import find_namespace_packages, setup 4 | 5 | 6 | with open("README.md", encoding="utf-8") as readme_file: 7 | readme = readme_file.read() 8 | 9 | setup_requirements: List[str] = [] 10 | 11 | requirements = [ 12 | "f90nml>=1.1.0", 13 | "gt4py", 14 | "numpy", 15 | "pace-util>=0.4.3", 16 | "pace-stencils", 17 | "pace-dsl", 18 | "xarray", 19 | ] 20 | 21 | test_requirements = ["pytest"] 22 | 23 | setup( 24 | author="Vulcan Technologies LLC", 25 | author_email="elynnw@vulcan.com", 26 | python_requires=">=3.8", 27 | classifiers=[ 28 | "Development Status :: 2 - Pre-Alpha", 29 | "Intended Audience :: Developers", 30 | "License :: OSI Approved :: BSD License", 31 | "Natural Language :: English", 32 | "Programming Language :: Python :: 3", 33 | "Programming Language :: Python :: 3.8", 34 | "Programming Language :: Python :: 3.9", 35 | ], 36 | description="pace-physics is a gt4py-based physical parameterization " 37 | "for atmospheric models", 38 | install_requires=requirements, 39 | extras_require={}, 40 | license="BSD license", 41 | long_description=readme, 42 | include_package_data=True, 43 | name="pace-physics", 44 | packages=find_namespace_packages(include=["pace.*"]), 45 | setup_requires=[], 46 | test_suite="tests", 47 | tests_require=test_requirements, 48 | url="https://github.com/ai2cm/pace", 49 | version="0.2.0", 50 | zip_safe=False, 51 | ) 52 | -------------------------------------------------------------------------------- /physics/tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture() 5 | def backend(pytestconfig): 6 | backend = pytestconfig.getoption("backend") 7 | return backend 8 | 9 | 10 | def pytest_addoption(parser): 11 | parser.addoption("--backend", action="store", default="numpy") 12 | parser.addoption("--which_modules", action="store") 13 | parser.addoption("--which_rank", action="store") 14 | parser.addoption("--skip_modules", action="store") 15 | parser.addoption("--print_failures", action="store_true") 16 | parser.addoption("--failure_stride", action="store", default=1) 17 | parser.addoption("--data_path", action="store", default="./") 18 | 
parser.addoption("--threshold_overrides_file", action="store", default=None) 19 | parser.addoption("--compute_grid", action="store_true") 20 | 21 | 22 | def pytest_configure(config): 23 | # register an additional marker 24 | config.addinivalue_line( 25 | "markers", "sequential(name): mark test as running sequentially on ranks" 26 | ) 27 | config.addinivalue_line( 28 | "markers", "parallel(name): mark test as running in parallel across ranks" 29 | ) 30 | config.addinivalue_line( 31 | "markers", 32 | "mock_parallel(name): mark test as running in mock parallel across ranks", 33 | ) 34 | -------------------------------------------------------------------------------- /physics/tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | ignore:invalid value encountered in true_divide 4 | -------------------------------------------------------------------------------- /physics/tests/savepoint/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/physics/tests/savepoint/__init__.py -------------------------------------------------------------------------------- /physics/tests/savepoint/conftest.py: -------------------------------------------------------------------------------- 1 | # This magical series of imports is to de-duplicate the conftest.py file 2 | # between the dycore and physics tests. We can avoid this if we refactor the tests 3 | # to all run from one directory 4 | import pace.stencils.testing.conftest 5 | from pace.stencils.testing.conftest import * # noqa: F403,F401 6 | 7 | from . import translate 8 | 9 | 10 | pace.stencils.testing.conftest.translate = translate # type: ignore 11 | -------------------------------------------------------------------------------- /physics/tests/savepoint/test_translate.py: -------------------------------------------------------------------------------- 1 | from pace.stencils.testing.test_translate import * # noqa: F403,F401 2 | -------------------------------------------------------------------------------- /physics/tests/savepoint/translate/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: F401 2 | from pace.stencils.testing.translate_update_dwind_phys import TranslateUpdateDWindsPhys 3 | 4 | from .translate_atmos_phy_statein import TranslateAtmosPhysDriverStatein 5 | from .translate_driver import TranslateDriver 6 | from .translate_fillgfs import TranslateFillGFS 7 | from .translate_fv_update_phys import TranslateFVUpdatePhys 8 | from .translate_gfs_physics_driver import TranslateGFSPhysicsDriver 9 | from .translate_microphysics import TranslateMicroph 10 | from .translate_phifv3 import TranslatePhiFV3 11 | from .translate_prsfv3 import TranslatePrsFV3 12 | from .translate_update_pressure_sfc_winds_phys import ( 13 | TranslatePhysUpdatePressureSurfaceWinds, 14 | ) 15 | from .translate_update_tracers_phys import TranslatePhysUpdateTracers 16 | -------------------------------------------------------------------------------- /physics/tests/savepoint/translate/overrides/baroclinic.yaml: -------------------------------------------------------------------------------- 1 | Driver: 2 | - ignore_near_zero_errors: 3 | uc: 1e-13 4 | vc: 1e-13 5 | mfxd: 1e-3 6 | mfyd: 1e-3 7 | cxd: 1e-3 8 | cyd: 1e-3 9 | 10 | GFSPhysicsDriver: 11 | - backend: numpy 12 | max_error: 1e-10 13 | - backend: gt:cpu_ifirst 
14 | max_error: 1e-10 15 | - backend: cuda 16 | cuda_no_fma: true 17 | - backend: gt:gpu 18 | cuda_no_fma: true 19 | - backend: dace:gpu 20 | cuda_no_fma: true 21 | ignore_near_zero_errors: 22 | IPD_rain: 1e-12 23 | IPD_qice: 1e-12 24 | IPD_qgraupel: 1e-12 25 | 26 | # On GPU, u/v wind tendencies seem to diverge in computation from the numpy/fortran 27 | # equivalents due to the use of fused multiply-add in the update stencil. 28 | # For validation we deactivate it (for validation only!) 29 | Microph: 30 | - backend: numpy 31 | max_error: 2e-10 32 | - backend: gt:cpu_ifirst 33 | max_error: 6e-11 34 | - backend: cuda 35 | cuda_no_fma: true 36 | - backend: gt:gpu 37 | max_error: 2.2e-8 38 | cuda_no_fma: true 39 | ignore_near_zero_errors: 40 | mph_ql_dt: 1e-8 41 | mph_qr_dt: 1e-9 42 | mph_qg_dt: 1e-18 43 | - backend: dace:gpu 44 | max_error: 2.2e-8 45 | cuda_no_fma: true 46 | ignore_near_zero_errors: 47 | mph_ql_dt: 1e-8 48 | mph_qr_dt: 1e-9 49 | mph_qg_dt: 1e-18 50 | mph_udt: 1e-8 51 | mph_vdt: 1e-8 52 | -------------------------------------------------------------------------------- /physics/tests/savepoint/translate/overrides/standard.yaml: -------------------------------------------------------------------------------- 1 | CS_Profile_2d: 2 | - backend: gt:gpu 3 | max_error: 2.5e-9 4 | near_zero: 1.5e-14 5 | - backend: cuda 6 | max_error: 2.5e-9 7 | near_zero: 1.5e-14 8 | 9 | CS_Profile_2d-2: 10 | - backend: gt:gpu 11 | max_error: 3e-8 12 | near_zero: 1.5e-14 13 | - backend: cuda 14 | max_error: 3e-8 15 | near_zero: 1.5e-14 16 | 17 | Fillz: 18 | - backend: gt:gpu 19 | max_error: 5e-6 20 | 21 | MapN_Tracer_2d: 22 | - backend: numpy 23 | platform: docker 24 | max_error: 9e-9 # 48_6ranks 25 | 26 | Riem_Solver3: 27 | - backend: gt:gpu 28 | max_error: 5e-6 29 | - backend: cuda 30 | max_error: 5e-6 31 | - platform: metal 32 | backend: numpy 33 | max_error: 1e-11 # 48_6ranks 34 | 35 | Remapping: 36 | - backend: gt:gpu 37 | max_error: 1e-9 38 | near_zero: 5e-6 39 | ignore_near_zero_errors: 40 | - q_con 41 | - tracers 42 | 43 | UpdateDzC: 44 | - backend: gt:gpu 45 | max_error: 5e-10 46 | near_zero: 4.5e-15 47 | ignore_near_zero_errors: 48 | - ws 49 | - backend: cuda 50 | max_error: 5e-10 51 | near_zero: 4.5e-15 52 | ignore_near_zero_errors: 53 | - ws 54 | 55 | UpdateDzD: 56 | - backend: gt:gpu 57 | max_error: 5e-10 58 | ignore_near_zero_errors: 59 | - wsd 60 | - backend: cuda 61 | max_error: 5e-10 62 | ignore_near_zero_errors: 63 | - wsd 64 | 65 | Microph: 66 | - backend: numpy 67 | max_error: 2e-9 68 | - backend: gt:cpu_ifirst 69 | max_error: 1e-12 70 | - backend: cuda 71 | cuda_no_fma: true 72 | - backend: gt:gpu 73 | max_error: 2.2e-8 74 | cuda_no_fma: true 75 | ignore_near_zero_errors: 76 | mph_ql_dt: 1e-8 77 | mph_qr_dt: 1e-9 78 | mph_qg_dt: 1e-18 79 | -------------------------------------------------------------------------------- /physics/tests/savepoint/translate/translate_fillgfs.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import pace.dsl.gt4py_utils as utils 4 | from pace.stencils.testing.translate_physics import TranslatePhysicsFortranData2Py 5 | from pace.stencils.update_atmos_state import fill_gfs_delp 6 | from pace.util.utils import safe_assign_array 7 | 8 | 9 | class TranslateFillGFS(TranslatePhysicsFortranData2Py): 10 | def __init__(self, grid, namelist, stencil_factory): 11 | super().__init__(grid, namelist, stencil_factory) 12 | 13 | self.in_vars["data_vars"] = { 14 | "pe": {"serialname": "IPD_prsi"}, 15 | "q": 
{"serialname": "IPD_gq0"}, 16 | } 17 | self.out_vars = { 18 | "q": {"serialname": "IPD_qvapor", "kend": namelist.npz - 1}, 19 | } 20 | self.grid_indexing = stencil_factory.grid_indexing 21 | self.compute_func = stencil_factory.from_origin_domain( 22 | fill_gfs_delp, 23 | origin=self.grid_indexing.origin_full(), 24 | domain=self.grid_indexing.domain_full(add=(0, 0, 1)), 25 | ) 26 | 27 | def compute(self, inputs): 28 | self.make_storage_data_input_vars(inputs) 29 | inputs["q"] = inputs["q"]["qvapor"] 30 | inputs["q_min"] = 1.0e-9 31 | shape = self.grid_indexing.domain_full(add=(1, 1, 1)) 32 | delp = np.zeros(shape) 33 | safe_assign_array( 34 | delp[:, :, :-1], inputs["pe"][:, :, 1:] - inputs["pe"][:, :, :-1] 35 | ) 36 | delp = utils.make_storage_data( 37 | delp, 38 | origin=self.grid_indexing.origin_full(), 39 | shape=shape, 40 | backend=self.stencil_factory.backend, 41 | ) 42 | del inputs["pe"] 43 | inputs["delp"] = delp 44 | self.compute_func(**inputs) 45 | return self.slice_output(inputs) 46 | -------------------------------------------------------------------------------- /physics/tests/savepoint/translate/translate_phifv3.py: -------------------------------------------------------------------------------- 1 | from pace.physics.stencils.get_phi_fv3 import get_phi_fv3 2 | from pace.stencils.testing.translate_physics import TranslatePhysicsFortranData2Py 3 | 4 | 5 | class TranslatePhiFV3(TranslatePhysicsFortranData2Py): 6 | def __init__(self, grid, namelist, stencil_factory): 7 | super().__init__(grid, namelist, stencil_factory) 8 | 9 | self.in_vars["data_vars"] = { 10 | "gt0": {"serialname": "phi_gt0"}, 11 | "gq0": {"serialname": "phi_gq0"}, 12 | "del_gz": { 13 | "serialname": "phi_del_gz", 14 | "in_roll_zero": True, 15 | "out_roll_zero": True, 16 | }, 17 | "phii": {"serialname": "phi_phii"}, 18 | "phil": {"serialname": "phi_phil", "kend": namelist.npz - 1}, 19 | } 20 | self.out_vars = { 21 | "del_gz": self.in_vars["data_vars"]["del_gz"], 22 | "phii": self.in_vars["data_vars"]["phii"], 23 | "phil": self.in_vars["data_vars"]["phil"], 24 | } 25 | self.compute_func = stencil_factory.from_origin_domain( 26 | get_phi_fv3, 27 | origin=stencil_factory.grid_indexing.origin_full(), 28 | domain=stencil_factory.grid_indexing.domain_full(add=(0, 0, 1)), 29 | ) 30 | 31 | def compute(self, inputs): 32 | self.make_storage_data_input_vars(inputs) 33 | inputs["gq0"] = inputs["gq0"]["qvapor"] 34 | self.compute_func(**inputs) 35 | return self.slice_output(inputs) 36 | -------------------------------------------------------------------------------- /physics/tests/savepoint/translate/translate_prsfv3.py: -------------------------------------------------------------------------------- 1 | from pace.physics.stencils.get_prs_fv3 import get_prs_fv3 2 | from pace.stencils.testing.translate_physics import TranslatePhysicsFortranData2Py 3 | 4 | 5 | class TranslatePrsFV3(TranslatePhysicsFortranData2Py): 6 | def __init__(self, grid, namelist, stencil_factory): 7 | super().__init__(grid, namelist, stencil_factory) 8 | 9 | self.in_vars["data_vars"] = { 10 | "phii": {"serialname": "prs_phii"}, 11 | "prsi": {"serialname": "prs_prsi"}, 12 | "tgrs": {"serialname": "prs_tgrs"}, 13 | "qgrs": {"serialname": "prs_qgrs"}, 14 | "del_": {"serialname": "prs_del", "kend": namelist.npz - 1}, 15 | "del_gz": {"serialname": "prs_del_gz", "out_roll_zero": True}, 16 | } 17 | self.out_vars = { 18 | "del_": self.in_vars["data_vars"]["del_"], 19 | "del_gz": self.in_vars["data_vars"]["del_gz"], 20 | } 21 | self.compute_func = 
stencil_factory.from_origin_domain( 22 | get_prs_fv3, 23 | origin=stencil_factory.grid_indexing.origin_full(), 24 | domain=stencil_factory.grid_indexing.domain_full(add=(0, 0, 1)), 25 | ) 26 | 27 | def compute(self, inputs): 28 | self.make_storage_data_input_vars(inputs) 29 | inputs["qgrs"] = inputs["qgrs"]["qvapor"] 30 | self.compute_func(**inputs) 31 | return self.slice_output(inputs) 32 | -------------------------------------------------------------------------------- /physics/tests/savepoint/translate/translate_update_pressure_sfc_winds_phys.py: -------------------------------------------------------------------------------- 1 | from pace.stencils.fv_update_phys import update_pressure_and_surface_winds 2 | from pace.stencils.testing.translate_physics import TranslatePhysicsFortranData2Py 3 | from pace.util.constants import KAPPA 4 | 5 | 6 | class TranslatePhysUpdatePressureSurfaceWinds(TranslatePhysicsFortranData2Py): 7 | def __init__(self, grid, namelist, stencil_factory): 8 | super().__init__(grid, namelist, stencil_factory) 9 | self.in_vars["data_vars"] = { 10 | "peln": {"dycore": True, "istart": grid.is_, "jstart": grid.js, "kaxis": 1}, 11 | "pk": { 12 | "dycore": True, 13 | }, 14 | "delp": { 15 | "dycore": True, 16 | }, 17 | "pe": { 18 | "dycore": True, 19 | "istart": grid.is_ - 1, 20 | "jstart": grid.js - 1, 21 | "kaxis": 1, 22 | }, 23 | "ps": {"dycore": True}, 24 | "ua": { 25 | "dycore": True, 26 | }, 27 | "va": { 28 | "dycore": True, 29 | }, 30 | "u_srf": { 31 | "dycore": True, 32 | }, 33 | "v_srf": { 34 | "dycore": True, 35 | }, 36 | } 37 | 38 | self.out_vars = { 39 | "pk": self.in_vars["data_vars"]["pk"], 40 | "ps": {"compute": False}, 41 | "u_srf": {}, 42 | "v_srf": {}, 43 | } 44 | origin = stencil_factory.grid_indexing.origin_compute() 45 | domain = stencil_factory.grid_indexing.domain_compute(add=(0, 0, 1)) 46 | self.compute_func = stencil_factory.from_origin_domain( 47 | update_pressure_and_surface_winds, origin=origin, domain=domain 48 | ) 49 | 50 | def compute(self, inputs): 51 | self.make_storage_data_input_vars(inputs) 52 | inputs["KAPPA"] = KAPPA 53 | self.compute_func(**inputs) 54 | out = self.slice_output(inputs) 55 | return out 56 | -------------------------------------------------------------------------------- /physics/tox.ini: -------------------------------------------------------------------------------- 1 | # tox (https://tox.readthedocs.io/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 
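# The test env below installs the sibling pace packages in editable mode,
# pinned by ../constraints.txt.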
5 | 6 | [tox] 7 | envlist = py3 8 | 9 | [testenv:test] 10 | deps = 11 | pytest 12 | pytest-subtests 13 | pytest-cov 14 | -e ../external/gt4py 15 | -e ../util 16 | -e ../stencils 17 | -e ../dsl 18 | -c../constraints.txt 19 | commands = pytest tests/main 20 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 88 3 | target_version = ['py36', 'py37', 'py38'] 4 | -------------------------------------------------------------------------------- /requirements_dev.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | pytest-subtests 3 | pytest-regressions 4 | pytest-profiling 5 | pytest-cov 6 | nbmake 7 | mpi4py 8 | xarray 9 | zarr 10 | dask>=2021.10.0 11 | netCDF4 12 | cftime 13 | fv3config>=0.9.0 14 | dace>=0.14 15 | f90nml>=1.1.0 16 | numpy>=1.15 17 | -e external/gt4py 18 | -e util[dace] 19 | -e stencils 20 | -e dsl 21 | -e physics 22 | -e fv3core 23 | -e driver 24 | -------------------------------------------------------------------------------- /requirements_docs.txt: -------------------------------------------------------------------------------- 1 | recommonmark 2 | sphinx>=1.4 3 | sphinx-argparse 4 | sphinx_rtd_theme 5 | sphinx-gallery 6 | -------------------------------------------------------------------------------- /requirements_lint.txt: -------------------------------------------------------------------------------- 1 | pre-commit 2 | -------------------------------------------------------------------------------- /stencils/pace/stencils/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.2.0" 2 | -------------------------------------------------------------------------------- /stencils/pace/stencils/testing/__init__.py: -------------------------------------------------------------------------------- 1 | from . import parallel_translate, translate 2 | from .parallel_translate import ( 3 | ParallelTranslate, 4 | ParallelTranslate2Py, 5 | ParallelTranslate2PyState, 6 | ParallelTranslateBaseSlicing, 7 | ) 8 | from .savepoint import SavepointCase, Translate, dataset_to_dict 9 | from .temporaries import assert_same_temporaries, copy_temporaries 10 | from .translate import ( 11 | TranslateFortranData2Py, 12 | TranslateGrid, 13 | pad_field_in_j, 14 | read_serialized_data, 15 | ) 16 | -------------------------------------------------------------------------------- /stencils/pace/stencils/testing/savepoint.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import os 3 | from typing import Dict, Protocol, Union 4 | 5 | import numpy as np 6 | import xarray as xr 7 | 8 | from .grid import Grid # type: ignore 9 | 10 | 11 | def dataset_to_dict(ds: xr.Dataset) -> Dict[str, Union[np.ndarray, float, int]]: 12 | return { 13 | name: _process_if_scalar(array.values) for name, array in ds.data_vars.items() 14 | } 15 | 16 | 17 | def _process_if_scalar(value: np.ndarray) -> Union[np.ndarray, float, int]: 18 | if len(value.shape) == 0: 19 | return value.item() 20 | else: 21 | return value 22 | 23 | 24 | class Translate(Protocol): 25 | def collect_input_data(self, ds: xr.Dataset) -> dict: 26 | ... 27 | 28 | def compute(self, data: dict): 29 | ... 30 | 31 | 32 | @dataclasses.dataclass 33 | class SavepointCase: 34 | """ 35 | Represents a savepoint with data on one rank. 
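    Pairs the savepoint's -In.nc / -Out.nc datasets (selected by rank and call index) with the translate object and grid used to run the comparison.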
36 | """ 37 | 38 | savepoint_name: str 39 | data_dir: str 40 | rank: int 41 | i_call: int 42 | testobj: Translate 43 | grid: Grid 44 | 45 | def __str__(self): 46 | return f"{self.savepoint_name}-rank={self.rank}-call={self.i_call}" 47 | 48 | @property 49 | def ds_in(self) -> xr.Dataset: 50 | return ( 51 | xr.open_dataset(os.path.join(self.data_dir, f"{self.savepoint_name}-In.nc")) 52 | .isel(rank=self.rank) 53 | .isel(savepoint=self.i_call) 54 | ) 55 | 56 | @property 57 | def ds_out(self) -> xr.Dataset: 58 | return ( 59 | xr.open_dataset( 60 | os.path.join(self.data_dir, f"{self.savepoint_name}-Out.nc") 61 | ) 62 | .isel(rank=self.rank) 63 | .isel(savepoint=self.i_call) 64 | ) 65 | -------------------------------------------------------------------------------- /stencils/pace/stencils/testing/temporaries.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from typing import List 3 | 4 | import numpy as np 5 | 6 | import pace.util 7 | 8 | 9 | def copy_temporaries(obj, max_depth: int) -> dict: 10 | temporaries = {} 11 | attrs = [a for a in dir(obj) if not a.startswith("__")] 12 | for attr_name in attrs: 13 | try: 14 | attr = getattr(obj, attr_name) 15 | except AttributeError: 16 | attr = None 17 | if isinstance(attr, pace.util.Quantity): 18 | temporaries[attr_name] = copy.deepcopy(np.asarray(attr.data)) 19 | elif attr.__class__.__module__.split(".")[0] in ( # type: ignore 20 | "fv3core", 21 | "pace", 22 | ): 23 | if max_depth > 0: 24 | sub_temporaries = copy_temporaries(attr, max_depth - 1) 25 | if len(sub_temporaries) > 0: 26 | temporaries[attr_name] = sub_temporaries 27 | return temporaries 28 | 29 | 30 | def assert_same_temporaries(dict1: dict, dict2: dict): 31 | diffs = _assert_same_temporaries(dict1, dict2) 32 | if len(diffs) > 0: 33 | raise AssertionError(f"{len(diffs)} differing temporaries found: {diffs}") 34 | 35 | 36 | def _assert_same_temporaries(dict1: dict, dict2: dict) -> List[str]: 37 | differences = [] 38 | for attr in dict1: 39 | attr1 = dict1[attr] 40 | attr2 = dict2[attr] 41 | if isinstance(attr1, np.ndarray): 42 | try: 43 | np.testing.assert_almost_equal( 44 | attr1, attr2, err_msg=f"{attr} not equal" 45 | ) 46 | except AssertionError: 47 | differences.append(attr) 48 | else: 49 | sub_differences = _assert_same_temporaries(attr1, attr2) 50 | for d in sub_differences: 51 | differences.append(f"{attr}.{d}") 52 | return differences 53 | -------------------------------------------------------------------------------- /stencils/pace/stencils/testing/translate_update_dwind_phys.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import pace.util 4 | from pace.stencils.testing.translate_physics import TranslatePhysicsFortranData2Py 5 | from pace.stencils.update_dwind_phys import AGrid2DGridPhysics 6 | from pace.util.utils import safe_assign_array 7 | 8 | 9 | class TranslateUpdateDWindsPhys(TranslatePhysicsFortranData2Py): 10 | def __init__(self, grid, namelist, stencil_factory): 11 | super().__init__(grid, namelist, stencil_factory) 12 | self.in_vars["data_vars"] = { 13 | "u": {"dwind": True}, 14 | "u_dt": {"dwind": True}, 15 | "v": {"dwind": True}, 16 | "v_dt": {"dwind": True}, 17 | } 18 | self.out_vars = { 19 | "u": {"dwind": True, "kend": namelist.npz - 1}, 20 | "v": {"dwind": True, "kend": namelist.npz - 1}, 21 | } 22 | self.namelist = namelist 23 | self.stencil_factory = stencil_factory 24 | 25 | def compute(self, inputs): 26 | 
self.make_storage_data_input_vars(inputs)
27 | partitioner = pace.util.TilePartitioner(self.namelist.layout)
28 | self.compute_func = AGrid2DGridPhysics(
29 | self.stencil_factory,
30 | self.grid.quantity_factory,
31 | partitioner,
32 | self.grid.rank,
33 | self.namelist,
34 | grid_info=self.grid.driver_grid_data,
35 | )
36 | self.compute_func(**inputs)
37 | out = {}
38 | # This alloc-then-copy pattern is required to deal transparently with
39 | # arrays that may live on different devices
40 | out["u"] = np.empty_like(inputs["u"][self.grid.y3d_domain_interface()])
41 | out["v"] = np.empty_like(inputs["v"][self.grid.x3d_domain_interface()])
42 | safe_assign_array(out["u"], inputs["u"][self.grid.y3d_domain_interface()])
43 | safe_assign_array(out["v"], inputs["v"][self.grid.x3d_domain_interface()])
44 | return out
45 | -------------------------------------------------------------------------------- /stencils/setup.py: --------------------------------------------------------------------------------
1 | from typing import List
2 | 
3 | from setuptools import find_namespace_packages, setup
4 | 
5 | 
6 | setup_requirements: List[str] = []
7 | 
8 | requirements = ["gt4py", "pace-util", "pace-dsl"]
9 | 
10 | test_requirements: List[str] = []
11 | 
12 | 
13 | setup(
14 | author="Allen Institute for AI",
15 | author_email="elynnw@allenai.org",
16 | python_requires=">=3.8",
17 | classifiers=[
18 | "Development Status :: 2 - Pre-Alpha",
19 | "Intended Audience :: Developers",
20 | "License :: OSI Approved :: BSD License",
21 | "Natural Language :: English",
22 | "Programming Language :: Python :: 3",
23 | "Programming Language :: Python :: 3.8",
24 | "Programming Language :: Python :: 3.9",
25 | ],
26 | install_requires=requirements,
27 | setup_requires=setup_requirements,
28 | tests_require=test_requirements,
29 | name="pace-stencils",
30 | license="BSD license",
31 | packages=find_namespace_packages(include=["pace.*"]),
32 | include_package_data=True,
33 | url="https://github.com/ai2cm/pace",
34 | version="0.2.0",
35 | zip_safe=False,
36 | )
37 | -------------------------------------------------------------------------------- /tests/main/conftest.py: --------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | 
4 | @pytest.fixture()
5 | def backend(pytestconfig):
6 | backend = pytestconfig.getoption("backend")
7 | return backend
8 | 
9 | 
10 | def pytest_addoption(parser):
11 | parser.addoption("--backend", action="store", default="numpy")
12 | -------------------------------------------------------------------------------- /tests/main/driver/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/tests/main/driver/__init__.py -------------------------------------------------------------------------------- /tests/main/driver/test_comm_config.py: --------------------------------------------------------------------------------
1 | import dataclasses
2 | import unittest.mock
3 | 
4 | from pace.driver import CreatesComm, CreatesCommSelector, WriterCommConfig
5 | 
6 | 
7 | @CreatesCommSelector.register("mock")
8 | @dataclasses.dataclass(frozen=True)
9 | class MockCommConfig(CreatesComm):
10 | def __post_init__(self):
11 | object.__setattr__(self, "mock_comm", unittest.mock.MagicMock())  # frozen dataclass
12 | object.__setattr__(self, "cleaned_up", False)
13 | 
14 | def get_comm(self):
15 | assert not self.cleaned_up
16 | return self.mock_comm
17 | 
18 | def cleanup(self):
19 | object.__setattr__(self, "cleaned_up", True)
20 | 
21 | 
22 | def
test_create_comm_writer(): 23 | config_dict = { 24 | "type": "write", 25 | "config": { 26 | "ranks": [0], 27 | }, 28 | } 29 | config = CreatesCommSelector.from_dict(config_dict) 30 | assert isinstance(config, CreatesCommSelector) 31 | assert isinstance(config.config, WriterCommConfig) 32 | -------------------------------------------------------------------------------- /tests/main/driver/test_diagnostics.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import xarray as xr 4 | import yaml 5 | 6 | import pace.driver 7 | import pace.dsl 8 | from pace.driver.run import main 9 | 10 | 11 | DIR = os.path.dirname(os.path.abspath(__file__)) 12 | 13 | 14 | def test_diagnostics_can_be_opened(tmpdir): 15 | with open( 16 | os.path.join(DIR, "../../../driver/examples/configs/baroclinic_c12.yaml"), "r" 17 | ) as f: 18 | driver_config = pace.driver.DriverConfig.from_dict(yaml.safe_load(f)) 19 | diagnostics_path = os.path.join(tmpdir, "output.zarr") 20 | driver_config.diagnostics_config = pace.driver.DiagnosticsConfig( 21 | path=diagnostics_path, 22 | names=["u", "v", "ua", "va", "w", "delp", "pt", "qvapor"], 23 | ) 24 | driver_config.comm_config = pace.driver.NullCommConfig(rank=0, total_ranks=6) 25 | driver_config.dt_atmos = 60 26 | driver_config.minutes = 1 27 | main(driver_config) 28 | xr.open_zarr(diagnostics_path) 29 | -------------------------------------------------------------------------------- /tests/main/driver/test_docs.py: -------------------------------------------------------------------------------- 1 | import doctest 2 | 3 | import pace.driver.registry 4 | 5 | 6 | def test_registry_doc_examples(): 7 | result = doctest.testmod(pace.driver.registry) 8 | assert result.attempted > 0, "No doctests found" 9 | assert result.failed == 0, "doctests failed" 10 | -------------------------------------------------------------------------------- /tests/main/dsl/test_stencil.py: -------------------------------------------------------------------------------- 1 | from gt4py.cartesian.gtscript import PARALLEL, Field, computation, interval 2 | from gt4py.storage import empty, ones 3 | 4 | import pace.dsl 5 | from pace.dsl.stencil import CompilationConfig, GridIndexing 6 | 7 | 8 | def _make_storage( 9 | func, 10 | grid_indexing, 11 | stencil_config: pace.dsl.StencilConfig, 12 | *, 13 | dtype=float, 14 | aligned_index=(0, 0, 0), 15 | ): 16 | return func( 17 | backend=stencil_config.compilation_config.backend, 18 | shape=grid_indexing.domain, 19 | dtype=dtype, 20 | aligned_index=aligned_index, 21 | ) 22 | 23 | 24 | def test_timing_collector(): 25 | grid_indexing = GridIndexing( 26 | domain=(5, 5, 5), 27 | n_halo=2, 28 | south_edge=True, 29 | north_edge=True, 30 | west_edge=True, 31 | east_edge=True, 32 | ) 33 | stencil_config = pace.dsl.StencilConfig( 34 | compilation_config=CompilationConfig(backend="numpy", rebuild=True) 35 | ) 36 | 37 | stencil_factory = pace.dsl.StencilFactory(stencil_config, grid_indexing) 38 | 39 | def func(inp: Field[float], out: Field[float]): 40 | with computation(PARALLEL), interval(...): 41 | out = inp 42 | 43 | test = stencil_factory.from_origin_domain( 44 | func, (0, 0, 0), domain=grid_indexing.domain 45 | ) 46 | 47 | build_report = stencil_factory.build_report(key="parse_time") 48 | assert "func" in build_report 49 | 50 | inp = _make_storage(ones, grid_indexing, stencil_config, dtype=float) 51 | out = _make_storage(empty, grid_indexing, stencil_config, dtype=float) 52 | 53 | test(inp, out) 54 | exec_report = 
stencil_factory.exec_report() 55 | assert "func" in exec_report 56 | -------------------------------------------------------------------------------- /tests/savepoint/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | 6 | DIR = os.path.dirname(os.path.realpath(__file__)) 7 | 8 | 9 | @pytest.fixture() 10 | def backend(pytestconfig): 11 | backend = pytestconfig.getoption("backend") 12 | return backend 13 | 14 | 15 | @pytest.fixture() 16 | def data_path(pytestconfig): 17 | data_path = pytestconfig.getoption("data_path") 18 | return data_path 19 | 20 | 21 | @pytest.fixture() 22 | def threshold_path(pytestconfig): 23 | threshold_path = pytestconfig.getoption("threshold_path") 24 | if threshold_path is None: 25 | threshold_path = os.path.join(DIR, "thresholds") 26 | return threshold_path 27 | 28 | 29 | @pytest.fixture() 30 | def calibrate_thresholds(pytestconfig): 31 | calibrate_thresholds = pytestconfig.getoption("calibrate_thresholds") 32 | return calibrate_thresholds 33 | 34 | 35 | def pytest_addoption(parser): 36 | parser.addoption( 37 | "--backend", action="store", default="numpy", help="gt4py backend name" 38 | ) 39 | parser.addoption( 40 | "--data_path", action="store", default="./", help="location of reference data" 41 | ) 42 | parser.addoption( 43 | "--threshold_path", 44 | action="store", 45 | default=None, 46 | help="directory containing comparison thresholds for tests", 47 | ) 48 | parser.addoption( 49 | "--calibrate_thresholds", 50 | action="store_true", 51 | default=False, 52 | help="re-calibrate error thresholds for comparison to reference", 53 | ) 54 | -------------------------------------------------------------------------------- /util/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea 3 | 4 | # Fortran compilation files 5 | *.o 6 | *.mod 7 | 8 | # Cython c-output 9 | *.c 10 | 11 | # Byte-compiled / optimized / DLL files 12 | __pycache__/ 13 | *.py[cod] 14 | *$py.class 15 | 16 | # C extensions 17 | *.so 18 | 19 | # Distribution / packaging 20 | .Python 21 | build/ 22 | develop-eggs/ 23 | dist/ 24 | downloads/ 25 | eggs/ 26 | .eggs/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
38 | *.manifest 39 | *.spec 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Unit test / coverage reports 46 | htmlcov/ 47 | .tox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # pyenv 82 | .python-version 83 | 84 | # celery beat schedule file 85 | celerybeat-schedule 86 | 87 | # SageMath parsed files 88 | *.sage.py 89 | 90 | # dotenv 91 | .env 92 | 93 | # virtualenv 94 | .venv 95 | venv/ 96 | ENV/ 97 | 98 | # Spyder project settings 99 | .spyderproject 100 | .spyproject 101 | 102 | # Rope project settings 103 | .ropeproject 104 | 105 | # mkdocs documentation 106 | /site 107 | 108 | # mypy 109 | .mypy_cache/ 110 | .vscode 111 | -------------------------------------------------------------------------------- /util/.jenkins/actions/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -f 2 | 3 | set -x -e 4 | 5 | ################################################## 6 | # functions 7 | ################################################## 8 | 9 | exitError() 10 | { 11 | echo "ERROR $1: $3" 1>&2 12 | echo "ERROR LOCATION=$0" 1>&2 13 | echo "ERROR LINE=$2" 1>&2 14 | exit $1 15 | } 16 | 17 | showUsage() 18 | { 19 | echo "usage: `basename $0` [-h]" 20 | echo "" 21 | echo "optional arguments:" 22 | echo "-h show this help message and exit" 23 | } 24 | 25 | parseOptions() 26 | { 27 | # process command line options 28 | while getopts "h" opt 29 | do 30 | case $opt in 31 | h) showUsage; exit 0 ;; 32 | \?) showUsage; exitError 301 ${LINENO} "invalid command line option (-${OPTARG})" ;; 33 | :) showUsage; exitError 302 ${LINENO} "command line option (-${OPTARG}) requires argument" ;; 34 | esac 35 | done 36 | 37 | } 38 | 39 | # echo basic setup 40 | echo "####### executing: $0 $* (PID=$$ HOST=$HOSTNAME TIME=`date '+%D %H:%M:%S'`)" 41 | 42 | JENKINS_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../" 43 | 44 | # start timer 45 | T="$(date +%s)" 46 | 47 | # parse command line options (pass all of them to function) 48 | parseOptions $* 49 | 50 | # run tests 51 | echo "### run tests" 52 | pytest --junitxml results.xml tests 53 | 54 | # end timer and report time taken 55 | T="$(($(date +%s)-T))" 56 | printf "####### time taken: %02d:%02d:%02d:%02d\n" "$((T/86400))" "$((T/3600%24))" "$((T/60%60))" "$((T%60))" 57 | 58 | # no errors encountered 59 | echo "####### finished: $0 $* (PID=$$ HOST=$HOSTNAME TIME=`date '+%D %H:%M:%S'`)" 60 | exit 0 61 | 62 | # so long, Earthling! 
63 | -------------------------------------------------------------------------------- /util/.jenkins/cache.sh: -------------------------------------------------------------------------------- 1 | ../../.jenkins/cache.sh -------------------------------------------------------------------------------- /util/.jenkins/checksum.sh: -------------------------------------------------------------------------------- 1 | ../../.jenkins/checksum.sh -------------------------------------------------------------------------------- /util/.jenkins/env: -------------------------------------------------------------------------------- 1 | ../../external/buildenv -------------------------------------------------------------------------------- /util/.jenkins/test_util.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 6 | 7 | if [ "${target}" == "gpu" ] ; then 8 | # we only run this on HPC 9 | set +e 10 | module load cray-python 11 | module load pycuda 12 | set -e 13 | fi 14 | 15 | # run tests 16 | echo "restoring cache" 17 | 18 | UTIL_DIR=$SCRIPT_DIR/.. 19 | 20 | cache_key=v1-util-$($SCRIPT_DIR/checksum.sh $SCRIPT_DIR/test_util.sh $UTIL_DIR/requirements.txt $UTIL_DIR/requirements_gpu.txt $UTIL_DIR/../constraints.txt)-$target 21 | 22 | $SCRIPT_DIR/cache.sh restore $cache_key 23 | 24 | echo "running tests" 25 | 26 | python3 -m venv venv 27 | . ./venv/bin/activate 28 | 29 | if [ "${target}" == "gpu" ] ; then 30 | set +e 31 | module unload cray-python 32 | module unload pycuda 33 | set -e 34 | pip3 install -r $UTIL_DIR/requirements.txt -r $UTIL_DIR/requirements_gpu.txt -c $UTIL_DIR/../constraints.txt -e $UTIL_DIR 35 | else 36 | pip3 install -r $UTIL_DIR/requirements.txt -c $UTIL_DIR/../constraints.txt -e $UTIL_DIR 37 | fi 38 | 39 | pytest --junitxml results.xml $UTIL_DIR/tests 40 | 41 | echo "saving cache" 42 | 43 | $SCRIPT_DIR/cache.sh save $cache_key venv 44 | 45 | deactivate 46 | 47 | exit 0 48 | -------------------------------------------------------------------------------- /util/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | 3 | BSD License 4 | 5 | Copyright (c) 2019, Vulcan Technologies LLC 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without modification, 9 | are permitted provided that the following conditions are met: 10 | 11 | * Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 14 | * Redistributions in binary form must reproduce the above copyright notice, this 15 | list of conditions and the following disclaimer in the documentation and/or 16 | other materials provided with the distribution. 17 | 18 | * Neither the name of the copyright holder nor the names of its 19 | contributors may be used to endorse or promote products derived from this 20 | software without specific prior written permission. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 23 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
25 | IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
26 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
29 | OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
30 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 | OF THE POSSIBILITY OF SUCH DAMAGE.
32 | -------------------------------------------------------------------------------- /util/MANIFEST.in: --------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 | include HISTORY.md
4 | 
5 | recursive-include fv3gfs *.json *.yml
6 | recursive-include tests *.py *.sh
7 | recursive-include tests/data *
8 | 
9 | recursive-exclude * __pycache__
10 | recursive-exclude * *.py[co]
11 | recursive-exclude * *.mod *.o
12 | recursive-exclude * *.log
13 | -------------------------------------------------------------------------------- /util/README.md: --------------------------------------------------------------------------------
1 | This package is a toolkit of Python objects and routines for writing weather and climate models.
2 | 
3 | This is research software and still in development. We welcome external contributions. If you would like to contribute to this project, please get in touch with one of our developers!
4 | 
5 | * Free software: BSD license
6 | -------------------------------------------------------------------------------- /util/RELEASE.rst: --------------------------------------------------------------------------------
1 | Release Instructions
2 | ====================
3 | 
4 | Versions should take the form "v<major>.<minor>.patch". For example, "v0.3.0" is a valid
5 | version, while "v1" is not and "0.3.0" is not.
6 | 
7 | 1. Make sure all PRs are merged and tests pass.
8 | 
9 | 2. Prepare a release branch with `git checkout -b release/util/<version>`.
10 | 
11 | 3. Update the HISTORY.md, replacing the "latest" version heading with the new version.
12 | 
13 | 4. Commit your changes so far to the release branch.
14 | 
15 | 5. In the pace-util directory, run `bumpversion <major/minor/patch>`. This will create a new commit.
16 | 
17 | 6. `git push -u origin release/util/<version>` and create a new pull request on GitHub.
18 | 
19 | 7. When the pull request is merged to main, `git checkout main` and `git pull`,
20 | followed by `git tag util/<version>`.
21 | 
22 | 8. Run `git push origin --tags` to push all local tags to GitHub.
23 | 
24 | 9. Run `make release` to push the latest release to PyPI. Contact a core developer to get the
25 | necessary API token.
26 | -------------------------------------------------------------------------------- /util/examples/mpi/.gitignore: --------------------------------------------------------------------------------
1 | output
2 | -------------------------------------------------------------------------------- /util/examples/mpi/Makefile: --------------------------------------------------------------------------------
1 | 
2 | MPI_ENV_VARS=PMIX_MCA_gds=hash
3 | 
4 | all: global_timings zarr_monitor
5 | 
6 | global_timings:
7 | $(MPI_ENV_VARS) mpirun -n 4 python -m mpi4py global_timings.py
8 | 
9 | zarr_monitor:
10 | $(MPI_ENV_VARS) mpirun -n 6 python -m mpi4py zarr_monitor.py
11 | 
12 | clean:
13 | $(RM) -r output/*
14 | touch output/.gitkeep
15 | -------------------------------------------------------------------------------- /util/examples/mpi/global_timings.py: --------------------------------------------------------------------------------
1 | import contextlib
2 | 
3 | import numpy as np
4 | from mpi4py import MPI
5 | 
6 | from pace.util import Timer
7 | 
8 | 
9 | @contextlib.contextmanager
10 | def nullcontext():
11 | yield
12 | 
13 | 
14 | def print_global_timings(times, comm, root=0):
15 | is_root = comm.Get_rank() == root
16 | recvbuf = np.array(0.0)
17 | for name, value in times.items():  # use the argument, not the module-level timer
18 | if is_root:
19 | print(name)
20 | for label, op in [("min", MPI.MIN), ("max", MPI.MAX), ("mean", MPI.SUM)]:
21 | comm.Reduce(np.array(value), recvbuf, op=op)
22 | if is_root:
23 | if label == "mean":
24 | recvbuf /= comm.Get_size()
25 | print(f" {label}: {recvbuf}")
26 | 
27 | 
28 | if __name__ == "__main__":
29 | # a Timer gathers statistics about the blocks it times
30 | arr = np.random.randn(100, 100)
31 | timer = Timer()
32 | 
33 | # using a context manager ensures that stop is always called, even if there is an
34 | # exception/error in the block. We strongly encourage using this method when
35 | # possible.
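# Each block timed under a given label contributes to the total reported
# for that label in timer.times.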
36 | with timer.clock("addition"): 37 | arr += 1 38 | 39 | # sometimes, you will need to trigger the start and end of the timer manually, if 40 | # the start and end cannot be represented by a context manager 41 | timer.start("context_manager") 42 | with nullcontext(): 43 | timer.stop("context_manager") 44 | 45 | comm = MPI.COMM_WORLD 46 | # timer.times is a dictionary giving you the total time in seconds spent on each 47 | # operation 48 | print_global_timings(timer.times, comm) 49 | -------------------------------------------------------------------------------- /util/examples/mpi/zarr_monitor.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | 3 | import cftime 4 | import numpy as np 5 | import zarr 6 | from mpi4py import MPI 7 | 8 | import pace.util 9 | 10 | 11 | OUTPUT_PATH = "output/zarr_monitor.zarr" 12 | 13 | 14 | def get_example_state(time): 15 | sizer = pace.util.SubtileGridSizer( 16 | nx=48, ny=48, nz=70, n_halo=3, extra_dim_lengths={} 17 | ) 18 | allocator = pace.util.QuantityFactory(sizer, np) 19 | air_temperature = allocator.zeros( 20 | [pace.util.X_DIM, pace.util.Y_DIM, pace.util.Z_DIM], units="degK" 21 | ) 22 | air_temperature.view[:] = np.random.randn(*air_temperature.extent) 23 | return {"time": time, "air_temperature": air_temperature} 24 | 25 | 26 | if __name__ == "__main__": 27 | size = MPI.COMM_WORLD.Get_size() 28 | # assume square tile faces 29 | ranks_per_edge = int((size // 6) ** 0.5) 30 | layout = (ranks_per_edge, ranks_per_edge) 31 | 32 | store = zarr.storage.DirectoryStore(OUTPUT_PATH) 33 | partitioner = pace.util.CubedSpherePartitioner(pace.util.TilePartitioner(layout)) 34 | monitor = pace.util.ZarrMonitor(store, partitioner, mpi_comm=MPI.COMM_WORLD) 35 | 36 | time = cftime.DatetimeJulian(2020, 1, 1) 37 | timestep = timedelta(hours=1) 38 | 39 | for i in range(10): 40 | state = get_example_state(time) 41 | monitor.store(state) 42 | time += timestep 43 | -------------------------------------------------------------------------------- /util/external/gt4py: -------------------------------------------------------------------------------- 1 | ../../external/gt4py -------------------------------------------------------------------------------- /util/mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | ignore_missing_imports = True 3 | 4 | # untyped vcm packages 5 | [mypy-fv3viz] 6 | ignore_missing_imports = True 7 | 8 | [mypy-report] 9 | ignore_missing_imports = True 10 | 11 | [mypy-loaders] 12 | ignore_missing_imports = True 13 | 14 | # External Libraries 15 | [mypy-mappm] 16 | ignore_missing_imports = True 17 | 18 | [mypy-gcsfs] 19 | ignore_missing_imports = True 20 | 21 | [mypy-xgcm] 22 | ignore_missing_imports = True 23 | 24 | [mypy-google.*] 25 | ignore_missing_imports = True 26 | 27 | [mypy-numpy] 28 | ignore_missing_imports = True 29 | 30 | [mypy-fsspec] 31 | ignore_missing_imports = True 32 | 33 | [mypy-dask.*] 34 | ignore_missing_imports = True 35 | 36 | [mypy-scipy.*] 37 | ignore_missing_imports = True 38 | 39 | [mypy-skimage.*] 40 | ignore_missing_imports = True 41 | 42 | [mypy-apache_beam.*] 43 | ignore_missing_imports = True 44 | 45 | [mypy-intake] 46 | ignore_missing_imports = True 47 | 48 | [mypy-joblib] 49 | ignore_missing_imports = True 50 | 51 | [mypy-sklearn.*] 52 | ignore_missing_imports = True 53 | 54 | [mypy-toolz] 55 | ignore_missing_imports = True 56 | 
-------------------------------------------------------------------------------- /util/pace/util/_capture_stream.py: --------------------------------------------------------------------------------
1 | import contextlib
2 | import io
3 | import os
4 | import tempfile
5 | 
6 | 
7 | @contextlib.contextmanager
8 | def capture_stream(stream):
9 | 
10 | out_stream = io.BytesIO()
11 | 
12 | # duplicate the stream's original file descriptor so that it can be
13 | # restored once the block exits
14 | orig_file_handle = os.dup(stream.fileno())
15 | 
16 | with tempfile.NamedTemporaryFile() as out:
17 | # redirect the stream's file descriptor to the temporary file, so that
18 | # anything written to the stream inside the block is captured there
19 | os.dup2(out.fileno(), stream.fileno())
20 | yield out_stream
21 | # restore the original file handle
22 | os.dup2(orig_file_handle, stream.fileno())
23 | # copy the captured output into the in-memory buffer
24 | out.seek(0)
25 | out_stream.write(out.read())
26 | -------------------------------------------------------------------------------- /util/pace/util/_exceptions.py: --------------------------------------------------------------------------------
1 | class InvalidQuantityError(Exception):
2 | pass
3 | 
4 | 
5 | class OutOfBoundsError(ValueError):
6 | pass
7 | -------------------------------------------------------------------------------- /util/pace/util/_optional_imports.py: --------------------------------------------------------------------------------
1 | class RaiseWhenAccessed:
2 | def __init__(self, err):
3 | self._err = err
4 | 
5 | def __getattr__(self, _):
6 | raise self._err
7 | 
8 | def __call__(self, *args, **kwargs):
9 | raise self._err
10 | 
11 | 
12 | try:
13 | import zarr
14 | except ModuleNotFoundError as err:
15 | zarr = RaiseWhenAccessed(err)
16 | 
17 | try:
18 | import xarray
19 | except ModuleNotFoundError as err:
20 | xarray = None
21 | 
22 | try:
23 | import cupy
24 | except ImportError:
25 | cupy = None
26 | 
27 | try:
28 | import gt4py
29 | except ImportError:
30 | gt4py = None
31 | 
32 | try:
33 | import dace
34 | except ImportError:
35 | dace = None
36 | -------------------------------------------------------------------------------- /util/pace/util/_profiler.py: --------------------------------------------------------------------------------
1 | import cProfile
2 | 
3 | 
4 | class Profiler:
5 | def __init__(self):
6 | self._enabled = True
7 | self.profiler = cProfile.Profile()
8 | self.profiler.disable()
9 | 
10 | def enable(self):
11 | self.profiler.enable()
12 | 
13 | def dump_stats(self, filename: str):
14 | self.profiler.disable()
15 | self._enabled = False
16 | self.profiler.dump_stats(filename)
17 | 
18 | @property
19 | def enabled(self) -> bool:
20 | """Indicates whether the profiler is currently enabled."""
21 | return self._enabled
22 | 
23 | 
24 | class NullProfiler:
25 | """A profiler class which does not actually profile anything.
26 | 
27 | Meant to be used in place of an optional profiler.
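All of its methods are no-ops, and its enabled property is always False.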
28 | """ 29 | 30 | def __init__(self): 31 | self.profiler = None 32 | self._enabled = False 33 | 34 | def enable(self): 35 | pass 36 | 37 | def dump_stats(self, filename: str): 38 | pass 39 | 40 | @property 41 | def enabled(self) -> bool: 42 | """Indicates whether the profiler is enabled.""" 43 | return False 44 | -------------------------------------------------------------------------------- /util/pace/util/_xarray.py: -------------------------------------------------------------------------------- 1 | try: 2 | import xarray as xr 3 | from xarray import DataArray, Dataset, open_dataset 4 | except ModuleNotFoundError as err: 5 | from ._optional_imports import RaiseWhenAccessed 6 | 7 | xr = RaiseWhenAccessed(err) 8 | DataArray = RaiseWhenAccessed(err) 9 | Dataset = RaiseWhenAccessed(err) 10 | open_dataset = RaiseWhenAccessed(err) 11 | 12 | 13 | def to_dataset(state): 14 | data_vars = { 15 | name: value.data_array for name, value in state.items() if name != "time" 16 | } 17 | if "time" in state: 18 | data_vars["time"] = state["time"] 19 | return xr.Dataset(data_vars=data_vars) 20 | -------------------------------------------------------------------------------- /util/pace/util/checkpointer/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Checkpointer 2 | from .null import NullCheckpointer 3 | from .snapshots import SnapshotCheckpointer 4 | from .thresholds import ( 5 | InsufficientTrialsError, 6 | SavepointThresholds, 7 | Threshold, 8 | ThresholdCalibrationCheckpointer, 9 | ) 10 | from .validation import ValidationCheckpointer 11 | -------------------------------------------------------------------------------- /util/pace/util/checkpointer/base.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | 4 | class Checkpointer(abc.ABC): 5 | @abc.abstractmethod 6 | def __call__(self, savepoint_name, **kwargs): 7 | ... 8 | -------------------------------------------------------------------------------- /util/pace/util/checkpointer/null.py: -------------------------------------------------------------------------------- 1 | from .base import Checkpointer 2 | 3 | 4 | class NullCheckpointer(Checkpointer): 5 | def __call__(self, savepoint_name, **kwargs): 6 | pass 7 | -------------------------------------------------------------------------------- /util/pace/util/comm.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import List, Optional, TypeVar 3 | 4 | 5 | T = TypeVar("T") 6 | 7 | 8 | class Request(abc.ABC): 9 | @abc.abstractmethod 10 | def wait(self): 11 | ... 12 | 13 | 14 | class Comm(abc.ABC): 15 | @abc.abstractmethod 16 | def Get_rank(self) -> int: 17 | ... 18 | 19 | @abc.abstractmethod 20 | def Get_size(self) -> int: 21 | ... 22 | 23 | @abc.abstractmethod 24 | def bcast(self, value: Optional[T], root=0) -> T: 25 | ... 26 | 27 | @abc.abstractmethod 28 | def barrier(self): 29 | ... 30 | 31 | @abc.abstractmethod 32 | def Barrier(self): 33 | ... 34 | 35 | @abc.abstractmethod 36 | def Scatter(self, sendbuf, recvbuf, root=0, **kwargs): 37 | ... 38 | 39 | @abc.abstractmethod 40 | def Gather(self, sendbuf, recvbuf, root=0, **kwargs): 41 | ... 42 | 43 | @abc.abstractmethod 44 | def allgather(self, sendobj: T) -> List[T]: 45 | ... 46 | 47 | @abc.abstractmethod 48 | def Send(self, sendbuf, dest, tag: int = 0, **kwargs): 49 | ... 50 | 51 | @abc.abstractmethod 52 | def sendrecv(self, sendbuf, dest, **kwargs): 53 | ... 
54 | 
55 | @abc.abstractmethod
56 | def Isend(self, sendbuf, dest, tag: int = 0, **kwargs) -> Request:
57 | ...
58 | 
59 | @abc.abstractmethod
60 | def Recv(self, recvbuf, source, tag: int = 0, **kwargs):
61 | ...
62 | 
63 | @abc.abstractmethod
64 | def Irecv(self, recvbuf, source, tag: int = 0, **kwargs) -> Request:
65 | ...
66 | 
67 | @abc.abstractmethod
68 | def Split(self, color, key) -> "Comm":
69 | ...
70 | 
71 | @abc.abstractmethod
72 | def allreduce(self, sendobj: T, op=None) -> T:
73 | ...
74 | -------------------------------------------------------------------------------- /util/pace/util/filesystem.py: --------------------------------------------------------------------------------
1 | import fsspec
2 | 
3 | 
4 | def get_fs(path: str) -> fsspec.AbstractFileSystem:
5 | """Return the fsspec filesystem required to handle a given path."""
6 | fs, _, _ = fsspec.get_fs_token_paths(path)
7 | return fs
8 | 
9 | 
10 | def is_file(filename):
11 | return get_fs(filename).isfile(filename)
12 | 
13 | 
14 | def open(filename, *args, **kwargs):
15 | fs = get_fs(filename)
16 | return fs.open(filename, *args, **kwargs)
17 | -------------------------------------------------------------------------------- /util/pace/util/global_config.py: --------------------------------------------------------------------------------
1 | import functools
2 | import os
3 | from typing import Optional
4 | 
5 | 
6 | def getenv_bool(name: str, default: str) -> bool:
7 | indicator = os.getenv(name, default).title()
8 | return indicator == "True"
9 | 
10 | 
11 | def set_backend(new_backend: str):
12 | global _BACKEND
13 | _BACKEND = new_backend
14 | 
15 | 
16 | def get_backend() -> str:
17 | return _BACKEND
18 | 
19 | 
20 | def set_rebuild(flag: bool):
21 | global _REBUILD
22 | _REBUILD = flag
23 | 
24 | 
25 | def get_rebuild() -> bool:
26 | return _REBUILD
27 | 
28 | 
29 | def set_validate_args(new_validate_args: bool):
30 | global _VALIDATE_ARGS
31 | _VALIDATE_ARGS = new_validate_args
32 | 
33 | 
34 | # Set to "False" to skip validating gt4py stencil arguments
35 | @functools.lru_cache(maxsize=None)
36 | def get_validate_args() -> bool:
37 | return _VALIDATE_ARGS
38 | 
39 | 
40 | # Options
41 | # CPU: numpy, gt:cpu_ifirst, gt:cpu_kfirst
42 | # GPU: gt:gpu, cuda
43 | _BACKEND: Optional[str] = None
44 | 
45 | # If TRUE, all caches will be bypassed and stencils recompiled;
46 | # if FALSE, caches will be checked and rebuilt if the code changes
47 | _REBUILD: bool = getenv_bool("FV3_STENCIL_REBUILD_FLAG", "False")
48 | _VALIDATE_ARGS: bool = True
49 | -------------------------------------------------------------------------------- /util/pace/util/grid/__init__.py: --------------------------------------------------------------------------------
1 | # flake8: noqa: F401
2 | 
3 | from .eta import set_hybrid_pressure_coefficients
4 | from .generation import GridDefinitions, MetricTerms
5 | from .gnomonic import (
6 | great_circle_distance_along_axis,
7 | great_circle_distance_lon_lat,
8 | lon_lat_corner_to_cell_center,
9 | lon_lat_midpoint,
10 | lon_lat_to_xyz,
11 | xyz_midpoint,
12 | xyz_to_lon_lat,
13 | )
14 | from .helper import (
15 | AngleGridData,
16 | ContravariantGridData,
17 | DampingCoefficients,
18 | DriverGridData,
19 | GridData,
20 | HorizontalGridData,
21 | VerticalGridData,
22 | )
23 | from .stretch_transformation import direct_transform
24 | -------------------------------------------------------------------------------- /util/pace/util/initialization/__init__.py: -------------------------------------------------------------------------------- 1
| from .allocator import QuantityFactory 2 | from .sizer import GridSizer, SubtileGridSizer 3 | -------------------------------------------------------------------------------- /util/pace/util/monitor/__init__.py: -------------------------------------------------------------------------------- 1 | from .netcdf_monitor import NetCDFMonitor 2 | from .protocol import Monitor 3 | from .zarr_monitor import ZarrMonitor 4 | -------------------------------------------------------------------------------- /util/pace/util/monitor/convert.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .._optional_imports import cupy 4 | 5 | 6 | def to_numpy(array, dtype=None) -> np.ndarray: 7 | """ 8 | Input array can be a numpy array or a cupy array. Returns numpy array. 9 | """ 10 | try: 11 | output = np.asarray(array) 12 | except ValueError as err: 13 | if err.args[0] == "object __array__ method not producing an array": 14 | output = cupy.asnumpy(array) 15 | else: 16 | raise err 17 | except TypeError as err: 18 | if err.args[0].startswith( 19 | "Implicit conversion to a NumPy array is not allowed." 20 | ): 21 | output = cupy.asnumpy(array) 22 | else: 23 | raise err 24 | if dtype: 25 | output = output.astype(dtype=dtype) 26 | return output 27 | -------------------------------------------------------------------------------- /util/pace/util/monitor/protocol.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Protocol 2 | 3 | from pace.util.quantity import Quantity 4 | 5 | 6 | class Monitor(Protocol): 7 | """ 8 | sympl.Monitor-style object for storing model state dictionaries. 9 | """ 10 | 11 | def store(self, state: dict) -> None: 12 | """Append the model state dictionary to the stored data.""" 13 | ... 14 | 15 | def store_constant(self, state: Dict[str, Quantity]) -> None: 16 | ... 17 | 18 | def cleanup(self): 19 | ... 20 | -------------------------------------------------------------------------------- /util/pace/util/rotate.py: -------------------------------------------------------------------------------- 1 | from . 
import constants 2 | 3 | 4 | def rotate_scalar_data(data, dims, numpy, n_clockwise_rotations): 5 | n_clockwise_rotations = n_clockwise_rotations % 4 6 | if n_clockwise_rotations == 0: 7 | pass 8 | elif n_clockwise_rotations in (1, 3): 9 | x_dim, y_dim = None, None 10 | for i, dim in enumerate(dims): 11 | if dim in constants.X_DIMS: 12 | x_dim = i 13 | elif dim in constants.Y_DIMS: 14 | y_dim = i 15 | if (x_dim is not None) and (y_dim is not None): 16 | if n_clockwise_rotations == 1: 17 | data = numpy.rot90(data, axes=(y_dim, x_dim)) 18 | elif n_clockwise_rotations == 3: 19 | data = numpy.rot90(data, axes=(x_dim, y_dim)) 20 | elif x_dim is not None: 21 | if n_clockwise_rotations == 1: 22 | data = numpy.flip(data, axis=x_dim) 23 | elif y_dim is not None: 24 | if n_clockwise_rotations == 3: 25 | data = numpy.flip(data, axis=y_dim) 26 | elif n_clockwise_rotations == 2: 27 | slice_list = [] 28 | for dim in dims: 29 | if dim in constants.HORIZONTAL_DIMS: 30 | slice_list.append(slice(None, None, -1)) 31 | else: 32 | slice_list.append(slice(None, None)) 33 | data = data[tuple(slice_list)] 34 | return data 35 | 36 | 37 | def rotate_vector_data(x_data, y_data, n_clockwise_rotations, dims, numpy): 38 | x_data = rotate_scalar_data(x_data, dims, numpy, n_clockwise_rotations) 39 | y_data = rotate_scalar_data(y_data, dims, numpy, n_clockwise_rotations) 40 | data = [x_data, y_data] 41 | n_clockwise_rotations = n_clockwise_rotations % 4 42 | if n_clockwise_rotations == 0: 43 | pass 44 | elif n_clockwise_rotations == 1: 45 | data[0], data[1] = data[1], -data[0] 46 | elif n_clockwise_rotations == 2: 47 | data[0], data[1] = -data[0], -data[1] 48 | elif n_clockwise_rotations == 3: 49 | data[0], data[1] = -data[1], data[0] 50 | return data 51 | -------------------------------------------------------------------------------- /util/pace/util/testing/__init__.py: -------------------------------------------------------------------------------- 1 | from .comparison import compare_arr, compare_scalar, success, success_array 2 | from .dummy_comm import ConcurrencyError, DummyComm 3 | from .perturbation import perturb 4 | -------------------------------------------------------------------------------- /util/pace/util/testing/dummy_comm.py: -------------------------------------------------------------------------------- 1 | from ..local_comm import ConcurrencyError # noqa 2 | from ..local_comm import LocalComm as DummyComm # noqa 3 | -------------------------------------------------------------------------------- /util/pace/util/testing/perturbation.py: -------------------------------------------------------------------------------- 1 | from typing import Mapping 2 | 3 | import numpy as np 4 | 5 | 6 | def perturb(input: Mapping[str, np.ndarray]): 7 | """ 8 | Adds roundoff-level noise to the input array in-place through multiplication. 9 | 10 | Will only make changes to float64 or float32 arrays. 
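Values of 1e30 or greater are assumed to be fill values and are left unchanged.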
11 | """ 12 | roundoff = 1e-16 13 | for data in input.values(): 14 | if isinstance(data, np.ndarray) and data.dtype in (np.float64, np.float32): 15 | not_fill_value = data < 1e30 16 | # multiply data by roundoff-level error 17 | data[not_fill_value] *= 1.0 + np.random.uniform( 18 | low=-roundoff, high=roundoff, size=data[not_fill_value].shape 19 | ) 20 | -------------------------------------------------------------------------------- /util/pace/util/time.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | import cftime 4 | import numpy as np 5 | 6 | 7 | # Calendar constant values copied from time_manager in FMS 8 | THIRTY_DAY_MONTHS = 1 9 | JULIAN = 2 10 | GREGORIAN = 3 11 | NOLEAP = 4 12 | FMS_TO_CFTIME_TYPE = { 13 | THIRTY_DAY_MONTHS: cftime.Datetime360Day, 14 | JULIAN: cftime.DatetimeJulian, 15 | GREGORIAN: cftime.DatetimeGregorian, # Not a valid calendar in FV3GFS 16 | NOLEAP: cftime.DatetimeNoLeap, 17 | } 18 | 19 | 20 | def datetime64_to_datetime(dt64: np.datetime64) -> datetime.datetime: 21 | utc_start = np.datetime64(0, "s") 22 | timestamp = (dt64 - utc_start) / np.timedelta64(1, "s") 23 | return datetime.datetime.utcfromtimestamp(timestamp) 24 | -------------------------------------------------------------------------------- /util/pace/util/types.py: -------------------------------------------------------------------------------- 1 | import functools 2 | from typing import Iterable, TypeVar 3 | 4 | import numpy as np 5 | from typing_extensions import Protocol 6 | 7 | 8 | Array = TypeVar("Array") 9 | 10 | 11 | class Allocator(Protocol): 12 | def __call__(self, shape: Iterable[int], dtype: type) -> Array: 13 | pass 14 | 15 | 16 | class NumpyModule(Protocol): 17 | 18 | empty: Allocator 19 | zeros: Allocator 20 | ones: Allocator 21 | 22 | @functools.wraps(np.rot90) 23 | def rot90(self, *args, **kwargs): 24 | ... 25 | 26 | @functools.wraps(np.sum) 27 | def sum(self, *args, **kwargs): 28 | ... 29 | 30 | @functools.wraps(np.log) 31 | def log(self, *args, **kwargs): 32 | ... 33 | 34 | @functools.wraps(np.sin) 35 | def sin(self, *args, **kwargs): 36 | ... 37 | 38 | @functools.wraps(np.asarray) 39 | def asarray(self, *args, **kwargs): 40 | ... 41 | 42 | 43 | class AsyncRequest(Protocol): 44 | """Define the result of an over-the-network capable communication API""" 45 | 46 | def wait(self): 47 | """Block the current thread waiting for the request to be completed""" 48 | ... 
49 | -------------------------------------------------------------------------------- /util/pace/util/units.py: --------------------------------------------------------------------------------
1 | def ensure_equal_units(units1: str, units2: str) -> None:
2 | if not units_are_equal(units1, units2):
3 | raise UnitsError(f"incompatible units {units1} and {units2}")
4 | 
5 | 
6 | def units_are_equal(units1: str, units2: str) -> bool:
7 | return units1.strip() == units2.strip()
8 | 
9 | 
10 | class UnitsError(Exception):
11 | pass
12 | -------------------------------------------------------------------------------- /util/requirements.txt: --------------------------------------------------------------------------------
1 | bump2version
2 | wheel
3 | flake8==3.8.4
4 | mypy==0.790
5 | tox
6 | coverage
7 | f90nml>=1.1.0
8 | appdirs>=1.4.0
9 | sphinx_rtd_theme
10 | pytest-cov
11 | pytest-subtests
12 | gcsfs>=0.7.0
13 | google-cloud-storage
14 | numcodecs>=0.7.2 #pin for gt4py, h5py and py3.6 to agree
15 | h5py>=2.10.0 #pin for gt4py, h5py and py3.6 to agree
16 | h5netcdf
17 | dask>=2021.10.0
18 | numpy>=1.15 #pin for gt4py, h5py, cupy9.1 and py3.6 to agree
19 | toolz
20 | -------------------------------------------------------------------------------- /util/setup.cfg: --------------------------------------------------------------------------------
1 | [bumpversion]
2 | current_version = 0.10.0
3 | commit = True
4 | 
5 | [bdist_wheel]
6 | universal = 1
7 | 
8 | [flake8]
9 | exclude = docs
10 | ignore = E203,E501,W293,W503
11 | max-line-length = 88
12 | 
13 | [aliases]
14 | 
15 | [bumpversion:file:pace/util/__init__.py]
16 | search = __version__ = "{current_version}"
17 | replace = __version__ = "{new_version}"
18 | 
19 | [bumpversion:file:setup.py]
20 | search = version="{current_version}"
21 | replace = version="{new_version}"
22 | -------------------------------------------------------------------------------- /util/setup.py: --------------------------------------------------------------------------------
1 | from typing import List
2 | 
3 | from setuptools import find_namespace_packages, setup
4 | 
5 | 
6 | setup_requirements: List[str] = []
7 | 
8 | requirements = [
9 | "cftime>=1.2.1",
10 | "numpy>=1.15.0",
11 | "fsspec>=0.6.0",
12 | "typing_extensions>=3.7.4",
13 | "f90nml>=1.1.0",
14 | ]
15 | 
16 | test_requirements: List[str] = []
17 | 
18 | with open("README.md") as readme_file:
19 | readme = readme_file.read()
20 | 
21 | 
22 | with open("HISTORY.md") as history_file:
23 | history = history_file.read()
24 | 
25 | setup(
26 | author="Allen Institute for Artificial Intelligence",
27 | author_email="jeremym@allenai.org",
28 | python_requires=">=3.8",
29 | classifiers=[
30 | "Development Status :: 2 - Pre-Alpha",
31 | "Intended Audience :: Developers",
32 | "License :: OSI Approved :: BSD License",
33 | "Natural Language :: English",
34 | "Programming Language :: Python :: 3",
35 | "Programming Language :: Python :: 3.8",
36 | "Programming Language :: Python :: 3.9",
37 | ],
38 | install_requires=requirements,
39 | setup_requires=setup_requirements,
40 | tests_require=test_requirements,
41 | extras_require={
42 | "netcdf": ["xarray>=0.15.1", "scipy>=1.3.1"],
43 | "zarr": ["zarr>=2.3.2", "xarray>=0.15.1", "scipy>=1.3.1"],
44 | "dace": ["dace>=0.14"],
45 | },
46 | name="pace-util",
47 | license="BSD license",
48 | long_description=readme + "\n\n" + history,
49 | packages=find_namespace_packages(include=["pace.*"]),
50 | include_package_data=True,
51 | url="https://github.com/ai2cm/pace",
52 | version="0.10.0",
53 |
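# NOTE: this version string is rewritten automatically by bump2version
# (see the [bumpversion:file:setup.py] section in setup.cfg above)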
zip_safe=False, 54 | ) 55 | -------------------------------------------------------------------------------- /util/tests/data/c12_restart/coupler.res: -------------------------------------------------------------------------------- 1 | 2 (Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4) 2 | 2016 8 1 0 0 0 Model start time: year, month, day, hour, minute, second 3 | 2016 8 1 0 30 0 Current model time: year, month, day, hour, minute, second 4 | -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_core.res.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_core.res.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_core.res.tile1.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_core.res.tile1.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_core.res.tile2.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_core.res.tile2.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_core.res.tile3.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_core.res.tile3.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_core.res.tile4.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_core.res.tile4.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_core.res.tile5.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_core.res.tile5.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_core.res.tile6.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_core.res.tile6.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_srf_wnd.res.tile1.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_srf_wnd.res.tile1.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_srf_wnd.res.tile2.nc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_srf_wnd.res.tile2.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_srf_wnd.res.tile3.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_srf_wnd.res.tile3.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_srf_wnd.res.tile4.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_srf_wnd.res.tile4.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_srf_wnd.res.tile5.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_srf_wnd.res.tile5.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_srf_wnd.res.tile6.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_srf_wnd.res.tile6.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_tracer.res.tile1.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_tracer.res.tile1.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_tracer.res.tile2.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_tracer.res.tile2.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_tracer.res.tile3.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_tracer.res.tile3.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_tracer.res.tile4.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_tracer.res.tile4.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_tracer.res.tile5.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_tracer.res.tile5.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/fv_tracer.res.tile6.nc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/fv_tracer.res.tile6.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/phy_data.tile1.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/phy_data.tile1.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/phy_data.tile2.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/phy_data.tile2.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/phy_data.tile3.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/phy_data.tile3.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/phy_data.tile4.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/phy_data.tile4.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/phy_data.tile5.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/phy_data.tile5.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/phy_data.tile6.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/phy_data.tile6.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/sfc_data.tile1.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/sfc_data.tile1.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/sfc_data.tile2.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/sfc_data.tile2.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/sfc_data.tile3.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/sfc_data.tile3.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/sfc_data.tile4.nc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/sfc_data.tile4.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/sfc_data.tile5.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/sfc_data.tile5.nc -------------------------------------------------------------------------------- /util/tests/data/c12_restart/sfc_data.tile6.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ai2cm/pace/6a46e69818cf1b340f09ed57c260fed64b06cfdc/util/tests/data/c12_restart/sfc_data.tile6.nc -------------------------------------------------------------------------------- /util/tests/data/coupler.res: -------------------------------------------------------------------------------- 1 | 2 (Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4) 2 | 2016 8 1 0 0 0 Model start time: year, month, day, hour, minute, second 3 | 2016 8 3 0 0 0 Current model time: year, month, day, hour, minute, second 4 | -------------------------------------------------------------------------------- /util/tests/data/coupler_julian.res: -------------------------------------------------------------------------------- 1 | 2 (Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4) 2 | 2016 8 1 0 0 0 Model start time: year, month, day, hour, minute, second 3 | 2016 8 3 0 0 0 Current model time: year, month, day, hour, minute, second 4 | -------------------------------------------------------------------------------- /util/tests/data/coupler_noleap.res: -------------------------------------------------------------------------------- 1 | 4 (Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4) 2 | 2016 8 1 0 0 0 Model start time: year, month, day, hour, minute, second 3 | 2016 8 3 0 0 0 Current model time: year, month, day, hour, minute, second 4 | -------------------------------------------------------------------------------- /util/tests/data/coupler_thirty_day.res: -------------------------------------------------------------------------------- 1 | 1 (Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4) 2 | 2016 8 1 0 0 0 Model start time: year, month, day, hour, minute, second 3 | 2016 8 3 0 0 0 Current model time: year, month, day, hour, minute, second 4 | -------------------------------------------------------------------------------- /util/tests/mpi/mpi_comm.py: -------------------------------------------------------------------------------- 1 | try: 2 | from mpi4py import MPI 3 | except ImportError: 4 | MPI = None 5 | 6 | if MPI is not None and MPI.COMM_WORLD.Get_size() == 1: 7 | # not run as a parallel test, disable MPI tests 8 | MPI.Finalize() 9 | MPI = None 10 | -------------------------------------------------------------------------------- /util/tests/quantity/test_deepcopy.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import dataclasses 3 | 4 | import numpy as np 5 | 6 | import pace.util 7 | 8 | 9 | def test_deepcopy_copy_is_editable_by_view(): 10 | nx, ny, nz = 12, 12, 15 11 | quantity = pace.util.Quantity( 12 | np.zeros([nx, ny, nz]), 13 | origin=(0, 0, 0), 14 | extent=(nx, ny, nz), 15 | dims=["x", "y", "z"], 16 | units="", 17 | ) 18 | 
/util/tests/mpi/mpi_comm.py:
--------------------------------------------------------------------------------
try:
    from mpi4py import MPI
except ImportError:
    MPI = None

if MPI is not None and MPI.COMM_WORLD.Get_size() == 1:
    # not run as a parallel test, disable MPI tests
    MPI.Finalize()
    MPI = None
--------------------------------------------------------------------------------
/util/tests/quantity/test_deepcopy.py:
--------------------------------------------------------------------------------
import copy
import dataclasses

import numpy as np

import pace.util


def test_deepcopy_copy_is_editable_by_view():
    nx, ny, nz = 12, 12, 15
    quantity = pace.util.Quantity(
        np.zeros([nx, ny, nz]),
        origin=(0, 0, 0),
        extent=(nx, ny, nz),
        dims=["x", "y", "z"],
        units="",
    )
    quantity_copy = copy.deepcopy(quantity)
    # the assertion below is only valid if we're overwriting the entire
    # data through the view
    assert np.prod(quantity_copy.view[:].shape) == np.prod(quantity_copy.data.shape)
    quantity_copy.view[:] = 1.0
    np.testing.assert_array_equal(quantity.data, 0.0)
    np.testing.assert_array_equal(quantity_copy.data, 1.0)


def test_deepcopy_copy_is_editable_by_data():
    nx, ny, nz = 12, 12, 15
    quantity = pace.util.Quantity(
        np.zeros([nx, ny, nz]),
        origin=(0, 0, 0),
        extent=(nx, ny, nz),
        dims=["x", "y", "z"],
        units="",
    )
    quantity_copy = copy.deepcopy(quantity)
    quantity_copy.data[:] = 1.0
    np.testing.assert_array_equal(quantity.data, 0.0)
    np.testing.assert_array_equal(quantity_copy.data, 1.0)


def test_deepcopy_of_dataclass_is_editable_by_data():
    nx, ny, nz = 12, 12, 15
    quantity = pace.util.Quantity(
        np.zeros([nx, ny, nz]),
        origin=(0, 0, 0),
        extent=(nx, ny, nz),
        dims=["x", "y", "z"],
        units="",
    )

    @dataclasses.dataclass
    class MyClass:
        quantity: pace.util.Quantity

    instance = MyClass(quantity)
    instance_copy = copy.deepcopy(instance)
    instance_copy.quantity.data[:] = 1.0
    np.testing.assert_array_equal(instance.quantity.data, 0.0)
    np.testing.assert_array_equal(instance_copy.quantity.data, 1.0)
--------------------------------------------------------------------------------
/util/tests/test__capture_stream.py:
--------------------------------------------------------------------------------
import ctypes
import os
import sys

import pytest

from pace.util import capture_stream


def get_libc():
    if os.uname().sysname == "Linux":
        return ctypes.cdll.LoadLibrary("libc.so.6")
    else:
        pytest.skip()


def printc(fd, text):
    # write through libc directly to the file descriptor, bypassing sys.stdout
    libc = get_libc()
    b = bytes(text + "\n", "UTF-8")
    libc.write(fd, b, len(b))


def printpy(_, text):
    print(text)


@pytest.mark.parametrize("print_", [printc, printpy])
@pytest.mark.cpu_only
def test_capture_stream_python_print(capfdbinary, print_):
    text = "hello world"

    # This test interacts in a confusing way with pytest's output capturing:
    # sys.stdout.fileno() is usually 1, but not here.
    fd = sys.stdout.fileno()
    with capture_stream(sys.stdout) as out:
        print_(fd, text)

    assert out.getvalue().decode("UTF-8") == text + "\n"
--------------------------------------------------------------------------------
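The parametrization above checks that capture_stream sees both Python-level print and raw libc writes to the file descriptor. A minimal usage sketch outside pytest, relying only on what the test itself demonstrates about the API (capture_stream takes a stream, and the captured value is bytes):

# Sketch of pace.util.capture_stream usage, mirroring the test above:
# writes to sys.stdout inside the block are captured, including output
# from C extensions that write to the file descriptor directly.
import sys

from pace.util import capture_stream

with capture_stream(sys.stdout) as out:
    print("hello world")
captured = out.getvalue().decode("UTF-8")  # -> "hello world\n"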
/util/tests/test_get_tile_number.py:
--------------------------------------------------------------------------------
import pytest

from pace.util import get_tile_number


@pytest.mark.cpu_only
def test_get_tile_number_six_ranks():
    rank_list = list(range(6))
    for i_rank in rank_list:
        tile = get_tile_number(i_rank, len(rank_list))
        assert tile == i_rank + 1


@pytest.mark.cpu_only
def test_get_tile_number_twenty_four_ranks():
    rank_list = list(range(24))
    i_rank = 0
    for i_tile in [i + 1 for i in range(6)]:
        for _ in range(4):
            return_value = get_tile_number(i_rank, len(rank_list))
            assert return_value == i_tile
            i_rank += 1
--------------------------------------------------------------------------------
/util/tests/test_local_comm.py:
--------------------------------------------------------------------------------
import numpy
import pytest

from pace.util import LocalComm


@pytest.fixture
def total_ranks():
    return 2


@pytest.fixture
def tags():
    return [1, 2]


@pytest.fixture
def local_communicator_list(total_ranks):
    shared_buffer = {}
    return_list = []
    for rank in range(total_ranks):
        return_list.append(
            LocalComm(rank=rank, total_ranks=total_ranks, buffer_dict=shared_buffer)
        )
    return return_list


def test_local_comm_simple(local_communicator_list):
    for comm in local_communicator_list:
        rank = comm.Get_rank()
        size = comm.Get_size()
        data = numpy.asarray([rank], dtype=int)
        if rank % 2 == 0:
            comm.Send(data, dest=(rank + 1) % size)
        else:
            comm.Recv(data, source=(rank - 1) % size)
            assert data == (rank - 1) % size


@pytest.mark.parametrize("tags", [(0, 1, 2), (2, 1, 0), (2, 0, 1)])
def test_local_comm_tags(local_communicator_list, tags):
    for comm in local_communicator_list:
        rank = comm.Get_rank()
        size = comm.Get_size()
        data = numpy.array([[rank], [rank + 1], [rank + 2]])
        if rank % 2 == 0:
            for i in range(len(tags)):
                comm.Isend(data[i], dest=(rank + 1) % size, tag=tags[i])
        else:
            rec_buffer = numpy.array([[-1], [-1], [-1]])
            for i in range(len(tags)):
                recv = comm.Irecv(rec_buffer[i], source=(rank - 1) % size, tag=i)
                recv.wait()
            assert (rec_buffer[list(tags)] == data - 1).all()
--------------------------------------------------------------------------------
/util/tests/test_null_comm.py:
--------------------------------------------------------------------------------
import pace.util
from pace.util.null_comm import NullComm


def test_can_create_cube_communicator():
    rank = 2
    total_ranks = 24
    mpi_comm = NullComm(rank, total_ranks)
    layout = (2, 2)
    partitioner = pace.util.CubedSpherePartitioner(pace.util.TilePartitioner(layout))
    communicator = pace.util.CubedSphereCommunicator(mpi_comm, partitioner)
    # smoke test: accessing the tile communicator's partitioner should not raise
    assert communicator.tile.partitioner is not None
--------------------------------------------------------------------------------
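test_local_comm and test_null_comm above exercise pace.util's two single-process stand-ins for an MPI communicator. A minimal sketch of when each applies, using only constructor signatures that appear in the tests themselves:

# Sketch only, based on the constructors used in the tests above.
import pace.util
from pace.util import LocalComm
from pace.util.null_comm import NullComm

# NullComm satisfies the communicator interface but moves no data, so
# rank-dependent objects can be constructed on a single process.
null_comm = NullComm(rank=2, total_ranks=24)
partitioner = pace.util.CubedSpherePartitioner(pace.util.TilePartitioner((2, 2)))
communicator = pace.util.CubedSphereCommunicator(null_comm, partitioner)

# LocalComm emulates several ranks inside one process through a shared
# buffer dict, so Send/Recv pairs can run without mpirun.
shared_buffer = {}
local_comms = [
    LocalComm(rank=rank, total_ranks=2, buffer_dict=shared_buffer)
    for rank in range(2)
]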
/util/tox.ini:
--------------------------------------------------------------------------------
# tox (https://tox.readthedocs.io/) is a tool for running tests
# in multiple virtualenvs. This configuration file will run the
# test suite on all supported python versions. To use it, "pip install tox"
# and then run "tox" from this directory.

[tox]
envlist = py3

[testenv:test_no_extras]
allowlist_externals=make
deps =
    # other versions of pytest don't work with subtests
    pytest
    pytest-subtests
    pytest-cov
    dask # used for open_mfdataset in a test
    netcdf4
    h5netcdf
    -e external/gt4py
    -c../constraints.txt
# only run a subset of tests (fast, no MPI tests)
# to check import infrastructure works with no extras
setenv =
    PYTEST_ARGS = --fast
commands =
    make test


[testenv:test]
allowlist_externals=make mpirun
deps =
    # other versions of pytest don't work with subtests
    pytest
    pytest-subtests
    pytest-cov
    dask # used for open_mfdataset in a test
    -e external/gt4py
    netcdf4
    h5netcdf
    mpi4py
    -c../constraints.txt
extras = netcdf,zarr
commands =
    make test test_mpi

[testenv:lint]
allowlist_externals=make
skip_install = true
deps =
    black
    flake8
    mypy
    -c../constraints.txt
commands =
    make lint
--------------------------------------------------------------------------------
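Individual environments above can be selected with tox's -e flag: "tox -e test_no_extras" runs only the fast, non-MPI subset to check that imports work without optional extras, "tox -e test" additionally installs mpi4py and runs the MPI suite through make test test_mpi (mpirun must be available), and "tox -e lint" runs black, flake8, and mypy via make lint.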