├── .gitignore ├── README.md ├── bubbleml_data ├── DOCS.md ├── README.md └── download_all.sh ├── conda ├── neuralop-main-pytorch-2.0.1-cuda-11.7.yaml └── pytorch-2.0.1-cuda-11.7.yaml ├── conf ├── dataset │ ├── FB_Gravity.yaml │ ├── FB_Gravity_0.1.yaml │ ├── FB_InletVel.yaml │ ├── PB_Gravity.yaml │ ├── PB_SubCooled.yaml │ ├── PB_SubCooled_0.1.yaml │ ├── PB_SubCooled_CrossVal.yaml │ ├── PB_WallSuperHeat.yaml │ ├── PB_WallSuperHeat_CrossVal100.yaml │ ├── PB_WallSuperHeat_CrossVal150.yaml │ ├── PB_WallSuperHeat_CrossVal65.yaml │ ├── PB_WallSuperHeat_CrossVal70.yaml │ ├── PB_WallSuperHeat_CrossVal75.yaml │ ├── PB_WallSuperHeat_CrossVal80.yaml │ ├── PB_WallSuperHeat_CrossVal85.yaml │ ├── PB_WallSuperHeat_CrossVal90.yaml │ └── PB_WallSuperHeat_CrossVal95.yaml ├── default.yaml └── experiment │ ├── README.md │ ├── experimental │ ├── cno │ │ └── pb_temp.yaml │ ├── ffno │ │ ├── pb_temp.yaml │ │ └── pb_vel.yaml │ ├── fno │ │ ├── fb_vel.yaml │ │ ├── pb_temp.yaml │ │ └── pb_vel.yaml │ ├── gfno │ │ └── pb_temp.yaml │ ├── gfno_test │ │ ├── cosine.yaml │ │ ├── gcnn_pb_temp.yaml │ │ ├── pb_temp_mode_64_width_16.yaml │ │ ├── pb_temp_mode_64_width_28.yaml │ │ ├── pb_temp_mode_96_width_20.yaml │ │ ├── pb_temp_mode_96_width_8.yaml │ │ └── vel_cosine.yaml │ ├── ufnet │ │ ├── pb_temp.yaml │ │ └── pb_vel.yaml │ ├── unet_arena │ │ ├── pb_temp.yaml │ │ └── pb_vel.yaml │ ├── unet_bench │ │ ├── pb_temp.yaml │ │ └── pb_vel.yaml │ └── uno │ │ ├── .fb_vel.yaml.swp │ │ ├── fb_vel.yaml │ │ ├── pb_temp.yaml │ │ └── pb_vel.yaml │ └── paper │ ├── ffno │ ├── pb_temp_7.yaml │ └── pb_temp_8.yaml │ ├── fno │ └── pb_temp.yaml │ ├── gfno │ ├── pb_temp.yaml │ └── pb_temp_old.yaml │ ├── unet_arena │ └── pb_temp.yaml │ └── uno │ └── pb_temp.yaml ├── examples ├── README.md ├── Twall-100.hdf5 ├── Twall-103.hdf5 ├── Twall-106.hdf5 ├── data_loading.ipynb └── pytorch_training.ipynb ├── model-zoo └── README.md ├── optical_flow ├── README.md ├── create_opticalflow_dataset.py ├── datasets_gmflow.py ├── datasets_raft.py ├── gmflow_analysis.ipynb └── raft_analysis.ipynb ├── sciml ├── README.md ├── models │ ├── ConvolutionalNeuralOperator │ │ ├── CNOModule.py │ │ ├── debug_tools.py │ │ ├── dnnlib │ │ │ ├── __init__.py │ │ │ └── util.py │ │ ├── torch_utils │ │ │ ├── custom_ops.py │ │ │ ├── misc.py │ │ │ ├── ops │ │ │ │ ├── __init__.py │ │ │ │ ├── bias_act.cpp │ │ │ │ ├── bias_act.cu │ │ │ │ ├── bias_act.h │ │ │ │ ├── bias_act.py │ │ │ │ ├── conv2d_gradfix.py │ │ │ │ ├── conv2d_resample.py │ │ │ │ ├── filtered_lrelu.cpp │ │ │ │ ├── filtered_lrelu.cu │ │ │ │ ├── filtered_lrelu.h │ │ │ │ ├── filtered_lrelu.py │ │ │ │ ├── filtered_lrelu_ns.cu │ │ │ │ ├── filtered_lrelu_rd.cu │ │ │ │ ├── filtered_lrelu_wr.cu │ │ │ │ ├── fma.py │ │ │ │ ├── grid_sample_gradfix.py │ │ │ │ ├── upfirdn2d.cpp │ │ │ │ ├── upfirdn2d.cu │ │ │ │ ├── upfirdn2d.h │ │ │ │ └── upfirdn2d.py │ │ │ └── persistence.py │ │ └── training │ │ │ ├── FourierFeatures.py │ │ │ └── filtered_networks.py │ ├── factorized_fno │ │ ├── factorized_fno.py │ │ ├── feedforward.py │ │ └── linear.py │ ├── gefno │ │ └── gfno.py │ ├── get_model.py │ ├── pdearena │ │ ├── activations.py │ │ ├── fourier.py │ │ └── unet.py │ └── pdebench │ │ └── unet.py ├── op_lib │ ├── disk_hdf5_dataset.py │ ├── dist_utils.py │ ├── downsample.py │ ├── hdf5_dataset.py │ ├── heatflux.py │ ├── losses.py │ ├── metrics.py │ ├── nucleation.py │ ├── plt_util.py │ ├── push_vel_trainer.py │ ├── schedule_utils.py │ ├── temp_trainer.py │ └── vel_trainer.py └── train.py ├── scripts ├── boxkit_dataset.py ├── downsample_data.py ├── 
fourier.py ├── permute_dataset.py ├── plt_hf.py ├── plt_rmse.py ├── psd.py ├── viz.py ├── viz_temp.py ├── viz_temp2.py └── viz_vel.py ├── submit ├── basic.sh ├── cross_val.sh ├── data_convert.sh ├── data_unblock.sh ├── debug.sh ├── dist.sh ├── fb_vel.sh ├── fno_overfit.sh ├── pb_temp.sh ├── pb_temp_0.1.sh └── pb_vel.sh └── video ├── README.md ├── saturated.gif ├── subcooled.gif ├── temp-1.0.gif ├── temp-2.0.gif ├── temp-4.0.gif └── vel.gif /.gitignore: -------------------------------------------------------------------------------- 1 | # PyTorch Tensor 2 | *.pt 3 | 4 | # boiling dataset 5 | *.hdf5 6 | 7 | # FFMPEG video output 8 | *.mp4 9 | output.gif 10 | 11 | # image directories 12 | *.png 13 | im/ 14 | test_im/ 15 | 16 | # slurm and tensorboard logs output files 17 | outputs/ 18 | logs/ 19 | maes* 20 | slurm*.out 21 | 22 | # env loading script 23 | *.sub 24 | env.sh 25 | 26 | 27 | # Byte-compiled / optimized / DLL files 28 | __pycache__/ 29 | *.py[cod] 30 | *$py.class 31 | 32 | # C extensions 33 | *.so 34 | 35 | # Distribution / packaging 36 | .Python 37 | .DS_Store 38 | build/ 39 | develop-eggs/ 40 | dist/ 41 | downloads/ 42 | eggs/ 43 | .eggs/ 44 | lib/ 45 | lib64/ 46 | parts/ 47 | sdist/ 48 | var/ 49 | wheels/ 50 | share/python-wheels/ 51 | *.egg-info/ 52 | .installed.cfg 53 | *.egg 54 | MANIFEST 55 | 56 | # PyInstaller 57 | # Usually these files are written by a python script from a template 58 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 59 | *.manifest 60 | *.spec 61 | 62 | # Installer logs 63 | pip-log.txt 64 | pip-delete-this-directory.txt 65 | 66 | # Unit test / coverage reports 67 | htmlcov/ 68 | .tox/ 69 | .nox/ 70 | .coverage 71 | .coverage.* 72 | .cache 73 | nosetests.xml 74 | coverage.xml 75 | *.cover 76 | *.py,cover 77 | .hypothesis/ 78 | .pytest_cache/ 79 | cover/ 80 | 81 | # Translations 82 | *.mo 83 | *.pot 84 | 85 | # Django stuff: 86 | *.log 87 | local_settings.py 88 | db.sqlite3 89 | db.sqlite3-journal 90 | 91 | # Flask stuff: 92 | instance/ 93 | .webassets-cache 94 | 95 | # Scrapy stuff: 96 | .scrapy 97 | 98 | # Sphinx documentation 99 | docs/_build/ 100 | 101 | # PyBuilder 102 | .pybuilder/ 103 | target/ 104 | 105 | # Jupyter Notebook 106 | .ipynb_checkpoints 107 | 108 | # IPython 109 | profile_default/ 110 | ipython_config.py 111 | 112 | # pyenv 113 | # For a library or package, you might want to ignore these files since the code is 114 | # intended to run in multiple environments; otherwise, check them in: 115 | # .python-version 116 | 117 | # pipenv 118 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 119 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 120 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 121 | # install all needed dependencies. 122 | #Pipfile.lock 123 | 124 | # poetry 125 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 126 | # This is especially recommended for binary packages to ensure reproducibility, and is more 127 | # commonly ignored for libraries. 128 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 129 | #poetry.lock 130 | 131 | # pdm 132 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
133 | #pdm.lock 134 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 135 | # in version control. 136 | # https://pdm.fming.dev/#use-with-ide 137 | .pdm.toml 138 | 139 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 140 | __pypackages__/ 141 | 142 | # Celery stuff 143 | celerybeat-schedule 144 | celerybeat.pid 145 | 146 | # SageMath parsed files 147 | *.sage.py 148 | 149 | # Environments 150 | .env 151 | .venv 152 | env/ 153 | venv/ 154 | ENV/ 155 | env.bak/ 156 | venv.bak/ 157 | 158 | # Spyder project settings 159 | .spyderproject 160 | .spyproject 161 | 162 | # Rope project settings 163 | .ropeproject 164 | 165 | # mkdocs documentation 166 | /site 167 | 168 | # mypy 169 | .mypy_cache/ 170 | .dmypy.json 171 | dmypy.json 172 | 173 | # Pyre type checker 174 | .pyre/ 175 | 176 | # pytype static type analyzer 177 | .pytype/ 178 | 179 | # Cython debug symbols 180 | cython_debug/ 181 | 182 | # PyCharm 183 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 184 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 185 | # and can be added to the global gitignore or merged into this file. For a more nuclear 186 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 187 | #.idea/ 188 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BubbleML 2 | 3 | [![Paper](https://img.shields.io/badge/arXiv-2307.14623-blue)](https://arxiv.org/abs/2307.14623) 4 | 5 | A multiphase, multiphysics dataset of boiling processes. These simulations can be used to model datacenter cooling systems, like liquid coolant flowing across a GPU. They can even model the cooling of nuclear waste: a pool of liquid sitting on a heated surface. 6 | 7 | ![SubCooled Temperature](video/subcooled.gif) 8 | 9 | We hope that BubbleML will be useful to members of the Thermal Science community who are interested in exploring and applying machine learning techniques. We also believe this dataset offers interesting challenges to the scientific machine learning community: handling multiphase data, handling complex boundary conditions, achieving stability in long auto-regressive rollouts, etc. 10 | 11 | ## Documentation and Examples 12 | 13 | Documentation discussing the data fields, format, and relevant parameters can be found in [bubbleml_data/DOCS.md](bubbleml_data/DOCS.md). We also provide a set of [examples](examples/) illustrating how to use the dataset. 14 | 15 | The examples are Jupyter Notebooks showing how to [read and visualize BubbleML](examples/data_loading.ipynb) 16 | and [train a Fourier Neural Operator](examples/pytorch_training.ipynb) on the BubbleML dataset. These are stand-alone examples that use a small, downsampled version of 17 | Subcooled Pool Boiling. These examples are intended to show (1) how to load the dataset, (2) how to read tensors from 18 | the dataset, and (3) how to set up model training for the dataset. Extended descriptions can be found in [bubbleml_data/DOCS.md](bubbleml_data/DOCS.md). To run the examples, you should follow the [environment setup](sciml/README.md) for the SciML code. 19 | 20 | ## Download BubbleML 21 | 22 | BubbleML is publicly available and open source. We provide links to download each study in [bubbleml_data/README.md](bubbleml_data/README.md).
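For example, the smallest study, Single Bubble, can be fetched and unpacked directly from the command line; this is the same URL used by `bubbleml_data/download_all.sh`:

```console
wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/single-bubble.tar.gz
tar -xvzf single-bubble.tar.gz && rm single-bubble.tar.gz
```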
23 | 24 | ## Extending BubbleML 25 | 26 | It's possible that BubbleML will not match your needs. For instance, in BubbleML's current iteration, each study varies one parameter. One obvious extension is to vary multiple parameters, like both the heater and liquid temperatures. This will lead to different phenomena. Another idea is running low-resolution simulations to study upscaling models. And, of course, there are some labs who may just want to generate very large datasets, containing hundreds or thousands of individual simulations! 27 | 28 | To support such efforts, we provide a [reproducibility capsule](https://github.com/Lab-Notebooks/Outflow-Forcing-BubbleML) for running your own boiling simulations with Flash-X. This includes lab notebooks for running simulations. It also includes analysis scripts and the submission files used to generate BubbleML. 29 | 30 | ## Models 31 | 32 | Checkpoints for the models mentioned in the paper, along with their respective results, are listed in the [model zoo](model-zoo/README.md). (Note: metrics will not necessarily match the paper. We hope that this page serves as a "live" listing that shows the best results thus far.) 33 | 34 | ## Running SciML Code 35 | 36 | Please refer to the [SciML README.md](sciml/README.md). 37 | 38 | ## Running Optical Flow Benchmarks 39 | 40 | Please refer to the [Optical Flow README.md](optical_flow/README.md). 41 | 42 | ## Citation 43 | 44 | If you have found BubbleML useful in your research, please consider citing the following paper: 45 | 46 | ```bibtex 47 | @inproceedings{ 48 | hassan2023bubbleml, 49 | title={Bubble{ML}: A Multi-Physics Dataset and Benchmarks for Machine Learning}, 50 | author={Sheikh Md Shakeel Hassan and Arthur Feeney and Akash Dhruv and Jihoon Kim and 51 | Youngjoon Suh and Jaiyoung Ryu and Yoonjin Won and Aparna Chandramowlishwaran}, 52 | booktitle={Advances in Neural Information Processing Systems}, 53 | year={2023}, 54 | url={https://openreview.net/forum?id=0Wmglu8zak} 55 | } 56 | ``` 57 | -------------------------------------------------------------------------------- /bubbleml_data/README.md: -------------------------------------------------------------------------------- 1 | # BubbleML Downloads 2 | 3 | BubbleML is hosted on AWS and can be publicly downloaded.
Each boiling study can be downloaded separately: 4 | 5 | | Study | Size | 6 | |-----------------------|----| 7 | | [Single Bubble](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/single-bubble.tar.gz) | 503.0 MB | 8 | | [Pool Boiling Saturated](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-saturated-fc72-2d.tar.gz) | 24.4 GB | 9 | | [Pool Boiling Subcooled](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-subcooled-fc72-2d.tar.gz) | 10.5 GB | 10 | | [Pool Boiling Gravity](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-gravity-fc72-2d.tar.gz) | 16.5 GB | 11 | | [Flow Boiling Inlet Velocity](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/flow-boiling-velscale-fc72-2d.tar.gz) | 11.4 GB | 12 | | [Flow Boiling Gravity](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/flow-boiling-gravity-fc72-2d.tar.gz) | 10.9 GB | 13 | | [Pool Boiling Subcooled 0.1](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-subcooled-fc72-2d-0.1.tar.gz) | 155.1 GB | 14 | | [Pool Boiling Gravity 0.1](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-gravity-fc72-2d-0.1.tar.gz) | 163.8 GB | 15 | | [Flow Boiling Gravity 0.1](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/flow-boiling-gravity-fc72-2d-0.1.tar.gz) | 108.6 GB | 16 | | [3D Pool Boiling Earth Gravity](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-earth-gravity-3d.tar.gz) | 122.2 GB | 17 | | [3D Pool Boiling ISS Gravity](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-iss-gravity-3d.tar.gz) | 62.6 GB | 18 | | [3D Flow Boiling Earth Gravity](https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/flow-boiling-earth-gravity-3d.tar.gz) | 93.9 GB | 19 | 20 | Each download is a `.tar.gz` archive. It can be extracted using a command of the form 21 | 22 | ```console 23 | tar -xvf <study>.tar.gz -C /path/to/BubbleML/<study>/ 24 | ``` 25 | 26 | After extracting, you will see a collection of hdf5 files in `/path/to/BubbleML/<study>/`. 27 | Each hdf5 file corresponds to one simulation. The hdf5 files can be loaded with common libraries, 28 | such as `h5py`. 29 | 30 | ## Documentation and Examples 31 | 32 | We provide [documentation](DOCS.md) describing the different hdf5 datasets in each simulation file. 33 | There are also [examples](../examples) showing how to load a BubbleML simulation, list out its datasets, 34 | visualize the different simulation fields, and access the metadata. 35 | 36 | ## Data Generation and Extension 37 | 38 | We provide a separate [reproducibility capsule](https://github.com/Lab-Notebooks/Outflow-Forcing-BubbleML) for running Flash-X simulations. 39 | This repo includes all of the submission files BubbleML used. Modifying these will be a straightforward way to generate 40 | new data and extend BubbleML. Note: Flash-X is designed for large-scale, long-running simulations, so there are dependencies on other projects like MPI. 41 | 42 | ## Bulk Download 43 | 44 | The studies can also be downloaded in bulk by running the bash script 45 | 46 | ```console 47 | bash download_all.sh 48 | ``` 49 | 50 | This will download all datasets listed above. Note: the full dataset is over a terabyte in size.
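As a quick sanity check after extracting a study, a simulation file can be opened with `h5py` and its contents listed. A minimal sketch (the path is illustrative; the dataset keys it prints are described in [DOCS.md](DOCS.md)):

```python
import h5py

# Open one simulation file and list every dataset stored in it.
path = '/path/to/BubbleML/PoolBoiling-SubCooled-FC72-2D/Twall-100.hdf5'
with h5py.File(path, 'r') as f:
    for key in f.keys():
        print(key, f[key])
```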
51 | 52 | -------------------------------------------------------------------------------- /bubbleml_data/download_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Download the Single Bubble Simulation 4 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/single-bubble.tar.gz 5 | tar -xvzf single-bubble.tar.gz && rm single-bubble.tar.gz 6 | 7 | # Download the Saturated Pool Boiling study consisting of 13 simulations 8 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-saturated-fc72-2d.tar.gz 9 | tar -xvzf pool-boiling-saturated-fc72-2d.tar.gz && rm pool-boiling-saturated-fc72-2d.tar.gz 10 | 11 | # Download the Subcooled Pool Boiling study consisting of 10 simulations 12 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-subcooled-fc72-2d.tar.gz 13 | tar -xvzf pool-boiling-subcooled-fc72-2d.tar.gz && rm pool-boiling-subcooled-fc72-2d.tar.gz 14 | 15 | # Download the Gravity Pool Boiling study consisting of 9 simulations 16 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-gravity-fc72-2d.tar.gz 17 | tar -xvzf pool-boiling-gravity-fc72-2d.tar.gz && rm pool-boiling-gravity-fc72-2d.tar.gz 18 | 19 | # Download the Flow Boiling Inlet Velocity study consisting of 8 simulations 20 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/flow-boiling-velscale-fc72-2d.tar.gz 21 | tar -xvzf flow-boiling-velscale-fc72-2d.tar.gz && rm flow-boiling-velscale-fc72-2d.tar.gz 22 | 23 | # Download the Flow Boiling Gravity study consisting of 6 simulations 24 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/flow-boiling-gravity-fc72-2d.tar.gz 25 | tar -xvzf flow-boiling-gravity-fc72-2d.tar.gz && rm flow-boiling-gravity-fc72-2d.tar.gz 26 | 27 | # Download the Subcooled Pool Boiling study consisting of 15 simulations with 0.1 temporal resolution 28 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-subcooled-fc72-2d-0.1.tar.gz 29 | tar -xvzf pool-boiling-subcooled-fc72-2d-0.1.tar.gz && rm pool-boiling-subcooled-fc72-2d-0.1.tar.gz 30 | 31 | # Download the Gravity Pool Boiling study consisting of 9 simulations with 0.1 temporal resolution 32 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-gravity-fc72-2d-0.1.tar.gz 33 | tar -xvzf pool-boiling-gravity-fc72-2d-0.1.tar.gz && rm pool-boiling-gravity-fc72-2d-0.1.tar.gz 34 | 35 | # Download the Flow Boiling Gravity study consisting of 6 simulations with 0.1 temporal resolution 36 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/flow-boiling-gravity-fc72-2d-0.1.tar.gz 37 | tar -xvzf flow-boiling-gravity-fc72-2d-0.1.tar.gz && rm flow-boiling-gravity-fc72-2d-0.1.tar.gz 38 | 39 | # Download the 3D Pool Boiling Earth gravity simulation 40 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-earth-gravity-3d.tar.gz 41 | tar -xvzf pool-boiling-earth-gravity-3d.tar.gz && rm pool-boiling-earth-gravity-3d.tar.gz 42 | 43 | # Download the 3D Pool Boiling ISS gravity simulation 44 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/pool-boiling-iss-gravity-3d.tar.gz 45 | tar -xvzf pool-boiling-iss-gravity-3d.tar.gz && rm pool-boiling-iss-gravity-3d.tar.gz 46 | 47 | # Download the 3D Flow Boiling Earth gravity simulation 48 | wget https://bubble-ml-simulations.s3.us-east-2.amazonaws.com/flow-boiling-earth-gravity-3d.tar.gz 49 | tar -xvzf 
flow-boiling-earth-gravity-3d.tar.gz && rm flow-boiling-earth-gravity-3d.tar.gz 50 | -------------------------------------------------------------------------------- /conda/neuralop-main-pytorch-2.0.1-cuda-11.7.yaml: -------------------------------------------------------------------------------- 1 | name: bubble_sciml 2 | channels: 3 | - numba 4 | - pytorch 5 | - nvidia 6 | - pyg 7 | dependencies: 8 | - python=3.9 9 | - pytorch 10 | - torchvision 11 | - pytorch-cuda=11.7 12 | - pyg 13 | - pytorch-scatter 14 | - numpy 15 | - h5py 16 | - numba 17 | - pip 18 | - pip: 19 | - hydra-core 20 | - git+https://github.com/neuraloperator/neuraloperator 21 | - opencv-python 22 | - matplotlib 23 | - tensorboard 24 | - tensorly 25 | - tensorly-torch 26 | - torch-harmonics 27 | - zarr 28 | - opt_einsum 29 | - einops 30 | - wandb 31 | - jupyterlab 32 | -------------------------------------------------------------------------------- /conda/pytorch-2.0.1-cuda-11.7.yaml: -------------------------------------------------------------------------------- 1 | name: bubble_sciml 2 | channels: 3 | - numba 4 | - pytorch 5 | - nvidia 6 | - pyg 7 | dependencies: 8 | - python=3.9 9 | - pytorch=2.0.1 10 | - torchvision 11 | - pytorch-cuda=11.7 12 | - pyg 13 | - pytorch-scatter 14 | - numpy 15 | - h5py 16 | - numba 17 | - pip 18 | - pip: 19 | - hydra-core 20 | - neuraloperator==0.2.0 21 | - opencv-python 22 | - matplotlib 23 | - tensorboard 24 | - tensorly 25 | - tensorly-torch 26 | - zarr 27 | - opt_einsum 28 | - wandb 29 | - jupyterlab 30 | -------------------------------------------------------------------------------- /conf/dataset/FB_Gravity.yaml: -------------------------------------------------------------------------------- 1 | name: fb_gravity 2 | transform: False 3 | steady_time: 30 4 | train_paths: 5 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D/gravX-0.0001.hdf5 6 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D/gravX-0.01.hdf5 7 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D/gravX-0.05.hdf5 8 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D/gravX-0.5.hdf5 9 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D/gravX-1.0.hdf5 10 | val_paths: 11 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D/gravX-0.1.hdf5 12 | -------------------------------------------------------------------------------- /conf/dataset/FB_Gravity_0.1.yaml: -------------------------------------------------------------------------------- 1 | name: gravity 2 | transform: True 3 | steady_time: 300 4 | train_paths: 5 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D-0.1/gravX-0.0001.hdf5 6 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D-0.1/gravX-0.01.hdf5 7 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D-0.1/gravX-0.05.hdf5 8 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D-0.1/gravX-0.5.hdf5 9 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D-0.1/gravX-1.0.hdf5 10 | val_paths: 11 | - ${data_base_dir}/FlowBoiling-Gravity-FC72-2D-0.1/gravX-0.1.hdf5 12 | -------------------------------------------------------------------------------- /conf/dataset/FB_InletVel.yaml: -------------------------------------------------------------------------------- 1 | name: fb_inlet_vel 2 | transform: False 3 | steady_time: 30 4 | train_paths: 5 | - ${data_base_dir}/FlowBoiling-VelScale-FC72-2D/inletVelScale-0.5.hdf5 6 | - ${data_base_dir}/FlowBoiling-VelScale-FC72-2D/inletVelScale-1.0.hdf5 7 | - ${data_base_dir}/FlowBoiling-VelScale-FC72-2D/inletVelScale-1.5.hdf5 8 | - ${data_base_dir}/FlowBoiling-VelScale-FC72-2D/inletVelScale-2.0.hdf5 9 | - 
${data_base_dir}/FlowBoiling-VelScale-FC72-2D/inletVelScale-2.5.hdf5 10 | #- ${data_base_dir}/FlowBoiling-VelScale-FC72-2D/inletVelScale-3.0.hdf5 11 | - ${data_base_dir}/FlowBoiling-VelScale-FC72-2D/inletVelScale-4.0.hdf5 12 | val_paths: 13 | - ${data_base_dir}/FlowBoiling-VelScale-FC72-2D/inletVelScale-3.5.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_Gravity.yaml: -------------------------------------------------------------------------------- 1 | name: pb_gravity 2 | transform: True 3 | steady_time: 30 4 | train_paths: 5 | - ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-0.0001.hdf5 6 | - ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-0.001.hdf5 7 | #- ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-0.01.hdf5 8 | - ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-0.02.hdf5 9 | - ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-0.05.hdf5 10 | - ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-0.1.hdf5 11 | - ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-0.5.hdf5 12 | - ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-1.0.hdf5 13 | val_paths: 14 | - ${data_base_dir}/PoolBoiling-Gravity-FC72-2D/gravY-0.2.hdf5 15 | -------------------------------------------------------------------------------- /conf/dataset/PB_SubCooled.yaml: -------------------------------------------------------------------------------- 1 | name: subcooled 2 | transform: True 3 | steady_time: 30 4 | train_paths: 5 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-79.hdf5 6 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-81.hdf5 7 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-85.hdf5 8 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-90.hdf5 9 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-95.hdf5 10 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-98.hdf5 11 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-103.hdf5 12 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-106.hdf5 13 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-110.hdf5 14 | val_paths: 15 | - ${data_base_dir}/PoolBoiling-SubCooled-FC72-2D/Twall-100.hdf5 16 | -------------------------------------------------------------------------------- /conf/dataset/PB_SubCooled_0.1.yaml: -------------------------------------------------------------------------------- 1 | name: subcooled 2 | transform: True 3 | steady_time: 300 4 | train_paths: 5 | #- ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-79.hdf5 6 | #- ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-81.hdf5 7 | #- ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-85.hdf5 8 | #- ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-88.hdf5 9 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-90.hdf5 10 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-92.hdf5 11 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-95.hdf5 12 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-97.hdf5 13 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-98.hdf5 14 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-100.hdf5 15 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-102.hdf5 16 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-106.hdf5 17 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-108.hdf5 18 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-110.hdf5 19 | val_paths: 20 | - ${data_base_dir}/SubCooled-FC72-2D_HDF5/Twall-103.hdf5 21 | -------------------------------------------------------------------------------- /conf/dataset/PB_SubCooled_CrossVal.yaml: 
-------------------------------------------------------------------------------- 1 | name: subcooled 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-79.hdf5 5 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-81.hdf5 6 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-85.hdf5 7 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-90.hdf5 8 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-95.hdf5 9 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-98.hdf5 10 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-100.hdf5 11 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-103.hdf5 12 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-110.hdf5 13 | val_paths: 14 | - ${data_base_dir}/PB_simulation/SubCooled-FC72-2D_HDF5/Twall-106.hdf5 15 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | steady_time: 30 4 | train_paths: 5 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-60.hdf5 6 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-65.hdf5 7 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-70.hdf5 8 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-75.hdf5 9 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-80.hdf5 10 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-85.hdf5 11 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-90.hdf5 12 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-100.hdf5 13 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-105.hdf5 14 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-110.hdf5 15 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-115.hdf5 16 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-120.hdf5 17 | val_paths: 18 | - ${data_base_dir}/PoolBoiling-WallSuperheat-FC72-2D/Twall-95.hdf5 19 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal100.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 5 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 6 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 12 | val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal150.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 5 | - 
${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 6 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 12 | val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal65.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 5 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 6 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 12 | val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal70.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 5 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 6 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 12 | val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal75.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 5 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 6 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 12 | 
val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal80.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 5 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 6 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 12 | val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal85.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 5 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 6 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 12 | val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal90.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 5 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 6 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 12 | val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 14 | -------------------------------------------------------------------------------- /conf/dataset/PB_WallSuperHeat_CrossVal95.yaml: -------------------------------------------------------------------------------- 1 | name: wall_super_heat 2 | transform: True 3 | train_paths: 4 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-65.hdf5 5 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-70.hdf5 6 | - 
${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-75.hdf5 7 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-80.hdf5 8 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-85.hdf5 9 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-90.hdf5 10 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-100.hdf5 11 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-150.hdf5 12 | val_paths: 13 | - ${data_base_dir}/PB_simulation/WallSuperheat-FC72-2D_HDF5/Twall-95.hdf5 14 | -------------------------------------------------------------------------------- /conf/default.yaml: -------------------------------------------------------------------------------- 1 | # NOTE: The data_base_dir must be set by the user 2 | # Either explicitly set it in this file or set it as a command-line argument 3 | # when running the train script. 4 | data_base_dir: 5 | log_dir: 6 | 7 | train: True 8 | test: True 9 | 10 | model_checkpoint: 11 | 12 | defaults: 13 | - _self_ 14 | - dataset: PB_WallSuperHeat 15 | - experiment: paper/unet_arena/pb_temp 16 | -------------------------------------------------------------------------------- /conf/experiment/README.md: -------------------------------------------------------------------------------- 1 | # Experiment configs: 2 | 3 | Each subdirectory corresponds to a model. 4 | 5 | - `fno/` is for the Fourier Neural Operator 6 | - `uno/` is for the U-shaped Fourier Neural Operator 7 | - `ffno/` is for the Factorized Fourier Neural Operator 8 | - `gfno/` is for the Group-Equivariant Fourier Neural Operator 9 | - `unet_bench/` is the U-Net implementation from PDEBench 10 | - `unet_arena/` is the U-Net implementation from PDEArena 11 | - `ufnet/` is a U-Net variant that also uses some Fourier layers 12 | 13 | 14 | The config files in each subdirectory correspond to different experiments.
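Putting `conf/default.yaml` together with these config groups, training runs appear to be launched through Hydra by picking a `dataset` and an `experiment` and overriding `data_base_dir` on the command line. A hypothetical invocation (the exact overrides accepted by `sciml/train.py` may differ):

```console
python sciml/train.py data_base_dir=/path/to/BubbleML \
    dataset=PB_SubCooled experiment=paper/fno/pb_temp
```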
15 | -------------------------------------------------------------------------------- /conf/experiment/experimental/cno/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 250 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: cno 18 | in_size: 512 19 | n_layers: 6 20 | 21 | 22 | optimizer: 23 | initial_lr: 1e-3 24 | weight_decay: 1e-6 25 | 26 | lr_scheduler: 27 | name: cosine 28 | eta_min: 1e-5 -------------------------------------------------------------------------------- /conf/experiment/experimental/ffno/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: True 4 | 5 | train: 6 | max_epochs: 350 7 | batch_size: 20 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 4 15 | 16 | model: 17 | model_name: factorized_fno 18 | fmode_frac: [0.66, 0.66] 19 | width: 64 20 | dropout: 0.0 21 | n_layers: 4 22 | layer_norm: True 23 | 24 | optimizer: 25 | initial_lr: 1e-3 26 | weight_decay: 1e-6 27 | 28 | lr_scheduler: 29 | name: cosine 30 | -------------------------------------------------------------------------------- /conf/experiment/experimental/ffno/pb_vel.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | distributed: True 4 | 5 | train: 6 | max_epochs: 250 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 2 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: factorized_fno 18 | fmode_frac: [0.3, 0.3] 19 | width: 256 20 | dropout: 0.0 21 | n_layers: 7 22 | layer_norm: True 23 | 24 | optimizer: 25 | initial_lr: 1e-4 26 | weight_decay: 1e-6 27 | 28 | lr_scheduler: 29 | name: cosine 30 | eta_min: 1e-6 31 | -------------------------------------------------------------------------------- /conf/experiment/experimental/fno/fb_vel.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 25 7 | batch_size: 12 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: [1, 2] 15 | 16 | model: 17 | model_name: fno 18 | hidden_channels: 64 19 | fmode_frac: [0.66, 0.66] 20 | domain_padding: [0.2, 0.2] 21 | n_layers: 4 22 | norm: 'group_norm' 23 | separable: False 24 | 25 | optimizer: 26 | initial_lr: 1e-3 27 | weight_decay: 1e-5 28 | 29 | lr_scheduler: 30 | name: cosine 31 | eta_min: 5e-6 32 | -------------------------------------------------------------------------------- /conf/experiment/experimental/fno/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 250 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: fno 
18 | fmode_frac: [0.66, 0.66] 19 | hidden_channels: 64 20 | domain_padding: [0.1, 0.1] 21 | n_layers: 4 22 | norm: 'group_norm' 23 | separable: False 24 | 25 | 26 | optimizer: 27 | initial_lr: 1e-3 28 | weight_decay: 1e-6 29 | 30 | lr_scheduler: 31 | name: cosine 32 | -------------------------------------------------------------------------------- /conf/experiment/experimental/fno/pb_vel.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | distributed: True 4 | 5 | train: 6 | max_epochs: 25 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 2 15 | 16 | model: 17 | model_name: fno 18 | hidden_channels: 64 19 | fmode_frac: [0.66, 0.66] 20 | domain_padding: [0.1, 0.1] 21 | n_layers: 4 22 | norm: 'group_norm' 23 | separable: False 24 | 25 | optimizer: 26 | initial_lr: 1e-3 27 | weight_decay: 1e-5 28 | 29 | lr_scheduler: 30 | name: cosine 31 | eta_min: 1e-6 32 | -------------------------------------------------------------------------------- /conf/experiment/experimental/gfno/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 250 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: True 15 | downsample_factor: 2 16 | 17 | model: 18 | model_name: gfno 19 | modes: 64 20 | width: 8 21 | reflection: False 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-5 26 | 27 | lr_scheduler: 28 | name: step 29 | patience: 75 30 | factor: 0.5 31 | -------------------------------------------------------------------------------- /conf/experiment/experimental/gfno_test/cosine.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 25 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: True 15 | downsample_factor: 2 16 | 17 | model: 18 | model_name: gfno 19 | modes: 96 20 | width: 20 21 | reflection: False 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-4 26 | 27 | lr_scheduler: 28 | name: cosine 29 | eta_min: 1e-5 30 | -------------------------------------------------------------------------------- /conf/experiment/experimental/gfno_test/gcnn_pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 250 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: False 14 | noise: True 15 | downsample_factor: 1 16 | 17 | model: 18 | model_name: gcnn 19 | width: 24 20 | reflection: False 21 | 22 | optimizer: 23 | initial_lr: 1e-3 24 | weight_decay: 1e-5 25 | 26 | lr_scheduler: 27 | name: step 28 | patience: 70 29 | factor: 0.5 30 | -------------------------------------------------------------------------------- 
/conf/experiment/experimental/gfno_test/pb_temp_mode_64_width_16.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 250 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: True 15 | downsample_factor: 2 16 | 17 | model: 18 | model_name: gfno 19 | modes: 64 20 | width: 16 21 | reflection: False 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-4 26 | 27 | lr_scheduler: 28 | name: step 29 | patience: 75 30 | factor: 0.5 31 | -------------------------------------------------------------------------------- /conf/experiment/experimental/gfno_test/pb_temp_mode_64_width_28.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 250 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: True 15 | downsample_factor: 2 16 | 17 | model: 18 | model_name: gfno 19 | modes: 64 20 | width: 28 21 | reflection: False 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-4 26 | 27 | lr_scheduler: 28 | name: step 29 | patience: 75 30 | factor: 0.5 31 | -------------------------------------------------------------------------------- /conf/experiment/experimental/gfno_test/pb_temp_mode_96_width_20.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 250 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: True 15 | downsample_factor: 2 16 | 17 | model: 18 | model_name: gfno 19 | modes: 96 20 | width: 20 21 | reflection: False 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-4 26 | 27 | lr_scheduler: 28 | name: step 29 | patience: 75 30 | factor: 0.5 31 | -------------------------------------------------------------------------------- /conf/experiment/experimental/gfno_test/pb_temp_mode_96_width_8.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 250 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: True 15 | downsample_factor: 2 16 | 17 | model: 18 | model_name: gfno 19 | modes: 96 20 | width: 8 21 | reflection: False 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-4 26 | 27 | lr_scheduler: 28 | name: step 29 | patience: 75 30 | factor: 0.5 31 | -------------------------------------------------------------------------------- /conf/experiment/experimental/gfno_test/vel_cosine.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 25 8 | 
batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: False 15 | downsample_factor: 2 16 | 17 | model: 18 | model_name: gfno 19 | modes: 64 20 | width: 28 21 | reflection: False 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-4 26 | 27 | lr_scheduler: 28 | name: cosine 29 | eta_min: 1e-5 30 | -------------------------------------------------------------------------------- /conf/experiment/experimental/ufnet/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 350 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: False 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: ufnet 18 | hidden_channels: 64 19 | modes1: 8 20 | modes2: 8 21 | n_fourier_layers: 2 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-6 26 | 27 | lr_scheduler: 28 | name: cosine 29 | -------------------------------------------------------------------------------- /conf/experiment/experimental/ufnet/pb_vel.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | distributed: True 4 | 5 | train: 6 | max_epochs: 500 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: False 13 | noise: True 14 | downsample_factor: 0.5 15 | 16 | model: 17 | model_name: ufnet 18 | hidden_channels: 64 19 | modes1: 8 20 | modes2: 8 21 | n_fourier_layers: 2 22 | 23 | optimizer: 24 | initial_lr: 1e-3 25 | weight_decay: 1e-6 26 | 27 | lr_scheduler: 28 | name: cosine 29 | -------------------------------------------------------------------------------- /conf/experiment/experimental/unet_arena/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 250 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: False 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: unet_arena 18 | hidden_channels: 32 19 | 20 | optimizer: 21 | initial_lr: 1e-3 22 | weight_decay: 1e-6 23 | 24 | lr_scheduler: 25 | name: cosine 26 | eta_min: 1e-5 27 | -------------------------------------------------------------------------------- /conf/experiment/experimental/unet_arena/pb_vel.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | distributed: True 4 | 5 | train: 6 | max_epochs: 25 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: False 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: unet_arena 18 | hidden_channels: 32 19 | 20 | optimizer: 21 | initial_lr: 1e-3 22 | weight_decay: 0.001 23 | 24 | lr_scheduler: 25 | name: cosine 26 | eta_min: 1e-6 27 | -------------------------------------------------------------------------------- /conf/experiment/experimental/unet_bench/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: True 4 | 5 | train: 6 | max_epochs: 250 
7 | batch_size: 8 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: False 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: unet_bench 18 | init_features: 64 19 | 20 | optimizer: 21 | initial_lr: 1e-3 22 | weight_decay: 1e-6 23 | 24 | lr_scheduler: 25 | name: cosine 26 | -------------------------------------------------------------------------------- /conf/experiment/experimental/unet_bench/pb_vel.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | distributed: True 4 | 5 | train: 6 | max_epochs: 25 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: False 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: unet_bench 18 | init_features: 64 19 | 20 | optimizer: 21 | initial_lr: 1e-3 22 | weight_decay: 0.001 23 | 24 | lr_scheduler: 25 | name: cosine 26 | eta_min: 1e-6 27 | -------------------------------------------------------------------------------- /conf/experiment/experimental/uno/.fb_vel.yaml.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/conf/experiment/experimental/uno/.fb_vel.yaml.swp -------------------------------------------------------------------------------- /conf/experiment/experimental/uno/fb_vel.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 25 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: [1, 4] 15 | 16 | model: 17 | model_name: uno 18 | hidden_channels: 128 19 | projection_channels: 256 20 | norm: 'group_norm' 21 | n_layers: 5 22 | uno_out_channels: [64,64,64,64,64] 23 | uno_n_modes: [[128,128],[64, 64],[64,64],[128,128]] 24 | uno_scalings: [[1,1],[0.5,0.5],[1,1],[2,2],[1,1]] 25 | domain_padding: [0.2, 0.2] 26 | 27 | optimizer: 28 | initial_lr: 1e-3 29 | weight_decay: 1e-5 30 | 31 | lr_scheduler: 32 | name: cosine 33 | -------------------------------------------------------------------------------- /conf/experiment/experimental/uno/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 350 7 | batch_size: 16 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 4 15 | 16 | model: 17 | model_name: uno 18 | hidden_channels: 64 19 | projection_channels: 256 20 | norm: 'group_norm' 21 | n_layers: 5 22 | uno_out_channels: [128,128,128,128,128] 23 | uno_n_modes: [[128,128],[64, 64],[64,64],[128,128],[128,128]] 24 | uno_scalings: [[1,1],[0.5,0.5],[1,1],[2,2],[1,1]] 25 | domain_padding: 0.1 26 | 27 | optimizer: 28 | initial_lr: 1e-3 29 | weight_decay: 1e-6 30 | 31 | lr_scheduler: 32 | name: cosine 33 | -------------------------------------------------------------------------------- /conf/experiment/experimental/uno/pb_vel.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: vel_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 
250 7 | batch_size: 6 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 2 15 | 16 | model: 17 | model_name: uno 18 | hidden_channels: 128 19 | projection_channels: 256 20 | norm: 'group_norm' 21 | n_layers: 5 22 | uno_out_channels: [128,128,128,128,128] 23 | uno_n_modes: [[128,128],[64, 64],[64,64],[128,128]] 24 | uno_scalings: [[1,1],[0.5,0.5],[1,1],[2,2],[1,1]] 25 | domain_padding: 0.1 26 | 27 | optimizer: 28 | initial_lr: 1e-3 29 | weight_decay: 1e-6 30 | 31 | lr_scheduler: 32 | name: cosine 33 | -------------------------------------------------------------------------------- /conf/experiment/paper/ffno/pb_temp_7.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 250 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 2 15 | 16 | model: 17 | model_name: factorized_fno 18 | modes: 64 19 | width: 256 20 | dropout: 0.0 21 | n_layers: 7 22 | layer_norm: True 23 | 24 | optimizer: 25 | initial_lr: 1e-3 26 | weight_decay: 0.01 27 | 28 | lr_scheduler: 29 | name: 'step' 30 | factor: 0.5 31 | patience: 75 32 | -------------------------------------------------------------------------------- /conf/experiment/paper/ffno/pb_temp_8.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 250 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 2 15 | 16 | model: 17 | model_name: factorized_fno 18 | modes: 64 19 | width: 256 20 | dropout: 0.0 21 | n_layers: 8 22 | layer_norm: True 23 | 24 | optimizer: 25 | initial_lr: 1e-3 26 | weight_decay: 0.01 27 | 28 | lr_scheduler: 29 | name: 'step' 30 | factor: 0.5 31 | patience: 75 32 | -------------------------------------------------------------------------------- /conf/experiment/paper/fno/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 350 7 | batch_size: 20 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 2 15 | 16 | model: 17 | model_name: fno 18 | modes: [64, 64] 19 | hidden_channels: 256 20 | domain_padding: [0.1, 0.1] 21 | n_layers: 6 22 | norm: 'instance_norm' 23 | rank: 0.1 24 | 25 | optimizer: 26 | initial_lr: 1e-3 27 | weight_decay: 0.01 28 | 29 | lr_scheduler: 30 | name: step 31 | factor: 0.5 32 | patience: 75 33 | -------------------------------------------------------------------------------- /conf/experiment/paper/gfno/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 250 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: True 15 | downsample_factor: 2 16 | 17 | model: 18 | model_name: gfno 19 | modes: 64 
20 | width: 128 21 | reflection: False 22 | domain_padding: 0.1 23 | 24 | optimizer: 25 | initial_lr: 1e-3 26 | weight_decay: 0.01 27 | 28 | lr_scheduler: 29 | name: step 30 | factor: 0.5 31 | patience: 75 32 | -------------------------------------------------------------------------------- /conf/experiment/paper/gfno/pb_temp_old.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | # torch distributed does not support complex parameters 4 | distributed: False 5 | 6 | train: 7 | max_epochs: 250 8 | batch_size: 4 9 | shuffle_data: True 10 | time_window: 5 11 | future_window: 5 12 | push_forward_steps: 1 13 | use_coords: True 14 | noise: True 15 | downsample_factor: 4 16 | 17 | model: 18 | model_name: gfno 19 | modes: 64 20 | width: 64 21 | reflection: False 22 | domain_padding: 0.0 23 | 24 | optimizer: 25 | initial_lr: 1e-3 26 | weight_decay: 0.01 27 | 28 | lr_scheduler: 29 | name: step 30 | factor: 0.5 31 | patience: 75 32 | -------------------------------------------------------------------------------- /conf/experiment/paper/unet_arena/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 250 7 | batch_size: 4 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: False 13 | noise: True 14 | downsample_factor: 1 15 | 16 | model: 17 | model_name: unet_arena 18 | hidden_channels: 32 19 | 20 | optimizer: 21 | initial_lr: 1e-3 22 | weight_decay: 0.01 23 | 24 | lr_scheduler: 25 | name: step 26 | factor: 0.5 27 | patience: 75 28 | -------------------------------------------------------------------------------- /conf/experiment/paper/uno/pb_temp.yaml: -------------------------------------------------------------------------------- 1 | torch_dataset_name: temp_input_dataset 2 | 3 | distributed: False 4 | 5 | train: 6 | max_epochs: 250 7 | batch_size: 8 8 | shuffle_data: True 9 | time_window: 5 10 | future_window: 5 11 | push_forward_steps: 1 12 | use_coords: True 13 | noise: True 14 | downsample_factor: 2 15 | 16 | model: 17 | model_name: uno 18 | hidden_channels: 256 19 | projection_channels: 256 20 | norm: 'group_norm' 21 | n_layers: 8 22 | uno_out_channels: [64,128,128,256,256,128,128,64] 23 | uno_n_modes: [[64,64],[32, 32],[32,32],[16,16],[16,16],[32,32],[32,32],[64,64]] 24 | uno_scalings: [[1,1],[0.5,0.5],[0.5,0.5],[1,1],[1,1],[2,2],[2,2],[1,1]] 25 | domain_padding: 0.1 26 | 27 | optimizer: 28 | initial_lr: 1e-3 29 | weight_decay: 0.01 30 | 31 | lr_scheduler: 32 | name: step 33 | factor: 0.5 34 | patience: 75 35 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # BubbleML Examples 2 | 3 | This directory contains examples of how to use BubbleML that are much simpler 4 | than our experiment code. We use a downsampled version of several Subcooled Pool Boiling datasets. 5 | Each simulation has had its resolution downsampled to 1/8th size, so it is much smaller and faster to train models. These 6 | downsampled datasets have the exact same keys and metadata as the "true" datasets. The only difference 7 | is the domain resolution. 8 | 9 | We provide two jupyter notebooks: 10 | 1. 
`data_loading.ipynb` shows how to load simulations using h5py, what the keys are for each hdf5 file, 11 | and how to visualize different timesteps for each variable. It also shows how to use the distance 12 | function to get points in the liquid or vapor, or on the phase interface. 13 | 2. `pytorch_training.ipynb` uses the three sample datasets to train a Fourier Neural Operator. 14 | We do not apply the training strategies used in our experiments; this is intended to serve as an 15 | example of how to use BubbleML, not reproduce our results. This example shows how to set up a 16 | PyTorch dataset for each HDF5 file and how to use a `ConcatDataset` to combine them. It then 17 | shows how to build a Fourier Neural Operator, run a training loop, and visualize the results. 18 | 19 | ## Running the examples: 20 | 21 | The SciML conda environment includes jupyterlab as a dependency, so it can run the example notebooks. 22 | In the project root directory, you can create the conda environment with pytorch, neuraloperator, 23 | and jupyter. 24 | 25 | ```console 26 | conda env create -n bubble-sciml -f conda/pytorch-2.0.1-cuda-11.7.yaml 27 | ``` 28 | -------------------------------------------------------------------------------- /examples/Twall-100.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/examples/Twall-100.hdf5 -------------------------------------------------------------------------------- /examples/Twall-103.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/examples/Twall-103.hdf5 -------------------------------------------------------------------------------- /examples/Twall-106.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/examples/Twall-106.hdf5 -------------------------------------------------------------------------------- /optical_flow/README.md: -------------------------------------------------------------------------------- 1 | ## Helper files for optical flow of boiling datasets 2 | 3 | This directory provides helper files for running optical flow experiments with the official [RAFT](https://github.com/princeton-vl/RAFT) and [GMFlow](https://github.com/haofeixu/gmflow) implementations. 4 | 5 | ### Creation of BubbleML optical flow dataset 6 | An optical flow dataset can be created from an uncompressed folder of a BubbleML study using the script 7 | ```console 8 | python create_opticalflow_dataset.py --ip_dir /path/to/BubbleML/study/ --op_dir /path/to/optical-flow-datasets/Boiling/ 9 | ``` 10 | 11 | The dataloaders provided for RAFT and GMFlow are slight modifications of the originals in the respective repositories, adapted to train models on BubbleML data. Copy the respective files to `core/datasets.py` in case of RAFT and `data/datasets.py` in case of GMFlow.
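For reference, the copy step might look like this (the RAFT and GMFlow checkout locations are placeholders; adjust them to wherever the repositories live):

```console
cp optical_flow/datasets_raft.py /path/to/RAFT/core/datasets.py
cp optical_flow/datasets_gmflow.py /path/to/GMFlow/data/datasets.py
```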
12 | The finetuning process can then be performed using the scripts given below: 13 | 14 | #### GMFlow 15 | ```console 16 | python main.py --checkpoint_dir chairs_boil/ --resume pretrained/gmflow_chairs-1d776046.pth --stage boiling --batch_size 8 --num_workers 4 --lr 1e-6 --weight_decay 1e-6 --image_size 512 512 --save_ckpt_freq 1000 --num_steps 2000 17 | python main.py --checkpoint_dir things_boil/ --resume pretrained/gmflow_things-e9887eda.pth --stage boiling --batch_size 8 --num_workers 4 --lr 1e-6 --weight_decay 1e-6 --image_size 512 512 --save_ckpt_freq 1000 --num_steps 2000 18 | python main.py --checkpoint_dir sintel_boil/ --resume pretrained/gmflow_sintel-0c07dcb3.pth --stage boiling --batch_size 8 --num_workers 4 --lr 1e-6 --weight_decay 1e-6 --image_size 512 512 --save_ckpt_freq 1000 --num_steps 2000 19 | ``` 20 | #### RAFT 21 | ```console 22 | python -u train.py --name raft-boiling-chairs --stage boiling --restore_ckpt /data/homezvol1/sheikhh1/RAFT/models/raft-chairs.pth --gpus 0 --num_steps 2000 --batch_size 5 --lr 0.000001 --image_size 512 512 --wdecay 0.000001 --mixed_precision 23 | python -u train.py --name raft-boiling-things --stage boiling --restore_ckpt /data/homezvol1/sheikhh1/RAFT/models/raft-things.pth --gpus 0 --num_steps 2000 --batch_size 5 --lr 0.000001 --image_size 512 512 --wdecay 0.000001 --mixed_precision 24 | python -u train.py --name raft-boiling-sintel --stage boiling --restore_ckpt /data/homezvol1/sheikhh1/RAFT/models/raft-sintel.pth --gpus 0 --num_steps 2000 --batch_size 5 --lr 0.000001 --image_size 512 512 --wdecay 0.000001 --mixed_precision 25 | ``` 26 | 27 | The jupyter notebooks can be copied to the `RAFT` and `GMFlow` home directories to observe performance of optical flow models on the BubbleML data. 28 | -------------------------------------------------------------------------------- /optical_flow/create_opticalflow_dataset.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import glob 4 | import h5py 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | 8 | L_C = 0.7 # The characteristic length of the fluid in mm. Multiply the non-dimensional length and height of the domain to get real world dimensions in mm. 9 | U_C = 82.867 # The characteristic velocity of the fluid in mm/s. Multiply the non-dimensional velocities to get real world velocities in mm/s 10 | T_C = 0.008 # The characteristic time of the fluid in s. Multiply with non-dimensional time to get the dimensional time. 11 | 12 | def write_flo(file_path, u, v): 13 | """ 14 | Write optical flow to file. 
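The file uses the Middlebury .flo layout, matching the writes below: a float32 magic number (202021.25), int32 width and height, then row-major interleaved (u, v) float32 pairs.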
15 | :param file_path: Path obtained from user to write optical flow file 16 | :param u: np.ndarray is assumed to contain u channel or x velocities, 17 | :param v: np.ndarray is assumed to contain v channel or y velocities, 18 | """ 19 | nBands = 2 20 | 21 | assert (u.shape == v.shape) 22 | height, width = u.shape 23 | f = open(file_path, 'wb') 24 | # write the header 25 | np.array([202021.25]).astype(np.float32).tofile(f) 26 | np.array(width).astype(np.int32).tofile(f) 27 | np.array(height).astype(np.int32).tofile(f) 28 | # arrange into matrix form 29 | tmp = np.zeros((height, width * nBands)) 30 | tmp[:, np.arange(width) * 2] = u 31 | tmp[:, np.arange(width) * 2 + 1] = v 32 | tmp.astype(np.float32).tofile(f) 33 | 34 | f.close() 35 | 36 | def make_dataset(sim_file, op_dir, train_valid_split, plot_interval): 37 | simul_data = h5py.File(sim_file, 'r') 38 | dist_fields = simul_data['dfun'][:] 39 | num_timesteps = dist_fields.shape[0] 40 | train_length = int(train_valid_split * num_timesteps) 41 | train_save_dir = os.path.join(op_dir, 'train', sim_file.split('/')[-1][:-5]) 42 | valid_save_dir = os.path.join(op_dir, 'valid', sim_file.split('/')[-1][:-5]) 43 | 44 | domain_height = round(L_C * (simul_data['y'][-1,-1,-1] + simul_data['y'][0,0,0]), 2) 45 | pixel_height = simul_data['y'].shape[1] 46 | pixel_density = pixel_height/domain_height 47 | 48 | secs_per_frame = T_C * plot_interval 49 | 50 | os.makedirs(os.path.join(train_save_dir, 'img'), exist_ok=True) 51 | os.makedirs(os.path.join(train_save_dir, 'flow'), exist_ok=True) 52 | os.makedirs(os.path.join(valid_save_dir, 'img'), exist_ok=True) 53 | os.makedirs(os.path.join(valid_save_dir, 'flow'), exist_ok=True) 54 | 55 | for index in range(train_length): 56 | dist_field = np.flipud(simul_data['dfun'][()][index,:,:]) 57 | u = np.flipud(simul_data['velx'][()][index,:,:]) 58 | v = -1*np.flipud(simul_data['vely'][()][index,:,:]) 59 | u[dist_field < 0] = 0 60 | v[dist_field < 0] = 0 61 | u = u * U_C * pixel_density * secs_per_frame 62 | v = v * U_C * pixel_density * secs_per_frame 63 | dist_field[dist_field>0] *= (255/dist_field.max()) 64 | dist_field[dist_field<0] = 255 65 | dist_field = dist_field.astype(np.uint8) 66 | plt.imsave(os.path.join(train_save_dir, 'img', '{:04d}'.format(index) + '.png'), dist_field, cmap='gray') 67 | write_flo(os.path.join(train_save_dir, 'flow', '{:04d}'.format(index) + '.flo'), u, v) 68 | if index%10 == 0: 69 | print(f'{index} files done for {sim_file}, u_max = {u.max()}, v_max = {v.max()}') 70 | 71 | for index in range(train_length, num_timesteps): 72 | dist_field = np.flipud(simul_data['dfun'][()][index,:,:]) 73 | u = np.flipud(simul_data['velx'][()][index,:,:]) 74 | v = -1*np.flipud(simul_data['vely'][()][index,:,:]) 75 | u[dist_field < 0] = 0 76 | v[dist_field < 0] = 0 77 | u = u * U_C * pixel_density * secs_per_frame 78 | v = v * U_C * pixel_density * secs_per_frame 79 | dist_field[dist_field>0] *= (255/dist_field.max()) 80 | dist_field[dist_field<0] = 255 81 | dist_field = dist_field.astype(np.uint8) 82 | plt.imsave(os.path.join(valid_save_dir, 'img', '{:04d}'.format(index) + '.png'), dist_field, cmap='gray') 83 | write_flo(os.path.join(valid_save_dir, 'flow', '{:04d}'.format(index) + '.flo'), u, v) 84 | if index%10 == 0: 85 | print(f'{index} files done for {sim_file}, u_max = {u.max()}, v_max = {v.max()}') 86 | 87 | if __name__ == '__main__': 88 | parser = argparse.ArgumentParser() 89 | 90 | parser.add_argument('--ip_dir', type=str, 91 | help='path to the directory of hdf5 simulation files of a particular 
boiling study') 92 | parser.add_argument('--op_dir', type=str, default='Boiling', 93 | help='path to save the optical flow training set') 94 | parser.add_argument('--train_valid_split', type=float, default=0.8, 95 | help='fraction of frames used for training; the remainder is saved for validation') 96 | parser.add_argument('--plot_interval', type=float, default=1.0, 97 | help='non-dimensional time interval at which simulation plot files were generated.') 98 | args = parser.parse_args() 99 | 100 | sim_files = glob.glob(f'{args.ip_dir}/*.hdf5') 101 | 102 | for sim_file in sim_files: 103 | make_dataset(sim_file, args.op_dir, args.train_valid_split, args.plot_interval) 104 | -------------------------------------------------------------------------------- /sciml/README.md: -------------------------------------------------------------------------------- 1 | # Scientific Machine Learning 2 | 3 | ## Environment Setup 4 | 5 | The code assumes access to a fairly modern Nvidia GPU, though 6 | it may also work on AMD GPUs if PyTorch is installed with ROCm support. 7 | Results have been reproduced on a Linux cluster with V100, A30, and A100 GPUs using PyTorch 2.0 and CUDA 11.7. 8 | 9 | To install dependencies, we recommend creating a conda environment: 10 | 11 | ```console 12 | conda env create -n bubble-sciml -f conda/pytorch-2.0.1-cuda-11.7.yaml 13 | ``` 14 | 15 | 16 | Our sample application code uses Hydra to manage different configurations. 17 | For example, we treat each simulation type as a dataset: `conf/dataset/*.yaml`. 18 | Similarly, each model is treated as a separate experiment: `conf/experiment/*.yaml`. 19 | 20 | For example, training a temperature prediction UNet model on the subcooled boiling dataset is simple: 21 | 22 | ```console 23 | python sciml/train.py dataset=PB_SubCooled experiment=temp_unet2d 24 | ``` 25 | 26 | If you want to run a pretrained model, you can specify the `model_checkpoint` path: 27 | 28 | ```console 29 | python sciml/train.py dataset=PB_SubCooled experiment=temp_unet2d model_checkpoint= 30 | ``` 31 | 32 | The config file `conf/default.yaml` assumes that the datasets are extracted to the same location. 33 | **This location should be set by the user. By default, this setting is empty**. 34 | Setting the `data_base_dir` can be done by explicitly updating `conf/default.yaml` or by 35 | specifying the dataset base directory when running the Python scripts. 36 | 37 | For example, if you downloaded two datasets to 38 | 39 | ```console 40 | /your/path/to/BubbleML/saturated.hdf5 41 | /your/path/to/BubbleML/subcooled.hdf5 42 | ``` 43 | 44 | then, to train a UNet model on the subcooled boiling dataset, just run 45 | 46 | ```console 47 | python sciml/train.py \ 48 | data_base_dir=/your/path/to/BubbleML \ 49 | dataset=PB_SubCooled experiment=temp_unet 50 | ``` 51 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/debug_tools.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | 4 | units = { 5 | 0: 'B', 6 | 1: 'KiB', 7 | 2: 'MiB', 8 | 3: 'GiB', 9 | 4: 'TiB' 10 | } 11 | 12 | 13 | def format_mem(x): 14 | """ 15 | Takes integer 'x' in bytes and returns a number in [0, 1024) and 16 | the corresponding unit.
17 | 18 | """ 19 | if abs(x) < 1024: 20 | return round(x, 2), 'B' 21 | 22 | scale = math.log2(abs(x)) // 10 23 | scaled_x = x / 1024 ** scale 24 | unit = units[scale] 25 | 26 | if int(scaled_x) == scaled_x: 27 | return int(scaled_x), unit 28 | 29 | # rounding leads to 2 or fewer decimal places, as required 30 | return round(scaled_x, 2), unit 31 | 32 | 33 | def format_tensor_size(x): 34 | val, unit = format_mem(x) 35 | return f'{val}{unit}' 36 | 37 | 38 | class CudaMemoryDebugger(): 39 | """ 40 | Helper to track changes in CUDA memory. 41 | 42 | """ 43 | DEVICE = 'cuda' 44 | LAST_MEM = 0 45 | ENABLED = True 46 | 47 | 48 | def __init__(self, print_mem): 49 | self.print_mem = print_mem 50 | if not CudaMemoryDebugger.ENABLED: 51 | return 52 | 53 | cur_mem = torch.cuda.memory_allocated(CudaMemoryDebugger.DEVICE) 54 | cur_mem_fmt, cur_mem_unit = format_mem(cur_mem) 55 | print(f'cuda allocated (initial): {cur_mem_fmt:.2f}{cur_mem_unit}') 56 | CudaMemoryDebugger.LAST_MEM = cur_mem 57 | 58 | def print(self,id_str=None): 59 | if not CudaMemoryDebugger.ENABLED: 60 | return 61 | 62 | desc = 'cuda allocated' 63 | 64 | if id_str is not None: 65 | desc += f' ({id_str})' 66 | 67 | desc += ':' 68 | 69 | cur_mem = torch.cuda.memory_allocated(CudaMemoryDebugger.DEVICE) 70 | cur_mem_fmt, cur_mem_unit = format_mem(cur_mem) 71 | 72 | diff = cur_mem - CudaMemoryDebugger.LAST_MEM 73 | if self.print_mem: 74 | if diff == 0: 75 | print(f'{desc} {cur_mem_fmt:.2f}{cur_mem_unit} (no change)') 76 | 77 | else: 78 | diff_fmt, diff_unit = format_mem(diff) 79 | print(f'{desc} {cur_mem_fmt:.2f}{cur_mem_unit}' 80 | f' ({diff_fmt:+}{diff_unit})') 81 | 82 | CudaMemoryDebugger.LAST_MEM = cur_mem 83 | 84 | 85 | def print_tensor_mem(x, id_str=None): 86 | """ 87 | Prints the memory required by tensor 'x'. 88 | 89 | """ 90 | if not CudaMemoryDebugger.ENABLED: 91 | return 92 | 93 | desc = 'memory' 94 | 95 | if id_str is not None: 96 | desc += f' ({id_str})' 97 | 98 | desc += ':' 99 | 100 | val, unit = format_mem(x.element_size() * x.nelement()) 101 | 102 | print(f'{desc} {val}{unit}') 103 | 104 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/dnnlib/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # 3 | # NVIDIA CORPORATION and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | from .util import EasyDict, make_cache_dir_path -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # 3 | # NVIDIA CORPORATION and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA CORPORATION is strictly prohibited. 
8 | 9 | # empty 10 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/bias_act.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include <torch/extension.h> 10 | #include <ATen/cuda/CUDAContext.h> 11 | #include <c10/cuda/CUDAGuard.h> 12 | #include "bias_act.h" 13 | 14 | //------------------------------------------------------------------------ 15 | 16 | static bool has_same_layout(torch::Tensor x, torch::Tensor y) 17 | { 18 | if (x.dim() != y.dim()) 19 | return false; 20 | for (int64_t i = 0; i < x.dim(); i++) 21 | { 22 | if (x.size(i) != y.size(i)) 23 | return false; 24 | if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) 25 | return false; 26 | } 27 | return true; 28 | } 29 | 30 | //------------------------------------------------------------------------ 31 | 32 | static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp) 33 | { 34 | // Validate arguments. 35 | TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); 36 | TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); 37 | TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); 38 | TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); 39 | TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); 40 | TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); 41 | TORCH_CHECK(b.dim() == 1, "b must have rank 1"); 42 | TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); 43 | TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); 44 | TORCH_CHECK(grad >= 0, "grad must be non-negative"); 45 | 46 | // Validate layout. 47 | TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); 48 | TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); 49 | TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); 50 | TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); 51 | TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); 52 | 53 | // Create output tensor. 54 | const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); 55 | torch::Tensor y = torch::empty_like(x); 56 | TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); 57 | 58 | // Initialize CUDA kernel parameters. 59 | bias_act_kernel_params p; 60 | p.x = x.data_ptr(); 61 | p.b = (b.numel()) ? b.data_ptr() : NULL; 62 | p.xref = (xref.numel()) ?
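// Note: the optional tensors (xref, yref, dy) may be empty; the kernel receives NULL for them and substitutes defaults.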
xref.data_ptr() : NULL; 63 | p.yref = (yref.numel()) ? yref.data_ptr() : NULL; 64 | p.dy = (dy.numel()) ? dy.data_ptr() : NULL; 65 | p.y = y.data_ptr(); 66 | p.grad = grad; 67 | p.act = act; 68 | p.alpha = alpha; 69 | p.gain = gain; 70 | p.clamp = clamp; 71 | p.sizeX = (int)x.numel(); 72 | p.sizeB = (int)b.numel(); 73 | p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; 74 | 75 | // Choose CUDA kernel. 76 | void* kernel; 77 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] 78 | { 79 | kernel = choose_bias_act_kernel<scalar_t>(p); 80 | }); 81 | TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); 82 | 83 | // Launch CUDA kernel. 84 | p.loopX = 4; 85 | int blockSize = 4 * 32; 86 | int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; 87 | void* args[] = {&p}; 88 | AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); 89 | return y; 90 | } 91 | 92 | //------------------------------------------------------------------------ 93 | 94 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) 95 | { 96 | m.def("bias_act", &bias_act); 97 | } 98 | 99 | //------------------------------------------------------------------------ 100 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/bias_act.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include <c10/util/Half.h> 10 | #include "bias_act.h" 11 | 12 | //------------------------------------------------------------------------ 13 | // Helpers. 14 | 15 | template <class T> struct InternalType; 16 | template <> struct InternalType<double> { typedef double scalar_t; }; 17 | template <> struct InternalType<float> { typedef float scalar_t; }; 18 | template <> struct InternalType<c10::Half> { typedef float scalar_t; }; 19 | 20 | //------------------------------------------------------------------------ 21 | // CUDA kernel. 22 | 23 | template <class T, int A> 24 | __global__ void bias_act_kernel(bias_act_kernel_params p) 25 | { 26 | typedef typename InternalType<T>::scalar_t scalar_t; 27 | int G = p.grad; 28 | scalar_t alpha = (scalar_t)p.alpha; 29 | scalar_t gain = (scalar_t)p.gain; 30 | scalar_t clamp = (scalar_t)p.clamp; 31 | scalar_t one = (scalar_t)1; 32 | scalar_t two = (scalar_t)2; 33 | scalar_t expRange = (scalar_t)80; 34 | scalar_t halfExpRange = (scalar_t)40; 35 | scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946; 36 | scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717; 37 | 38 | // Loop over elements. 39 | int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x; 40 | for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x) 41 | { 42 | // Load. 43 | scalar_t x = (scalar_t)((const T*)p.x)[xi]; 44 | scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0; 45 | scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0; 46 | scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0; 47 | scalar_t dy = (p.dy) ?
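// (No upstream gradient in the forward pass, so dy defaults to 1 and passes through the shared gain/clamp math below.)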
(scalar_t)((const T*)p.dy)[xi] : one; 48 | scalar_t yy = (gain != 0) ? yref / gain : 0; 49 | scalar_t y = 0; 50 | 51 | // Apply bias. 52 | ((G == 0) ? x : xref) += b; 53 | 54 | // linear 55 | if (A == 1) 56 | { 57 | if (G == 0) y = x; 58 | if (G == 1) y = x; 59 | } 60 | 61 | // relu 62 | if (A == 2) 63 | { 64 | if (G == 0) y = (x > 0) ? x : 0; 65 | if (G == 1) y = (yy > 0) ? x : 0; 66 | } 67 | 68 | // lrelu 69 | if (A == 3) 70 | { 71 | if (G == 0) y = (x > 0) ? x : x * alpha; 72 | if (G == 1) y = (yy > 0) ? x : x * alpha; 73 | } 74 | 75 | // tanh 76 | if (A == 4) 77 | { 78 | if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); } 79 | if (G == 1) y = x * (one - yy * yy); 80 | if (G == 2) y = x * (one - yy * yy) * (-two * yy); 81 | } 82 | 83 | // sigmoid 84 | if (A == 5) 85 | { 86 | if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one); 87 | if (G == 1) y = x * yy * (one - yy); 88 | if (G == 2) y = x * yy * (one - yy) * (one - two * yy); 89 | } 90 | 91 | // elu 92 | if (A == 6) 93 | { 94 | if (G == 0) y = (x >= 0) ? x : exp(x) - one; 95 | if (G == 1) y = (yy >= 0) ? x : x * (yy + one); 96 | if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one); 97 | } 98 | 99 | // selu 100 | if (A == 7) 101 | { 102 | if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one); 103 | if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha); 104 | if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha); 105 | } 106 | 107 | // softplus 108 | if (A == 8) 109 | { 110 | if (G == 0) y = (x > expRange) ? x : log(exp(x) + one); 111 | if (G == 1) y = x * (one - exp(-yy)); 112 | if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); } 113 | } 114 | 115 | // swish 116 | if (A == 9) 117 | { 118 | if (G == 0) 119 | y = (x < -expRange) ? 0 : x / (exp(-x) + one); 120 | else 121 | { 122 | scalar_t c = exp(xref); 123 | scalar_t d = c + one; 124 | if (G == 1) 125 | y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d); 126 | else 127 | y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d); 128 | yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain; 129 | } 130 | } 131 | 132 | // Apply gain. 133 | y *= gain * dy; 134 | 135 | // Clamp. 136 | if (clamp >= 0) 137 | { 138 | if (G == 0) 139 | y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp; 140 | else 141 | y = (yref > -clamp & yref < clamp) ? y : 0; 142 | } 143 | 144 | // Store. 145 | ((T*)p.y)[xi] = (T)y; 146 | } 147 | } 148 | 149 | //------------------------------------------------------------------------ 150 | // CUDA kernel selection. 151 | 152 | template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p) 153 | { 154 | if (p.act == 1) return (void*)bias_act_kernel<T, 1>; 155 | if (p.act == 2) return (void*)bias_act_kernel<T, 2>; 156 | if (p.act == 3) return (void*)bias_act_kernel<T, 3>; 157 | if (p.act == 4) return (void*)bias_act_kernel<T, 4>; 158 | if (p.act == 5) return (void*)bias_act_kernel<T, 5>; 159 | if (p.act == 6) return (void*)bias_act_kernel<T, 6>; 160 | if (p.act == 7) return (void*)bias_act_kernel<T, 7>; 161 | if (p.act == 8) return (void*)bias_act_kernel<T, 8>; 162 | if (p.act == 9) return (void*)bias_act_kernel<T, 9>; 163 | return NULL; 164 | } 165 | 166 | //------------------------------------------------------------------------ 167 | // Template specializations.
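// One explicit instantiation per supported dtype; the AT_DISPATCH call in bias_act.cpp links against these.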
168 | 169 | template void* choose_bias_act_kernel<double> (const bias_act_kernel_params& p); 170 | template void* choose_bias_act_kernel<float> (const bias_act_kernel_params& p); 171 | template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p); 172 | 173 | //------------------------------------------------------------------------ 174 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/bias_act.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | //------------------------------------------------------------------------ 10 | // CUDA kernel parameters. 11 | 12 | struct bias_act_kernel_params 13 | { 14 | const void* x; // [sizeX] 15 | const void* b; // [sizeB] or NULL 16 | const void* xref; // [sizeX] or NULL 17 | const void* yref; // [sizeX] or NULL 18 | const void* dy; // [sizeX] or NULL 19 | void* y; // [sizeX] 20 | 21 | int grad; 22 | int act; 23 | float alpha; 24 | float gain; 25 | float clamp; 26 | 27 | int sizeX; 28 | int sizeB; 29 | int stepB; 30 | int loopX; 31 | }; 32 | 33 | //------------------------------------------------------------------------ 34 | // CUDA kernel selection. 35 | 36 | template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p); 37 | 38 | //------------------------------------------------------------------------ 39 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/filtered_lrelu.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include <cuda_runtime.h> 10 | 11 | //------------------------------------------------------------------------ 12 | // CUDA kernel parameters. 13 | 14 | struct filtered_lrelu_kernel_params 15 | { 16 | // These parameters decide which kernel to use. 17 | int up; // upsampling ratio (1, 2, 4) 18 | int down; // downsampling ratio (1, 2, 4) 19 | int2 fuShape; // [size, 1] | [size, size] 20 | int2 fdShape; // [size, 1] | [size, size] 21 | 22 | int _dummy; // Alignment. 23 | 24 | // Rest of the parameters. 25 | const void* x; // Input tensor. 26 | void* y; // Output tensor. 27 | const void* b; // Bias tensor. 28 | unsigned char* s; // Sign tensor in/out. NULL if unused. 29 | const float* fu; // Upsampling filter. 30 | const float* fd; // Downsampling filter. 31 | 32 | int2 pad0; // Left/top padding. 33 | float gain; // Additional gain factor. 34 | float slope; // Leaky ReLU slope on negative side. 35 | float clamp; // Clamp after nonlinearity.
36 | int flip; // Filter kernel flip for gradient computation. 37 | 38 | int tilesXdim; // Original number of horizontal output tiles. 39 | int tilesXrep; // Number of horizontal tiles per CTA. 40 | int blockZofs; // Block z offset to support large minibatch, channel dimensions. 41 | 42 | int4 xShape; // [width, height, channel, batch] 43 | int4 yShape; // [width, height, channel, batch] 44 | int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused. 45 | int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. 46 | int swLimit; // Active width of sign tensor in bytes. 47 | 48 | longlong4 xStride; // Strides of all tensors except signs, same component order as shapes. 49 | longlong4 yStride; // 50 | int64_t bStride; // 51 | longlong3 fuStride; // 52 | longlong3 fdStride; // 53 | }; 54 | 55 | struct filtered_lrelu_act_kernel_params 56 | { 57 | void* x; // Input/output, modified in-place. 58 | unsigned char* s; // Sign tensor in/out. NULL if unused. 59 | 60 | float gain; // Additional gain factor. 61 | float slope; // Leaky ReLU slope on negative side. 62 | float clamp; // Clamp after nonlinearity. 63 | 64 | int4 xShape; // [width, height, channel, batch] 65 | longlong4 xStride; // Input/output tensor strides, same order as in shape. 66 | int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused. 67 | int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. 68 | }; 69 | 70 | //------------------------------------------------------------------------ 71 | // CUDA kernel specialization. 72 | 73 | struct filtered_lrelu_kernel_spec 74 | { 75 | void* setup; // Function for filter kernel setup. 76 | void* exec; // Function for main operation. 77 | int2 tileOut; // Width/height of launch tile. 78 | int numWarps; // Number of warps per thread block, determines launch block size. 79 | int xrep; // For processing multiple horizontal tiles per thread block. 80 | int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants. 81 | }; 82 | 83 | //------------------------------------------------------------------------ 84 | // CUDA kernel selection. 85 | 86 | template <class T, class index_t, bool signWrite, bool signRead> filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); 87 | template <class T, bool signWrite, bool signRead> void* choose_filtered_lrelu_act_kernel(void); 88 | template <bool signWrite, bool signRead> cudaError_t copy_filters(cudaStream_t stream); 89 | 90 | //------------------------------------------------------------------------ 91 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/filtered_lrelu_ns.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include "filtered_lrelu.cu" 10 | 11 | // Template/kernel specializations for no signs mode (no gradients required). 12 | 13 | // Full op, 32-bit indexing.
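// (The "ns" variant instantiates everything with signWrite=false, signRead=false, i.e. no sign tensor is read or written.)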
14 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 15 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 16 | 17 | // Full op, 64-bit indexing. 18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 20 | 21 | // Activation/signs only for generic variant. 64-bit indexing. 22 | template void* choose_filtered_lrelu_act_kernel<c10::Half, false, false>(void); 23 | template void* choose_filtered_lrelu_act_kernel<float, false, false>(void); 24 | template void* choose_filtered_lrelu_act_kernel<double, false, false>(void); 25 | 26 | // Copy filters to constant memory. 27 | template cudaError_t copy_filters<false, false>(cudaStream_t stream); 28 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/filtered_lrelu_rd.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include "filtered_lrelu.cu" 10 | 11 | // Template/kernel specializations for sign read mode. 12 | 13 | // Full op, 32-bit indexing. 14 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB); 15 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB); 16 | 17 | // Full op, 64-bit indexing. 18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB); 19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB); 20 | 21 | // Activation/signs only for generic variant. 64-bit indexing. 22 | template void* choose_filtered_lrelu_act_kernel<c10::Half, false, true>(void); 23 | template void* choose_filtered_lrelu_act_kernel<float, false, true>(void); 24 | template void* choose_filtered_lrelu_act_kernel<double, false, true>(void); 25 | 26 | // Copy filters to constant memory. 27 | template cudaError_t copy_filters<false, true>(cudaStream_t stream); 28 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/filtered_lrelu_wr.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include "filtered_lrelu.cu" 10 | 11 | // Template/kernel specializations for sign write mode. 12 | 13 | // Full op, 32-bit indexing.
14 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 15 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 16 | 17 | // Full op, 64-bit indexing. 18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 20 | 21 | // Activation/signs only for generic variant. 64-bit indexing. 22 | template void* choose_filtered_lrelu_act_kernel<c10::Half, true, false>(void); 23 | template void* choose_filtered_lrelu_act_kernel<float, true, false>(void); 24 | template void* choose_filtered_lrelu_act_kernel<double, true, false>(void); 25 | 26 | // Copy filters to constant memory. 27 | template cudaError_t copy_filters<true, false>(cudaStream_t stream); 28 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/fma.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # 3 | # NVIDIA CORPORATION and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | """Fused multiply-add, with slightly faster gradients than `torch.addcmul()`.""" 10 | 11 | import torch 12 | 13 | #---------------------------------------------------------------------------- 14 | 15 | def fma(a, b, c): # => a * b + c 16 | return _FusedMultiplyAdd.apply(a, b, c) 17 | 18 | #---------------------------------------------------------------------------- 19 | 20 | class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c 21 | @staticmethod 22 | def forward(ctx, a, b, c): # pylint: disable=arguments-differ 23 | out = torch.addcmul(c, a, b) 24 | ctx.save_for_backward(a, b) 25 | ctx.c_shape = c.shape 26 | return out 27 | 28 | @staticmethod 29 | def backward(ctx, dout): # pylint: disable=arguments-differ 30 | a, b = ctx.saved_tensors 31 | c_shape = ctx.c_shape 32 | da = None 33 | db = None 34 | dc = None 35 | 36 | if ctx.needs_input_grad[0]: 37 | da = _unbroadcast(dout * b, a.shape) 38 | 39 | if ctx.needs_input_grad[1]: 40 | db = _unbroadcast(dout * a, b.shape) 41 | 42 | if ctx.needs_input_grad[2]: 43 | dc = _unbroadcast(dout, c_shape) 44 | 45 | return da, db, dc 46 | 47 | #---------------------------------------------------------------------------- 48 | 49 | def _unbroadcast(x, shape): 50 | extra_dims = x.ndim - len(shape) 51 | assert extra_dims >= 0 52 | dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] 53 | if len(dim): 54 | x = x.sum(dim=dim, keepdim=True) 55 | if extra_dims: 56 | x = x.reshape(-1, *x.shape[extra_dims+1:]) 57 | assert x.shape == shape 58 | return x 59 | 60 | #---------------------------------------------------------------------------- 61 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/grid_sample_gradfix.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES.
All rights reserved. 2 | # 3 | # NVIDIA CORPORATION and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | """Custom replacement for `torch.nn.functional.grid_sample` that 10 | supports arbitrarily high order gradients between the input and output. 11 | Only works on 2D images and assumes 12 | `mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" 13 | 14 | import torch 15 | from pkg_resources import parse_version 16 | 17 | # pylint: disable=redefined-builtin 18 | # pylint: disable=arguments-differ 19 | # pylint: disable=protected-access 20 | 21 | #---------------------------------------------------------------------------- 22 | 23 | enabled = False # Enable the custom op by setting this to true. 24 | _use_pytorch_1_11_api = parse_version(torch.__version__) >= parse_version('1.11.0a') # Allow prerelease builds of 1.11 25 | 26 | #---------------------------------------------------------------------------- 27 | 28 | def grid_sample(input, grid): 29 | if _should_use_custom_op(): 30 | return _GridSample2dForward.apply(input, grid) 31 | return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) 32 | 33 | #---------------------------------------------------------------------------- 34 | 35 | def _should_use_custom_op(): 36 | return enabled 37 | 38 | #---------------------------------------------------------------------------- 39 | 40 | class _GridSample2dForward(torch.autograd.Function): 41 | @staticmethod 42 | def forward(ctx, input, grid): 43 | assert input.ndim == 4 44 | assert grid.ndim == 4 45 | output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) 46 | ctx.save_for_backward(input, grid) 47 | return output 48 | 49 | @staticmethod 50 | def backward(ctx, grad_output): 51 | input, grid = ctx.saved_tensors 52 | grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) 53 | return grad_input, grad_grid 54 | 55 | #---------------------------------------------------------------------------- 56 | 57 | class _GridSample2dBackward(torch.autograd.Function): 58 | @staticmethod 59 | def forward(ctx, grad_output, input, grid): 60 | op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') 61 | if _use_pytorch_1_11_api: 62 | output_mask = (ctx.needs_input_grad[1], ctx.needs_input_grad[2]) 63 | grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False, output_mask) 64 | else: 65 | grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) 66 | ctx.save_for_backward(grid) 67 | return grad_input, grad_grid 68 | 69 | @staticmethod 70 | def backward(ctx, grad2_grad_input, grad2_grad_grid): 71 | _ = grad2_grad_grid # unused 72 | grid, = ctx.saved_tensors 73 | grad2_grad_output = None 74 | grad2_input = None 75 | grad2_grid = None 76 | 77 | if ctx.needs_input_grad[0]: 78 | grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid) 79 | 80 | assert not ctx.needs_input_grad[2] 81 | return grad2_grad_output, grad2_input, grad2_grid 82 | 83 | #---------------------------------------------------------------------------- 84 | -------------------------------------------------------------------------------- 
/sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/upfirdn2d.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include <torch/extension.h> 10 | #include <ATen/cuda/CUDAContext.h> 11 | #include <c10/cuda/CUDAGuard.h> 12 | #include "upfirdn2d.h" 13 | 14 | //------------------------------------------------------------------------ 15 | 16 | static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain) 17 | { 18 | // Validate arguments. 19 | TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); 20 | TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x"); 21 | TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32"); 22 | TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); 23 | TORCH_CHECK(f.numel() <= INT_MAX, "f is too large"); 24 | TORCH_CHECK(x.numel() > 0, "x has zero size"); 25 | TORCH_CHECK(f.numel() > 0, "f has zero size"); 26 | TORCH_CHECK(x.dim() == 4, "x must be rank 4"); 27 | TORCH_CHECK(f.dim() == 2, "f must be rank 2"); 28 | TORCH_CHECK((x.size(0)-1)*x.stride(0) + (x.size(1)-1)*x.stride(1) + (x.size(2)-1)*x.stride(2) + (x.size(3)-1)*x.stride(3) <= INT_MAX, "x memory footprint is too large"); 29 | TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1"); 30 | TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1"); 31 | TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1"); 32 | 33 | // Create output tensor. 34 | const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); 35 | int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx; 36 | int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy; 37 | TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1"); 38 | torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format()); 39 | TORCH_CHECK(y.numel() <= INT_MAX, "output is too large"); 40 | TORCH_CHECK((y.size(0)-1)*y.stride(0) + (y.size(1)-1)*y.stride(1) + (y.size(2)-1)*y.stride(2) + (y.size(3)-1)*y.stride(3) <= INT_MAX, "output memory footprint is too large"); 41 | 42 | // Initialize CUDA kernel parameters. 43 | upfirdn2d_kernel_params p; 44 | p.x = x.data_ptr(); 45 | p.f = f.data_ptr<float>(); 46 | p.y = y.data_ptr(); 47 | p.up = make_int2(upx, upy); 48 | p.down = make_int2(downx, downy); 49 | p.pad0 = make_int2(padx0, pady0); 50 | p.flip = (flip) ? 1 : 0; 51 | p.gain = gain; 52 | p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); 53 | p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0)); 54 | p.filterSize = make_int2((int)f.size(1), (int)f.size(0)); 55 | p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0)); 56 | p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); 57 | p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0)); 58 | p.sizeMajor = (p.inStride.z == 1) ?
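// If the channel stride is 1 (channels-last layout), channels stay in the minor loop; otherwise they are folded into the major loop with the batch.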
p.inSize.w : p.inSize.w * p.inSize.z; 59 | p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1; 60 | 61 | // Choose CUDA kernel. 62 | upfirdn2d_kernel_spec spec; 63 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] 64 | { 65 | spec = choose_upfirdn2d_kernel<scalar_t>(p); 66 | }); 67 | 68 | // Set looping options. 69 | p.loopMajor = (p.sizeMajor - 1) / 16384 + 1; 70 | p.loopMinor = spec.loopMinor; 71 | p.loopX = spec.loopX; 72 | p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1; 73 | p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1; 74 | 75 | // Compute grid size. 76 | dim3 blockSize, gridSize; 77 | if (spec.tileOutW < 0) // large 78 | { 79 | blockSize = dim3(4, 32, 1); 80 | gridSize = dim3( 81 | ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor, 82 | (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1, 83 | p.launchMajor); 84 | } 85 | else // small 86 | { 87 | blockSize = dim3(256, 1, 1); 88 | gridSize = dim3( 89 | ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor, 90 | (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1, 91 | p.launchMajor); 92 | } 93 | 94 | // Launch CUDA kernel. 95 | void* args[] = {&p}; 96 | AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); 97 | return y; 98 | } 99 | 100 | //------------------------------------------------------------------------ 101 | 102 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) 103 | { 104 | m.def("upfirdn2d", &upfirdn2d); 105 | } 106 | 107 | //------------------------------------------------------------------------ 108 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/torch_utils/ops/upfirdn2d.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include <cuda_runtime.h> 10 | 11 | //------------------------------------------------------------------------ 12 | // CUDA kernel parameters. 13 | 14 | struct upfirdn2d_kernel_params 15 | { 16 | const void* x; 17 | const float* f; 18 | void* y; 19 | 20 | int2 up; 21 | int2 down; 22 | int2 pad0; 23 | int flip; 24 | float gain; 25 | 26 | int4 inSize; // [width, height, channel, batch] 27 | int4 inStride; 28 | int2 filterSize; // [width, height] 29 | int2 filterStride; 30 | int4 outSize; // [width, height, channel, batch] 31 | int4 outStride; 32 | int sizeMinor; 33 | int sizeMajor; 34 | 35 | int loopMinor; 36 | int loopMajor; 37 | int loopX; 38 | int launchMinor; 39 | int launchMajor; 40 | }; 41 | 42 | //------------------------------------------------------------------------ 43 | // CUDA kernel specialization. 44 | 45 | struct upfirdn2d_kernel_spec 46 | { 47 | void* kernel; 48 | int tileOutW; 49 | int tileOutH; 50 | int loopMinor; 51 | int loopX; 52 | }; 53 | 54 | //------------------------------------------------------------------------ 55 | // CUDA kernel selection.
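// Defined in upfirdn2d.cu (not shown here), which presumably specializes this per dtype and returns the tile/loop configuration for the given problem.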
56 | 57 | template <class T> upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); 58 | 59 | //------------------------------------------------------------------------ 60 | -------------------------------------------------------------------------------- /sciml/models/ConvolutionalNeuralOperator/training/FourierFeatures.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | class FourierFeatures(nn.Module): 7 | 8 | def __init__(self, scale, mapping_size, device): 9 | super().__init__() 10 | self.mapping_size = mapping_size 11 | self.scale = scale 12 | self.B = scale * torch.randn((self.mapping_size, 2)).to(device) 13 | 14 | def forward(self, x): 15 | # x is the set of coordinates, passed as a tensor of shape (nx, ny, 2) 16 | if self.scale != 0: 17 | x_proj = torch.matmul((2. * np.pi * x), self.B.T) 18 | inp = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], axis=-1) 19 | return inp 20 | else: 21 | return x 22 | -------------------------------------------------------------------------------- /sciml/models/factorized_fno/feedforward.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .linear import WNLinear 4 | 5 | 6 | class FeedForward(nn.Module): 7 | def __init__(self, dim, factor, ff_weight_norm, n_layers, layer_norm, dropout): 8 | super().__init__() 9 | self.layers = nn.ModuleList([]) 10 | for i in range(n_layers): 11 | in_dim = dim if i == 0 else dim * factor 12 | out_dim = dim if i == n_layers - 1 else dim * factor 13 | self.layers.append(nn.Sequential( 14 | WNLinear(in_dim, out_dim, wnorm=ff_weight_norm), 15 | nn.Dropout(dropout), 16 | nn.ReLU(inplace=True) if i < n_layers - 1 else nn.Identity(), 17 | nn.LayerNorm(out_dim) if layer_norm and i == n_layers - 18 | 1 else nn.Identity(), 19 | )) 20 | 21 | def forward(self, x): 22 | for layer in self.layers: 23 | x = layer(x) 24 | return x 25 | -------------------------------------------------------------------------------- /sciml/models/factorized_fno/linear.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import math 3 | 4 | import torch.nn as nn 5 | from torch.nn.utils import weight_norm 6 | from torch.nn.utils.weight_norm import WeightNorm 7 | 8 | class WNLinear(nn.Linear): 9 | def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None, wnorm=False): 10 | super().__init__(in_features=in_features, 11 | out_features=out_features, 12 | bias=bias, 13 | device=device, 14 | dtype=dtype) 15 | if wnorm: 16 | weight_norm(self) 17 | 18 | self._fix_weight_norm_deepcopy() 19 | 20 | def _fix_weight_norm_deepcopy(self): 21 | # Fix bug where deepcopy doesn't work with weightnorm.
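# (weight_norm registers a WeightNorm forward pre-hook; deepcopy fails on the hook-managed weight attribute, so it is removed below and restored after the copy.)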
22 | # Taken from https://github.com/pytorch/pytorch/issues/28594#issuecomment-679534348 23 | orig_deepcopy = getattr(self, '__deepcopy__', None) 24 | 25 | def __deepcopy__(self, memo): 26 | # save and delete all weightnorm weights on self 27 | weights = {} 28 | for hook in self._forward_pre_hooks.values(): 29 | if isinstance(hook, WeightNorm): 30 | weights[hook.name] = getattr(self, hook.name) 31 | delattr(self, hook.name) 32 | # remove this deepcopy method, restoring the object's original one if necessary 33 | __deepcopy__ = self.__deepcopy__ 34 | if orig_deepcopy: 35 | self.__deepcopy__ = orig_deepcopy 36 | else: 37 | del self.__deepcopy__ 38 | # actually do the copy 39 | result = copy.deepcopy(self) 40 | # restore weights and method on self 41 | for name, value in weights.items(): 42 | setattr(self, name, value) 43 | self.__deepcopy__ = __deepcopy__ 44 | return result 45 | # bind __deepcopy__ to the weightnorm'd layer 46 | self.__deepcopy__ = __deepcopy__.__get__(self, self.__class__) 47 | -------------------------------------------------------------------------------- /sciml/models/get_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | from neuralop.models import FNO, UNO 3 | from .factorized_fno.factorized_fno import FNOFactorized2DBlock 4 | from .gefno.gfno import GFNO2d 5 | from .pdebench.unet import UNet2d 6 | from .pdearena.unet import Unet, FourierUnet 7 | from .ConvolutionalNeuralOperator.CNOModule import CNO 8 | 9 | from torch.nn.parallel import DistributedDataParallel as DDP 10 | 11 | 12 | _UNET_BENCH = 'unet_bench' 13 | 14 | _UNET_ARENA = 'unet_arena' 15 | _UFNET = 'ufnet' 16 | 17 | _FNO = 'fno' 18 | _UNO = 'uno' 19 | 20 | _FFNO = 'factorized_fno' 21 | 22 | _GFNO = 'gfno' 23 | 24 | _CNO = 'cno' 25 | 26 | _MODEL_LIST = [ 27 | _UNET_BENCH, 28 | _UNET_ARENA, 29 | _UFNET, 30 | _FNO, 31 | _UNO, 32 | _FFNO, 33 | _GFNO, 34 | _CNO 35 | ] 36 | 37 | def get_model(model_name, 38 | in_channels, 39 | out_channels, 40 | domain_rows, 41 | domain_cols, 42 | exp): 43 | assert model_name in _MODEL_LIST, f'Model name {model_name} invalid' 44 | if model_name == _UNET_ARENA: 45 | model = Unet(in_channels=in_channels, 46 | out_channels=out_channels, 47 | hidden_channels=exp.model.hidden_channels, 48 | ch_mults=[1,2,2,4,4], 49 | is_attn=[False]*5, 50 | activation='gelu', 51 | mid_attn=False, 52 | norm=True, 53 | use1x1=True) 54 | elif model_name == _UNET_BENCH: 55 | model = UNet2d(in_channels=in_channels, 56 | out_channels=out_channels, 57 | init_features=exp.model.init_features) 58 | elif model_name == _UFNET: 59 | model = FourierUnet(in_channels=in_channels, 60 | out_channels=out_channels, 61 | hidden_channels=exp.model.hidden_channels, 62 | # UFNET's fourier layers are in the middle of 63 | # the U, so it doesn't make sense to use the 2/3 64 | # setting like we do for the other models. 
65 | modes1=exp.model.modes1, 66 | modes2=exp.model.modes2, 67 | norm=True, 68 | n_fourier_layers=exp.model.n_fourier_layers) 69 | elif model_name == _FNO: 70 | model = FNO(n_modes=(exp.model.modes, exp.model.modes), 71 | hidden_channels=exp.model.hidden_channels, 72 | domain_padding=exp.model.domain_padding[0], 73 | in_channels=in_channels, 74 | out_channels=out_channels, 75 | n_layers=exp.model.n_layers, 76 | norm=exp.model.norm, 77 | rank=exp.model.rank, 78 | factorization='tucker', 79 | implementation='factorized', 80 | separable=False) 81 | elif model_name == _UNO: 82 | model = UNO(in_channels=in_channels, 83 | out_channels=out_channels, 84 | hidden_channels=exp.model.hidden_channels, 85 | projection_channels=exp.model.projection_channels, 86 | uno_out_channels=exp.model.uno_out_channels, 87 | uno_n_modes=exp.model.uno_n_modes, 88 | uno_scalings=exp.model.uno_scalings, 89 | n_layers=exp.model.n_layers, 90 | domain_padding=exp.model.domain_padding) 91 | elif model_name == _FFNO: 92 | model = FNOFactorized2DBlock(in_channels=in_channels, 93 | out_channels=out_channels, 94 | modes=exp.model.modes // 2, 95 | width=exp.model.width, 96 | dropout=exp.model.dropout, 97 | n_layers=exp.model.n_layers) 98 | elif model_name == _GFNO: 99 | model = GFNO2d(in_channels=in_channels, 100 | out_channels=out_channels, 101 | modes=exp.model.modes // 2, 102 | width=exp.model.width, 103 | reflection=exp.model.reflection, 104 | domain_padding=exp.model.domain_padding) # padding is NEW 105 | elif model_name == _CNO: 106 | model = CNO(in_dim=in_channels, 107 | in_size=exp.model.in_size, 108 | N_layers=exp.model.n_layers, 109 | out_dim=exp.train.future_window) 110 | if exp.distributed: 111 | local_rank = int(os.environ['LOCAL_RANK']) 112 | model = model.to(local_rank).float() 113 | model = DDP(model, device_ids=[local_rank], output_device=local_rank, 114 | find_unused_parameters=False) 115 | else: 116 | model = model.cuda().float() 117 | return model 118 | -------------------------------------------------------------------------------- /sciml/models/pdearena/activations.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | ACTIVATION_REGISTRY = { 4 | "relu": nn.ReLU(), 5 | "silu": nn.SiLU(), 6 | "gelu": nn.GELU(), 7 | "tanh": nn.Tanh(), 8 | "sigmoid": nn.Sigmoid(), 9 | } 10 | -------------------------------------------------------------------------------- /sciml/models/pdebench/unet.py: -------------------------------------------------------------------------------- 1 | """ 2 | U-Net. Implementation taken and modified from 3 | https://github.com/mateuszbuda/brain-segmentation-pytorch 4 | 5 | MIT License 6 | 7 | Copyright (c) 2019 mateuszbuda 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy 10 | of this software and associated documentation files (the "Software"), to deal 11 | in the Software without restriction, including without limitation the rights 12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 | copies of the Software, and to permit persons to whom the Software is 14 | furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 
18 | """ 19 | 20 | from collections import OrderedDict 21 | import torch 22 | from torch import nn 23 | 24 | class UNet2d(nn.Module): 25 | def __init__(self, in_channels, out_channels, init_features=32): 26 | super(UNet2d, self).__init__() 27 | features = init_features 28 | self.encoder1 = UNet2d._block(in_channels, features, name="enc1") 29 | self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) 30 | self.encoder2 = UNet2d._block(features, features * 2, name="enc2") 31 | self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) 32 | self.encoder3 = UNet2d._block(features * 2, features * 4, name="enc3") 33 | self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) 34 | self.encoder4 = UNet2d._block(features * 4, features * 8, name="enc4") 35 | self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) 36 | 37 | self.bottleneck = UNet2d._block(features * 8, features * 16, name="bottleneck") 38 | 39 | self.upconv4 = nn.ConvTranspose2d( 40 | features * 16, features * 8, kernel_size=2, stride=2 41 | ) 42 | self.decoder4 = UNet2d._block((features * 8) * 2, features * 8, name="dec4") 43 | self.upconv3 = nn.ConvTranspose2d( 44 | features * 8, features * 4, kernel_size=2, stride=2 45 | ) 46 | self.decoder3 = UNet2d._block((features * 4) * 2, features * 4, name="dec3") 47 | self.upconv2 = nn.ConvTranspose2d( 48 | features * 4, features * 2, kernel_size=2, stride=2 49 | ) 50 | self.decoder2 = UNet2d._block((features * 2) * 2, features * 2, name="dec2") 51 | self.upconv1 = nn.ConvTranspose2d( 52 | features * 2, features, kernel_size=2, stride=2 53 | ) 54 | self.decoder1 = UNet2d._block(features * 2, features, name="dec1") 55 | 56 | self.conv = nn.Conv2d( 57 | in_channels=features, out_channels=out_channels, kernel_size=1 58 | ) 59 | 60 | def forward(self, x): 61 | enc1 = self.encoder1(x) 62 | enc2 = self.encoder2(self.pool1(enc1)) 63 | enc3 = self.encoder3(self.pool2(enc2)) 64 | enc4 = self.encoder4(self.pool3(enc3)) 65 | 66 | bottleneck = self.bottleneck(self.pool4(enc4)) 67 | 68 | dec4 = self.upconv4(bottleneck) 69 | dec4 = torch.cat((dec4, enc4), dim=1) 70 | dec4 = self.decoder4(dec4) 71 | dec3 = self.upconv3(dec4) 72 | dec3 = torch.cat((dec3, enc3), dim=1) 73 | dec3 = self.decoder3(dec3) 74 | dec2 = self.upconv2(dec3) 75 | dec2 = torch.cat((dec2, enc2), dim=1) 76 | dec2 = self.decoder2(dec2) 77 | dec1 = self.upconv1(dec2) 78 | dec1 = torch.cat((dec1, enc1), dim=1) 79 | dec1 = self.decoder1(dec1) 80 | return self.conv(dec1) 81 | 82 | @staticmethod 83 | def _block(in_channels, features, name): 84 | return nn.Sequential( 85 | OrderedDict( 86 | [ 87 | ( 88 | name + "conv1", 89 | nn.Conv2d( 90 | in_channels=in_channels, 91 | out_channels=features, 92 | kernel_size=3, 93 | padding=1, 94 | bias=False, 95 | ), 96 | ), 97 | (name + "norm1", nn.BatchNorm2d(num_features=features)), 98 | (name + "tanh1", nn.GELU()), 99 | ( 100 | name + "conv2", 101 | nn.Conv2d( 102 | in_channels=features, 103 | out_channels=features, 104 | kernel_size=3, 105 | padding=1, 106 | bias=False, 107 | ), 108 | ), 109 | (name + "norm2", nn.BatchNorm2d(num_features=features)), 110 | (name + "tanh2", nn.GELU()), 111 | ] 112 | ) 113 | ) 114 | 115 | 116 | -------------------------------------------------------------------------------- /sciml/op_lib/dist_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch.distributed as dist 3 | 4 | def initialize(backend): 5 | dist.init_process_group(backend=backend) 6 | 7 | def dist_is_used(): 8 | return (dist.is_available() and dist.is_initialized()) 9 | 
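# The helpers below fall back to single-process defaults (rank 0, world size 1)
# when torch.distributed has not been initialized, so the same call sites work
# for both single-GPU and distributed runs.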
10 | def local_rank(): 11 | if not dist.is_initialized(): 12 | return 0 13 | return int(os.environ['LOCAL_RANK']) 14 | 15 | def rank(): 16 | if not dist.is_initialized(): 17 | return 0 18 | return dist.get_rank() 19 | 20 | def world_size(): 21 | if not dist.is_initialized(): 22 | return 1 23 | return dist.get_world_size() 24 | 25 | def leader_rank(): 26 | return 0 27 | 28 | def is_leader_process(): 29 | return rank() == leader_rank() 30 | -------------------------------------------------------------------------------- /sciml/op_lib/downsample.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | def downsample_domain(downsample_factor, *args): 4 | if isinstance(downsample_factor, int): 5 | downsample_factor = [downsample_factor, downsample_factor] 6 | assert all([df >= 1 and isinstance(df, int) for df in downsample_factor]) 7 | return tuple([im[..., ::downsample_factor[0], ::downsample_factor[1]] for im in args]) 8 | -------------------------------------------------------------------------------- /sciml/op_lib/heatflux.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def denormalize_temp_grad(temp, t_wall, t_bulk=58, k=0.054): 4 | print('t_wall: ', t_wall) 5 | del_t = t_wall - t_bulk 6 | return 2 * k * del_t * (1 - temp) 7 | 8 | def heatflux(temp, dfun, t_wall, x, dy): 9 | r""" 10 | The heat flux, q = dT/dy, is computed from the temperature in the liquid-phase 11 | cells directly above the heater. 12 | temp and dfun are laid out T x row x col. 13 | dy is the grid spacing in the y direction. 14 | """ 15 | assert temp.dim() == 3 16 | assert temp.size() == dfun.size() 17 | assert temp.size() == x.size() 18 | lc = 0.0007 19 | 20 | d_temp = denormalize_temp_grad(temp[:, 0], t_wall) 21 | heater_mask = (x >= -2.5) & (x <= 2.5) 22 | liquid_mask = dfun < 0 23 | hflux_list = torch.mean((heater_mask[:, 0] & liquid_mask[:, 0]).to(float) * 24 | d_temp / (dy * lc), 25 | dim=1) 26 | 27 | hflux = torch.mean(hflux_list) 28 | qmax = torch.max(hflux_list) 29 | return hflux, qmax 30 | -------------------------------------------------------------------------------- /sciml/op_lib/losses.py: -------------------------------------------------------------------------------- 1 | r""" 2 | relative LpLoss.
Implementation taken and modified from 3 | https://github.com/neuraloperator/neuraloperator 4 | """ 5 | import math 6 | import torch 7 | 8 | class LpLoss(object): 9 | def __init__(self, d=1, p=2, L=2*math.pi, reduce_dims=0, reductions='sum'): 10 | super().__init__() 11 | 12 | self.d = d 13 | self.p = p 14 | 15 | if isinstance(reduce_dims, int): 16 | self.reduce_dims = [reduce_dims] 17 | else: 18 | self.reduce_dims = reduce_dims 19 | 20 | if self.reduce_dims is not None: 21 | if isinstance(reductions, str): 22 | assert reductions == 'sum' or reductions == 'mean' 23 | self.reductions = [reductions]*len(self.reduce_dims) 24 | else: 25 | for j in range(len(reductions)): 26 | assert reductions[j] == 'sum' or reductions[j] == 'mean' 27 | self.reductions = reductions 28 | 29 | if isinstance(L, float): 30 | self.L = [L]*self.d 31 | else: 32 | self.L = L 33 | 34 | def uniform_h(self, x): 35 | h = [0.0]*self.d 36 | for j in range(self.d, 0, -1): 37 | h[-j] = self.L[-j]/x.size(-j) 38 | 39 | return h 40 | 41 | def reduce_all(self, x): 42 | for j in range(len(self.reduce_dims)): 43 | if self.reductions[j] == 'sum': 44 | x = torch.sum(x, dim=self.reduce_dims[j], keepdim=True) 45 | else: 46 | x = torch.mean(x, dim=self.reduce_dims[j], keepdim=True) 47 | 48 | return x 49 | 50 | def rel(self, x, y): 51 | diff = torch.norm(torch.flatten(x, start_dim=-self.d) - torch.flatten(y, start_dim=-self.d), \ 52 | p=self.p, dim=-1, keepdim=False) 53 | ynorm = torch.norm(torch.flatten(y, start_dim=-self.d), p=self.p, dim=-1, keepdim=False) 54 | 55 | diff = diff/ynorm 56 | 57 | if self.reduce_dims is not None: 58 | diff = self.reduce_all(diff).squeeze() 59 | 60 | return diff 61 | 62 | def __call__(self, x, y): 63 | return self.rel(x, y) 64 | 65 | 66 | class H1Loss(object): 67 | def __init__(self, d=1, L=2*math.pi, reduce_dims=0, reductions='sum', fix_x_bnd=False, fix_y_bnd=False, fix_z_bnd=False): 68 | super().__init__() 69 | 70 | assert d > 0 and d < 4, "Currently only implemented for 1, 2, and 3-D." 
71 | 72 | self.d = d 73 | self.fix_x_bnd = fix_x_bnd 74 | self.fix_y_bnd = fix_y_bnd 75 | self.fix_z_bnd = fix_z_bnd 76 | 77 | if isinstance(reduce_dims, int): 78 | self.reduce_dims = [reduce_dims] 79 | else: 80 | self.reduce_dims = reduce_dims 81 | 82 | if self.reduce_dims is not None: 83 | if isinstance(reductions, str): 84 | assert reductions == 'sum' or reductions == 'mean' 85 | self.reductions = [reductions]*len(self.reduce_dims) 86 | else: 87 | for j in range(len(reductions)): 88 | assert reductions[j] == 'sum' or reductions[j] == 'mean' 89 | self.reductions = reductions 90 | 91 | if isinstance(L, float): 92 | self.L = [L]*self.d 93 | else: 94 | self.L = L 95 | 96 | def compute_terms(self, x, y, h): 97 | dict_x = {} 98 | dict_y = {} 99 | 100 | if self.d == 1: 101 | dict_x[0] = x 102 | dict_y[0] = y 103 | 104 | x_x = central_diff_1d(x, h[0], fix_x_bnd=self.fix_x_bnd) 105 | y_x = central_diff_1d(y, h[0], fix_x_bnd=self.fix_x_bnd) 106 | 107 | dict_x[1] = x_x 108 | dict_y[1] = y_x 109 | 110 | elif self.d == 2: 111 | dict_x[0] = torch.flatten(x, start_dim=-2) 112 | dict_y[0] = torch.flatten(y, start_dim=-2) 113 | 114 | x_x, x_y = central_diff_2d(x, h, fix_x_bnd=self.fix_x_bnd, fix_y_bnd=self.fix_y_bnd) 115 | y_x, y_y = central_diff_2d(y, h, fix_x_bnd=self.fix_x_bnd, fix_y_bnd=self.fix_y_bnd) 116 | 117 | dict_x[1] = torch.flatten(x_x, start_dim=-2) 118 | dict_x[2] = torch.flatten(x_y, start_dim=-2) 119 | 120 | dict_y[1] = torch.flatten(y_x, start_dim=-2) 121 | dict_y[2] = torch.flatten(y_y, start_dim=-2) 122 | 123 | else: 124 | dict_x[0] = torch.flatten(x, start_dim=-3) 125 | dict_y[0] = torch.flatten(y, start_dim=-3) 126 | 127 | x_x, x_y, x_z = central_diff_3d(x, h, fix_x_bnd=self.fix_x_bnd, fix_y_bnd=self.fix_y_bnd, fix_z_bnd=self.fix_z_bnd) 128 | y_x, y_y, y_z = central_diff_3d(y, h, fix_x_bnd=self.fix_x_bnd, fix_y_bnd=self.fix_y_bnd, fix_z_bnd=self.fix_z_bnd) 129 | 130 | dict_x[1] = torch.flatten(x_x, start_dim=-3) 131 | dict_x[2] = torch.flatten(x_y, start_dim=-3) 132 | dict_x[3] = torch.flatten(x_z, start_dim=-3) 133 | 134 | dict_y[1] = torch.flatten(y_x, start_dim=-3) 135 | dict_y[2] = torch.flatten(y_y, start_dim=-3) 136 | dict_y[3] = torch.flatten(y_z, start_dim=-3) 137 | 138 | return dict_x, dict_y 139 | 140 | def uniform_h(self, x): 141 | h = [0.0]*self.d 142 | for j in range(self.d, 0, -1): 143 | h[-j] = self.L[-j]/x.size(-j) 144 | 145 | return h 146 | 147 | def reduce_all(self, x): 148 | for j in range(len(self.reduce_dims)): 149 | if self.reductions[j] == 'sum': 150 | x = torch.sum(x, dim=self.reduce_dims[j], keepdim=True) 151 | else: 152 | x = torch.mean(x, dim=self.reduce_dims[j], keepdim=True) 153 | 154 | return x 155 | 156 | def rel(self, x, y, h=None): 157 | #Assume uniform mesh 158 | if h is None: 159 | h = self.uniform_h(x) 160 | else: 161 | if isinstance(h, float): 162 | h = [h]*self.d 163 | 164 | dict_x, dict_y = self.compute_terms(x, y, h) 165 | 166 | diff = torch.norm(dict_x[0] - dict_y[0], p=2, dim=-1, keepdim=False)**2 167 | ynorm = torch.norm(dict_y[0], p=2, dim=-1, keepdim=False)**2 168 | 169 | for j in range(1, self.d + 1): 170 | diff += torch.norm(dict_x[j] - dict_y[j], p=2, dim=-1, keepdim=False)**2 171 | ynorm += torch.norm(dict_y[j], p=2, dim=-1, keepdim=False)**2 172 | 173 | diff = (diff**0.5)/(ynorm**0.5) 174 | 175 | if self.reduce_dims is not None: 176 | diff = self.reduce_all(diff).squeeze() 177 | 178 | return diff 179 | 180 | 181 | def __call__(self, x, y, h=None): 182 | return self.rel(x, y, h=h) 183 | 
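Note: H1Loss above calls central_diff_1d, central_diff_2d, and central_diff_3d, which are neither defined nor imported in this file; in the upstream neuraloperator code they are finite-difference helpers. A minimal sketch of the 2-D variant, assuming second-order central differences on a uniform grid with spacings h = [h_x, h_y] and optional one-sided differences at the boundaries (the 1-D and 3-D variants follow the same pattern):

import torch

def central_diff_2d(x, h, fix_x_bnd=False, fix_y_bnd=False):
    # interior: (x[i+1] - x[i-1]) / (2h); torch.roll wraps around, so the
    # boundary rows/columns are overwritten below when fix_*_bnd is set
    dx = (torch.roll(x, -1, dims=-2) - torch.roll(x, 1, dims=-2)) / (2.0 * h[0])
    dy = (torch.roll(x, -1, dims=-1) - torch.roll(x, 1, dims=-1)) / (2.0 * h[1])
    if fix_x_bnd:
        dx[..., 0, :] = (x[..., 1, :] - x[..., 0, :]) / h[0]
        dx[..., -1, :] = (x[..., -1, :] - x[..., -2, :]) / h[0]
    if fix_y_bnd:
        dy[..., :, 0] = (x[..., :, 1] - x[..., :, 0]) / h[1]
        dy[..., :, -1] = (x[..., :, -1] - x[..., :, -2]) / h[1]
    return dx, dy

H1Loss.compute_terms then consumes the returned derivatives as dict_x[1] and dict_x[2].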
-------------------------------------------------------------------------------- /sciml/op_lib/metrics.py: -------------------------------------------------------------------------------- 1 | r""" 2 | PyTorch computes metrics w.r.t. each item in a batch individually. 3 | It then sums or averages those individual metrics. These implementations 4 | take the same approach. 5 | """ 6 | import torch 7 | import torch.nn.functional as F 8 | from dataclasses import dataclass 9 | import math 10 | import numpy as np 11 | import numba as nb 12 | 13 | from .losses import LpLoss 14 | 15 | @dataclass 16 | class Metrics: 17 | mae: float 18 | rmse: float 19 | relative_error: float 20 | max_error: float 21 | boundary_rmse: float 22 | interface_rmse: float 23 | fourier_low: float 24 | fourier_mid: float 25 | fourier_high: float 26 | 27 | def __str__(self): 28 | return f""" 29 | MAE: {self.mae} 30 | RMSE: {self.rmse} 31 | Relative Error: {self.relative_error} 32 | Max Error: {self.max_error} 33 | Boundary RMSE: {self.boundary_rmse} 34 | Interface RMSE: {self.interface_rmse} 35 | Fourier 36 | - Low: {self.fourier_low} 37 | - Mid: {self.fourier_mid} 38 | - High: {self.fourier_high} 39 | """ 40 | 41 | def compute_metrics(pred, label, dfun): 42 | low, mid, high = fourier_error(pred, label, 8, 8) 43 | return Metrics( 44 | mae=mae(pred, label), 45 | rmse=rmse(pred, label), 46 | relative_error=relative_error(pred, label), 47 | max_error=max_error(pred, label), 48 | boundary_rmse=boundary_rmse(pred, label), 49 | interface_rmse=interface_rmse(pred, label, dfun), 50 | fourier_low=low, 51 | fourier_mid=mid, 52 | fourier_high=high 53 | ) 54 | 55 | def write_metrics(pred, label, iter, stage, writer): 56 | writer.add_scalar(f'{stage}/MAE', mae(pred, label), iter) 57 | writer.add_scalar(f'{stage}/RMSE', rmse(pred, label), iter) 58 | writer.add_scalar(f'{stage}/MaxError', max_error(pred, label), iter) 59 | 60 | def mae(pred, label): 61 | return F.l1_loss(pred, label) 62 | 63 | def relative_error(pred, label): 64 | assert pred.size() == label.size() 65 | loss = LpLoss(d=2, reductions='mean') 66 | return loss(pred, label) 67 | 68 | def rmse(pred, label): 69 | r""" Assumes input has shape [b x h x w] 70 | """ 71 | assert pred.size() == label.size() 72 | batch_size = pred.size(0) 73 | var_size = pred[0].numel() 74 | sum_dim = 1 if pred.dim() == 2 else [1, 2] 75 | mses = ((pred - label) ** 2).sum(dim=sum_dim) / var_size 76 | return torch.sqrt(mses).sum() / batch_size 77 | 78 | def max_error(pred, label): 79 | return ((pred - label) ** 2).max() # note: this is the maximum *squared* error 80 | 81 | def _extract_boundary(tensor): 82 | r""" Extracts boundaries of a tensor [... x h x w] 83 | The output will have shape [...
x 2h + 2w] 84 | """ 85 | left = tensor[..., :, 0] 86 | right = tensor[..., :, -1] 87 | top = tensor[..., 0, :] 88 | bottom = tensor[..., -1, :] 89 | return torch.cat([left, right, top, bottom], dim=-1) 90 | 91 | def boundary_rmse(pred, label): 92 | r""" assumes input has shape [b x h x w] 93 | """ 94 | assert pred.size() == label.size() 95 | bpred = _extract_boundary(pred) 96 | blabel = _extract_boundary(label) 97 | print(bpred.size()) 98 | return rmse(bpred, blabel) 99 | 100 | def interface_rmse(pred, label, dfun): 101 | assert pred.size() == label.size() 102 | assert pred.size() == dfun.size() 103 | mses = [] 104 | for i in range(pred.size(0)): 105 | squared_error = (pred[i] - label[i]) ** 2 106 | mask = torch.tensor(get_interface_mask(dfun[i].numpy())) 107 | interface_mse = torch.mean(squared_error[mask]) 108 | mses.append(interface_mse) 109 | return torch.sqrt(torch.tensor(mses)).sum() / pred.size(0) 110 | 111 | @nb.njit 112 | def get_interface_mask(dgrid): 113 | interface = np.zeros(dgrid.shape).astype(np.bool_) 114 | [rows, cols] = dgrid.shape 115 | for i in range(rows): 116 | for j in range(cols): 117 | adj = ((i < rows - 1 and dgrid[i][j] * dgrid[i+1, j] <= 0) or 118 | (i > 0 and dgrid[i][j] * dgrid[i-1, j] <= 0) or 119 | (j < cols - 1 and dgrid[i][j] * dgrid[i, j+1] <= 0) or 120 | (j > 0 and dgrid[i][j] * dgrid[i, j-1] <= 0)) 121 | interface[i][j] = adj 122 | return interface 123 | 124 | def fourier_error(pred, target, Lx, Ly): 125 | r""" This function is taken and modified from PDEBench 126 | https://github.com/pdebench/PDEBench/blob/main/pdebench/models/metrics.py 127 | """ 128 | ILOW = 4 129 | IHIGH = 12 130 | 131 | assert pred.dim() == 3 132 | assert pred.size() == target.size() 133 | pred_F = torch.fft.fftn(pred, dim=[1, 2]) 134 | target_F = torch.fft.fftn(target, dim=[1, 2]) 135 | idxs = target.size() 136 | nb = target.size(0) 137 | nx, ny = idxs[1:3] 138 | print(nx, ny) 139 | _err_F = torch.abs(pred_F - target_F) ** 2 140 | err_F = torch.zeros((nb, min(nx // 2, ny // 2))) 141 | for i in range(nx // 2): 142 | for j in range(ny // 2): 143 | it = math.floor(math.sqrt(i ** 2 + j ** 2)) 144 | if it > min(nx // 2, ny // 2) - 1: 145 | continue 146 | err_F[:, it] += _err_F[:, i, j] 147 | _err_F = torch.sqrt(torch.mean(err_F, axis=0)) / (nx * ny) * Lx * Ly 148 | low_err = torch.mean(_err_F[:ILOW]) 149 | mid_err = torch.mean(_err_F[ILOW:IHIGH]) 150 | high_err = torch.mean(_err_F[IHIGH:]) 151 | return low_err, mid_err, high_err 152 | -------------------------------------------------------------------------------- /sciml/op_lib/nucleation.py: -------------------------------------------------------------------------------- 1 | import h5py as h5 2 | import numpy as np 3 | from scipy.stats import qmc 4 | import matplotlib.pyplot as plt 5 | 6 | DX = 0.03125 # Grid spacing in FlashX simulations 7 | 8 | def heater_init(xmin, xmax, num_sites): 9 | r""" 10 | Initialize the nucleation sites on the 1-D heater. Returns a 1D line of sites 11 | that is dependent on wall temperature. 12 | 13 | Args: 14 | xmin (float): The x-coordinate where heater begins. 15 | xmax (float): The x-coordinate where heater ends. 16 | num_sites (int): The number of nucleation sites in the domain. 17 | 18 | Returns: 19 | numpy.ndarray, numpy.ndarray: The x- and y-coordinates of the heater nucleation sites.
20 | """ 21 | x_sites = np.ndarray(num_sites, dtype=float) 22 | y_sites = np.ndarray(num_sites, dtype=float) 23 | 24 | halton_sequence = qmc.Halton(d=2, seed=1) 25 | halton_sample = halton_sequence.random(num_sites) 26 | 27 | x_sites[:] = xmin + halton_sample[:, 0] * (xmax - xmin) 28 | y_sites[:] = 1e-13 29 | 30 | return x_sites, y_sites 31 | 32 | def dfun_init(x_grid, y_grid, x_sites, y_sites, seed_radius): 33 | r""" 34 | Initialize the distance function for given nucleation sites. 35 | 36 | Args: 37 | x_grid (numpy.ndarray): The x-coordinates of the grid. 38 | y_grid (numpy.ndarray): The y-coordinates of the grid. 39 | x_sites (numpy.ndarray): The x-coordinates of the nucleation sites. 40 | y_sites (numpy.ndarray): The y-coordinates of the nucleation sites. 41 | seed_radius (float): The radius of the nucleation site. 42 | 43 | Returns: 44 | numpy.ndarray: The initial distance function with nucleated bubbles. 45 | """ 46 | dfun = np.zeros_like(x_grid) - np.inf 47 | seed_height = seed_radius * np.cos(np.pi/4) 48 | for htr_points_xy in zip(x_sites, y_sites): 49 | seed_x = htr_points_xy[0] 50 | seed_y = htr_points_xy[1] + seed_height 51 | 52 | interim_dfun = seed_radius - np.sqrt((x_grid - seed_x)**2 + (y_grid - seed_y)**2) 53 | dfun = np.maximum(dfun, interim_dfun) 54 | return dfun 55 | 56 | def tag_renucleation(x_sites, y_sites, dfun, coordx, coordy, seed_radius, curr_iter, nuc_wait_time=0.4): 57 | r""" 58 | Tag the nucleation sites for renucleation after a certain time. 59 | 60 | Args: 61 | x_sites (numpy.ndarray): The x-coordinates of the nucleation sites. 62 | y_sites (numpy.ndarray): The y-coordinates of the nucleation sites. 63 | dfun (numpy.ndarray): The distance function at the current time. 64 | coordx (numpy.ndarray): The x-coordinates of the first row of the grid. 65 | coordy (numpy.ndarray): The y-coordinates of the first column of the grid. 66 | curr_iter (int): The current iteration of the model. 67 | nuc_wait_time (float): The minimum time after which renucleation happens. Multiple of 0.1 68 | 69 | Returns: 70 | numpy.ndarray: The tagged nucleation sites. 71 | """ 72 | tagged_sites = np.zeros_like(x_sites, dtype=bool) 73 | nuc_plot_interval = nuc_wait_time * 10 74 | seed_height = seed_radius * np.cos(np.pi/4) 75 | for i, htr_points_xy in enumerate(zip(x_sites, y_sites)): 76 | seed_x = htr_points_xy[0] 77 | seed_y = htr_points_xy[1] + seed_height 78 | x_i = np.searchsorted(coordx, seed_x) 79 | y_i = np.searchsorted(coordy, seed_y) 80 | 81 | dfun_site = (dfun[y_i, x_i] + dfun[y_i+1, x_i] + dfun[y_i, x_i+1] + dfun[y_i+1, x_i+1])/4.0 # Average of the 4 cells surrounding the nucleation site 82 | 83 | if dfun_site < 0 and curr_iter % nuc_plot_interval == 0: 84 | tagged_sites[i] = True 85 | 86 | return tagged_sites 87 | 88 | def renucleate(x_grid, y_grid, x_sites, y_sites, tagged_sites, curr_dfun, seed_radius): 89 | r""" 90 | Renucleate the sites that are tagged for renucleation. 91 | 92 | Args: 93 | x_grid (numpy.ndarray): The x-coordinates of the grid. 94 | y_grid (numpy.ndarray): The y-coordinates of the grid. 95 | x_sites (numpy.ndarray): The x-coordinates of the nucleation sites. 96 | y_sites (numpy.ndarray): The y-coordinates of the nucleation sites. 97 | curr_dfun (numpy.ndarray): The distance function at the current time. 98 | seed_radius (float): The radius of the nucleation site. 99 | 100 | Returns: 101 | numpy.ndarray: The updated nucleation sites. 
102 | """ 103 | seed_height = seed_radius * np.cos(np.pi/4) 104 | for i, htr_points_xy in enumerate(zip(x_sites, y_sites)): 105 | if tagged_sites[i]: 106 | seed_x = htr_points_xy[0] 107 | seed_y = htr_points_xy[1] + seed_height 108 | 109 | interim_dfun = seed_radius - np.sqrt((x_grid - seed_x)**2 + (y_grid - seed_y)**2) 110 | curr_dfun = np.maximum(curr_dfun, interim_dfun) 111 | 112 | return curr_dfun 113 | 114 | 115 | if __name__ == '__main__': 116 | sim = h5.File('/Users/shakeel/bubbleml_data/PoolBoiling-WallSuperheat-FC72-2D/Twall-100.hdf5', 'r') 117 | dfun_0 = sim['dfun'][...][0] 118 | x_0, y_0 = sim['x'][...][0], sim['y'][...][0] 119 | coordx, coordy = x_0[0], np.transpose(y_0)[0] 120 | 121 | 122 | init_nucl_coordx, init_nucl_coordy = heater_init(-5.0, 5.0, 40) # Coordinates of 40 nucleation sites 123 | 124 | my_dfun = dfun_init(x_0, y_0, init_nucl_coordx, init_nucl_coordy, seed_radius=0.1) # Initialize the distance function with nucleated bubbles at t=0 125 | 126 | # Plot the initial distance function for testing 127 | my_dfun[my_dfun>0] *= (255/my_dfun.max()) 128 | my_dfun[my_dfun<0] = 255 129 | my_dfun = my_dfun.astype(np.uint8) 130 | plt.imsave(f'my_dfun.png', np.flipud(my_dfun), cmap='GnBu') 131 | plt.close() 132 | 133 | dfun_40 = sim['dfun'][...][40] 134 | 135 | # Renucleation algorithm 136 | tagged_nucl_sites = tag_renucleation(init_nucl_coordx, init_nucl_coordy, dfun_40, coordx, coordy, seed_radius=0.1, curr_iter=40, nuc_wait_time=0.4) 137 | dfun_40 = renucleate(x_0, y_0, init_nucl_coordx, init_nucl_coordy, tagged_nucl_sites, dfun_40, seed_radius=0.1) 138 | 139 | # Plot the distance function at t=40 after renucleation for testing 140 | dfun_40[dfun_40>0] *= (255/dfun_40.max()) 141 | dfun_40[dfun_40<0] = 255 142 | dfun_40 = dfun_40.astype(np.uint8) 143 | plt.imsave(f'dfun_40_renucleated.png', np.flipud(dfun_40), cmap='GnBu') 144 | plt.close() 145 | 146 | -------------------------------------------------------------------------------- /sciml/op_lib/plt_util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import matplotlib.pyplot as plt 3 | from matplotlib.colors import LinearSegmentedColormap 4 | import numpy as np 5 | import torch 6 | from pathlib import Path 7 | 8 | def temp_cmap(): 9 | temp_ranges = [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.134, 0.167, 10 | 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] 11 | color_codes = ['#0000FF', '#0443FF', '#0E7AFF', '#16B4FF', '#1FF1FF', '#21FFD3', 12 | '#22FF9B', '#22FF67', '#22FF15', '#29FF06', '#45FF07', '#6DFF08', 13 | '#9EFF09', '#D4FF0A', '#FEF30A', '#FEB709', '#FD7D08', '#FC4908', 14 | '#FC1407', '#FB0007'] 15 | colors = list(zip(temp_ranges, color_codes)) 16 | cmap = LinearSegmentedColormap.from_list('temperature_colormap', colors) 17 | return cmap 18 | 19 | def plt_iter_mae(temps, labels): 20 | plt.rc("font", family="serif", size=14, weight="bold") 21 | plt.rc("axes", labelweight="bold") 22 | rmses = [] 23 | for i in range(len(temps)): 24 | rmse = torch.sqrt(torch.mean(((temps[i] - labels[i]) ** 2).detach().cpu())) 25 | rmses.append(rmse) 26 | job_id = os.environ['SLURM_JOB_ID'] 27 | job_path = Path(f'test_im/temp/{job_id}/') 28 | job_path.mkdir(parents=True, exist_ok=True) 29 | with open(job_path / 'iter_rmse', 'w+') as f: # Path join; plain string concatenation dropped the directory separator 30 | for rmse in rmses: 31 | f.write(f'{rmse}\n') 32 | 33 | def plt_temp(temps, labels, model_name): 34 | temps = (temps + 1) / 2 35 | labels = (labels + 1) / 2 36 | 37 | """ 38 | plt.rc("font", family="serif", size=16, weight="bold") 39
| plt.rc("axes", labelweight="bold") 40 | for i in range(len(temps)): 41 | i_str = str(i).zfill(3) 42 | 43 | def plt_temp_arr(f, ax, arr, title): 44 | cm_object = ax.imshow(arr, vmin=0, vmax=1, cmap=temp_cmap()) 45 | #ax.set_title(title) 46 | ax.axis('off') 47 | return cm_object 48 | 49 | temp = temps[i].numpy() 50 | label = labels[i].numpy() 51 | f, axarr = plt.subplots(1, 3, layout="constrained") 52 | cm_object = plt_temp_arr(f, axarr[0], np.flipud(label), 'Ground Truth') 53 | cm_object = plt_temp_arr(f, axarr[1], np.flipud(temp), model_name) 54 | 55 | err = np.abs(temp - label) 56 | cm_object = plt_temp_arr(f, axarr[2], np.flipud(err), 'Absolute Error') 57 | f.tight_layout() 58 | f.colorbar(cm_object, 59 | ax=axarr.ravel().tolist(), 60 | ticks=[0, 0.2, 0.6, 0.9], 61 | fraction=0.04, 62 | pad=0.02) 63 | f.set_size_inches(w=6, h=3) 64 | 65 | job_id = os.environ['SLURM_JOB_ID'] 66 | im_path = Path(f'test_im/temp/{job_id}/') 67 | im_path.mkdir(parents=True, exist_ok=True) 68 | plt.savefig(f'{str(im_path)}/{i_str}.png', 69 | dpi=400, 70 | bbox_inches='tight', 71 | transparent=True) 72 | plt.close() 73 | """ 74 | job_id = os.environ['SLURM_JOB_ID'] 75 | im_path = Path(f'test_im/temp/{job_id}/') 76 | im_path.mkdir(parents=True, exist_ok=True) 77 | torch.save(temps, f'{im_path}/model_ouput.pt') 78 | torch.save(labels, f'{im_path}/sim_ouput.pt') 79 | 80 | def plt_vel(vel_preds, vel_labels, 81 | velx_preds, velx_labels, 82 | vely_preds, vely_labels, 83 | model_name): 84 | 85 | #vel_preds = (vel_preds + 1) / 2 86 | #vel_labels = (vel_labels + 1) / 2 87 | 88 | max_mag = vel_labels.max() 89 | min_mag = vel_labels.min() 90 | 91 | """ 92 | for i in range(len(vel_preds)): 93 | i_str = str(i).zfill(3) 94 | 95 | def plt_temp_arr(f, ax, arr, title): 96 | cm_object = ax.imshow(arr, vmin=0, vmax=0.3, cmap='jet') 97 | ax.set_title(title) 98 | ax.axis('off') 99 | return cm_object 100 | 101 | f, axarr = plt.subplots(1, 2, layout="constrained") 102 | pred_mag = vel_preds[i] 103 | label_mag = vel_labels[i] 104 | 105 | cm_object = plt_temp_arr(f, axarr[0], np.flipud(label_mag), 'Ground Truth') 106 | plt_temp_arr(f, axarr[1], np.flipud(pred_mag), model_name) 107 | f.colorbar(cm_object, ax=axarr[1], fraction=0.05) 108 | 109 | #cm_object = plt_temp_arr(f, axarr[2], np.flipud(np.abs(label_mag - pred_mag)), 1, 'Absolute Error') 110 | #f.colorbar(cm_object, ax=axarr[2], fraction=0.05) 111 | 112 | #vx_pred, vx_label = velx_preds[i], velx_labels[i] 113 | #vy_pred, vy_label = vely_preds[i], vely_labels[i] 114 | #xd = (vx_pred - vx_label) ** 2 115 | #yd = (vy_pred - vy_label) ** 2 116 | #n = np.sqrt(xd + yd) 117 | #cm_object = plt_temp_arr(f, axarr[1, 1], np.flipud(n), 1, 'L2 Error') 118 | #f.colorbar(cm_object, ax=axarr[1, 1], fraction=0.05) 119 | 120 | job_id = os.environ['SLURM_JOB_ID'] 121 | im_path = Path(f'test_im/vel/{job_id}/') 122 | im_path.mkdir(parents=True, exist_ok=True) 123 | plt.savefig(f'{str(im_path)}/{i_str}.png', dpi=500) 124 | plt.close() 125 | """ 126 | job_id = os.environ['SLURM_JOB_ID'] 127 | im_path = Path(f'test_im/vel/{job_id}/') 128 | im_path.mkdir(parents=True, exist_ok=True) 129 | torch.save(vel_preds, f'{im_path}/mag_ouput.pt') 130 | torch.save(vel_labels, f'{im_path}/mag_label.pt') 131 | torch.save(velx_preds, f'{im_path}/velx_output.pt') 132 | torch.save(velx_labels, f'{im_path}/velx_label.pt') 133 | torch.save(vely_preds, f'{im_path}/vely_output.pt') 134 | torch.save(vely_labels, f'{im_path}/vely_label.pt') 135 | 
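Since the inline plotting in plt_temp and plt_vel is commented out and the functions only save tensors, a minimal offline-rendering sketch may help; it assumes sciml is importable and that you know the SLURM job id used at save time (note that the files really are named 'model_ouput.pt' and 'sim_ouput.pt' by the save calls above):

import matplotlib.pyplot as plt
import numpy as np
import torch

from sciml.op_lib.plt_util import temp_cmap  # assumes sciml/ is on PYTHONPATH

job_id = '123456'  # hypothetical: the SLURM_JOB_ID used when the trainer saved the tensors
temps = torch.load(f'test_im/temp/{job_id}/model_ouput.pt')
labels = torch.load(f'test_im/temp/{job_id}/sim_ouput.pt')

# render the first frame: ground truth next to the model prediction
f, axarr = plt.subplots(1, 2, layout='constrained')
for ax, frame, title in zip(axarr, (labels[0], temps[0]), ('Ground Truth', 'Model')):
    ax.imshow(np.flipud(frame.numpy()), vmin=0, vmax=1, cmap=temp_cmap())
    ax.set_title(title)
    ax.axis('off')
plt.savefig('frame_000.png', dpi=400, bbox_inches='tight')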
-------------------------------------------------------------------------------- /sciml/op_lib/schedule_utils.py: -------------------------------------------------------------------------------- 1 | from torch.optim.lr_scheduler import LambdaLR 2 | 3 | class LinearWarmupLR(LambdaLR): 4 | def __init__(self, optimizer, warmup_iters): 5 | self.warmup_iters = warmup_iters 6 | warmup_func = lambda current_step: min(1, current_step / self.warmup_iters) 7 | super().__init__(optimizer, lr_lambda=warmup_func) 8 | 9 | -------------------------------------------------------------------------------- /sciml/op_lib/temp_trainer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from torch import nn 4 | import torchvision 5 | import torch.nn.functional as F 6 | from torch.optim.lr_scheduler import ReduceLROnPlateau, PolynomialLR 7 | from torch.utils.data import ConcatDataset, DataLoader 8 | from torch.utils.tensorboard import SummaryWriter 9 | import torchvision.transforms.functional as TF 10 | import matplotlib.pyplot as plt 11 | import numpy as np 12 | 13 | from .hdf5_dataset import HDF5Dataset, TempVelDataset 14 | from .metrics import compute_metrics, write_metrics 15 | from .losses import LpLoss 16 | from .plt_util import plt_temp, plt_iter_mae 17 | from .heatflux import heatflux 18 | from .dist_utils import local_rank, is_leader_process 19 | from .downsample import downsample_domain 20 | 21 | from torch.cuda import nvtx 22 | import time 23 | 24 | t_bulk_map = { 25 | 'wall_super_heat': 58, 26 | 'subcooled': 50 27 | } 28 | 29 | class TempTrainer: 30 | def __init__(self, 31 | model, 32 | future_window, 33 | push_forward_steps, 34 | train_dataloader, 35 | val_dataloader, 36 | optimizer, 37 | lr_scheduler, 38 | val_variable, 39 | writer, 40 | cfg): 41 | self.model = model 42 | self.train_dataloader = train_dataloader 43 | self.val_dataloader = val_dataloader 44 | self.optimizer = optimizer 45 | self.lr_scheduler = lr_scheduler 46 | self.val_variable = val_variable 47 | self.writer = writer 48 | self.cfg = cfg 49 | self.loss = LpLoss(d=2, reduce_dims=[0, 1]) 50 | 51 | self.push_forward_steps = push_forward_steps 52 | self.future_window = future_window 53 | self.local_rank = local_rank() 54 | 55 | def train(self, max_epochs, *args, **kwargs): 56 | for epoch in range(max_epochs): 57 | print('epoch ', epoch) 58 | self.train_step(epoch) 59 | self.val_step(epoch) 60 | # test each epoch 61 | val_dataset = self.val_dataloader.dataset.datasets[0] 62 | self.test(val_dataset) 63 | 64 | def _forward_int(self, coords, temp, vel): 65 | input = torch.cat((temp, vel), dim=1) 66 | if self.cfg.train.use_coords: 67 | input = torch.cat((coords, input), dim=1) 68 | pred = self.model(input) 69 | return pred 70 | 71 | def push_forward_trick(self, coords, temp, vel): 72 | if self.cfg.train.noise: 73 | temp += torch.empty_like(temp).normal_(0, 0.01) 74 | vel += torch.empty_like(vel).normal_(0, 0.01) 75 | pred = self._forward_int(coords, temp, vel) 76 | return pred 77 | 78 | def train_step(self, epoch): 79 | self.model.train() 80 | 81 | for iter, (coords, temp, vel, label) in enumerate(self.train_dataloader): 82 | coords = coords.to(self.local_rank).float() 83 | temp = temp.to(self.local_rank).float() 84 | vel = vel.to(self.local_rank).float() 85 | label = label.to(self.local_rank).float() 86 | coords, temp, vel, label = downsample_domain(self.cfg.train.downsample_factor, coords, temp, vel, label) 87 | 88 | pred = self.push_forward_trick(coords, temp, vel) 89 | 
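# NOTE: as written, push_forward_trick only perturbs the inputs with Gaussian
# noise (when cfg.train.noise is set) before a single forward pass; the
# push_forward_steps value stored in __init__ is not used for multi-step
# unrolling in this method.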
90 | print(pred.size(), label.size()) 91 | 92 | loss = self.loss(pred, label) 93 | self.optimizer.zero_grad() 94 | loss.backward() 95 | #nn.utils.clip_grad_norm_(self.model.parameters(), 1.0) 96 | self.optimizer.step() 97 | self.lr_scheduler.step() 98 | 99 | mse_loss = F.mse_loss(pred, label).detach() 100 | print(f'train loss: {loss}, mse: {mse_loss}') 101 | global_iter = epoch * len(self.train_dataloader) + iter 102 | write_metrics(pred, label, global_iter, 'Train', self.writer) 103 | del temp, vel, label 104 | 105 | def val_step(self, epoch): 106 | self.model.eval() 107 | for iter, (coords, temp, vel, label) in enumerate(self.val_dataloader): 108 | coords = coords.to(self.local_rank).float() 109 | temp = temp.to(self.local_rank).float() 110 | vel = vel.to(self.local_rank).float() 111 | label = label.to(self.local_rank).float() 112 | with torch.no_grad(): 113 | pred = self._forward_int(coords, temp, vel) 114 | temp_loss = F.mse_loss(pred, label) 115 | loss = temp_loss 116 | print(f'val loss: {loss}') 117 | global_iter = epoch * len(self.val_dataloader) + iter 118 | write_metrics(pred, label, global_iter, 'Val', self.writer) 119 | del temp, vel, label 120 | 121 | def test(self, dataset, max_timestep=200): 122 | if is_leader_process(): 123 | self.model.eval() 124 | temps = [] 125 | labels = [] 126 | time_lim = min(len(dataset), max_timestep) 127 | 128 | start = time.time() 129 | for timestep in range(0, time_lim, self.future_window): 130 | coords, temp, vel, label = dataset[timestep] 131 | coords = coords.to(self.local_rank).float().unsqueeze(0) 132 | temp = temp.to(self.local_rank).float().unsqueeze(0) 133 | vel = vel.to(self.local_rank).float().unsqueeze(0) 134 | label = label.to(self.local_rank).float() 135 | with torch.no_grad(): 136 | pred = self._forward_int(coords, temp, vel) 137 | temp = F.hardtanh(pred.squeeze(0), -1, 1) 138 | dataset.write_temp(temp, timestep) 139 | temps.append(temp.detach().cpu()) 140 | labels.append(label.detach().cpu()) 141 | dur = time.time() - start 142 | print(f'rollout time {dur} (s)') 143 | 144 | 145 | temps = torch.cat(temps, dim=0) 146 | labels = torch.cat(labels, dim=0) 147 | dfun = dataset.get_dfun()[:temps.size(0)] 148 | 149 | print(temps.max(), temps.min()) 150 | print(labels.max(), labels.min()) 151 | 152 | metrics = compute_metrics(temps, labels, dfun) 153 | print(metrics) 154 | 155 | #xgrid = dataset.get_x().permute((2, 0, 1)) 156 | #print(heatflux(temps, dfun, self.val_variable, xgrid, dataset.get_dy())) 157 | #print(heatflux(labels, dfun, self.val_variable, xgrid, dataset.get_dy())) 158 | 159 | plt_temp(temps, labels, self.model.__class__.__name__) 160 | plt_iter_mae(temps, labels) 161 | 162 | dataset.reset() 163 | 164 | return metrics 165 | -------------------------------------------------------------------------------- /sciml/op_lib/vel_trainer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import torchvision 4 | import torch.nn.functional as F 5 | from torch.optim.lr_scheduler import ReduceLROnPlateau, PolynomialLR 6 | from torch.utils.data import ConcatDataset, DataLoader 7 | from torch.utils.tensorboard import SummaryWriter 8 | import torchvision.transforms.functional as TF 9 | import matplotlib.pyplot as plt 10 | import numpy as np 11 | import time 12 | from pathlib import Path 13 | 14 | from .hdf5_dataset import HDF5Dataset, TempVelDataset 15 | from .metrics import compute_metrics 16 | from .losses import LpLoss 17 | from .plt_util import plt_temp, 
plt_vel 18 | 19 | class VelTrainer: 20 | def __init__(self, 21 | model, 22 | train_dataloader, 23 | val_dataloader, 24 | optimizer, 25 | lr_scheduler, 26 | val_variable, 27 | writer, 28 | cfg): 29 | self.model = model 30 | self.train_dataloader = train_dataloader 31 | self.val_dataloader = val_dataloader 32 | self.optimizer = optimizer 33 | self.lr_scheduler = lr_scheduler 34 | self.val_variable = val_variable 35 | self.writer = writer 36 | self.cfg = cfg 37 | self.loss = LpLoss(d=2) 38 | 39 | def save_checkpoint(self, dataset_name): 40 | timestamp = int(time.time()) 41 | if self.cfg.distributed: 42 | model_name = self.model.module.__class__.__name__ 43 | else: 44 | model_name = self.model.__class__.__name__ 45 | ckpt_file = f'{model_name}_{self.cfg.torch_dataset_name}_{self.cfg.train.max_epochs}_{timestamp}.pt' 46 | ckpt_root = Path.home() / f'{log_dir}/{dataset_name}' # NOTE: log_dir is not defined anywhere in this file; it must be supplied (e.g. via the config) before save_checkpoint will run 47 | Path(ckpt_root).mkdir(parents=True, exist_ok=True) 48 | ckpt_path = f'{ckpt_root}/{ckpt_file}' 49 | print(f'saving model to {ckpt_path}') 50 | if self.cfg.distributed: 51 | torch.save(self.model.module.state_dict(), f'{ckpt_path}') 52 | else: 53 | torch.save(self.model.state_dict(), f'{ckpt_path}') 54 | 55 | def train(self, max_epochs, dataset_name): 56 | for epoch in range(max_epochs): 57 | print('epoch ', epoch) 58 | self.train_step(epoch) 59 | self.val_step(epoch) 60 | self.lr_scheduler.step() 61 | val_dataset = self.val_dataloader.dataset.datasets[0] 62 | self.test(val_dataset) 63 | self.save_checkpoint(dataset_name) 64 | 65 | def train_step(self, epoch): 66 | self.model.train() 67 | for iter, (input, label) in enumerate(self.train_dataloader): 68 | input = input.cuda().float() 69 | label = label.cuda().float() 70 | pred = self.model(input) 71 | print(pred.size(), label.size()) 72 | temp_loss = self.loss(pred[:, 0], label[:, 0]) 73 | velx_loss = self.loss(pred[:, 1], label[:, 1]) 74 | vely_loss = self.loss(pred[:, 2], label[:, 2]) 75 | print(f'{temp_loss}, {velx_loss}, {vely_loss}') 76 | loss = (temp_loss + velx_loss + vely_loss) / 3 77 | self.optimizer.zero_grad() 78 | loss.backward() 79 | self.optimizer.step() 80 | print(f'train loss: {loss}') 81 | del input, label 82 | 83 | def val_step(self, epoch): 84 | self.model.eval() 85 | for iter, (input, label) in enumerate(self.val_dataloader): 86 | input = input.cuda().float() 87 | label = label.cuda().float() 88 | with torch.no_grad(): 89 | pred = self.model(input) 90 | temp_loss = F.mse_loss(pred[:, 0], label[:, 0]) 91 | velx_loss = F.mse_loss(pred[:, 1], label[:, 1]) 92 | vely_loss = F.mse_loss(pred[:, 2], label[:, 2]) 93 | print(f'{temp_loss}, {velx_loss}, {vely_loss}') 94 | loss = (temp_loss + velx_loss + vely_loss) / 3 95 | print(f'val loss: {loss}') 96 | del input, label 97 | 98 | def test(self, dataset): 99 | self.model.eval() 100 | temp_preds = [] 101 | velx_preds = [] 102 | vely_preds = [] 103 | temp_labels = [] 104 | velx_labels = [] 105 | vely_labels = [] 106 | for timestep in range(len(dataset)): 107 | input, label = dataset[timestep] 108 | input = input.cuda().float().unsqueeze(0) 109 | label = label.cuda().float().unsqueeze(0) 110 | print(input.size(), label.size()) 111 | with torch.no_grad(): 112 | pred = self.model(input) 113 | temp = pred[:, 0] 114 | velx = F.hardtanh(pred[:, 1], min_val=-1, max_val=1) 115 | vely = F.hardtanh(pred[:, 2], min_val=-1, max_val=1) 116 | dataset.write_temp(temp, timestep) 117 | dataset.write_velx(velx, timestep) 118 | dataset.write_vely(vely, timestep) 119 | temp_preds.append(temp.detach().cpu()) 120 |
velx_preds.append(velx.detach().cpu()) 121 | vely_preds.append(vely.detach().cpu()) 122 | temp_labels.append(label[:, 0].detach().cpu()) 123 | velx_labels.append(label[:, 1].detach().cpu()) 124 | vely_labels.append(label[:, 2].detach().cpu()) 125 | 126 | temp_preds = torch.cat(temp_preds, dim=0) 127 | velx_preds = torch.cat(velx_preds, dim=0) 128 | vely_preds = torch.cat(vely_preds, dim=0) 129 | temp_labels = torch.cat(temp_labels, dim=0) 130 | velx_labels = torch.cat(velx_labels, dim=0) 131 | vely_labels = torch.cat(vely_labels, dim=0) 132 | 133 | def mag(velx, vely): 134 | return torch.sqrt(velx**2 + vely**2) 135 | mag_preds = mag(velx_preds, vely_preds) 136 | mag_labels = mag(velx_labels, vely_labels) 137 | 138 | def print_metrics(pred, label): 139 | metrics = compute_metrics(pred, label, dataset.get_dfun().permute((2,0,1))) 140 | print(metrics) 141 | return metrics 142 | 143 | print('temp metrics:') 144 | print_metrics(temp_preds, temp_labels) 145 | print('velx metrics:') 146 | print_metrics(velx_preds, velx_labels) 147 | print('vely metrics:') 148 | print_metrics(vely_preds, vely_labels) 149 | print('mag metrics:') 150 | metrics = print_metrics(mag_preds, mag_labels) # keep the magnitude metrics so the return below is well-defined 151 | 152 | model_name = self.model.__class__.__name__ 153 | plt_temp(temp_preds, temp_labels, model_name) 154 | plt_vel(mag_preds, mag_labels, velx_preds, velx_labels, vely_preds, vely_labels, model_name) # match plt_vel's 7-argument signature 155 | 156 | return metrics 157 | -------------------------------------------------------------------------------- /scripts/boxkit_dataset.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import boxkit 3 | import numpy as np 4 | import torch 5 | from torch.utils.data import ConcatDataset, Dataset 6 | from pathlib import Path 7 | import matplotlib.pyplot as plt 8 | import h5py 9 | from joblib import Parallel, delayed 10 | 11 | class BoilingDataset(Dataset): 12 | def __init__(self, directory): 13 | super().__init__() 14 | filenames = sorted(glob.glob(directory + '/*')) 15 | self._filenames = [f for f in filenames if 'plt_cnt' in f][:-1] 16 | with h5py.File(self._filenames[0]) as f: 17 | print(f.keys()) 18 | if len(self._filenames) > 0: 19 | self._data = self._load_data() 20 | self._load_dims() 21 | 22 | def to_hdf5(self, filename): 23 | if len(self._filenames) == 0: 24 | return 25 | print(filename) 26 | perm = (2, 0, 1) 27 | with h5py.File(filename, 'w') as f: 28 | f.create_dataset('temperature', data=self._data['temp'].permute(perm)) 29 | f.create_dataset('velx', data=self._data['velx'].permute(perm)) 30 | f.create_dataset('vely', data=self._data['vely'].permute(perm)) 31 | f.create_dataset('dfun', data=self._data['dfun'].permute(perm)) 32 | f.create_dataset('pressure', data=self._data['pres'].permute(perm)) 33 | f.create_dataset('massflux', data=self._data['mflx'].permute(perm)) 34 | f.create_dataset('normx', data=self._data['nrmx'].permute(perm)) 35 | f.create_dataset('normy', data=self._data['nrmy'].permute(perm)) 36 | f.create_dataset('x', data=self._data['x'].permute(perm)) 37 | f.create_dataset('y', data=self._data['y'].permute(perm)) 38 | 39 | REAL_RUNTIME_PARAMS = 'real runtime parameters' 40 | INT_RUNTIME_PARAMS = 'integer runtime parameters' 41 | 42 | with h5py.File(self._filenames[0], 'r') as src: # the runtime parameters live in the source file; the freshly created output f never contains them 43 | for src_key, dst_key in [(REAL_RUNTIME_PARAMS, 'real-runtime-params'), (INT_RUNTIME_PARAMS, 'int-runtime-params')]: 44 | if src_key in src.keys(): 45 | f.create_dataset(dst_key, data=src[src_key][:]) 46 | 47 | def _load_data(self): 48 | frame_dicts = self._load_files_par() 49 | var_dict = 
self._stack_frame_dicts(frame_dicts) 50 | return var_dict 51 | 52 | def _stack_frame_dicts(self, frame_dicts): 53 | var_list = frame_dicts[0].keys() 54 | var_dict = dict((v, []) for v in var_list) 55 | for frame in frame_dicts: 56 | for var in var_list: 57 | var_dict[var].append(torch.from_numpy(frame[var])) 58 | for var in var_list: 59 | var_dict[var] = torch.stack(var_dict[var], -1) 60 | return var_dict 61 | 62 | def _runtime_params(self, f, key): 63 | with h5py.File(self._filenames[0], 'r') as f: 64 | return f[key][:] 65 | 66 | def _load_dims(self): 67 | frame0 = boxkit.read_dataset(self._filenames[0], source='flash') 68 | self.xmin, self.xmax = frame0.xmin, frame0.xmax 69 | self.ymin, self.ymax = frame0.ymin, frame0.ymax 70 | 71 | def _load_files_par(self): 72 | NJOBS = 30 73 | output = Parallel(n_jobs=NJOBS)(delayed(self._load_file)(idx, filename) for idx, filename in enumerate(self._filenames)) 74 | return output 75 | 76 | def _load_file(self, idx, filename): 77 | frame = boxkit.read_dataset(filename, source='flash') 78 | blocks = frame.blocklist 79 | y_bs, x_bs = frame.nyb, frame.nxb 80 | 81 | blockx_pixel = x_bs * round(int((frame.xmax - frame.xmin)/blocks[0].dx)/x_bs) 82 | blocky_pixel = y_bs * round(int((frame.ymax - frame.ymin)/blocks[0].dy)/y_bs) 83 | nblockx = int(blockx_pixel/ x_bs) 84 | nblocky = int(blocky_pixel/ y_bs) 85 | 86 | nxb = nblockx * x_bs 87 | nyb = nblocky * y_bs 88 | 89 | var_dict = {} 90 | for key in frame.varlist: 91 | var_dict[key] = np.empty((nyb, nxb)) 92 | for block in blocks: 93 | r = y_bs * round(int((nyb * (block.ymin - frame.ymin))/(frame.ymax - frame.ymin))/y_bs) 94 | c = x_bs * round(int((nxb * (block.xmin - frame.xmin))/(frame.xmax - frame.xmin))/x_bs) 95 | var_dict[key][r:r+y_bs, c:c+x_bs] = block[key] 96 | 97 | var_dict['x'] = np.empty((nyb, nxb)) 98 | var_dict['y'] = np.empty((nyb, nxb)) 99 | block_idx = 0 100 | for block in blocks: 101 | x, y = np.meshgrid(block.xrange('center'), 102 | block.yrange('center')) 103 | r = y_bs * round(int((nyb * (block.ymin - frame.ymin))/(frame.ymax - frame.ymin))/y_bs) 104 | c = x_bs * round(int((nxb * (block.xmin - frame.xmin))/(frame.xmax - frame.xmin))/x_bs) 105 | var_dict['x'][r:r+y_bs,c:c+x_bs] = x 106 | var_dict['y'][r:r+y_bs,c:c+x_bs] = y 107 | 108 | return var_dict 109 | 110 | TWALL = 'Twall-' 111 | 112 | def unblock_dataset(write_dir, read_dir): 113 | b = BoilingDataset(read_dir) 114 | 115 | filename = Path(read_dir).stem 116 | print(filename) 117 | assert TWALL in filename, f'eek {TWALL} not in filename' 118 | wall_temp = int(filename[len(TWALL):]) 119 | print(wall_temp) 120 | 121 | dir_name = read_dir[read_dir.find(TWALL):] 122 | b.to_hdf5(f'{write_dir}/{dir_name}.hdf5') # write to the directory passed in rather than relying on the module-level global 123 | 124 | if __name__ == '__main__': 125 | target = str(Path.home() / '/share/crsp/lab/ai4ts/share/simul_ts_0.1/SubCooled-FC72-2D_HDF5/') 126 | Path(target).mkdir(parents=True, exist_ok=True) 127 | 128 | base = str(Path.home() / '/share/crsp/lab/ai4ts/share/simul_ts_0.1/SubCooled-FC72-2D/') 129 | 130 | subdirs = [f for f in glob.glob(f'{base}/*') if TWALL in f] 131 | print(subdirs) 132 | 133 | for idx, subdir in enumerate(subdirs): 134 | print(f'processing {subdir} {idx}/{len(subdirs)}') 135 | unblock_dataset(target, subdir) 136 | 137 | print('done!') 138 | -------------------------------------------------------------------------------- /scripts/downsample_data.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Downsample a dataset so it can be kept in the repo to use as an example 3 | """
4 | 5 | 6 | import h5py 7 | import numpy as np 8 | 9 | src_dir = '/share/crsp/lab/amowli/share/BubbleML2/PoolBoiling-SubCooled-FC72-2D/' 10 | dst_dir = '/share/crsp/lab/amowli/share/BubbleML2/example/' 11 | files = ['Twall-100.hdf5', 'Twall-103.hdf5', 'Twall-106.hdf5'] 12 | 13 | 14 | src_files = [src_dir + f for f in files] 15 | dst_files = [dst_dir + f for f in files] 16 | 17 | keys_to_downsample = [ 18 | 'temperature', 19 | 'velx', 20 | 'vely', 21 | 'dfun', 22 | 'pressure', 23 | 'x', 24 | 'y' 25 | ] 26 | 27 | keys_to_copy = [ 28 | 'real-runtime-params', 29 | 'int-runtime-params' 30 | ] 31 | 32 | for src_file, dst_file in zip(src_files, dst_files): 33 | with h5py.File(src_file, 'r') as sf: 34 | with h5py.File(dst_file, 'w') as df: 35 | for key in keys_to_downsample: 36 | df.create_dataset(key, data=sf[key][:, ::8,::8]) 37 | for key in keys_to_copy: 38 | df.create_dataset(key, data=sf[key]) 39 | -------------------------------------------------------------------------------- /scripts/fourier.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Estimate the radially averaged power spectrum 3 | """ 4 | 5 | import matplotlib.pyplot as plt 6 | import torch 7 | import numpy as np 8 | from scipy import stats 9 | from scipy import signal 10 | 11 | # Note: this is data for Temp-only PB_Gravity! 12 | output_tensors = { 13 | 'Simulation': 'scripts/data/sim_output.pt', 14 | 'UNet$_{bench}$': 'scripts/data/unet2d_output.pt', 15 | 'UNO': 'scripts/data/uno_output.pt', 16 | 'FNO': 'scripts/data/fno_output.pt' 17 | } 18 | 19 | 20 | 21 | tensor = torch.load(output_tensors['Simulation']) 22 | 23 | for time in range(50, 70): 24 | t = tensor[time].numpy() 25 | 26 | f = np.fft.fft2(t) 27 | fshift = np.fft.fftshift(f) 28 | original = np.copy(fshift) 29 | 30 | # zero out the low frequencies 31 | [rows, cols] = original.shape 32 | crow, ccol = rows//2, cols//2 33 | d = 64 34 | fshift[crow-d:crow+d, ccol-d:ccol+d] = 0 35 | 36 | # subtract high frequencies from original 37 | # and convert back to real space 38 | f_ishift= np.fft.ifftshift(original - fshift) 39 | t_low = np.fft.ifft2(f_ishift) 40 | t_low = np.abs(t_low) 41 | 42 | fig, ax = plt.subplots(1, 2) 43 | ax[0].imshow(np.flipud(t), vmin=0, vmax=1) 44 | ax[1].imshow(np.flipud(t_low), vmin=0, vmax=1) 45 | plt.savefig(f'low_freq_{time}', dpi=400) 46 | plt.close() 47 | 48 | -------------------------------------------------------------------------------- /scripts/permute_dataset.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Convert an HDF5 file laid out [XxYxT] to [TxXxY]. 
3 | """ 4 | 5 | import argparse 6 | import glob 7 | import h5py 8 | from pathlib import Path 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('--src', type=str, help='path to hdf5 file to permute') 12 | parser.add_argument('--dst', type=str, help='path to write permuted file') 13 | args = parser.parse_args() 14 | 15 | keys_to_permute = [ 16 | 'temperature', 17 | 'velx', 18 | 'vely', 19 | 'pressure', 20 | 'dfun', 21 | 'x', 22 | 'y' 23 | ] 24 | 25 | keys_to_copy = [ 26 | 'real-runtime-params', 27 | 'int-runtime-params' 28 | ] 29 | 30 | # change from [XxYxT] to [TxXxY] 31 | perm = (2, 0, 1) 32 | 33 | src_files = [Path(fn) for fn in glob.glob(f'{args.src}/*.hdf5')] 34 | print(src_files) 35 | 36 | for src_file in src_files: 37 | with h5py.File(src_file, 'r') as src: 38 | dst_file = f'{args.dst}/{src_file.name}' 39 | print(f'copying {src_file} to {dst_file}') 40 | with h5py.File(dst_file, 'w') as dst: 41 | for key in keys_to_permute: 42 | dst.create_dataset(key, data=src[key][:].transpose(perm)) 43 | for key in keys_to_copy: 44 | dst.create_dataset(key, data=src[key][:]) 45 | -------------------------------------------------------------------------------- /scripts/plt_hf.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | plt.rc("font", family="serif", size=18, weight="bold") 5 | plt.rc("axes", labelweight="bold") 6 | #plt.rc("text", usetex=True) 7 | #plt.figure(figsize=(5, 4), dpi=400) 8 | 9 | t_wall_subcooled = [79, 81, 85, 90, 95, 98, 100, 103, 106, 110] 10 | y1 = [11823.9753, 17338.4293, 17819.4369, 23312.0327, 26425.6244, 28200.2418, 30611.4471, 34459.4348, 36112.5162, 38661.8841] 11 | 12 | t_wall_saturated = [65, 70, 75, 80, 85, 90, 95, 100]#, 150] 13 | y2 = [3384.0490, 7500.3820, 9104.2976, 13099.5624, 15099.1912, 17036.5389, 18199.7169, 20111.4978]#, 24238.6561] 14 | 15 | x1 = [(x-58) for x in list(sorted(t_wall_subcooled))] 16 | x2 = [(x-58) for x in list(sorted(t_wall_saturated))] 17 | y1 = np.array(y1)/np.max(y1) 18 | 19 | # hard-code a max HF value, since we don't actually cover 20 | # the full range in the cross validation. 
21 | # This max is approximately the expected heatflux and is used 22 | # for normalization only 23 | y2 = np.array(y2)/23000 24 | 25 | plt.scatter(x1, y1, color='red', label='Subcooled Prediction') 26 | plt.scatter(x2, y2, color='blue', marker='^', label='Saturated Prediction') 27 | expected_x1 = [20, 25, 30, 35, 40, 45, 50] 28 | expected_y1 = [0.3, 0.5, 0.6, 0.72, 0.8, 0.9, 1.0] 29 | expected_x2 = [7, 12, 22, 27, 37, 52] 30 | expected_y2 = [0.2, 0.4, 0.6, 0.7, 0.8, 1.0] 31 | plt.plot(expected_x1, expected_y1, '--', color='red', label='Subcooled Expected') 32 | plt.plot(expected_x2, expected_y2, '--', color='blue', label='Saturated Expected') 33 | plt.xticks(np.arange(0, 65, 5)) 34 | plt.yticks(np.arange(0, 1.1, 0.2)) 35 | plt.xlabel(r'$T_{wall} - T_{sat}(^\circ C)$') 36 | plt.ylabel(r'${q}/{q_{max}}$', rotation='horizontal', labelpad=-80) 37 | plt.legend(loc='lower right', fontsize="16") 38 | plt.savefig('boiling_curve.png', bbox_inches="tight", dpi=500) 39 | plt.show() 40 | -------------------------------------------------------------------------------- /scripts/plt_rmse.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import torch 4 | 5 | def read_tensors(path): 6 | temp_pred = torch.load(path + 'model_ouput.pt') 7 | temp_label = torch.load(path + 'sim_ouput.pt') 8 | 9 | velx_pred = torch.load(path + 'velx_output.pt') 10 | velx_label = torch.load(path + 'velx_label.pt') 11 | 12 | vely_pred = torch.load(path + 'vely_output.pt') 13 | vely_label = torch.load(path + 'vely_label.pt') 14 | 15 | temp_rmses = [] 16 | vel_rmses = [] 17 | velx_rmses = [] 18 | vely_rmses = [] 19 | 20 | for i in range(temp_pred.shape[0] // 2): 21 | temp_rmse = torch.sqrt(torch.mean((temp_pred[i] - temp_label[i]) ** 2)) 22 | 23 | velx_rmse = torch.sqrt(torch.mean((velx_pred[i] - velx_label[i]) ** 2)) 24 | vely_rmse = torch.sqrt(torch.mean((vely_pred[i] - vely_label[i]) ** 2)) 25 | 26 | vel_pred = torch.stack((velx_pred[i], vely_pred[i]), dim=0) 27 | vel_label = torch.stack((velx_label[i], vely_label[i]), dim=0) 28 | vel_rmse = torch.sqrt(torch.mean((vel_pred - vel_label) ** 2).sum(dim=0)) 29 | 30 | temp_rmses.append(temp_rmse) 31 | velx_rmses.append(velx_rmse) 32 | vely_rmses.append(vely_rmse) 33 | vel_rmses.append(vel_rmse) 34 | 35 | return temp_rmses, velx_rmses, vely_rmses, vel_rmses 36 | 37 | t1, vx1, vy1, v1 = read_tensors('scripts/data/long_rollout/') 38 | t2, vx2, vy2, v2 = read_tensors('scripts/data/long_rollout_push/') 39 | 40 | plt.rc("font", family="serif", size=18, weight="bold") 41 | plt.rc("axes", labelweight="bold") 42 | 43 | plt.plot(range(len(t1)), t1, label='UNet$_{mod}$', linewidth=3, color='g') 44 | plt.plot(range(len(t2)), t2, label='P-UNet$_{mod}$', linewidth=3, color='r') 45 | plt.xlabel('Iteration') 46 | plt.ylabel('RMSE', labelpad=-75) 47 | plt.legend(fontsize='16') 48 | plt.savefig('temp_iter_rmse.png', bbox_inches='tight', dpi=500) 49 | plt.close() 50 | 51 | plt.plot(range(len(t1)), vx1, label='UNet$_{mod}$', linewidth=3) 52 | plt.plot(range(len(t2)), vx2, label='P-UNet$_{mod}$', linewidth=3) 53 | plt.xlabel('Iteration') 54 | plt.ylabel('RMSE', labelpad=-80) 55 | plt.legend(fontsize='16') 56 | plt.savefig('velx_iter_rmse.png', bbox_inches='tight', dpi=500) 57 | plt.close() 58 | 59 | plt.plot(range(len(t1)), vy1, label='UNet$_{mod}$', linewidth=3) 60 | plt.plot(range(len(t2)), vy2, label='P-UNet$_{mod}$', linewidth=3) 61 | plt.xlabel('Iteration') 62 | plt.ylabel('RMSE', labelpad=-80) 63 | 
63 | plt.legend(fontsize='16')
64 | plt.savefig('vely_iter_rmse.png', bbox_inches='tight', dpi=500)
65 | plt.close()
66 | 
67 | plt.plot(range(len(t1)), v1, label='UNet$_{mod}$', linewidth=3, color='g')
68 | plt.plot(range(len(t2)), v2, label='P-UNet$_{mod}$', linewidth=3, color='r')
69 | plt.xlabel('Iteration')
70 | plt.ylabel('RMSE', labelpad=-75)
71 | plt.legend(fontsize='16')
72 | plt.savefig('vel_iter_rmse.png', bbox_inches='tight', dpi=500)
73 | plt.close()
74 | 
-------------------------------------------------------------------------------- /scripts/psd.py: --------------------------------------------------------------------------------
1 | r"""
2 | Estimate the radially averaged power spectrum
3 | """
4 | 
5 | import matplotlib.pyplot as plt
6 | import torch
7 | import numpy as np
8 | from scipy import stats
9 | from scipy import signal
10 | 
11 | #output_tensors = {  # unused alternative inputs (x-velocity fields), kept for reference
12 | #    'Simulation Temperature': 'scripts/data/vel_unet_mod_push/velx_label.pt',
13 | #    'UNet$_{mod}$ Temperature': 'scripts/data/vel_unet_mod/velx_output.pt',
14 | #    'P-UNet$_{mod}$ Temperature': 'scripts/data/vel_unet_mod_push/velx_output.pt',
15 | #    'UNO Temperature': 'scripts/data/uno/velx_output.pt'
16 | #}
17 | 
18 | output_tensors = {
19 |     'Simulation': 'scripts/data/vel_unet_mod_push/sim_ouput.pt',
20 |     'UNet$_{mod}$': 'scripts/data/vel_unet_mod/model_ouput.pt',
21 |     'P-UNet$_{mod}$': 'scripts/data/vel_unet_mod_push/model_ouput.pt',
22 |     'UNO': 'scripts/data/uno/model_ouput.pt'
23 | }
24 | 
25 | data = [(name, torch.load(pth)) for (name, pth) in output_tensors.items()]
26 | 
27 | power = {}
28 | 
29 | plt.rc("font", family="serif", size=18, weight="bold")
30 | plt.rc("axes", labelweight="bold")
31 | 
32 | steps = [0, 15, 30, 60]
33 | 
34 | fig, ax = plt.subplots(1, len(steps), figsize=(15, 5))
35 | 
36 | for idx, time in enumerate(steps):
37 |     ax[idx].set_title(f'Step {time}')
38 |     ax[idx].set_yscale('log')
39 |     if idx == 0:
40 |         ax[idx].set_ylabel('Magnitude')
41 |     ax[idx].set_xlabel('Frequency')
42 |     #ax[idx].set_ylim([0, 100000])
43 |     for name, tensor in data:
44 |         print(tensor.size())
45 |         timestep = tensor[time].numpy()
46 | 
47 |         fourier = np.fft.fftn(timestep)
48 |         fourier_amp = np.abs(fourier) ** 2
49 | 
50 |         npix = timestep.shape[0]
51 |         kfreq = np.fft.fftfreq(npix) * npix
52 |         kfreq2d = np.meshgrid(kfreq, kfreq)
53 |         knrm = np.sqrt(kfreq2d[0] ** 2 + kfreq2d[1] ** 2)  # radial wavenumber |k| on the 2D grid
54 | 
55 |         knrm = knrm.flatten()
56 |         fourier_amp = fourier_amp.flatten()
57 | 
58 |         kbins = np.arange(0.5, npix//2 + 1, 1.)
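        # Radial averaging: |F(k)|^2 is grouped into unit-width annuli in |k|
        # (kbins) and averaged per annulus by stats.binned_statistic below;
        # kvals are the annulus centers. A minimal self-contained sketch of the
        # same recipe, where `field` is a hypothetical square 2D array:
        #
        #   power = np.abs(np.fft.fftn(field)) ** 2
        #   k = np.fft.fftfreq(field.shape[0]) * field.shape[0]
        #   kx, ky = np.meshgrid(k, k)
        #   k_mag = np.sqrt(kx ** 2 + ky ** 2).flatten()
        #   bins = np.arange(0.5, field.shape[0] // 2 + 1, 1.)
        #   psd, _, _ = stats.binned_statistic(k_mag, power.flatten(),
        #                                      statistic='mean', bins=bins)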
59 |         kvals = 0.5 * (kbins[1:] + kbins[:-1])
60 | 
61 |         Abins, _, _ = stats.binned_statistic(knrm, fourier_amp,
62 |                                              statistic='mean',
63 |                                              bins=kbins)
64 |         N = 8
65 |         f = np.array([1.0 / N for _ in range(N)])  # 8-tap moving average to smooth the binned spectrum
66 | 
67 | 
68 |         ax[idx].plot(kvals, signal.lfilter(f, 1, Abins), label=name, linewidth=2)
69 | 
70 | #plt.plot(psd[::-1], label=name)
71 | #plt.plot(freq, label=name+'freq')
72 | #plt.tight_layout()
73 | plt.legend(fontsize=14)
74 | plt.tight_layout()
75 | plt.savefig('psd_time.png')
76 | plt.close()
77 | 
78 | 
-------------------------------------------------------------------------------- /scripts/viz.py: --------------------------------------------------------------------------------
1 | import argparse
2 | import pathlib
3 | 
4 | import matplotlib.pyplot as plt
5 | from matplotlib.colors import LinearSegmentedColormap
6 | import h5py
7 | import numpy as np
8 | import cv2
9 | 
10 | 
11 | def plot_arr(dist_fields, temp_fields, pres_fields, velx_fields, vely_fields, velmag_fields, mflux_fields, normx_fields, normy_fields, op_dir):
12 |     """
13 |     input: 3D arrays of variable frames (t, x, y)
14 |     input: output directory
15 |     Plots frames for each individual timestep
16 |     """
17 |     timesteps = dist_fields.shape[0]
18 | 
19 |     for i in range(timesteps):
20 |         i_str = str(i).zfill(3)
21 | 
22 |         dist_field = np.copy(np.flipud(dist_fields[i, :, :]))
23 |         temp_field = np.flipud(temp_fields[i, :, :])
24 |         pres_field = np.flipud(pres_fields[i, :, :])
25 |         velx_field = np.flipud(velx_fields[i, :, :])
26 |         vely_field = -1 * np.flipud(vely_fields[i, :, :])
27 |         velmag_field = np.flipud(velmag_fields[i, :, :])
28 |         mflux_field = np.flipud(mflux_fields[i, :, :])
29 |         normx_field = np.flipud(normx_fields[i, :, :])
30 |         normy_field = np.flipud(normy_fields[i, :, :])
31 | 
32 |         # Plot distance field
33 |         dist_field[dist_field>0] *= (255/dist_field.max())
34 |         dist_field[dist_field<0] = 255
35 |         dist_field = dist_field.astype(np.uint8)
36 |         edge_map = cv2.Canny(dist_field, 0, 255)
37 |         kernel = np.ones((3,3),np.uint8)
38 |         edge_map = cv2.dilate(edge_map, kernel, iterations=1)
39 |         mask = np.where(edge_map > 0, 0, 255)
40 |         alpha = np.where(mask > 0, 0, 255)
41 |         overlay = np.dstack((mask, mask, mask, alpha))
42 |         pathlib.Path(f'{op_dir}/dist').mkdir(parents=True, exist_ok=True)
43 |         plt.imsave(f'{op_dir}/dist/{i_str}.png', dist_field, cmap='GnBu')
44 |         plt.close()
45 | 
46 |         # Plot temperature field
47 |         temp_ranges = [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.134, 0.167,
48 |                        0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
49 |         color_codes = ['#0000FF', '#0443FF', '#0E7AFF', '#16B4FF', '#1FF1FF', '#21FFD3',
50 |                        '#22FF9B', '#22FF67', '#22FF15', '#29FF06', '#45FF07', '#6DFF08',
51 |                        '#9EFF09', '#D4FF0A', '#FEF30A', '#FEB709', '#FD7D08', '#FC4908',
52 |                        '#FC1407', '#FB0007']
53 |         colors = list(zip(temp_ranges, color_codes))
54 |         cmap = LinearSegmentedColormap.from_list('temperature_colormap', colors)
55 | 
56 |         fig, ax = plt.subplots()
57 |         im = ax.imshow(temp_field, cmap=cmap)
58 |         ax.imshow(overlay, alpha=1)
59 | 
60 |         ax.set_aspect('equal')
61 |         ax.axis('off')
62 |         fig.colorbar(im, ticks=[0, 0.2, 0.6, 0.9], fraction=0.04, pad=0.05, location='bottom')
63 |         pathlib.Path(f'{op_dir}/temp').mkdir(parents=True, exist_ok=True)
64 |         plt.savefig(f'{op_dir}/temp/{i_str}.png', bbox_inches='tight')
65 |         plt.close()
66 | 
67 |         # Plot pressure field
68 |         pres_field = (pres_field - pres_field.min())/(pres_field.max() - pres_field.min())
69 |         fig, ax = plt.subplots()
70 |         mask = np.where(edge_map > 0, 0, 255)
71 |         alpha = 
np.where(mask > 0, 0, 255) 72 | overlay = np.dstack((mask, mask, mask, alpha)) 73 | 74 | im = ax.imshow(pres_field, cmap='seismic') 75 | ax.imshow(overlay, alpha=1) 76 | 77 | ax.set_aspect('equal') 78 | ax.axis('off') 79 | fig.colorbar(im, fraction=0.04, pad=0.05, location='bottom') 80 | pathlib.Path(f'{op_dir}/pres').mkdir(parents=True, exist_ok=True) 81 | plt.savefig(f'{op_dir}/pres/{i_str}.png', bbox_inches='tight') 82 | plt.close() 83 | 84 | # Plot velocity field 85 | x = np.arange(0,velmag_field.shape[1],1) 86 | y = np.arange(0,velmag_field.shape[0],1) 87 | X,Y = np.meshgrid(x,y) 88 | 89 | velmag_field[np.flipud(dist_fields[i, :, :])<0] = 0 90 | velx_field[np.flipud(dist_fields[i, :, :])>0] = 0 91 | vely_field[np.flipud(dist_fields[i, :, :])>0] = 0 92 | 93 | fig, ax = plt.subplots() 94 | im = ax.imshow(velmag_field, vmin=0, vmax=3, cmap='Purples') 95 | ax.imshow(overlay, alpha=1) 96 | ax.streamplot(X,Y,velx_field,vely_field, density=1.5, color='red') 97 | ax.set_aspect('equal') 98 | ax.axis('off') 99 | fig.colorbar(im, fraction=0.04, pad=0.05, location='bottom') 100 | pathlib.Path(f'{op_dir}/vel').mkdir(parents=True, exist_ok=True) 101 | plt.savefig(f'{op_dir}/vel/{i_str}.png', bbox_inches='tight') 102 | plt.close() 103 | 104 | fig, ax = plt.subplots() 105 | q = ax.quiver(X[::4,::4], Y[::4,::4], normx_field[::4,::4], normy_field[::4,::4], scale=30) 106 | ax.imshow(overlay, alpha=1) 107 | ax.set_aspect('equal') 108 | ax.axis('off') 109 | pathlib.Path(f'{op_dir}/norm_vecs').mkdir(parents=True, exist_ok=True) 110 | plt.savefig(f'{op_dir}/norm_vecs/{i_str}.png', bbox_inches='tight') 111 | plt.close() 112 | 113 | fig, ax = plt.subplots() 114 | im = ax.imshow(mflux_field, vmax=0.005, vmin=-0.005, cmap='bwr') 115 | ax.set_aspect('equal') 116 | fig.colorbar(im, fraction=0.04, pad=0.05) 117 | pathlib.Path(f'{op_dir}/mflux').mkdir(parents=True, exist_ok=True) 118 | plt.savefig(f'{op_dir}/mflux/{i_str}.png', bbox_inches='tight') 119 | plt.close() 120 | 121 | if __name__ == '__main__': 122 | parser = argparse.ArgumentParser() 123 | parser.add_argument('--file', type=str, help='path to hdf5 file to visualize') 124 | parser.add_argument('--output_dir', type=str, help='path to output directory') 125 | args = parser.parse_args() 126 | 127 | simul_file = h5py.File(args.file, "r") 128 | 129 | dist_fields = simul_file['dfun'][:] 130 | temp_fields = simul_file['temperature'][:] 131 | pres_fields = simul_file['pressure'][:] 132 | velx_fields = simul_file['velx'][:] 133 | vely_fields = simul_file['vely'][:] 134 | velmag_fields = np.sqrt(velx_fields**2 + vely_fields**2) 135 | mflux_fields = simul_file['massflux'][:] 136 | normx_fields = simul_file['normx'][:] 137 | normy_fields = simul_file['normy'][:] 138 | 139 | plot_arr(dist_fields, temp_fields, pres_fields, velx_fields, vely_fields, velmag_fields, mflux_fields, normx_fields, normy_fields, op_dir=args.output_dir) 140 | -------------------------------------------------------------------------------- /scripts/viz_temp.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import matplotlib.pyplot as plt 4 | from matplotlib.colors import LinearSegmentedColormap, BoundaryNorm 5 | import numpy as np 6 | import os 7 | from pathlib import Path 8 | import subprocess 9 | import scipy.fft as sfft 10 | from dataclasses import dataclass 11 | 12 | def parse_args(): 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument('--path', required=True, type=str, 15 | help='Path to directory with 
model and sim output.pt files')
16 |     return parser.parse_args()
17 | 
18 | @dataclass
19 | class BoilingData:
20 |     temp: torch.Tensor
21 | 
22 | def load_vel_data(temp_path):  # note: despite the name, this loads the temperature tensors
23 |     pred = BoilingData(
24 |         torch.load(f'{temp_path}/model_ouput.pt').numpy())
25 |     label = BoilingData(
26 |         torch.load(f'{temp_path}/sim_ouput.pt').numpy())
27 |     return pred, label
28 | 
29 | def main():
30 |     args = parse_args()
31 | 
32 |     job_id = '25057303/'  # hard-coded SLURM job id: tensors are read from test_im/temp/<job_id>
33 |     pred, label = load_vel_data(f'test_im/temp/{job_id}')
34 | 
35 |     plt_temp(pred.temp, label.temp, args.path, 'model')
36 | 
37 |     subprocess.call(
38 |         f'ffmpeg -y -framerate 25 -pattern_type glob -i "{args.path}/*.png" output.mp4',
39 |         shell=True)
40 | 
41 | def temp_cmap():
42 |     temp_ranges = [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.134, 0.167,
43 |                    0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
44 |     color_codes = ['#0000FF', '#0443FF', '#0E7AFF', '#16B4FF', '#1FF1FF', '#21FFD3',
45 |                    '#22FF9B', '#22FF67', '#22FF15', '#29FF06', '#45FF07', '#6DFF08',
46 |                    '#9EFF09', '#D4FF0A', '#FEF30A', '#FEB709', '#FD7D08', '#FC4908',
47 |                    '#FC1407', '#FB0007']
48 |     colors = list(zip(temp_ranges, color_codes))
49 |     cmap = LinearSegmentedColormap.from_list('temperature_colormap', colors)
50 |     return cmap
51 | 
52 | def fft(x):
53 |     x_fft = sfft.fft2(x)
54 |     x_shift = np.abs(sfft.fftshift(x_fft))
55 |     return x_shift
56 | 
57 | def mag(velx, vely):
58 |     return np.sqrt(velx**2 + vely**2)
59 | 
60 | def plt_vel(pred, label, path, model_name):  # unused here: expects velx/vely fields that this script's BoilingData does not carry (see viz_vel.py)
61 |     plt.rc("font", family="serif", size=16, weight="bold")
62 |     plt.rc("axes", labelweight="bold")
63 | 
64 |     label_mag = mag(label.velx, label.vely)
65 |     pred_mag = mag(pred.velx, pred.vely)
66 |     mag_vmax = abs(pred_mag[:50]).max()
67 |     print(label_mag.max(), pred_mag.max())
68 | 
69 |     frames = min(pred.temp.shape[0], 100)
70 |     for i in range(frames):
71 |         i_str = str(i).zfill(3)
72 |         f, ax = plt.subplots(2, 2, layout='constrained')
73 | 
74 |         #x_vmax, x_vmin = label.velx.max(), label.velx.min()
75 |         #y_vmax, y_vmin = label.vely.max(), label.vely.min()
76 | 
77 |         cm_object = ax[0, 0].imshow(np.flipud(label.temp[i]), vmin=0, vmax=1, cmap=temp_cmap())
78 |         #ax[1, 0].imshow(np.flipud(label.velx[i]), vmin=x_vmin, vmax=x_vmax, cmap='jet')
79 |         #ax[2, 0].imshow(np.flipud(label.vely[i]), vmin=y_vmin, vmax=y_vmax, cmap='jet')
80 |         #ax[1, 0].imshow(np.flipud(label_mag[i]), vmin=0, vmax=mag_vmax, cmap='jet')
81 | 
82 |         ax[0, 1].imshow(np.flipud(np.nan_to_num(pred.temp[i])), vmin=0, vmax=1, cmap=temp_cmap())
83 |         #ax[1, 1].imshow(np.flipud(pred.velx[i]), vmin=x_vmin, vmax=x_vmax, cmap='jet')
84 |         #ax[2, 1].imshow(np.flipud(pred.vely[i]), vmin=y_vmin, vmax=x_vmax, cmap='jet')
85 |         #ax[1, 1].imshow(np.flipud(pred_mag[i]), vmin=0, vmax=mag_vmax, cmap='jet')
86 | 
87 |         ax[0, 0].axis('off')
88 |         ax[1, 0].axis('off')
89 |         ax[0, 1].axis('off')
90 |         ax[1, 1].axis('off')
91 | 
92 |         #ax[0, 2].imshow(np.flipud(fft(label.temp[i])))
93 |         #ax[1, 2].imshow(np.flipud(fft(label.velx[i])))
94 |         #ax[2, 2].imshow(np.flipud(fft(label.vely[i])))
95 |         #ax[3, 2].imshow(np.flipud(fft(label_mag)))
96 | 
97 |         #ax[0, 3].imshow(np.flipud(fft(pred.temp[i])))
98 |         #ax[1, 3].imshow(np.flipud(fft(pred.velx[i])))
99 |         #ax[2, 3].imshow(np.flipud(fft(pred.vely[i])))
100 |         #ax[3, 3].imshow(np.flipud(fft(pred_mag)))
101 | 
102 |         im_path = Path(path)
103 |         im_path.mkdir(parents=True, exist_ok=True)
104 |         plt.savefig(f'{str(im_path)}/{i_str}.png',
105 |                     dpi=200,
106 |                     bbox_inches='tight',
107 |                     transparent=True)
108 |         plt.close()
109 | 
110 | 
111 | def plt_temp(temps, labels, path, 
model_name): 112 | print(temps.min(), temps.max(), 113 | labels.min(), labels.max()) 114 | 115 | plt.rc("font", family="serif", size=16, weight="bold") 116 | plt.rc("axes", labelweight="bold") 117 | for i in range(len(temps)): 118 | i_str = str(i).zfill(3) 119 | 120 | def plt_temp_arr(f, ax, arr, title): 121 | cm_object = ax.imshow(arr, vmin=0, vmax=1, cmap=temp_cmap()) 122 | #ax.set_title(title) 123 | ax.axis('off') 124 | return cm_object 125 | 126 | temp = temps[i] 127 | label = labels[i] 128 | f, axarr = plt.subplots(2, 3, layout="constrained") 129 | cm_object = plt_temp_arr(f, axarr[0, 0], np.flipud(label), 'Ground Truth') 130 | cm_object = plt_temp_arr(f, axarr[0, 1], np.flipud(temp), model_name) 131 | 132 | err = np.abs(temp - label) 133 | cm_object = plt_temp_arr(f, axarr[0, 2], np.flipud(err), 'Absolute Error') 134 | f.tight_layout() 135 | f.colorbar(cm_object, 136 | ax=axarr.ravel().tolist(), 137 | ticks=[0, 0.2, 0.6, 0.9], 138 | fraction=0.04, 139 | pad=0.02) 140 | f.set_size_inches(w=6, h=3) 141 | 142 | label_h = fft(label) 143 | temp_h = fft(temp) 144 | err_h = np.abs(label_h - temp_h) 145 | 146 | axarr[1, 0].imshow(np.flipud(label_h)) 147 | axarr[1, 1].imshow(np.flipud(temp_h)) 148 | axarr[1, 2].imshow(np.flipud(err_h)) 149 | 150 | im_path = Path(path) 151 | im_path.mkdir(parents=True, exist_ok=True) 152 | plt.savefig(f'{str(im_path)}/{i_str}.png', 153 | dpi=600, 154 | bbox_inches='tight', 155 | transparent=True) 156 | plt.close() 157 | 158 | if __name__ == '__main__': 159 | main() 160 | -------------------------------------------------------------------------------- /scripts/viz_temp2.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import matplotlib.pyplot as plt 4 | from matplotlib.colors import LinearSegmentedColormap, BoundaryNorm 5 | import numpy as np 6 | import os 7 | from pathlib import Path 8 | import subprocess 9 | import scipy.fft as sfft 10 | from dataclasses import dataclass 11 | 12 | def parse_args(): 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument('--path', required=True, type=str, 15 | help='Path to directory with model and sim output.pt files') 16 | return parser.parse_args() 17 | 18 | @dataclass 19 | class BoilingData: 20 | temp: torch.Tensor 21 | 22 | def load_vel_data(temp_path): 23 | pred = BoilingData( 24 | torch.load(f'{temp_path}/model_ouput.pt').numpy()) 25 | label = BoilingData( 26 | torch.load(f'{temp_path}/sim_ouput.pt').numpy()) 27 | return pred, label 28 | 29 | def main(): 30 | args = parse_args() 31 | 32 | job_id = '25032868/' 33 | pred, label = load_vel_data(f'test_im/temp/{job_id}') 34 | 35 | plt_temp(pred.temp, label.temp, args.path, 'model') 36 | 37 | subprocess.call( 38 | f'ffmpeg -y -framerate 25 -pattern_type glob -i "{args.path}/*.png" output.mp4', 39 | shell=True) 40 | 41 | def temp_cmap(): 42 | temp_ranges = [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.134, 0.167, 43 | 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] 44 | color_codes = ['#0000FF', '#0443FF', '#0E7AFF', '#16B4FF', '#1FF1FF', '#21FFD3', 45 | '#22FF9B', '#22FF67', '#22FF15', '#29FF06', '#45FF07', '#6DFF08', 46 | '#9EFF09', '#D4FF0A', '#FEF30A', '#FEB709', '#FD7D08', '#FC4908', 47 | '#FC1407', '#FB0007'] 48 | colors = list(zip(temp_ranges, color_codes)) 49 | cmap = LinearSegmentedColormap.from_list('temperature_colormap', colors) 50 | return cmap 51 | 52 | def fft(x): 53 | x_fft = sfft.fft2(x) 54 | x_shift = np.abs(sfft.fftshift(x_fft)) 55 | return x_shift 56 | 57 | def 
mag(velx, vely): 58 | return np.sqrt(velx**2 + vely**2) 59 | 60 | def plt_vel(pred, label, path, model_name): 61 | plt.rc("font", family="serif", size=16, weight="bold") 62 | plt.rc("axes", labelweight="bold") 63 | 64 | label_mag = mag(label.velx, label.vely) 65 | pred_mag = mag(pred.velx, pred.vely) 66 | mag_vmax = abs(pred_mag[:50]).max() 67 | print(label_mag.max(), pred_mag.max()) 68 | 69 | frames = min(pred.temp.shape[0], 100) 70 | for i in range(frames): 71 | i_str = str(i).zfill(3) 72 | f, ax = plt.subplots(2, 2, layout='constrained') 73 | 74 | #x_vmax, x_vmin = label.velx.max(), label.velx.min() 75 | #y_vmax, y_vmin = label.vely.max(), label.vely.min() 76 | 77 | cm_object = ax[0, 0].imshow(np.flipud(label.temp[i]), vmin=0, vmax=1, cmap=temp_cmap()) 78 | #ax[1, 0].imshow(np.flipud(label.velx[i]), vmin=x_vmin, vmax=x_vmax, cmap='jet') 79 | #ax[2, 0].imshow(np.flipud(label.vely[i]), vmin=y_vmin, vmax=y_vmax, cmap='jet') 80 | #ax[1, 0].imshow(np.flipud(label_mag[i]), vmin=0, vmax=mag_vmax, cmap='jet') 81 | 82 | ax[0, 1].imshow(np.flipud(np.nan_to_num(pred.temp[i])), vmin=0, vmax=1, cmap=temp_cmap()) 83 | #ax[1, 1].imshow(np.flipud(pred.velx[i]), vmin=x_vmin, vmax=x_vmax, cmap='jet') 84 | #ax[2, 1].imshow(np.flipud(pred.vely[i]), vmin=y_vmin, vmax=x_vmax, cmap='jet') 85 | #ax[1, 1].imshow(np.flipud(pred_mag[i]), vmin=0, vmax=mag_vmax, cmap='jet') 86 | 87 | ax[0, 0].axis('off') 88 | ax[1, 0].axis('off') 89 | ax[0, 1].axis('off') 90 | ax[1, 1].axis('off') 91 | 92 | #ax[0, 2].imshow(np.flipud(fft(label.temp[i]))) 93 | #ax[1, 2].imshow(np.flipud(fft(label.velx[i]))) 94 | #ax[2, 2].imshow(np.flipud(fft(label.vely[i]))) 95 | #ax[3, 2].imshow(np.flipud(fft(label_mag))) 96 | 97 | #ax[0, 3].imshow(np.flipud(fft(pred.temp[i]))) 98 | #ax[1, 3].imshow(np.flipud(fft(pred.velx[i]))) 99 | #ax[2, 3].imshow(np.flipud(fft(pred.vely[i]))) 100 | #ax[3, 3].imshow(np.flipud(fft(pred_mag))) 101 | 102 | im_path = Path(path) 103 | im_path.mkdir(parents=True, exist_ok=True) 104 | plt.savefig(f'{str(im_path)}/{i_str}.png', 105 | dpi=200, 106 | bbox_inches='tight', 107 | transparent=True) 108 | plt.close() 109 | 110 | 111 | def plt_temp(temps, labels, path, model_name): 112 | print(temps.min(), temps.max(), 113 | labels.min(), labels.max()) 114 | 115 | plt.rc("font", family="serif", size=16, weight="bold") 116 | plt.rc("axes", labelweight="bold") 117 | for i in range(len(temps)): 118 | i_str = str(i).zfill(3) 119 | 120 | def plt_temp_arr(f, ax, arr, title): 121 | cm_object = ax.imshow(arr, vmin=0, vmax=1, cmap=temp_cmap()) 122 | #ax.set_title(title) 123 | ax.axis('off') 124 | return cm_object 125 | 126 | temp = temps[i] 127 | label = labels[i] 128 | f, axarr = plt.subplots(2, 3, layout="constrained") 129 | cm_object = plt_temp_arr(f, axarr[0, 0], np.flipud(label), 'Ground Truth') 130 | cm_object = plt_temp_arr(f, axarr[0, 1], np.flipud(temp), model_name) 131 | 132 | err = np.abs(temp - label) 133 | cm_object = plt_temp_arr(f, axarr[0, 2], np.flipud(err), 'Absolute Error') 134 | f.tight_layout() 135 | f.colorbar(cm_object, 136 | ax=axarr.ravel().tolist(), 137 | ticks=[0, 0.2, 0.6, 0.9], 138 | fraction=0.04, 139 | pad=0.02) 140 | f.set_size_inches(w=6, h=3) 141 | 142 | label_h = fft(label) 143 | temp_h = fft(temp) 144 | err_h = np.abs(label_h - temp_h) 145 | 146 | axarr[1, 0].imshow(np.flipud(label_h)) 147 | axarr[1, 1].imshow(np.flipud(temp_h)) 148 | axarr[1, 2].imshow(np.flipud(err_h)) 149 | 150 | im_path = Path(path) 151 | im_path.mkdir(parents=True, exist_ok=True) 152 | plt.savefig(f'{str(im_path)}/{i_str}.png', 
153 | dpi=600, 154 | bbox_inches='tight', 155 | transparent=True) 156 | plt.close() 157 | 158 | if __name__ == '__main__': 159 | main() 160 | -------------------------------------------------------------------------------- /scripts/viz_vel.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import matplotlib.pyplot as plt 4 | from matplotlib.colors import LinearSegmentedColormap, BoundaryNorm 5 | import numpy as np 6 | import os 7 | from pathlib import Path 8 | import subprocess 9 | import scipy.fft as sfft 10 | from dataclasses import dataclass 11 | 12 | def parse_args(): 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument('--path', required=True, type=str, 15 | help='Path to directory with model and sim output.pt files') 16 | return parser.parse_args() 17 | 18 | @dataclass 19 | class BoilingData: 20 | temp: torch.Tensor 21 | velx: torch.Tensor 22 | vely: torch.Tensor 23 | 24 | def load_vel_data(temp_path, vel_path): 25 | pred = BoilingData( 26 | torch.load(f'{temp_path}/model_ouput.pt').numpy(), 27 | torch.load(f'{vel_path}/velx_output.pt').numpy(), 28 | torch.load(f'{vel_path}/vely_output.pt').numpy()) 29 | label = BoilingData( 30 | torch.load(f'{temp_path}/sim_ouput.pt').numpy(), 31 | torch.load(f'{vel_path}/velx_label.pt').numpy(), 32 | torch.load(f'{vel_path}/vely_label.pt').numpy()) 33 | return pred, label 34 | 35 | def main(): 36 | args = parse_args() 37 | 38 | job_id = '25042240/' 39 | pred, label = load_vel_data(f'test_im/temp/{job_id}', f'test_im/vel/{job_id}') 40 | 41 | plt_vel(pred, label, args.path, 'model') 42 | 43 | subprocess.call( 44 | f'ffmpeg -y -framerate 25 -pattern_type glob -i "{args.path}/*.png" output.mp4', 45 | shell=True) 46 | 47 | def temp_cmap(): 48 | temp_ranges = [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.134, 0.167, 49 | 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] 50 | color_codes = ['#0000FF', '#0443FF', '#0E7AFF', '#16B4FF', '#1FF1FF', '#21FFD3', 51 | '#22FF9B', '#22FF67', '#22FF15', '#29FF06', '#45FF07', '#6DFF08', 52 | '#9EFF09', '#D4FF0A', '#FEF30A', '#FEB709', '#FD7D08', '#FC4908', 53 | '#FC1407', '#FB0007'] 54 | colors = list(zip(temp_ranges, color_codes)) 55 | cmap = LinearSegmentedColormap.from_list('temperature_colormap', colors) 56 | return cmap 57 | 58 | def fft(x): 59 | x_fft = sfft.fft2(x) 60 | x_shift = np.abs(sfft.fftshift(x_fft)) 61 | return x_shift 62 | 63 | def mag(velx, vely): 64 | return np.sqrt(velx**2 + vely**2) 65 | 66 | def plt_vel(pred, label, path, model_name): 67 | plt.rc("font", family="serif", size=16, weight="bold") 68 | plt.rc("axes", labelweight="bold") 69 | 70 | label_mag = mag(label.velx, label.vely) 71 | pred_mag = mag(pred.velx, pred.vely) 72 | mag_vmax = abs(pred_mag[:50]).max() 73 | print(label_mag.max(), pred_mag.max()) 74 | 75 | frames = min(pred.temp.shape[0], 100) 76 | for i in range(frames): 77 | i_str = str(i).zfill(3) 78 | f, ax = plt.subplots(2, 2, layout='constrained') 79 | 80 | x_vmax, x_vmin = label.velx.max(), label.velx.min() 81 | y_vmax, y_vmin = label.vely.max(), label.vely.min() 82 | 83 | cm_object = ax[0, 0].imshow(np.flipud(label.temp[i]), vmin=0, vmax=1, cmap=temp_cmap()) 84 | #ax[1, 0].imshow(np.flipud(label.velx[i]), vmin=x_vmin, vmax=x_vmax, cmap='jet') 85 | #ax[2, 0].imshow(np.flipud(label.vely[i]), vmin=y_vmin, vmax=y_vmax, cmap='jet') 86 | ax[1, 0].imshow(np.flipud(label_mag[i]), vmin=0, vmax=mag_vmax, cmap='jet') 87 | 88 | ax[0, 1].imshow(np.flipud(np.nan_to_num(pred.temp[i])), vmin=0, vmax=1, 
cmap=temp_cmap())
89 |         #ax[1, 1].imshow(np.flipud(pred.velx[i]), vmin=x_vmin, vmax=x_vmax, cmap='jet')
90 |         #ax[2, 1].imshow(np.flipud(pred.vely[i]), vmin=y_vmin, vmax=x_vmax, cmap='jet')
91 |         ax[1, 1].imshow(np.flipud(pred_mag[i]), vmin=0, vmax=mag_vmax, cmap='jet')
92 | 
93 |         ax[0, 0].axis('off')
94 |         ax[1, 0].axis('off')
95 |         ax[0, 1].axis('off')
96 |         ax[1, 1].axis('off')
97 | 
98 |         #ax[0, 2].imshow(np.flipud(fft(label.temp[i])))
99 |         #ax[1, 2].imshow(np.flipud(fft(label.velx[i])))
100 |         #ax[2, 2].imshow(np.flipud(fft(label.vely[i])))
101 |         #ax[3, 2].imshow(np.flipud(fft(label_mag)))
102 | 
103 |         #ax[0, 3].imshow(np.flipud(fft(pred.temp[i])))
104 |         #ax[1, 3].imshow(np.flipud(fft(pred.velx[i])))
105 |         #ax[2, 3].imshow(np.flipud(fft(pred.vely[i])))
106 |         #ax[3, 3].imshow(np.flipud(fft(pred_mag)))
107 | 
108 |         im_path = Path(path)
109 |         im_path.mkdir(parents=True, exist_ok=True)
110 |         plt.savefig(f'{str(im_path)}/{i_str}.png',
111 |                     dpi=200,
112 |                     bbox_inches='tight',
113 |                     transparent=True)
114 |         plt.close()
115 | 
116 | 
117 | def plt_temp(temps, labels, path, model_name):  # unused here: expects torch tensors, but load_vel_data already returns numpy arrays
118 |     print(temps.min(), temps.max(),
119 |           labels.min(), labels.max())
120 | 
121 |     plt.rc("font", family="serif", size=16, weight="bold")
122 |     plt.rc("axes", labelweight="bold")
123 |     for i in range(len(temps)):
124 |         i_str = str(i).zfill(3)
125 | 
126 |         def plt_temp_arr(f, ax, arr, title):
127 |             cm_object = ax.imshow(arr, vmin=0, vmax=1, cmap=temp_cmap())
128 |             #ax.set_title(title)
129 |             ax.axis('off')
130 |             return cm_object
131 | 
132 |         temp = temps[i].numpy()
133 |         label = labels[i].numpy()
134 |         f, axarr = plt.subplots(2, 3, layout="constrained")
135 |         cm_object = plt_temp_arr(f, axarr[0, 0], np.flipud(label), 'Ground Truth')
136 |         cm_object = plt_temp_arr(f, axarr[0, 1], np.flipud(temp), model_name)
137 | 
138 |         err = np.abs(temp - label)
139 |         cm_object = plt_temp_arr(f, axarr[0, 2], np.flipud(err), 'Absolute Error')
140 |         f.tight_layout()
141 |         f.colorbar(cm_object,
142 |                    ax=axarr.ravel().tolist(),
143 |                    ticks=[0, 0.2, 0.6, 0.9],
144 |                    fraction=0.04,
145 |                    pad=0.02)
146 |         f.set_size_inches(w=6, h=3)
147 | 
148 |         label_h = fft(label)
149 |         temp_h = fft(temp)
150 |         err_h = np.abs(label_h - temp_h)
151 | 
152 |         axarr[1, 0].imshow(np.flipud(label_h))
153 |         axarr[1, 1].imshow(np.flipud(temp_h))
154 |         axarr[1, 2].imshow(np.flipud(err_h))
155 | 
156 |         im_path = Path(path)
157 |         im_path.mkdir(parents=True, exist_ok=True)
158 |         plt.savefig(f'{str(im_path)}/{i_str}.png',
159 |                     dpi=600,
160 |                     bbox_inches='tight',
161 |                     transparent=True)
162 |         plt.close()
163 | 
164 | if __name__ == '__main__':
165 |     main()
166 | 
-------------------------------------------------------------------------------- /submit/basic.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -p free-gpu
3 | #SBATCH --nodes=1
4 | #SBATCH --gres=gpu:A100:1
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --cpus-per-task=20
7 | #SBATCH --time=16:00:00
8 | 
9 | module load anaconda/2022.05
10 | . ~/.mycondaconf
11 | conda activate bubble-sciml
12 | module load gcc/11.2.0
13 | 
14 | python sciml/train.py \
15 |     data_base_dir=/share/crsp/lab/amowli/share/BubbleML2/ \
16 |     log_dir=/share/crsp/lab/ai4ts/afeeney/log_dir \
17 |     dataset=PB_WallSuperHeat \
18 |     experiment=temp_fno \
19 |     experiment.train.max_epochs=2
20 | #experiment.lr_scheduler.patience=50
21 | #model_checkpoint=/share/crsp/lab/ai4ts/afeeney/log_dir/23089030/subcooled/UNet2d_vel_dataset_100_1691046606.pt \
22 | #model_checkpoint=/data/homezvol2/afeeney/crsp/ai4ts/afeeney/thermal_models/subcooled/UNet2d_temp_input_dataset_500_1690005305.pt \
23 | 
-------------------------------------------------------------------------------- /submit/cross_val.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A amowli_lab_gpu
3 | #SBATCH -p gpu
4 | #SBATCH --nodes=1
5 | #SBATCH --gres=gpu:A30:1
6 | #SBATCH --ntasks-per-node=1
7 | #SBATCH --cpus-per-task=10
8 | #SBATCH --time=20:00:00
9 | 
10 | module load anaconda/2022.05
11 | . ~/.mycondaconf
12 | conda activate mf-pytorch2
13 | module load gcc/11.2.0
14 | 
15 | python sciml/train.py \
16 |     dataset=PB_WallSuperHeat_CrossVal150 \
17 |     experiment=temp_unet2d \
18 |     experiment.torch_dataset_name=temp_input_dataset \
19 |     experiment.train.max_epochs=100
-------------------------------------------------------------------------------- /submit/data_convert.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -p free
3 | #SBATCH --nodes=1
4 | #SBATCH --ntasks-per-node=1
5 | #SBATCH --cpus-per-task=40
6 | #SBATCH --time=4:00:00
7 | 
8 | module load anaconda/2022.05
9 | . ~/.mycondaconf
10 | conda activate bubble-sciml
11 | module load gcc/11.2.0
12 | 
13 | 
14 | #DATASET=FlowBoiling-VelScale-FC72-2D
15 | DATASET=PoolBoiling-Gravity-FC72-2D
16 | SRC=/share/crsp/lab/ai4ts/share/BubbleML/$DATASET
17 | DST=/share/crsp/lab/amowli/share/BubbleML2/$DATASET
18 | 
19 | mkdir -p $DST
20 | 
21 | python scripts/permute_dataset.py --src $SRC --dst $DST
22 | 
-------------------------------------------------------------------------------- /submit/data_unblock.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -p free
3 | #SBATCH --nodes=1
4 | #SBATCH --ntasks-per-node=1
5 | #SBATCH --cpus-per-task=40
6 | #SBATCH --time=4:00:00
7 | 
8 | module load anaconda/2022.05
9 | . ~/.mycondaconf
10 | conda activate bubble-sciml
11 | module load gcc/11.2.0
12 | 
13 | python scripts/boxkit_dataset.py
14 | 
-------------------------------------------------------------------------------- /submit/debug.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -p free-gpu
3 | #SBATCH --nodes=1
4 | #SBATCH --gres=gpu:A30:1
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --cpus-per-task=10
7 | #SBATCH --mem-per-cpu=4G
8 | #SBATCH --time=0:30:00
9 | 
10 | # One node needs to be used as the "host" for the rendezvous
11 | # system used by torch. This just gets a list of the hostnames
12 | # used by the job and selects the first one.
13 | HOST_NODE_ADDR=$(scontrol show hostnames | head -n 1)
14 | NNODES=$(scontrol show hostnames | wc -l)
15 | 
16 | module load anaconda/2022.05
17 | . ~/.mycondaconf
18 | conda activate bubble-sciml
19 | 
20 | export TORCH_DISTRIBUTED_DEBUG=DETAIL
21 | 
22 | python sciml/train.py \
23 |     data_base_dir=/share/crsp/lab/amowli/share/BubbleML2/ \
24 |     dataset=PB_Gravity \
25 |     log_dir=/pub/afeeney/train_log_dir/ \
26 |     experiment.distributed=False \
27 |     experiment=unet_arena/pb_temp \
28 |     experiment.train.max_epochs=1 \
29 |     model_checkpoint=/pub/afeeney/final_model_checkpoints/PB_Gravity/unet_mod.pt
30 | 
-------------------------------------------------------------------------------- /submit/dist.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A amowli_lab_gpu
3 | #SBATCH -p gpu
4 | #SBATCH --nodes=1
5 | #SBATCH --gres=gpu:A30:1
6 | #SBATCH --ntasks-per-node=1
7 | #SBATCH --cpus-per-task=20
8 | #SBATCH --mem-per-cpu=5G
9 | #SBATCH --time=48:00:00
10 | 
11 | # One node needs to be used as the "host" for the rendezvous
12 | # system used by torch. This just gets a list of the hostnames
13 | # used by the job and selects the first one.
14 | HOST_NODE_ADDR=$(scontrol show hostnames | head -n 1)
15 | NNODES=$(scontrol show hostnames | wc -l)
16 | 
17 | module load anaconda/2022.05
18 | . ~/.mycondaconf
19 | conda activate bubble-sciml
20 | 
21 | #DATASET=PB_SubCooled_0.1
22 | #DATASET=FB_Gravity_0.1
23 | 
24 | DATASET=PB_SubCooled
25 | #DATASET=PB_WallSuperHeat
26 | #DATASET=PB_Gravity
27 | #DATASET=FB_Gravity
28 | #DATASET=FB_InletVel
29 | # uncomment exactly one EXPERIMENT before submitting:
30 | #EXPERIMENT=temp_unet2d
31 | #EXPERIMENT=temp_unet_mod_attn
32 | #EXPERIMENT=temp_ufnet
33 | #EXPERIMENT=temp_fno
34 | #EXPERIMENT=temp_uno
35 | #EXPERIMENT=temp_ffno
36 | 
37 | # GFNO requires multi-gpu...
38 | # Do this last
39 | #EXPERIMENT=temp_gfno
40 | 
41 | #data_base_dir=/share/crsp/lab/amowli/share/simul_ts_0.1/ \
42 | 
43 | srun torchrun \
44 |     --nnodes $NNODES \
45 |     --nproc_per_node 1 \
46 |     --max_restarts 0 \
47 |     --rdzv_backend c10d \
48 |     --rdzv_id $SLURM_JOB_ID \
49 |     --rdzv_endpoint $HOST_NODE_ADDR \
50 |     --redirects 3 \
51 |     --tee 3 \
52 |     sciml/train.py \
53 |     data_base_dir=/share/crsp/lab/amowli/share/BubbleML2/ \
54 |     log_dir=/pub/afeeney/train_log_dir \
55 |     dataset=$DATASET \
56 |     experiment=$EXPERIMENT \
57 |     experiment.train.max_epochs=150
58 | 
-------------------------------------------------------------------------------- /submit/fb_vel.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A amowli_lab_gpu
3 | #SBATCH -p gpu
4 | #SBATCH --nodes=1
5 | #SBATCH --gres=gpu:A100:1
6 | #SBATCH --ntasks-per-node=1
7 | #SBATCH --cpus-per-task=20
8 | #SBATCH --mem-per-cpu=3GB
9 | #SBATCH --time=48:00:00
10 | 
11 | # One node needs to be used as the "host" for the rendezvous
12 | # system used by torch. This just gets a list of the hostnames
13 | # used by the job and selects the first one.
14 | HOST_NODE_ADDR=$(scontrol show hostnames | head -n 1)
15 | NNODES=$(scontrol show hostnames | wc -l)
16 | 
17 | module load anaconda/2022.05
18 | . ~/.mycondaconf
19 | conda activate bubble-sciml
20 | 
21 | #DATASET=PB_SubCooled_0.1
22 | #DATASET=FB_Gravity_0.1
23 | DATASET=FB_InletVel_0.1
24 | 
25 | #DATASET=PB_SubCooled
26 | #DATASET=PB_WallSuperHeat
27 | #DATASET=PB_Gravity
28 | #DATASET=FB_Gravity
29 | #DATASET=FB_InletVel
30 | 
31 | MODEL=fno
32 | #MODEL=uno
33 | #MODEL=ffno
34 | #MODEL=gfno
35 | #MODEL=unet_bench
36 | #MODEL=unet_arena
37 | #MODEL=ufnet
38 | 
39 | srun torchrun \
40 |     --nnodes $NNODES \
41 |     --nproc_per_node 1 \
42 |     --max_restarts 0 \
43 |     --rdzv_backend c10d \
44 |     --rdzv_id $SLURM_JOB_ID \
45 |     --rdzv_endpoint $HOST_NODE_ADDR \
46 |     --redirects 3 \
47 |     --tee 3 \
48 |     sciml/train.py \
49 |     data_base_dir=/share/crsp/lab/ai4ts/share/simul_ts_0.1/ \
50 |     log_dir=/pub/afeeney/train_log_dir \
51 |     dataset=$DATASET \
52 |     experiment=$MODEL/fb_vel \
53 |     experiment.train.max_epochs=1
-------------------------------------------------------------------------------- /submit/fno_overfit.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A amowli_lab_gpu
3 | #SBATCH -p gpu
4 | #SBATCH --nodes=1
5 | #SBATCH --gres=gpu:A100:1
6 | #SBATCH --ntasks-per-node=1
7 | #SBATCH --cpus-per-task=20
8 | #SBATCH --mem-per-cpu=5G
9 | #SBATCH --time=24:00:00
10 | 
11 | # One node needs to be used as the "host" for the rendezvous
12 | # system used by torch. This just gets a list of the hostnames
13 | # used by the job and selects the first one.
14 | HOST_NODE_ADDR=$(scontrol show hostnames | head -n 1)
15 | NNODES=$(scontrol show hostnames | wc -l)
16 | 
17 | module load anaconda/2022.05
18 | . ~/.mycondaconf
19 | conda activate bubble-sciml
20 | 
21 | python scripts/fno_overfit.py
22 | 
-------------------------------------------------------------------------------- /submit/pb_temp.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A amowli_lab_gpu
3 | #SBATCH -p gpu
4 | #SBATCH --nodes=1
5 | #SBATCH --gres=gpu:A30:1
6 | #SBATCH --ntasks-per-node=1
7 | #SBATCH --cpus-per-task=20
8 | #SBATCH --mem-per-cpu=5G
9 | #SBATCH --time=48:00:00
10 | 
11 | # One node needs to be used as the "host" for the rendezvous
12 | # system used by torch. This just gets a list of the hostnames
13 | # used by the job and selects the first one.
14 | HOST_NODE_ADDR=$(scontrol show hostnames | head -n 1)
15 | NNODES=$(scontrol show hostnames | wc -l)
16 | 
17 | module load anaconda/2022.05
18 | . ~/.mycondaconf
19 | conda activate bubble-sciml
20 | 
21 | #DATASET=PB_SubCooled_0.1
22 | #DATASET=FB_Gravity_0.1
23 | 
24 | DATASET=PB_SubCooled
25 | #DATASET=PB_WallSuperHeat
26 | #DATASET=PB_Gravity
27 | #DATASET=FB_Gravity
28 | #DATASET=FB_InletVel
29 | 
30 | #MODEL=fno
31 | #MODEL=uno
32 | #MODEL=ffno
33 | MODEL=gfno
34 | #MODEL=unet_bench
35 | #MODEL=unet_arena
36 | #MODEL=ufnet
37 | # NOTE: MODEL is currently unused; the experiment below is hard-coded to gfno_test/cosine
38 | srun torchrun \
39 |     --nnodes $NNODES \
40 |     --nproc_per_node 1 \
41 |     --max_restarts 0 \
42 |     --rdzv_backend c10d \
43 |     --rdzv_id $SLURM_JOB_ID \
44 |     --rdzv_endpoint $HOST_NODE_ADDR \
45 |     --redirects 3 \
46 |     --tee 3 \
47 |     sciml/train.py \
48 |     data_base_dir=/share/crsp/lab/amowli/share/BubbleML2/ \
49 |     log_dir=/pub/afeeney/train_log_dir \
50 |     dataset=$DATASET \
51 |     experiment=gfno_test/cosine \
52 |     experiment.train.max_epochs=1
53 | 
-------------------------------------------------------------------------------- /submit/pb_temp_0.1.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A amowli_lab_gpu
3 | #SBATCH -p gpu
4 | #SBATCH --nodes=1
5 | #SBATCH --gres=gpu:A30:1
6 | #SBATCH --ntasks-per-node=1
7 | #SBATCH --cpus-per-task=20
8 | #SBATCH --mem-per-cpu=5G
9 | #SBATCH --time=48:00:00
10 | 
11 | # One node needs to be used as the "host" for the rendezvous
12 | # system used by torch. This just gets a list of the hostnames
13 | # used by the job and selects the first one.
14 | HOST_NODE_ADDR=$(scontrol show hostnames | head -n 1)
15 | NNODES=$(scontrol show hostnames | wc -l)
16 | 
17 | module load anaconda/2022.05
18 | . ~/.mycondaconf
19 | conda activate bubble-sciml
20 | 
21 | DATASET=PB_SubCooled_0.1
22 | #DATASET=FB_Gravity_0.1
23 | 
24 | #DATASET=PB_SubCooled
25 | #DATASET=PB_WallSuperHeat
26 | #DATASET=PB_Gravity
27 | #DATASET=FB_Gravity
28 | #DATASET=FB_InletVel
29 | 
30 | #MODEL=fno
31 | #MODEL=uno
32 | #MODEL=ffno
33 | MODEL=gfno
34 | #MODEL=unet_bench
35 | #MODEL=unet_arena
36 | #MODEL=ufnet
37 | # NOTE: MODEL is currently unused; the experiment below is hard-coded to gfno_test/cosine
38 | srun torchrun \
39 |     --nnodes $NNODES \
40 |     --nproc_per_node 1 \
41 |     --max_restarts 0 \
42 |     --rdzv_backend c10d \
43 |     --rdzv_id $SLURM_JOB_ID \
44 |     --rdzv_endpoint $HOST_NODE_ADDR \
45 |     --redirects 3 \
46 |     --tee 3 \
47 |     sciml/train.py \
48 |     data_base_dir=/share/crsp/lab/ai4ts/share/simul_ts_0.1/ \
49 |     log_dir=/pub/afeeney/train_log_dir \
50 |     dataset=$DATASET \
51 |     experiment=gfno_test/cosine \
52 |     experiment.train.max_epochs=1
53 | 
-------------------------------------------------------------------------------- /submit/pb_vel.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A amowli_lab_gpu
3 | #SBATCH -p gpu
4 | #SBATCH --nodes=1
5 | #SBATCH --gres=gpu:A30:1
6 | #SBATCH --ntasks-per-node=1
7 | #SBATCH --cpus-per-task=20
8 | #SBATCH --time=48:00:00
9 | 
10 | # One node needs to be used as the "host" for the rendezvous
11 | # system used by torch. This just gets a list of the hostnames
12 | # used by the job and selects the first one.
13 | HOST_NODE_ADDR=$(scontrol show hostnames | head -n 1)
14 | NNODES=$(scontrol show hostnames | wc -l)
15 | 
16 | module load anaconda/2022.05
17 | . ~/.mycondaconf
18 | conda activate bubble-sciml
19 | 
20 | #DATASET=PB_SubCooled_0.1
21 | #DATASET=FB_Gravity_0.1
22 | DATASET=FB_InletVel_0.1
23 | 
24 | #DATASET=PB_SubCooled
25 | #DATASET=PB_WallSuperHeat
26 | #DATASET=PB_Gravity
27 | #DATASET=FB_Gravity
28 | #DATASET=FB_InletVel
29 | 
30 | MODEL=fno
31 | #MODEL=uno
32 | #MODEL=ffno
33 | #MODEL=gfno
34 | #MODEL=unet_bench
35 | #MODEL=unet_arena
36 | #MODEL=ufnet
37 | 
38 | srun torchrun \
39 |     --nnodes $NNODES \
40 |     --nproc_per_node 1 \
41 |     --max_restarts 0 \
42 |     --rdzv_backend c10d \
43 |     --rdzv_id $SLURM_JOB_ID \
44 |     --rdzv_endpoint $HOST_NODE_ADDR \
45 |     --redirects 3 \
46 |     --tee 3 \
47 |     sciml/train.py \
48 |     data_base_dir=/share/crsp/lab/ai4ts/share/simul_ts_0.1/ \
49 |     log_dir=/pub/afeeney/train_log_dir \
50 |     dataset=$DATASET \
51 |     experiment=$MODEL/pb_vel \
52 |     experiment.train.max_epochs=2
53 | 
-------------------------------------------------------------------------------- /video/README.md: --------------------------------------------------------------------------------
1 | # Dataset GIFs
2 | 
3 | This page shows sample videos for the different simulation types:
4 | 
5 | ## Pool Boiling SubCooled
6 | 
7 | In subcooled boiling, the bulk liquid is below the saturation temperature.
8 | As bubbles form and leave the heater surface, vapor trails are produced.
9 | These vapor trails are very difficult to capture accurately with
10 | machine learning models.
11 | 
12 | ![](subcooled.gif)
13 | 
14 | ## Pool Boiling Saturated
15 | 
16 | In saturated boiling, the bulk liquid is already at the saturation
17 | temperature, so no vapor trails form.
18 | 
19 | ![](saturated.gif)
20 | 
21 | ## Pool Boiling Subcooled, Velocity Field
22 | 
23 | This shows the velocity field for subcooled boiling, as predicted by
24 | the UNet-mod trained using the push-forward trick.
25 | 
26 | ![](vel.gif)
27 | 
28 | ## Flow Boiling Inlet Velocity
29 | 
30 | This illustrates how changing the inlet velocity affects
31 | flow boiling problems. With higher inlet velocities, bubbles are
32 | advected through the domain faster, so errors in machine
33 | learning predictions are more likely to be pushed out of frame.
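The GIFs in this section, like the others on this page, are assembled from
the per-frame PNGs written by the `scripts/viz_*.py` utilities (which call
`ffmpeg` to build a video). A minimal sketch of turning such frames into a
GIF, assuming `ffmpeg` is installed and `frames/` is a hypothetical directory
of rendered PNGs:

```bash
# glob the rendered frames in order and encode them as a GIF;
# the scale filter just keeps the file size reasonable
ffmpeg -y -framerate 25 -pattern_type glob -i "frames/*.png" \
       -vf "fps=25,scale=480:-1:flags=lanczos" output.gif
```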
34 | 35 | #### Inlet Velocity of 1.0 36 | ![](temp-1.0.gif) 37 | 38 | #### Inlet Velocity of 2.0 39 | ![](temp-2.0.gif) 40 | 41 | #### Inlet Velocity of 4.0 42 | ![](temp-4.0.gif) 43 | -------------------------------------------------------------------------------- /video/saturated.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/video/saturated.gif -------------------------------------------------------------------------------- /video/subcooled.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/video/subcooled.gif -------------------------------------------------------------------------------- /video/temp-1.0.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/video/temp-1.0.gif -------------------------------------------------------------------------------- /video/temp-2.0.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/video/temp-2.0.gif -------------------------------------------------------------------------------- /video/temp-4.0.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/video/temp-4.0.gif -------------------------------------------------------------------------------- /video/vel.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPCForge/BubbleML/dc6cca854162cb7f46c68c0f6e9818c9d114add4/video/vel.gif --------------------------------------------------------------------------------