├── .github
└── workflows
│ ├── python-package.yml
│ └── python-publish.yml
├── .gitignore
├── CHANGELOG.md
├── LICENSE
├── README.md
├── pyproject.toml
├── requirements.txt
├── src_matlab
└── magnet_challenge_loss_models.m
├── src_py
└── magnethub
│ ├── __init__.py
│ ├── loss.py
│ ├── models
│ ├── paderborn
│ │ ├── changelog.md
│ │ ├── cnn_3C90_experiment_1b4d8_model_f3915868_seed_0_fold_0.pt
│ │ ├── cnn_3C92_experiment_ea1fe_model_72510647_seed_0_fold_0.pt
│ │ ├── cnn_3C94_experiment_56441_model_55693612_seed_0_fold_0.pt
│ │ ├── cnn_3C95_experiment_ad6ec_model_046383a5_seed_0_fold_0.pt
│ │ ├── cnn_3E6_experiment_a7817_model_a1035e58_seed_0_fold_0.pt
│ │ ├── cnn_3F4_experiment_c234d_model_2d43b97d_seed_0_fold_0.pt
│ │ ├── cnn_77_experiment_268ae_model_5b7c92ed_seed_0_fold_0.pt
│ │ ├── cnn_78_experiment_e5297_model_77fbd758_seed_0_fold_0.pt
│ │ ├── cnn_79_experiment_45989_model_7227af72_seed_0_fold_0.pt
│ │ ├── cnn_A_experiment_c9cfe_model_d893c778_p.pt
│ │ ├── cnn_B_experiment_c9cfe_model_b6a920cc_p.pt
│ │ ├── cnn_C_experiment_c9cfe_model_c1ced7b6_p.pt
│ │ ├── cnn_D_experiment_c9cfe_model_11672810_p.pt
│ │ ├── cnn_E_experiment_c9cfe_model_5ae50f9e_p.pt
│ │ ├── cnn_ML95S_experiment_1c978_model_883a5d2f_seed_0_fold_0.pt
│ │ ├── cnn_N27_experiment_8b7f0_model_20954a2a_seed_0_fold_0.pt
│ │ ├── cnn_N30_experiment_5a78c_model_6bd86623_seed_0_fold_0.pt
│ │ ├── cnn_N49_experiment_27442_model_d6234a32_seed_0_fold_0.pt
│ │ ├── cnn_N87_experiment_985cb_model_d51b0f7e_seed_0_fold_0.pt
│ │ └── cnn_T37_experiment_a084a_model_fb31325e_seed_0_fold_0.pt
│ └── sydney
│ │ ├── 3C90.pt
│ │ ├── 3C92.pt
│ │ ├── 3C94.pt
│ │ ├── 3C95.pt
│ │ ├── 3E6.pt
│ │ ├── 3F4.pt
│ │ ├── 77.pt
│ │ ├── 78.pt
│ │ ├── 79.pt
│ │ ├── ML95S.pt
│ │ ├── N27.pt
│ │ ├── N30.pt
│ │ ├── N49.pt
│ │ ├── N87.pt
│ │ └── T37.pt
│ ├── paderborn.py
│ └── sydney.py
└── tests
├── debug.py
├── test_files
└── unit_test_data_ploss_at_450kWpm3.csv
├── test_paderborn.py
└── test_sydney.py
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python

name: Python package

on:
  push:
    branches: [ "main" ]
    paths: ['src_py/magnethub/**', 'tests/**']
  pull_request:
    branches: [ "main" ]
    paths: ['src_py/magnethub/**', 'tests/**']

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.11"]

    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install ruff pytest
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
          # do not install from PyPi but rather directly from checked out repo
          python -m pip install .
      - name: Lint with ruff
        run: |
          # Fail the build on Python syntax errors, undefined names, and style violations.
          # Note: do NOT pass --fix in CI (fixed files are discarded and violations are
          # hidden) and do NOT pass --show-files (it only lists files and skips linting).
          ruff check src_py/magnethub/
      - name: Test with pytest
        run: |
          pytest
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Upload Python Package

on:
  release:
    types: [published]
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

permissions:
  contents: read

jobs:
  deploy:

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v3
        with:
          python-version: '3.10'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          # 'build' performs the build in an isolated environment and installs the
          # build-system requires (hatchling, hatch-requirements-txt) declared in
          # pyproject.toml by itself, so only 'build' needs to be installed here.
          pip install build
      - name: Build package
        run: python -m build
      - name: Publish package
        # pinned to a commit SHA of the third-party action for supply-chain safety
        uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
        with:
          user: __token__
          password: ${{ secrets.PYPI_API_TOKEN }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7 |
8 | ## [Unreleased]
9 |
10 | ## [0.0.11] - 2024-08-27
11 | ### Fixed
12 | - Tag bug, PyPI upload was not working
13 |
14 | ## [0.0.10] - 2024-08-04
15 | We fixed a bug with the Sydney model, where only the last batch would be returned during batchful evaluation, instead of the full data set.
16 |
17 | ## [0.0.9] - 2024-05-24
18 | The API was changed to accept and yield sequences of arbitrary length.
19 | It is assumed that a provided sequence describes a full period.
20 | Internally, the different team models work with fixed sequence lengths, so the user input is 1d-interpolated linearly accordingly.
21 |
22 | ## [0.0.8] - 2024-05-23
23 | Bump to new subversion.
24 |
25 | ## [0.0.7] - 2024-05-23
26 | ### Fixed
27 | Small bug fixes and allow pandas >= 2 instead of >= 2.0.3, in order to avoid requirements conflict in magnet-engine
28 |
29 | ## [0.0.6] - 2024-04-30
30 |
31 | ## [0.0.5] - 2024-04-13
32 | ### Added
33 | - Sydney model
34 |
35 | ## [0.0.4] - 2024-03-30
36 | ### Changed
37 | - Change python import package name to be magnethub to avoid underscores
38 |
39 | ## [0.0.3] - 2024-03-28
40 | ### Changed
41 | - Change pip installation name from mag_net_hub to mag-net-hub
42 |
43 | ## [0.0.2] - 2024-03-27
44 | ### Fixed
45 | - Missing requirements and missing models in pip package
46 |
47 | ## [0.0.1] - 2024-03-26
48 | ### Added
49 | - Paderborn Model
50 |
51 | [unreleased]: https://github.com/upb-lea/mag-net-hub/compare/0.0.11...HEAD
52 | [0.0.11]: https://github.com/upb-lea/mag-net-hub/compare/v0.0.10...0.0.11
53 | [0.0.10]: https://github.com/upb-lea/mag-net-hub/compare/0.0.9...v0.0.10
54 | [0.0.9]: https://github.com/upb-lea/mag-net-hub/compare/0.0.8...0.0.9
55 | [0.0.8]: https://github.com/upb-lea/mag-net-hub/compare/0.0.7...0.0.8
56 | [0.0.7]: https://github.com/upb-lea/mag-net-hub/compare/0.0.6...0.0.7
57 | [0.0.6]: https://github.com/upb-lea/mag-net-hub/compare/0.0.5...0.0.6
58 | [0.0.5]: https://github.com/upb-lea/mag-net-hub/compare/0.0.4...0.0.5
59 | [0.0.4]: https://github.com/upb-lea/mag-net-hub/compare/0.0.3...0.0.4
60 | [0.0.3]: https://github.com/upb-lea/mag-net-hub/compare/0.0.2...0.0.3
61 | [0.0.2]: https://github.com/upb-lea/mag-net-hub/compare/0.0.1...0.0.2
62 | [0.0.1]: https://github.com/upb-lea/mag-net-hub/releases/tag/0.0.1
63 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Paderborn University - LEA
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
MagNet Toolkit
4 | Certified Models of the MagNet Challenge
5 |
6 |
7 | 
8 |
9 | This repository acts as a hub for selected power loss models that were elaborated by different competitors during the [MagNet Challenge 2023](https://github.com/minjiechen/magnetchallenge).
10 | Feel free to use these loss models for your power converter design as a complement to your datasheet.
11 |
12 | The loss models are designed such that you can request a certain frequency, temperature, material and $B$ wave (sequence), in order to be provided with a scalar power loss estimate and a corresponding $H$ wave estimate.
13 |
14 | __Disclaimer__: Only steady-state and no varying DC-Bias is supported yet.
15 | Moreover, training data stemmed from measurements on toroid-shaped ferrites that had a fixed size.
16 |
17 | Supported materials:
18 | - ML95S
19 | - T37
20 | - 3C90
21 | - 3C92
22 | - 3C94
23 | - 3C95
24 | - 3E6
25 | - 3F4
26 | - 77
27 | - 78
28 | - 79
29 | - N27
30 | - N30
31 | - N49
32 | - N87
33 |
34 |
35 | ## Installation
36 |
37 | ### Python
38 | We strongly recommend Python __3.10__.
39 | Higher versions may also work.
40 |
41 | Then install through pip:
42 |
43 | ```
44 | pip install mag-net-hub
45 | ```
46 |
47 | or, alternatively, clone this repo and execute
48 |
49 | ```
50 | cd mag-net-hub
51 | pip install .
52 | ```
53 |
54 | ## Usage
55 | Models are provided as executable code with readily trained coefficients.
56 | Hence, no training is conducted in this project.
57 |
58 | ### Python
59 | ```py
60 | import numpy as np
61 | import magnethub as mh
62 |
63 | # instantiate material-specific model
64 | mdl = mh.loss.LossModel(material="3C92", team="paderborn")
65 |
66 | # dummy B field data (one trajectory with 1024 samples)
67 | b_wave = np.random.randn(1024)* 200e-3 # in T
68 | freq = 124062 # Hz
69 | temp = 58 # °C
70 |
71 | # get power loss in W/m³ and estimated H wave in A/m
72 | p, h = mdl(b_wave, freq, temp)
73 |
74 | # batch execution for 100 trajectories
75 | b_waves = np.random.randn(100, 1024)* 200e-3 # in T
76 | freqs = np.random.randint(100e3, 750e3, size=100)
77 | temps = np.random.randint(20, 80, size=100)
78 | p, h = mdl(b_waves, freqs, temps)
79 |
80 | ```
81 |
82 |
83 | ## Contributing
84 | Whether you want to contribute your submission to the MagNet Challenge, or you are a single contributor who wants to add an awesome model to this hub -- any contribution is welcome.
85 |
86 | Open a pull request to directly suggest small improvements to the infrastructure or to add your model (with performance statistics preferred).
87 | For larger suggestions, please first open an issue or go to the discussion section to discuss your ideas.
88 |
89 | See the below folder structure overview with annotations on how to contribute a model.
90 |
91 | ```
92 | .
93 | ├── src_py
94 | │ └── magnethub
95 | │ ├── __init__.py
96 | │ ├── loss.py
97 | │ ├── models
98 | │ │ ├── paderborn
99 | │ │ │ ├── changelog.md
100 | │ │ │ ├── cnn_3C90_experiment_1b4d8_model_f3915868_seed_0_fold_0.pt
101 | │ │ │ ├── cnn_3C92_experiment_ea1fe_model_72510647_seed_0_fold_0.pt
102 | | | | └── ...
103 | │ │ ├── sydney
104 | │ │ │ └── ...
105 | │ │ └──
106 | │ │ │ └──
107 | │ ├── paderborn.py
108 | | ├── sydney.py
109 | | ├──
110 |
111 | ```
112 |
113 | Any number of models can be incorporated easily according to this code structure policy.
114 | If you have added model coefficients and execution logic via code, it only requires to be hooked in
115 | `loss.py` and you are ready to fire this pull request (PR).
116 |
117 | If it is possible, please also consider adding tests for your model logic under `tests/`, writing comprehensive docstrings in your code with some comments, and discuss the performance of your model in your PR.
118 |
119 | Other open ToDos are:
120 | - Matlab implementation
121 | - improve documentation
122 |
123 | Thank you!
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "mag-net-hub"
3 | version = "0.0.11"
4 | authors = [
5 | { name = "Wilhelm Kirchgässner" },
6 | ]
7 | description = "MagNet Toolkit - Certified Models of the MagNet Challenge"
8 | readme = "README.md"
9 | requires-python = "~=3.10"
10 | classifiers = [
11 | "Programming Language :: Python :: 3",
12 | "License :: OSI Approved :: MIT License",
13 | "Operating System :: OS Independent",
14 | ]
15 | dynamic = ["dependencies"] # commented due to packaging issues: "optional-dependencies"
16 |
17 | [tool.setuptools.dynamic]
18 | dependencies = { file = ["requirements.txt"] }
19 | optional-dependencies = { dev = { file = ["requirements-dev.txt"] } }
20 |
21 | [project.urls]
22 | Homepage = "https://github.com/upb-lea/mag-net-hub"
23 | Issues = "https://github.com/upb-lea/mag-net-hub/issues"
24 |
25 | [build-system]
26 | requires = ["hatchling", "hatch-requirements-txt"]
27 | build-backend = "hatchling.build"
28 |
29 | [tool.hatch.metadata.hooks.requirements_txt]
30 | files = ["requirements.txt"]
31 |
32 | [tool.hatch.build.targets.wheel]
33 | packages = ["src_py/magnethub"]
34 |
35 | [tool.hatch.build.targets.sdist]
36 | include = [
37 | "src_py/magnethub/*.py",
38 | "src_py/magnethub/models/paderborn/*.pt",
39 | "src_py/magnethub/models/sydney/*.pt",
40 | "tests",
41 | "requirements.txt"
42 | ]
43 |
44 |
45 |
46 | [tool.ruff]
47 | exclude = [
48 | ".eggs",
49 | ".git",
50 | ".venv",
51 | "venv"]
52 |
53 | line-length = 120
54 | indent-width = 4
55 |
56 | target-version = "py310"
57 |
58 | [tool.ruff.lint]
59 | select = ["E4", "E7", "E9", "F", "B", "D", "D417"]
60 | # extend-select = ["D417"] deactivated by default in case of pep257 codestyle.
61 | # see also: https://docs.astral.sh/ruff/rules/undocumented-param/
62 | ignore = ["B008", "D107", "D203", "D212", "D213", "D402", "D413", "D415", "D416", "E722", "E731", "F403", "F405", "F841",]
63 | fixable = ["ALL"]
64 | unfixable = []
65 | # ignore list in docstring according to numpy codestyles for Dxxx.
66 | # http://www.pydocstyle.org/en/5.0.1/error_codes.html#default-conventions
67 |
68 | [tool.ruff.lint.pydocstyle]
69 | convention = "pep257"
70 |
71 | [tool.ruff.format]
72 | quote-style = "double"
73 | indent-style = "space"
74 | skip-magic-trailing-comma = false
75 | line-ending = "auto"
76 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Python 3.10 !
2 | tqdm
3 | pandas>=2
4 | torch>=2.0.1
5 | torchinfo>=1.8.0
6 | joblib>=1.3.2
7 | scipy
8 |
--------------------------------------------------------------------------------
/src_matlab/magnet_challenge_loss_models.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_matlab/magnet_challenge_loss_models.m
--------------------------------------------------------------------------------
/src_py/magnethub/__init__.py:
--------------------------------------------------------------------------------
1 | """Init file for python package."""
2 | import magnethub.loss
3 | import magnethub.paderborn
4 | import magnethub.sydney
--------------------------------------------------------------------------------
/src_py/magnethub/loss.py:
--------------------------------------------------------------------------------
"""The general loss model.

The LossModel class wraps all other teams' models.
It sanitizes user arguments that would be boilerplate code for any team's code.
"""

from pathlib import Path
import magnethub.paderborn as pb
import magnethub.sydney as sy
import numpy as np

# NOTE(review): LossModel.__call__ shadows this name with the team model's own
# expected_seq_len, so this module-level constant is informational only.
L = 1024  # expected sequence length


# Upper-case material labels supported by at least one team model.
MATERIALS = [
    "ML95S",
    "T37",
    "3C90",
    "3C92",
    "3C94",
    "3C95",
    "3E6",
    "3F4",
    "77",
    "78",
    "79",
    "N27",
    "N30",
    "N49",
    "N87",
]

# Root directory of the model coefficient files shipped with this package.
MODEL_ROOT = Path(__file__).parent / "models"

# Maps team name -> {material name: model file name}, as declared by each team module.
TEAMS = {
    "paderborn": pb.MAT2FILENAME,
    "sydney": sy.MAT2FILENAME,
}
39 |
40 |
41 | class LossModel:
42 | """LossModel definition."""
43 |
44 | def __init__(self, material="3C92", team="paderborn"):
45 | self.material = material.upper()
46 | self.team = team.lower()
47 |
48 | # value checks
49 | if self.material not in MATERIALS:
50 | raise ValueError(f"Chosen material '{self.material}' not supported. Must be either {', '.join(MATERIALS)}")
51 | if self.team not in list(TEAMS.keys()):
52 | raise ValueError(f"Chosen team '{self.team}' not supported. Must be in {', '.join(TEAMS.keys())}")
53 |
54 | model_file_name = TEAMS[self.team].get(self.material, None)
55 | if model_file_name is None:
56 | raise ValueError(f"Team {self.team.capitalize()} does not offer a model for material {self.material}")
57 | model_path = MODEL_ROOT / self.team / model_file_name
58 |
59 | # load corresponding model
60 | match self.team:
61 | case "paderborn":
62 | self.mdl = pb.PaderbornModel(model_path, self.material)
63 | case "sydney":
64 | self.mdl = sy.SydneyModel(model_path, self.material)
65 |
66 | def __call__(self, b_field, frequency, temperature):
67 | """Evaluate trajectory and estimate power loss.
68 |
69 | Args
70 | ----
71 | b_field: (X, Y) array_like
72 | The magnetic flux density array(s) in T. First dimension X describes the batch size, the second Y
73 | the time length of a full period
74 | frequency: scalar or 1D array-like
75 | The frequency operation point(s) in Hz
76 | temperature: scalar or 1D array-like
77 | The temperature operation point(s) in °C
78 |
79 | Return
80 | ------
81 | p, h: (X,) np.array, (X, Y) np.ndarray
82 | The estimated power loss (p) in W/m³ and the estimated magnetic field strength (h) in A/m.
83 | """
84 | if b_field.ndim == 1:
85 | b_field = b_field.reshape(1, -1)
86 | original_seq_len = b_field.shape[-1]
87 |
88 | L = self.mdl.expected_seq_len
89 | if b_field.shape[-1] != L:
90 | actual_len = b_field.shape[-1]
91 | query_points = np.arange(L)
92 | support_points = np.arange(actual_len) * L / actual_len
93 | # TODO Does a vectorized form of 1d interpolation exist?
94 | b_field = np.row_stack(
95 | [np.interp(query_points, support_points, b_field[i]) for i in range(b_field.shape[0])]
96 | )
97 |
98 | p, h_seq = self.mdl(b_field, frequency, temperature)
99 |
100 | if h_seq is not None:
101 | assert (
102 | h_seq.ndim == 2
103 | ), f"H sequence has ndim={h_seq.ndim}, but 2 were expected with (#periods, #samples-per-period)"
104 | # may interpolate to original sample size if h_seq too short or too long
105 | if h_seq.shape[-1] != original_seq_len:
106 | actual_len = h_seq.shape[-1]
107 | query_points = np.arange(original_seq_len)
108 | support_points = np.arange(actual_len) * original_seq_len / actual_len
109 | h_seq = np.row_stack([np.interp(query_points, support_points, h_seq[i]) for i in range(h_seq.shape[0])])
110 | return p, h_seq
111 |
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/changelog.md:
--------------------------------------------------------------------------------
1 | # Changelog of models
2 |
3 | ## 2024-03-17
4 | - cnn_3C92_experiment_ea1fe_model_72510647_seed_0_fold_0.pt
5 | - cnn_3C95_experiment_ad6ec_model_046383a5_seed_0_fold_0.pt
6 | - cnn_3E6_experiment_a7817_model_a1035e58_seed_0_fold_0.pt
7 | - cnn_3F4_experiment_c234d_model_2d43b97d_seed_0_fold_0.pt
8 | - cnn_77_experiment_268ae_model_5b7c92ed_seed_0_fold_0.pt
9 | - cnn_78_experiment_e5297_model_77fbd758_seed_0_fold_0.pt
10 | - cnn_79_experiment_45989_model_7227af72_seed_0_fold_0.pt
11 | - cnn_ML95S_experiment_1c978_model_883a5d2f_seed_0_fold_0.pt
12 | - cnn_N27_experiment_8b7f0_model_20954a2a_seed_0_fold_0.pt
13 | - cnn_N30_experiment_5a78c_model_6bd86623_seed_0_fold_0.pt
14 | - cnn_N49_experiment_27442_model_d6234a32_seed_0_fold_0.pt
15 | - cnn_T37_experiment_a084a_model_fb31325e_seed_0_fold_0.pt
16 |
17 | Models are trained for 5k epochs on all available data provided by the Princeton University.
18 | This includes the held-out test data for all materials.
19 | Topology and other optimization parameters remain the same as in the winning strategy.
20 |
21 | ## 2024-03-10
22 | - cnn_A_experiment_c9cfe_model_d893c778_p.pt
23 | - cnn_B_experiment_c9cfe_model_b6a920cc_p.pt
24 | - cnn_C_experiment_c9cfe_model_c1ced7b6_p.pt
25 | - cnn_D_experiment_c9cfe_model_11672810_p.pt
26 | - cnn_E_experiment_c9cfe_model_5ae50f9e_p.pt
27 |
28 | These are the final submitted models from the Paderborn University team to the MagNet Challenge 2023.
29 | They denote the winning models of the challenge (performance criteria).
30 | Trained on the official training set with 10k epochs each.
31 |
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_3C90_experiment_1b4d8_model_f3915868_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_3C90_experiment_1b4d8_model_f3915868_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_3C92_experiment_ea1fe_model_72510647_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_3C92_experiment_ea1fe_model_72510647_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_3C94_experiment_56441_model_55693612_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_3C94_experiment_56441_model_55693612_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_3C95_experiment_ad6ec_model_046383a5_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_3C95_experiment_ad6ec_model_046383a5_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_3E6_experiment_a7817_model_a1035e58_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_3E6_experiment_a7817_model_a1035e58_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_3F4_experiment_c234d_model_2d43b97d_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_3F4_experiment_c234d_model_2d43b97d_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_77_experiment_268ae_model_5b7c92ed_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_77_experiment_268ae_model_5b7c92ed_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_78_experiment_e5297_model_77fbd758_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_78_experiment_e5297_model_77fbd758_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_79_experiment_45989_model_7227af72_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_79_experiment_45989_model_7227af72_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_A_experiment_c9cfe_model_d893c778_p.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_A_experiment_c9cfe_model_d893c778_p.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_B_experiment_c9cfe_model_b6a920cc_p.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_B_experiment_c9cfe_model_b6a920cc_p.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_C_experiment_c9cfe_model_c1ced7b6_p.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_C_experiment_c9cfe_model_c1ced7b6_p.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_D_experiment_c9cfe_model_11672810_p.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_D_experiment_c9cfe_model_11672810_p.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_E_experiment_c9cfe_model_5ae50f9e_p.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_E_experiment_c9cfe_model_5ae50f9e_p.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_ML95S_experiment_1c978_model_883a5d2f_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_ML95S_experiment_1c978_model_883a5d2f_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_N27_experiment_8b7f0_model_20954a2a_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_N27_experiment_8b7f0_model_20954a2a_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_N30_experiment_5a78c_model_6bd86623_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_N30_experiment_5a78c_model_6bd86623_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_N49_experiment_27442_model_d6234a32_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_N49_experiment_27442_model_d6234a32_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_N87_experiment_985cb_model_d51b0f7e_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_N87_experiment_985cb_model_d51b0f7e_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/paderborn/cnn_T37_experiment_a084a_model_fb31325e_seed_0_fold_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/paderborn/cnn_T37_experiment_a084a_model_fb31325e_seed_0_fold_0.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/3C90.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/3C90.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/3C92.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/3C92.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/3C94.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/3C94.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/3C95.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/3C95.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/3E6.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/3E6.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/3F4.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/3F4.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/77.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/77.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/78.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/78.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/79.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/79.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/ML95S.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/ML95S.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/N27.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/N27.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/N30.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/N30.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/N49.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/N49.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/N87.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/N87.pt
--------------------------------------------------------------------------------
/src_py/magnethub/models/sydney/T37.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/upb-lea/mag-net-hub/30dac0244bb7c6b47704e661b01b540346a7cd39/src_py/magnethub/models/sydney/T37.pt
--------------------------------------------------------------------------------
/src_py/magnethub/paderborn.py:
--------------------------------------------------------------------------------
1 | """
2 | File contains the model according to the Paderborn University approach for the magnet challenge.
3 |
4 | Source: https://github.com/upb-lea/hardcore-magnet-challenge
5 | """
6 |
7 | import numpy as np
8 | import pandas as pd
9 | import torch
10 |
11 |
L = 1024  # expected sequence length
ALL_B_COLS = [f"B_t_{k}" for k in range(L)]  # column names of the B-field time series
ALL_H_COLS = [f"H_t_{k}" for k in range(L)]  # column names of the H-field time series
FREQ_SCALE = 150_000.0  # in Hz
16 |
17 | # material constants
18 | MAT_CONST_B_MAX = {
19 | "3C90": 0.282254066096809,
20 | "3C94": 0.281179823717941,
21 | "3E6": 0.199842551960829,
22 | "3F4": 0.312954906128548,
23 | "77": 0.315644038162322,
24 | "78": 0.3166405692215,
25 | "3C92": 0.319193837507623,
26 | "T37": 0.253934182092085,
27 | "3C95": 0.322678797082694,
28 | "79": 0.314715273611617,
29 | "ML95S": 0.330102949741973,
30 | "N27": 0.317335585296054,
31 | "N30": 0.201167700159802,
32 | "N49": 0.317828937072173,
33 | "N87": 0.280909134946228,
34 | } # in T
35 | MAT_CONST_H_MAX = {
36 | "3C90": 84.7148502254261,
37 | "3C94": 64.8575649838852,
38 | "3E6": 74.1579701817075,
39 | "3F4": 150,
40 | "77": 86.5681744566843,
41 | "78": 87.5896894086919,
42 | "3C92": 150,
43 | "T37": 87.7490689795367,
44 | "3C95": 72.0625845199264,
45 | "79": 150,
46 | "ML95S": 150,
47 | "N27": 119.039616554254,
48 | "N30": 116.951204964406,
49 | "N49": 150,
50 | "N87": 100.674197407678,
51 | } # in A/m
52 |
53 | # Normalization constants for misc. input features (absolute maximum over entire data set)
54 | NORM_DENOM = {
55 | "3C90": {
56 | "b_peak2peak": 0.562775,
57 | "log_peak2peak": 3.854492,
58 | "mean_abs_dbdt": 0.001098,
59 | "log_mean_abs_dbdt": 10.097290,
60 | "sample_time": 0.000020,
61 | "b_deriv": 9888.829113924028,
62 | "b_deriv_sq": 677552639.6192261,
63 | },
64 | "3C92": {
65 | "b_peak2peak": 0.638772,
66 | "log_peak2peak": 3.952986,
67 | "mean_abs_dbdt": 0.001246,
68 | "log_mean_abs_dbdt": 10.191507,
69 | "sample_time": 0.000020,
70 | "b_deriv": 8367.455279302658,
71 | "b_deriv_sq": 352400581.9367888,
72 | },
73 | "3C94": {
74 | "b_peak2peak": 0.561696288415998,
75 | "log_peak2peak": 3.8527982020339397,
76 | "mean_abs_dbdt": 0.001094609016982872,
77 | "log_mean_abs_dbdt": 10.09547460054903,
78 | "sample_time": 2.002002002002002e-05,
79 | "b_deriv": 9369.848364186466,
80 | "b_deriv_sq": 722100525.0000031,
81 | },
82 | "3C95": {
83 | "b_peak2peak": 0.643972929264389,
84 | "log_peak2peak": 3.951775170875342,
85 | "mean_abs_dbdt": 0.001255971974873893,
86 | "log_mean_abs_dbdt": 10.19012406875038,
87 | "sample_time": 2.000400080016003e-05,
88 | "b_deriv": 8434.886044920588,
89 | "b_deriv_sq": 394837993.91482246,
90 | },
91 | "3E6": {
92 | "b_peak2peak": 0.39860703974419,
93 | "log_peak2peak": 3.9943679877190745,
94 | "mean_abs_dbdt": 0.0007754383944842351,
95 | "log_mean_abs_dbdt": 10.237231774124638,
96 | "sample_time": 2.002002002002002e-05,
97 | "b_deriv": 10961.711420802148,
98 | "b_deriv_sq": 594537528.5676531,
99 | },
100 | "3F4": {
101 | "b_peak2peak": 0.62579,
102 | "log_peak2peak": 3.966479191224205,
103 | "mean_abs_dbdt": 0.0012214485516744327,
104 | "log_mean_abs_dbdt": 10.204883300750092,
105 | "sample_time": 2.002002002002002e-05,
106 | "b_deriv": 11289.048646402262,
107 | "b_deriv_sq": 767625384.1351097,
108 | },
109 | "77": {
110 | "b_peak2peak": 0.62941,
111 | "log_peak2peak": 3.969122597468719,
112 | "mean_abs_dbdt": 0.001227556207233634,
113 | "log_mean_abs_dbdt": 10.207264569238735,
114 | "sample_time": 2.002002002002002e-05,
115 | "b_deriv": 10512.189743589744,
116 | "b_deriv_sq": 705767456.3398341,
117 | },
118 | "78": {
119 | "b_peak2peak": 0.6315575499770669,
120 | "log_peak2peak": 3.9789526676175275,
121 | "mean_abs_dbdt": 0.001231771095979432,
122 | "log_mean_abs_dbdt": 10.217365718778147,
123 | "sample_time": 2.002002002002002e-05,
124 | "b_deriv": 10374.080553295375,
125 | "b_deriv_sq": 688850200.676364,
126 | },
127 | "79": {
128 | "b_peak2peak": 0.648230254830443,
129 | "log_peak2peak": 3.9557162612448136,
130 | "mean_abs_dbdt": 0.0012638156025685287,
131 | "log_mean_abs_dbdt": 10.194239618633777,
132 | "sample_time": 2.00160128102482e-05,
133 | "b_deriv": 8233.983883950554,
134 | "b_deriv_sq": 495462859.8505281,
135 | },
136 | "T37": {
137 | "b_peak2peak": 0.509931827049285,
138 | "log_peak2peak": 3.9681646195793543,
139 | "mean_abs_dbdt": 0.0009933972666621286,
140 | "log_mean_abs_dbdt": 10.207037964928544,
141 | "sample_time": 2.00160128102482e-05,
142 | "b_deriv": 10816.309729628758,
143 | "b_deriv_sq": 757546320.923036,
144 | },
145 | "N27": {
146 | "b_peak2peak": 0.6318699999999999,
147 | "log_peak2peak": 3.96936864553962,
148 | "mean_abs_dbdt": 0.0012323949169110505,
149 | "log_mean_abs_dbdt": 10.207788696606462,
150 | "sample_time": 2.002002002002002e-05,
151 | "b_deriv": 10348.1536885246,
152 | "b_deriv_sq": 655163308.8104846,
153 | },
154 | "N30": {
155 | "b_peak2peak": 0.40126,
156 | "log_peak2peak": 3.9675357153584048,
157 | "mean_abs_dbdt": 0.0007812218963831858,
158 | "log_mean_abs_dbdt": 10.20594094230823,
159 | "sample_time": 2.002002002002002e-05,
160 | "b_deriv": 10772.915082382753,
161 | "b_deriv_sq": 640974360.0000046,
162 | },
163 | "N49": {
164 | "b_peak2peak": 0.6349207696798069,
165 | "log_peak2peak": 3.729801733358157,
166 | "mean_abs_dbdt": 0.0012362016813373098,
167 | "log_mean_abs_dbdt": 9.972437816965455,
168 | "sample_time": 2.002002002002002e-05,
169 | "b_deriv": 10407.538775510218,
170 | "b_deriv_sq": 742250594.1211493,
171 | },
172 | "N87": {
173 | "b_peak2peak": 0.561429326791338,
174 | "log_peak2peak": 3.8619698511811014,
175 | "mean_abs_dbdt": 0.001094995270801449,
176 | "log_mean_abs_dbdt": 10.104824015905193,
177 | "sample_time": 2.002002002002002e-05,
178 | "b_deriv": 10006.669716376984,
179 | "b_deriv_sq": 616830982.1245888,
180 | },
181 | "ML95S": {
182 | "b_peak2peak": 0.660470219971137,
183 | "log_peak2peak": 3.684324864126149,
184 | "mean_abs_dbdt": 0.0012853467115523114,
185 | "log_mean_abs_dbdt": 9.922533485496238,
186 | "sample_time": 1.997602876548142e-05,
187 | "b_deriv": 8000.255232581114,
188 | "b_deriv_sq": 434328441.4934993,
189 | },
190 | }
191 |
192 | MAT2FILENAME = {
193 | "3C90": "cnn_3C90_experiment_1b4d8_model_f3915868_seed_0_fold_0.pt",
194 | "3C92": "cnn_3C92_experiment_ea1fe_model_72510647_seed_0_fold_0.pt",
195 | "3C94": "cnn_3C94_experiment_56441_model_55693612_seed_0_fold_0.pt",
196 | "3C95": "cnn_3C95_experiment_ad6ec_model_046383a5_seed_0_fold_0.pt",
197 | "3E6": "cnn_3E6_experiment_a7817_model_a1035e58_seed_0_fold_0.pt",
198 | "3F4": "cnn_3F4_experiment_c234d_model_2d43b97d_seed_0_fold_0.pt",
199 | "77": "cnn_77_experiment_268ae_model_5b7c92ed_seed_0_fold_0.pt",
200 | "78": "cnn_78_experiment_e5297_model_77fbd758_seed_0_fold_0.pt",
201 | "79": "cnn_79_experiment_45989_model_7227af72_seed_0_fold_0.pt",
202 | "T37": "cnn_T37_experiment_a084a_model_fb31325e_seed_0_fold_0.pt",
203 | "N27": "cnn_N27_experiment_8b7f0_model_20954a2a_seed_0_fold_0.pt",
204 | "N30": "cnn_N30_experiment_5a78c_model_6bd86623_seed_0_fold_0.pt",
205 | "N49": "cnn_N49_experiment_27442_model_d6234a32_seed_0_fold_0.pt",
206 | "N87": "cnn_N87_experiment_985cb_model_d51b0f7e_seed_0_fold_0.pt",
207 | "ML95S": "cnn_ML95S_experiment_1c978_model_883a5d2f_seed_0_fold_0.pt",
208 | }
209 |
210 |
def form_factor(x):
    """Return the form factor of each row of ``x``.

    definition: kf = rms(x) / mean(abs(x))
    for ideal sine: np.pi/(2*np.sqrt(2))
    """
    rms = np.sqrt(np.mean(np.square(x), axis=1))
    rectified_mean = np.mean(np.abs(x), axis=1)
    return rms / rectified_mean
219 |
220 |
def crest_factor(x):
    """Return the crest factor of each row of ``x``.

    definition: kc = max(abs(x)) / rms(x)
    for ideal sine: np.sqrt(2)

    Note: the previous docstring stated the inverse ratio (rms/max);
    the implemented — and conventional — definition is peak over rms.
    """
    return np.max(np.abs(x), axis=1) / np.sqrt(np.mean(x**2, axis=1))
229 |
230 |
def bool_filter_sine(b, rel_kf=0.01, rel_kc=0.01, rel_0_dev=0.1):
    """
    Bool classification for sinusoidal waveforms, used by function get_waveform_est.

    b: input flux density (nxm)-array with n m-dimensional flux density waveforms
    rel_kf: (allowed) relative deviation of the form factor for sine classification
    rel_kc: (allowed) relative deviation of the crest factor for sine classification
    rel_0_dev: (allowed) relative deviation of the first value from zero (normalized on the peak value)
    """
    kf_ref = np.pi / (2 * np.sqrt(2))  # form factor of an ideal sine
    kc_ref = np.sqrt(2)  # crest factor of an ideal sine

    kf = form_factor(b)
    kc = crest_factor(b)
    peak = np.max(b, axis=1)

    # form-factor based checking (within rel_kf of the ideal sine value)
    kf_ok = (kf > kf_ref * (1 - rel_kf)) & (kf < kf_ref * (1 + rel_kf))
    # crest-factor based checking (within rel_kc of the ideal sine value)
    kc_ok = (kc > kc_ref * (1 - rel_kc)) & (kc < kc_ref * (1 + rel_kc))
    # starting-value based checking (first sample close to zero crossing)
    start_ok = (b[:, 0] > -peak * rel_0_dev) & (b[:, 0] < peak * rel_0_dev)

    return kf_ok & kc_ok & start_ok
261 |
262 |
def bool_filter_triangular(b, rel_kf=0.005, rel_kc=0.005):
    """Bool classification for triangular waveforms, used by function get_waveform_est."""
    kf_ref = 2 / np.sqrt(3)  # form factor of an ideal triangle
    kc_ref = np.sqrt(3)  # crest factor of an ideal triangle

    kf = form_factor(b)
    kc = crest_factor(b)

    # both shape factors must sit inside the relative tolerance bands
    kf_ok = (kf > kf_ref * (1 - rel_kf)) & (kf < kf_ref * (1 + rel_kf))
    kc_ok = (kc > kc_ref * (1 - rel_kc)) & (kc < kc_ref * (1 + rel_kc))

    return kf_ok & kc_ok
284 |
285 |
def get_waveform_est(full_b):
    """
    Classify the waveforms into [other, square, triangular, sine].

    From Till's tp-1.4.7.3.1 NB, return waveform class.
    Postprocessing from wk-1.1-EDA NB.

    Return class estimate 'k', where [0, 1, 2, 3] corresponds to
    [other, square, triangular, sine].

    NOTE: assignment order matters — the later triangular and sine checks
    override an earlier square label for profiles matching several heuristics.
    """
    # labels init all with 'other'
    k = np.zeros(full_b.shape[0], dtype=int)

    # square: flat plateau in samples 200..500 (pairwise differences below
    # 5 % of the per-profile peak) combined with an all-negative tail
    k[
        np.all(
            np.abs(full_b[:, 250:500:50] - full_b[:, 200:450:50]) / np.max(np.abs(full_b), axis=1, keepdims=True)
            < 0.05,
            axis=1,
        )
        & np.all(full_b[:, -200:] < 0, axis=1)
    ] = 1

    # triangular
    k[bool_filter_triangular(full_b, rel_kf=0.01, rel_kc=0.01)] = 2

    # sine
    k[bool_filter_sine(full_b, rel_kf=0.01, rel_kc=0.01)] = 3

    # postprocess "other" signals in frequency-domain, to recover some more squares, triangles, and sines
    n_subsample = 32
    # boolean-mask indexing copies, so the in-place ops below leave full_b untouched
    other_b = full_b[k == 0, ::n_subsample]
    other_b /= np.abs(other_b).max(axis=1, keepdims=True)  # per-profile peak normalization
    other_b_ft = np.abs(np.fft.fft(other_b, axis=1))
    other_b_ft /= other_b_ft.max(axis=1, keepdims=True)  # normalize spectrum to dominant bin
    # sine: bins 3..9 negligible and bin 2 small (fundamental dominates)
    msk_of_newly_identified_sines = np.all((other_b_ft[:, 3:10] < 0.03) & (other_b_ft[:, [2]] < 0.2), axis=1)
    # triangle: strictly decaying low-order spectrum, or odd bins present with even bins suppressed
    msk_of_newly_identified_triangs = np.all(((other_b_ft[:, 1:8] - other_b_ft[:, 2:9]) > 0), axis=1) | np.all(
        ((other_b_ft[:, 1:8:2] > 1e-2) & (other_b_ft[:, 2:9:2] < 1e-2)), axis=1
    )
    msk_of_newly_identified_triangs = msk_of_newly_identified_triangs & ~msk_of_newly_identified_sines
    # square: odd bins 1,3 present while even bins 2,4 are near zero
    msk_of_newly_identified_squares = np.all((other_b_ft[:, 1:4:2] > 1e-2) & (other_b_ft[:, 2:5:2] < 1e-3), axis=1)
    msk_of_newly_identified_squares = (
        msk_of_newly_identified_squares & ~msk_of_newly_identified_sines & ~msk_of_newly_identified_triangs
    )
    # map the subset masks (over the "other" profiles) back to absolute row indices
    idx_sines = np.arange(k.size)[k == 0][msk_of_newly_identified_sines]
    idx_triangs = np.arange(k.size)[k == 0][msk_of_newly_identified_triangs]
    idx_squares = np.arange(k.size)[k == 0][msk_of_newly_identified_squares]
    k[idx_squares] = 1
    k[idx_triangs] = 2
    k[idx_sines] = 3
    return k
337 |
338 |
339 | def engineer_features(b_seq, freq, temp, material):
340 | """Add engineered features to data set."""
341 | match b_seq:
342 | case str():
343 | raise NotImplementedError("b_seq must be an array-like yet")
344 | case np.ndarray():
345 | # check b_seq shapes
346 | match b_seq.ndim:
347 | case 1:
348 | b_seq = b_seq[np.newaxis, :]
349 | case 2:
350 | pass
351 | case _:
352 | raise ValueError(f"Expected b_seq to have either one or two dimensions, but is has {b_seq.ndim}.")
353 | case list() | tuple():
354 | b_seq = np.array(b_seq)
355 | case _:
356 | raise ValueError(f"Type of b_seq={type(b_seq)} nut supported. Please provide as np.ndarray or list")
357 |
358 | waveforms = get_waveform_est(b_seq)
359 | waveforms_df = pd.DataFrame(
360 | np.zeros((len(waveforms), 4)),
361 | columns=["wav_other", "wav_square", "wav_triangular", "wav_sine"],
362 | )
363 | # one hot encode
364 | waveform_dummies = pd.get_dummies(waveforms, prefix="wav", dtype=float).rename(
365 | columns={
366 | "wav_0": "wav_other",
367 | "wav_1": "wav_square",
368 | "wav_2": "wav_triangular",
369 | "wav_3": "wav_sine",
370 | }
371 | )
372 | for c in waveform_dummies:
373 | waveforms_df.loc[:, c] = waveform_dummies.loc[:, c]
374 | ds = pd.DataFrame(b_seq, columns=ALL_B_COLS).assign(
375 | freq=freq,
376 | temp=temp,
377 | material=material,
378 | **{c: waveforms_df.loc[:, c] for c in waveforms_df},
379 | )
380 |
381 | dbdt = b_seq[:, 1:] - b_seq[:, :-1]
382 | b_peak2peak = b_seq.max(axis=1) - b_seq.min(axis=1)
383 |
384 | ds = ds.assign(
385 | b_peak2peak=b_peak2peak,
386 | log_peak2peak=np.log(b_peak2peak),
387 | mean_abs_dbdt=np.mean(np.abs(dbdt), axis=1),
388 | log_mean_abs_dbdt=np.log(np.mean(np.abs(dbdt), axis=1)),
389 | sample_time=1 / freq,
390 | )
391 |
392 | return ds
393 |
394 |
def construct_tensor_seq2seq(
    df,
    x_cols,
    b_limit,
    h_limit,
    b_limit_pp=None,
    ln_ploss_mean=0,
    ln_ploss_std=1,
    training_data=True,
):
    """
    Generate tensors.

    Shapes as following:
    - For time series tensors (#time steps, #profiles/periods, #features),
    - for scalar tensors (#profiles, #features).

    NOTE: x_cols is reordered in place (freq moved to the front), so the
    caller's list is mutated.
    """
    full_b = df.loc[:, ALL_B_COLS].to_numpy()
    if training_data:
        full_h = df.loc[:, ALL_H_COLS].to_numpy()
    # all rows share one material; take it from the first row
    mat = df.iloc[0, :].loc["material"]
    df = df.drop(columns=[c for c in df if c.startswith(("H_t_", "B_t_", "material"))])
    assert len(df) > 0, "empty dataframe error"
    # put freq on first place since Architecture expects it there
    x_cols.insert(0, x_cols.pop(x_cols.index("freq")))
    X = df.loc[:, x_cols].astype(np.float32)

    # normalization
    full_b /= b_limit
    if training_data:
        full_h /= h_limit
    # keep the unscaled frequency; needed below to scale the time derivatives
    orig_freq = X.loc[:, ["freq"]].copy().to_numpy()
    X.loc[:, ["temp", "freq"]] /= np.array([75.0, FREQ_SCALE], dtype=np.float32)
    X.loc[:, "freq"] = np.log(X.freq)
    # remaining scalar features are scaled by their per-material maxima
    other_cols = [c for c in x_cols if c not in ["temp", "freq"] and not c.startswith("wav_")]
    for other_col in other_cols:
        X.loc[:, other_col] /= NORM_DENOM[mat][other_col]

    if training_data:
        # add p loss as target (only used as target when predicting p loss directly), must be last column
        X = X.assign(ln_ploss=(np.log(df.ploss) - ln_ploss_mean) / ln_ploss_std)
    # tensor list
    tens_l = []
    if b_limit_pp is not None:
        # add another B curve with different normalization
        per_profile_scaled_b = full_b * b_limit / b_limit_pp
        # add timeseries derivatives; pad one sample on each side with the
        # periodic continuation so np.gradient sees wrap-around neighbors
        b_deriv = np.empty((full_b.shape[0], full_b.shape[1] + 2))
        b_deriv[:, 1:-1] = per_profile_scaled_b
        b_deriv[:, 0] = per_profile_scaled_b[:, -1]
        b_deriv[:, -1] = per_profile_scaled_b[:, 0]
        b_deriv = np.gradient(b_deriv, axis=1) * orig_freq
        b_deriv_sq = np.gradient(b_deriv, axis=1) * orig_freq
        # drop the padding samples again
        b_deriv = b_deriv[:, 1:-1]
        b_deriv_sq = b_deriv_sq[:, 1:-1]
        tantan_b = -np.tan(0.9 * np.tan(per_profile_scaled_b)) / 6  # tan-tan feature
        tens_l += [
            torch.tensor(per_profile_scaled_b.T[..., np.newaxis], dtype=torch.float32),
            torch.tensor(
                b_deriv.T[..., np.newaxis] / NORM_DENOM[mat]["b_deriv"],
                dtype=torch.float32,
            ),
            torch.tensor(
                b_deriv_sq.T[..., np.newaxis] / NORM_DENOM[mat]["b_deriv_sq"],
                dtype=torch.float32,
            ),
            torch.tensor(tantan_b.T[..., np.newaxis], dtype=torch.float32),
        ]
    tens_l += [torch.tensor(full_b.T[..., np.newaxis], dtype=torch.float32)]  # b field is penultimate column
    if training_data:
        tens_l += [
            torch.tensor(full_h.T[..., np.newaxis], dtype=torch.float32),  # target is last column
        ]

    # return ts tensor with shape: (#time steps, #profiles, #features), and scalar tensor with (#profiles, #features)
    return torch.dstack(tens_l), torch.tensor(X.to_numpy(), dtype=torch.float32)
471 |
472 |
class PaderbornModel:
    """The Paderborn model.

    HARDCORE: H-field and power loss estimation for arbitrary waveforms with residual,
    dilated convolutional neural networks in ferrite cores
    N Förster, W Kirchgässner, T Piepenbrock, O Schweins, O Wallscheid
    arXiv preprint arXiv:2401.11488

    """

    expected_seq_len = 1024  # the expected sequence length

    def __init__(self, model_path, material):
        """Load the TorchScript checkpoint for the given material.

        model_path: path to a .pt file loadable by torch.jit.load
        material: material label; must be a key of MAT_CONST_B_MAX/MAT_CONST_H_MAX
        """
        self.model_path = model_path
        self.material = material
        self.mdl = torch.jit.load(model_path)
        self.mdl.eval()
        assert (
            material in MAT_CONST_H_MAX and material in MAT_CONST_B_MAX
        ), f"Requested material '{material}' is not supported"
        # material-wide normalization limits (see MAT_CONST_* tables above)
        self.b_limit = MAT_CONST_B_MAX[material]
        self.h_limit = MAT_CONST_H_MAX[material]
        # when True, the loaded model returns the power loss as a separate output
        self.predicts_p_directly = True

    def __call__(self, b_seq, frequency, temperature):
        """Evaluate trajectory and estimate power loss.

        Args
        ----
        b_seq: (X, Y) array_like
            The magnetic flux density array(s) in T. First dimension X describes the batch, the second Y
            the time length (will always be interpolated to 1024 samples)
        frequency: scalar or 1D array-like
            The frequency operation point(s) in Hz
        temperature: scalar or 1D array-like
            The temperature operation point(s) in °C

        Return
        ------
        p, h: (X,) np.array, (X, Y) np.ndarray
            The estimated power loss (p) in W/m³ and the estimated magnetic field strength (h) in A/m.
        """
        ds = engineer_features(b_seq, frequency, temperature, self.material)
        # construct tensors
        x_cols = [c for c in ds if c not in ["ploss", "kfold", "material"] and not c.startswith(("B_t_", "H_t_"))]
        # per-profile absolute B peak, used as a second normalization scale
        b_limit_per_profile = np.abs(ds.loc[:, ALL_B_COLS].to_numpy()).max(axis=1).reshape(-1, 1)
        # scale the material-wide H limit proportionally to each profile's B peak
        h_limit = self.h_limit * b_limit_per_profile / self.b_limit
        b_limit_test_fold = self.b_limit
        b_limit_test_fold_pp = b_limit_per_profile
        h_limit_test_fold = h_limit
        with torch.inference_mode():
            val_tensor_ts, val_tensor_scalar = construct_tensor_seq2seq(
                ds,
                x_cols,
                b_limit_test_fold,
                h_limit_test_fold,
                b_limit_pp=b_limit_test_fold_pp,
                training_data=False,
            )

        if self.predicts_p_directly:
            # prepare torch tensors for normalization scales
            b_limit_test_fold_torch = torch.as_tensor(b_limit_test_fold, dtype=torch.float32)
            h_limit_test_fold_torch = torch.as_tensor(h_limit_test_fold, dtype=torch.float32)
            freq_scale_torch = torch.as_tensor(FREQ_SCALE, dtype=torch.float32)

            val_pred_p, val_pred_h = self.mdl(
                val_tensor_ts.permute(1, 2, 0),
                val_tensor_scalar,
                b_limit_test_fold_torch,
                h_limit_test_fold_torch,
                freq_scale_torch,
            )
        else:
            val_pred_h = self.mdl(
                val_tensor_ts.permute(1, 2, 0),
                val_tensor_scalar,
            ).permute(2, 0, 1)
            val_pred_p = None
        # denormalize the H prediction back to A/m
        h_pred = val_pred_h.squeeze().cpu().numpy().T * h_limit_test_fold
        if val_pred_p is None:
            # fall back to the loss integral p = f * ∮ H dB when the model
            # does not emit the power loss directly
            p_pred = frequency * np.trapz(h_pred, b_seq, axis=1)
        else:
            # model emits ln(p); undo the log
            p_pred = np.exp(val_pred_p.squeeze().cpu().numpy())
        return p_pred.astype(np.float32), h_pred.astype(np.float32)
558 |
--------------------------------------------------------------------------------
/src_py/magnethub/sydney.py:
--------------------------------------------------------------------------------
1 | """
2 | File contains the model according to the Sydney University approach for the magnet challenge.
3 |
4 | Source: https://github.com/moetomg/magnet-engine
5 | """
6 |
7 | import torch
8 | import numpy as np
9 | from scipy.signal import savgol_filter
10 |
11 | MAT2FILENAME = {
12 | "3C90": "3C90.pt",
13 | "3C92": "3C92.pt",
14 | "3C94": "3C94.pt",
15 | "3C95": "3C95.pt",
16 | "3E6": "3E6.pt",
17 | "3F4": "3F4.pt",
18 | "77": "77.pt",
19 | "78": "78.pt",
20 | "79": "79.pt",
21 | "T37": "T37.pt",
22 | "N27": "N27.pt",
23 | "N30": "N30.pt",
24 | "N49": "N49.pt",
25 | "N87": "N87.pt",
26 | "ML95S": "ML95S.pt",
27 | }
28 | # Material normalization data (1.B 2.H 3.F 4.T 5.dB/dt)
29 | normsDict = {
30 | "77": [
31 | [-2.63253458e-19, 7.47821754e-02],
32 | [-7.60950004e-18, 1.10664739e01],
33 | [5.24678898e00, 2.89351404e-01],
34 | [5.87473793e01, 2.40667381e01],
35 | [6.16727829e00, 3.83645439e01],
36 | ],
37 | "78": [
38 | [5.67033925e-19, 7.22424510e-02],
39 | [-1.54283684e-16, 1.15338828e01],
40 | [5.23810768e00, 2.89979160e-01],
41 | [5.87434082e01, 2.40685291e01],
42 | [6.09561586e00, 3.81356049e01],
43 | ],
44 | "79": [
45 | [1.70344847e-13, 9.41321492e-02],
46 | [-4.54025068e-02, 3.20463941e01],
47 | [5.21954346e00, 2.66715437e-01],
48 | [5.52068787e01, 2.37196522e01],
49 | [6.77422905e00, 3.90895233e01],
50 | ],
51 | "N27": [
52 | [7.52738469e-19, 7.48951129e-02],
53 | [-8.97477366e-17, 1.47606605e01],
54 | [5.24649334e00, 2.89964765e-01],
55 | [5.87355194e01, 2.40766029e01],
56 | [6.17841434e00, 3.84738274e01],
57 | ],
58 | "N30": [
59 | [1.43320465e-19, 6.56044649e-02],
60 | [-1.57874135e-16, 1.09083332e01],
61 | [5.31786680e00, 2.78960317e-01],
62 | [5.86466904e01, 2.40616817e01],
63 | [7.01255989e00, 4.09709969e01],
64 | ],
65 | "N49": [
66 | [-8.99073580e-19, 8.94479227e-02],
67 | [4.15423721e-16, 3.70622618e01],
68 | [5.25545311e00, 3.00384015e-01],
69 | [5.94716339e01, 2.44349327e01],
70 | [6.75209475e00, 3.91901703e01],
71 | ],
72 | "N87": [
73 | [1.72051200e-13, 6.26231476e-02],
74 | [4.02299992e-02, 7.61060358e00],
75 | [5.26309967e00, 2.87137657e-01],
76 | [5.83059006e01, 2.40639057e01],
77 | [6.53078842e00, 3.93127785e01],
78 | ],
79 | "3E6": [
80 | [1.01579639e-18, 7.04261607e-02],
81 | [2.34374135e-16, 7.21573964e00],
82 | [5.34307003e00, 2.66708523e-01],
83 | [5.86578026e01, 2.40552864e01],
84 | [7.23155785e00, 4.15975838e01],
85 | ],
86 | "3F4": [
87 | [-1.75200068e-19, 5.98892952e-02],
88 | [-9.48865199e-18, 4.74414811e01],
89 | [5.14398336e00, 3.04210454e-01],
90 | [5.76523476e01, 2.43824081e01],
91 | [6.23030663e00, 3.64991379e01],
92 | ],
93 | "T37": [
94 | [1.72051200e-13, 6.26231476e-02],
95 | [4.02299992e-02, 7.61060358e00],
96 | [5.26309967e00, 2.87137657e-01],
97 | [5.83059006e01, 2.40639057e01],
98 | [6.53078842e00, 3.93127785e01],
99 | ],
100 | "3C90": [
101 | [-3.27923689e-19, 6.56109348e-02],
102 | [6.99196716e-17, 1.26583787e01],
103 | [5.19875193e00, 2.68499136e-01],
104 | [5.86049919e01, 2.40574703e01],
105 | [6.29652929e00, 3.84585190e01],
106 | ],
107 | "3C92": [
108 | [-2.35520104e-13, 6.53518693e-02],
109 | [1.18689366e-01, 1.23585692e01],
110 | [5.16579533e00, 2.73998171e-01],
111 | [5.84305267e01, 2.40970516e01],
112 | [5.88209248e00, 3.69935722e01],
113 | ],
114 | "3C94": [
115 | [1.21232679e-19, 7.44383659e-02],
116 | [-2.19613879e-17, 1.18042579e01],
117 | [5.22766781e00, 2.68348873e-01],
118 | [5.87128143e01, 2.40769634e01],
119 | [6.53718996e00, 3.91955910e01],
120 | ],
121 | "3C95": [
122 | [5.64116728e-14, 7.90115297e-02],
123 | [1.11898437e-01, 1.29696641e01],
124 | [5.18842697e00, 2.69014776e-01],
125 | [5.86223640e01, 2.40957470e01],
126 | [6.25767517e00, 3.84026108e01],
127 | ],
128 | "ML95S": [
129 | [-1.53185180e-13, 1.15827541e-01],
130 | [3.84426934e-01, 4.45061606e01],
131 | [5.21606445e00, 2.65364528e-01],
132 | [5.70770302e01, 2.44398289e01],
133 | [7.30377579e00, 4.04136391e01],
134 | ],
135 | }
136 |
137 |
# %% Initialize model
class SydneyModel:
    """Wrapper around the Sydney MMINet for a single material."""

    expected_seq_len = 128  # sequence length the network was trained on

    def __init__(self, mdl_path, material):
        # Prefer GPU when available, otherwise fall back to CPU
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Build the network and restore the trained weights for this material
        self.mdl = MMINet(material).to(self.device)
        weights = torch.load(mdl_path, map_location=self.device)
        self.mdl.load_state_dict(weights, strict=True)

    def __call__(self, data_B, data_F, data_T):
        """Predict power loss density and the H waveform from B, frequency, temperature."""
        # Promote a single trajectory to a batch of one
        if data_B.ndim == 1:
            data_B = np.array(data_B).reshape(1, -1)

        _, ts_feats, scalar_feats = get_dataloader(data_B, data_F, data_T, self.mdl.norm)

        # Run the network in inference mode (no autograd bookkeeping)
        self.mdl.eval()
        with torch.inference_mode():
            data_P, h_series = self.mdl(ts_feats.to(self.device), scalar_feats.to(self.device))

        data_P, h_series = data_P.cpu().numpy(), h_series.cpu().numpy()

        # Collapse a one-element loss to a Python scalar; keep H two-dimensional
        if data_P.size == 1:
            data_P = data_P.item()
        if h_series.ndim == 1:
            h_series = h_series.reshape(1, -1)

        return data_P, h_series
179 |
180 |
class MMINet(torch.nn.Module):
    """
    Magnetization mechanism-determined neural network.

    Predicts the H waveform step by step from a B sequence (static hysteresis
    via stop operators + dynamic eddy-current contribution) and derives the
    power loss density from the enclosed B-H loop.

    Parameters:
    - hidden_size: number of eddy current slices (RNN neuron)
    - operator_size: number of operators
    - input_size: number of inputs (1.B 2.dB 3.dB/dt)
    - var_size: number of supplementary variables (1.F 2.T)
    - output_size: number of outputs (1.H)
    """

    def __init__(self, Material, hidden_size=30, operator_size=30, input_size=3, var_size=2, output_size=1):
        super().__init__()
        self.input_size = input_size
        self.var_size = var_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.operator_size = operator_size
        self.norm = normsDict[Material]  # per-material normalization constants (indices: 0=B, 1=H, 2=log10 F, 3=T, 4=dB/dt)
        self.n_init = 32  # number of appended warm-up samples used to settle the history magnetization

        # Construct the network
        self.rnn1 = StopOperatorCell(self.operator_size)
        self.dnn1 = torch.nn.Linear(self.operator_size + 2, 1)
        self.rnn2 = EddyCell(4, self.hidden_size, output_size)
        self.dnn2 = torch.nn.Linear(self.hidden_size, 1)

        self.rnn2_hx = None  # eddy RNN hidden state, (re)initialized at t == 0 of each forward pass

    def forward(self, x, var):
        """
        Forward function.

        Parameters:
            x: batch,seq,input_size
                Input features (1.B, 2.dB, 3.dB/dt)
            var: batch,var_size
                Supplementary inputs (1.F 2.T); columns 2: carry the initial
                stop-operator states s0 appended by get_dataloader
        """
        batch_size = x.size(0)  # Batch size
        seq_size = x.size(1)  # Sequence length (incl. the n_init warm-up samples)

        # Initialize operator state from the precomputed history (s0)
        self.rnn1_hx = var[:, 2:]

        # Initialize DNN2 input (1.B 2.dB/dt)
        x2 = torch.cat((x[:, :, 0:1], x[:, :, 2:3]), dim=2)
        for t in range(seq_size):
            # RNN1 input (dB,state)
            self.rnn1_hx = self.rnn1(x[:, t, 1:2], self.rnn1_hx)

            # DNN1 input (rnn1_hx,F,T)
            dnn1_in = torch.cat((self.rnn1_hx, var[:, 0:2]), dim=1)

            # H hysteresis prediction
            H_hyst_pred = self.dnn1(dnn1_in)

            # DNN2 input (B,dB/dt,T,F)
            rnn2_in = torch.cat((x2[:, t, :], var[:, 0:2]), dim=1)

            # Initialize second rnn state so that the initial eddy field equals the
            # residual between the input B and the hysteresis prediction
            if t == 0:
                H_eddy_init = x[:, t, 0:1] - H_hyst_pred
                buffer = x.new_ones(x.size(0), self.hidden_size)
                # NOTE(review): torch.autograd.Variable is deprecated (a no-op wrapper on modern torch)
                self.rnn2_hx = torch.autograd.Variable((buffer / torch.sum(self.dnn2.weight, dim=1)) * H_eddy_init)

            self.rnn2_hx = self.rnn2(rnn2_in, self.rnn2_hx)

            # H eddy prediction
            H_eddy = self.dnn2(self.rnn2_hx)

            # H total
            H_total = (H_hyst_pred + H_eddy).view(batch_size, 1, self.output_size)

            if t == 0:
                output = H_total
            else:
                output = torch.cat((output, H_total), dim=1)

        # Compute the power loss density: de-normalize B and H (skipping the first
        # n_init samples of the padded sequence), integrate H dB trapezoidally, and
        # multiply by the frequency recovered from the normalized log10(F)
        B = x[:, self.n_init :, 0:1] * self.norm[0][1] + self.norm[0][0]
        H = output[:, self.n_init :, :] * self.norm[1][1] + self.norm[1][0]
        Pv = torch.trapz(H, B, axis=1) * (10 ** (var[:, 0:1] * self.norm[2][1] + self.norm[2][0]))

        # Smooth the predicted H, then rotate the appended warm-up tail back to
        # the front so the output aligns with the original sample order
        H = savgol_filter(H.detach().to("cpu").numpy(), window_length=7, polyorder=2, axis=1)
        H = torch.from_numpy(H).view(batch_size, -1, 1)
        real_H = torch.cat((H[:, -self.n_init :, :], H[:, : -self.n_init, :]), dim=1)
        return torch.flatten(Pv).cpu(), real_H[:, :, 0].cpu()
271 |
272 |
class StopOperatorCell:
    """
    MMINN sub-layer: static hysteresis prediction using stop operators.

    Parameters:
    - operator_size: number of stop operators
    """

    def __init__(self, operator_size):
        # Operator thresholds: (i / (operator_size + 1))**3 for i = 1..operator_size
        indices = torch.arange(1, operator_size + 1, dtype=torch.float)
        self.operator_thre = ((indices / (operator_size + 1)) ** 3.0).view(1, -1)

    def sslu(self, X):
        """Symmetric saturated linear unit: clamp X element-wise to [-1, 1]."""
        ones = torch.ones_like(X)
        return torch.max(-ones, torch.min(ones, X))

    def __call__(self, dB, state):
        """Advance every stop operator by one time step."""
        thresholds = self.operator_thre.to(dB.device)
        return (self.sslu((dB + state) / thresholds) * thresholds).float()
299 |
300 |
class EddyCell(torch.nn.Module):
    """
    MMINN sub-network: dynamic hysteresis prediction.

    Parameters:
    - input_size: feature size
    - hidden_size: number of hidden units (eddy current layers)
    - output_size: number of the output
    """

    def __init__(self, input_size, hidden_size, output_size=1):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Bias-free input-to-hidden and hidden-to-hidden projections
        self.x2h = torch.nn.Linear(input_size, hidden_size, bias=False)
        self.h2h = torch.nn.Linear(hidden_size, hidden_size, bias=False)

    def forward(self, x, hidden=None):
        """
        One recurrence step.

        Parameters:
            x: batch,input_size
                features (1.B 2.dB/dt 3.F 4.T)
            hidden: batch,hidden_size
                dynamic hysteresis effects at each eddy current layer
        """
        pre_activation = self.x2h(x) + self.h2h(hidden)
        return torch.sigmoid(pre_activation)
333 |
334 |
def get_dataloader(data_B, data_F, data_T, norm, n_init=32):
    """Preprocess raw B/F/T data into model-ready tensors and a dataloader.

    Parameters
    ----------
    data_B: array
        B data, shape (batch, time)
    data_F
        F data (scalar or array)
    data_T
        T data (scalar or array)
    norm : list
        B/F/T normalization data
    n_init : int
        Additional points for computing the history magnetization

    Returns
    -------
    tuple
        (dataloader, time-series feature tensor, scalar feature tensor)
    """
    seq_length = 128  # the network expects 128 samples per period

    # 1. Down-sample to (approximately) 128 points
    # NOTE(review): the stride is rounded, so input lengths that are not close
    # to a multiple of 128 may not yield exactly 128 columns — TODO confirm
    # callers resample upstream.
    if data_B.shape[-1] != seq_length:
        stride = round(data_B.shape[1] / seq_length)
        data_B = data_B[:, np.array(range(0, data_B.shape[1], stride))]

    # 2. Append n_init extra points (samples 1..n_init) for the initial
    #    magnetization calculation
    data_length = seq_length + n_init
    data_B = np.hstack((data_B, data_B[:, 1 : 1 + n_init]))

    # 3. Format data into tensors; frequency is used on a log10 scale
    B = torch.from_numpy(data_B).view(-1, data_length, 1).float()
    if np.isscalar(data_F):
        data_F = np.array([data_F])
    if np.isscalar(data_T):
        data_T = np.array([data_T])
    T = torch.from_numpy(data_T).view(-1, 1).float()
    F = torch.from_numpy(np.log10(data_F)).view(-1, 1).float()

    # 4. Normalize each quantity with its (offset, scale) pair
    in_B = (B - norm[0][0]) / norm[0][1]
    in_F = (F - norm[2][0]) / norm[2][1]
    in_T = (T - norm[3][0]) / norm[3][1]

    # 5. Derived features: flux density change (first diff repeated to keep the
    #    sequence length) and its normalized rate of change
    in_dB = torch.diff(in_B, dim=1)
    in_dB = torch.cat((in_dB[:, 0:1, :], in_dB), dim=1)

    # NOTE(review): F here is log10-frequency, so dB_dt scales with log10(f) —
    # confirm this matches how the normalization constants were derived.
    dB_dt = in_dB * (seq_length * F.reshape(-1, 1, 1))
    in_dB_dt = (dB_dt - norm[4][0]) / norm[4][1]

    max_B, _ = torch.max(in_B, dim=1)
    min_B, _ = torch.min(in_B, dim=1)

    # Initial stop-operator states, computed from the pre-step flux density
    s0 = get_operator_init(in_B[:, 0, 0] - in_dB[:, 0, 0], in_dB, max_B, min_B)

    ts_feats = torch.cat((in_B, in_dB, in_dB_dt), dim=2)
    scalar_feats = torch.cat((in_F, in_T, s0), dim=1)

    # 6. Wrap into a dataloader for batched processing
    test_dataset = torch.utils.data.TensorDataset(ts_feats, scalar_feats)
    test_loader = torch.utils.data.DataLoader(
        test_dataset, num_workers=0, batch_size=128, drop_last=False
    )

    return test_loader, ts_feats, scalar_feats
399 |
400 |
# %% Predict the operator state at t0
def get_operator_init(B0, dB, Bmax, Bmin, operator_size=30, max_out_H=1):
    """Compute the initial state of the hysteresis (stop) operators.

    Parameters
    ----------
    B0 : torch_like (batch)
        Stop operator excitation at t1
    dB : torch_like (batch, data_length)
        Flux density changes at each t
    Bmax: torch_like (batch)
        Max flux density of each cycle
    Bmin: torch_like (batch)
        Min flux density of each cycle
    operator_size: int
        The number of operators
    max_out_H:
        The maximum output of field strength
    """
    batch = dB.shape[0]
    state = torch.zeros((batch, operator_size))

    # Operator thresholds: (i / operator_size + 1)**3 * max_out_H, i = 1..operator_size.
    # NOTE(review): the precedence differs from StopOperatorCell, which uses
    # (i / (operator_size + 1))**3 — possibly missing parentheses, but the
    # trained weights depend on this exact form, so it is preserved; confirm
    # against the original training code.
    levels = torch.arange(1, operator_size + 1, dtype=torch.float) / operator_size + 1
    operator_thre = (levels ** 3.0).view(1, -1) * max_out_H

    # Walk every sample and operator; an operator whose threshold was never
    # reached within the cycle keeps a zero initial state.
    for i in range(len(B0)):
        for j in range(operator_size):
            r = operator_thre[0, j]
            if Bmax[i] < r and Bmin[i] > -r:
                continue
            if dB[i, 0] >= 0:
                # ascending branch
                state[i, j] = r if B0[i] > Bmin[i] + 2 * r else B0[i] - (r + Bmin[i])
            else:
                # descending branch
                state[i, j] = -r if B0[i] < Bmax[i] - 2 * r else B0[i] + (r - Bmax[i])

    return state
447 |
--------------------------------------------------------------------------------
/tests/debug.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from pathlib import Path
4 | from magnethub.loss import LossModel, MATERIALS
5 |
# Compute per-material relative-error statistics of the Paderborn model
# against the author's full dataset.
test_ds = pd.read_csv(
    Path.cwd() / "tests" / "test_files" / "all_data.csv.gzip", dtype={"material": str}
)
errs_d = {}
for m_lbl in MATERIALS:
    mdl = LossModel(material=m_lbl, team="paderborn")
    test_mat_df = test_ds.query("material == @m_lbl")
    # Batch-predict using every B_t_* column as the waveform matrix
    p, h = mdl(
        test_mat_df.loc[:, [c for c in test_mat_df if c.startswith("B_t_")]].to_numpy(),
        test_mat_df.loc[:, "freq"].to_numpy(),
        test_mat_df.loc[:, "temp"].to_numpy(),
    )
    rel_err = np.abs(test_mat_df.ploss - p) / test_mat_df.ploss
    errs_d[m_lbl] = {
        "avg": np.mean(rel_err),
        "95th": np.quantile(rel_err, 0.95),
        "99th": np.quantile(rel_err, 0.99),
        "samples": len(rel_err),
    }
rel_df = pd.DataFrame(errs_d).T
print("Rel. errors")  # plain string (the original f-string had no placeholders)
print(rel_df)
28 |
--------------------------------------------------------------------------------
/tests/test_paderborn.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from pathlib import Path
4 | import pytest
5 | from magnethub.loss import LossModel, MATERIALS
6 |
7 |
def test_smoke():
    """Basic end-to-end run plus a determinism check."""
    # dummy B field: one trajectory, 1024 samples, ~200 mT amplitude (values in T)
    excitation = np.random.randn(1024) * 200e-3
    f_hz = 124062  # Hz
    t_c = 58  # °C

    mdl = LossModel(material="3C92", team="paderborn")

    # get loss and estimated H wave
    p, h = mdl(excitation, f_hz, t_c)
    assert np.isscalar(p), f"p has shape {p.shape}"
    assert h.shape == (1, 1024), f"h has shape {h.shape}"

    # a repeated call must reproduce the same prediction
    p2, h2 = mdl(excitation, f_hz, t_c)
    assert np.allclose(p, p2), f"{p} != {p2}"
    assert np.allclose(h, h2), f"{h} != {h2}"
24 |
25 |
def test_shorter_sequence():
    """Sequences shorter than 1024 samples are accepted and echoed in shape."""
    n_samples = 233
    # dummy B field, ~200 mT amplitude (values in T)
    excitation = np.random.randn(n_samples) * 200e-3
    mdl = LossModel(material="3C92", team="paderborn")

    # get scalar power loss at 120 kHz / 77 °C
    p, h = mdl(excitation, 120_000, 77)

    assert np.isscalar(p), f"p has shape {p.shape}"
    assert h.shape == (1, n_samples), f"h has shape {h.shape}"
38 |
def test_longer_sequence():
    """Sequences longer than 1024 samples are accepted and echoed in shape."""
    n_samples = 2313
    # dummy B field, ~200 mT amplitude (values in T)
    excitation = np.random.randn(n_samples) * 200e-3
    mdl = LossModel(material="3C92", team="paderborn")

    # get scalar power loss at 120 kHz / 77 °C
    p, h = mdl(excitation, 120_000, 77)

    assert np.isscalar(p), f"p has shape {p.shape}"
    assert h.shape == (1, n_samples), f"h has shape {h.shape}"
51 |
52 |
def test_batch_execution():
    """A whole batch of trajectories is processed in a single call."""
    n_profiles = 100
    mdl = LossModel(material="3C92", team="paderborn")

    # random batch: B waves (~200 mT amplitude, in T), frequencies, temperatures
    b_waves = np.random.randn(n_profiles, 1024) * 200e-3
    freqs = np.random.randint(100e3, 750e3, size=n_profiles)
    temps = np.random.randint(20, 80, size=n_profiles)
    p, h = mdl(b_waves, freqs, temps)

    assert p.size == n_profiles, f"{p.size=}"
    assert h.shape == (n_profiles, 1024), f"{h.shape=}"
63 |
64 |
def test_material_availability():
    """Every advertised material loads and produces a prediction."""
    # dummy B field, ~200 mT amplitude (values in T), at 124062 Hz / 58 °C
    excitation = np.random.randn(1024) * 200e-3

    for m_lbl in MATERIALS:
        mdl = LossModel(material=m_lbl, team="paderborn")
        p, h = mdl(excitation, 124062, 58)
        assert np.isscalar(p), f"p has shape {p.shape}"
        assert h.shape == (1, 1024), f"h has shape {h.shape}"
75 |
76 |
@pytest.mark.skip(reason="DEBUG test. The test files do not exist except for the author.")
def test_full_accuracy():
    """Report per-material relative error on the author's full dataset (batch call)."""
    test_ds = pd.read_csv(Path.cwd() / "tests" / "test_files" / "all_data.csv.gzip", dtype={"material": str})
    for m_lbl in MATERIALS:
        mdl = LossModel(material=m_lbl, team="paderborn")
        test_mat_df = test_ds.query("material == @m_lbl")
        # batch-predict using all B_t_* columns as the waveform matrix
        p, h = mdl(
            test_mat_df.loc[:, [c for c in test_mat_df if c.startswith("B_t_")]].to_numpy(),
            test_mat_df.loc[:, "freq"].to_numpy(),
            test_mat_df.loc[:, "temp"].to_numpy(),
        )
        rel_err = np.abs(test_mat_df.ploss - p) / test_mat_df.ploss
        print(f"{m_lbl}: avg. rel err {np.mean(rel_err):.5%} 95th quantile {np.quantile(rel_err, 0.95):.5%}")
        # assert avg_rel_err < 0.70, f"Inaccurate for material {m_lbl} with prediction: {np.abs(test_mat_df.ploss - p)/ test_mat_df.ploss} W/m³"
91 |
92 |
@pytest.mark.skip(reason="DEBUG test. The test files do not exist except for the author.")
def test_full_accuracy_line_by_line():
    """Report per-material relative error on the author's full dataset, one row per call."""
    test_ds = pd.read_csv(Path.cwd() / "tests" / "test_files" / "all_data.csv.gzip", dtype={"material": str})
    for m_lbl in MATERIALS:
        mdl = LossModel(material=m_lbl, team="paderborn")
        test_mat_df = test_ds.query("material == @m_lbl")
        p_preds_l = []

        # predict each operating point individually (exercises the scalar path)
        for _, row in test_mat_df.iterrows():
            p, h = mdl(
                row.loc[[c for c in test_mat_df if c.startswith("B_t_")]].to_numpy().astype(np.float64),
                row.loc["freq"],
                row.loc["temp"],
            )
            p_preds_l.append(p)
        p = np.array(p_preds_l)
        rel_err = np.abs(test_mat_df.ploss - p) / test_mat_df.ploss
        print(f"{m_lbl}: avg. rel err {np.mean(rel_err):.5%} 95th quantile {np.quantile(rel_err, 0.95):.5%}")
111 |
112 |
def test_accuracy_slightly():
    """Average relative loss error stays below 8 % on the bundled sample data."""
    data_file = Path(__file__).parent / "test_files" / "unit_test_data_ploss_at_450kWpm3.csv"
    test_ds = pd.read_csv(data_file, dtype={"material": str})
    for m_lbl in MATERIALS:
        mdl = LossModel(material=m_lbl, team="paderborn")
        test_mat_df = test_ds.query("material == @m_lbl")
        b_cols = [c for c in test_mat_df if c.startswith("B_t_")]
        p, h = mdl(
            test_mat_df.loc[:, b_cols].to_numpy(),
            test_mat_df.loc[:, "freq"].to_numpy(),
            test_mat_df.loc[:, "temp"].to_numpy(),
        )
        avg_rel_err = np.mean(np.abs(test_mat_df.ploss - p) / test_mat_df.ploss)
        assert (
            avg_rel_err < 0.08
        ), f"Inaccurate for material {m_lbl} with prediction: {np.abs(test_mat_df.ploss - p)/ test_mat_df.ploss} W/m³"
129 |
--------------------------------------------------------------------------------
/tests/test_sydney.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from pathlib import Path
4 | import pytest
5 | from magnethub.loss import LossModel, MATERIALS
6 |
# All tests in this module exercise the Sydney model implementation
TEAM_NAME = "sydney"
8 |
def test_smoke():
    """Basic end-to-end run plus a determinism check."""
    # dummy B field: one trajectory, 1024 samples, ~200 mT amplitude (values in T)
    excitation = np.random.randn(1024) * 200e-3
    f_hz = 124062  # Hz
    t_c = 58  # °C

    mdl = LossModel(material="3C92", team=TEAM_NAME)

    # get loss and estimated H wave
    p, h = mdl(excitation, f_hz, t_c)
    assert np.isscalar(p), f"p has shape {p.shape}"
    assert h.shape == (1, 1024), f"h has shape {h.shape}"

    # a repeated call must reproduce the same prediction
    p2, h2 = mdl(excitation, f_hz, t_c)
    assert np.allclose(p, p2), f"{p} != {p2}"
    assert np.allclose(h, h2), f"{h} != {h2}"
25 |
26 |
def test_shorter_sequence():
    """Sequences shorter than 1024 samples are accepted and echoed in shape."""
    n_samples = 233
    # dummy B field, ~200 mT amplitude (values in T)
    excitation = np.random.randn(n_samples) * 200e-3
    mdl = LossModel(material="3C92", team=TEAM_NAME)

    # get scalar power loss at 120 kHz / 77 °C
    p, h = mdl(excitation, 120_000, 77)

    assert np.isscalar(p), f"p has shape {p.shape}"
    assert h.shape == (1, n_samples), f"h has shape {h.shape}"
39 |
def test_longer_sequence():
    """Sequences longer than 1024 samples are accepted and echoed in shape."""
    n_samples = 2313
    # dummy B field, ~200 mT amplitude (values in T)
    excitation = np.random.randn(n_samples) * 200e-3
    mdl = LossModel(material="3C92", team=TEAM_NAME)

    # get scalar power loss at 120 kHz / 77 °C
    p, h = mdl(excitation, 120_000, 77)

    assert np.isscalar(p), f"p has shape {p.shape}"
    assert h.shape == (1, n_samples), f"h has shape {h.shape}"
52 |
def test_batch_execution():
    """A whole batch of trajectories is processed in a single call."""
    n_profiles = 1412  # batch size (each profile still has 1024 samples)
    mdl = LossModel(material="3C92", team=TEAM_NAME)
    # random batch: B waves (~200 mT amplitude, in T), frequencies, temperatures
    b_waves = np.random.randn(n_profiles, 1024) * 200e-3
    freqs = np.random.randint(100e3, 750e3, size=n_profiles)
    temps = np.random.randint(20, 80, size=n_profiles)
    p, h = mdl(b_waves, freqs, temps)

    assert p.size == n_profiles, f"{p.size=}"
    assert h.shape == (n_profiles, 1024), f"{h.shape=}"
63 |
64 |
def test_material_availability():
    """Every advertised material loads and produces a prediction."""
    # dummy B field, ~200 mT amplitude (values in T), at 124062 Hz / 58 °C
    excitation = np.random.randn(1024) * 200e-3

    for m_lbl in MATERIALS:
        mdl = LossModel(material=m_lbl, team=TEAM_NAME)
        p, h = mdl(excitation, 124062, 58)
        assert np.isscalar(p), f"p has shape {p.shape}"
        assert h.shape == (1, 1024), f"h has shape {h.shape}"
75 |
def test_accuracy_slightly():
    """Average relative loss error stays below 47 % on the bundled sample data."""
    data_file = Path(__file__).parent / "test_files" / "unit_test_data_ploss_at_450kWpm3.csv"
    test_ds = pd.read_csv(data_file, dtype={"material": str})
    for m_lbl in MATERIALS:
        mdl = LossModel(material=m_lbl, team=TEAM_NAME)
        test_mat_df = test_ds.query("material == @m_lbl")
        b_cols = [c for c in test_mat_df if c.startswith("B_t_")]
        p, h = mdl(
            test_mat_df.loc[:, b_cols].to_numpy(),
            test_mat_df.loc[:, "freq"].to_numpy(),
            test_mat_df.loc[:, "temp"].to_numpy(),
        )
        avg_rel_err = np.mean(np.abs(test_mat_df.ploss - p) / test_mat_df.ploss)
        assert (
            avg_rel_err < 0.47
        ), f"Inaccurate for material {m_lbl} with prediction: {np.abs(test_mat_df.ploss - p)/ test_mat_df.ploss} W/m³"
92 |
--------------------------------------------------------------------------------