├── .github └── workflows │ ├── format_check.yml │ ├── lint.yml │ └── spell_check.yml ├── .gitignore ├── .nojekyll ├── CHANGELOG.md ├── LICENSE ├── README.md ├── docstrings.py ├── envs └── osmg.yaml ├── ignore_words.txt ├── img └── img.png ├── pyproject.toml ├── run_checks.sh ├── section_data ├── EXCEL_to_JSON.ipynb ├── aisc-shapes-database-v15.0.xlsx └── get_sections ├── setup.py └── src └── osmg ├── __init__.py ├── analysis ├── __init__.py ├── common.py ├── ground_motion_utils.py ├── load_case.py ├── recorders.py ├── solver.py └── supports.py ├── core ├── __init__.py ├── common.py ├── gridsystem.py ├── model.py ├── osmg_collections.py └── uid_object.py ├── creators ├── __init__.py ├── component.py ├── material.py ├── section.py ├── uid.py └── zerolength.py ├── data └── sections.json ├── geometry ├── __init__.py ├── line.py ├── mesh.py ├── mesh_shapes.py └── transformations.py ├── get_latest_pypi_version.py ├── graphics ├── __init__.py ├── objects.py ├── plotly.py └── visibility.py ├── model_objects ├── __init__.py ├── element.py ├── friction_model.py ├── node.py ├── section.py └── uniaxial_material.py ├── postprocessing └── __init__.py ├── preprocessing └── __init__.py ├── py.typed └── tests ├── __init__.py ├── analysis ├── __init__.py ├── test_load_case.py └── test_supports.py ├── core ├── __init__.py ├── test_common.py ├── test_gridsystem.py └── test_uid_object.py ├── creators ├── __init__.py └── test_uid.py ├── elements ├── __init__.py └── test_node.py ├── groundmotions ├── 1xa.txt └── 1ya.txt ├── test_a.py.inactive ├── test_doc_notebooks.py.inactive ├── test_line.py ├── test_mesh.py ├── test_transformations.py └── verification ├── __init__.py ├── braced_frame.py ├── opensees_only ├── __init__.py ├── openseespy_truss.py └── opsvis_frame.py └── test_offset_and_basic_forces.py /.github/workflows/format_check.yml: -------------------------------------------------------------------------------- 1 | name: Ruff format 2 | on: [push, pull_request] 3 | jobs: 4 | ruff: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v4 8 | - uses: chartboost/ruff-action@v1 9 | with: 10 | args: 'format --check' 11 | version: 0.7.4 12 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Ruff check 2 | on: [push, pull_request] 3 | jobs: 4 | ruff: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v4 8 | - uses: chartboost/ruff-action@v1 9 | with: 10 | version: 0.7.4 11 | -------------------------------------------------------------------------------- /.github/workflows/spell_check.yml: -------------------------------------------------------------------------------- 1 | name: Spell Check 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | spell-check: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - name: Checkout code 11 | uses: actions/checkout@v2 12 | 13 | - name: Run codespell 14 | uses: codespell-project/actions-codespell@v2 15 | 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | 
pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # Temporary file folder 132 | /temp 133 | *~/ 134 | notes 135 | /experimental 136 | *# 137 | /.links/ 138 | /conda/ 139 | *_autosummary/* 140 | /docs/source/_autosummary/ 141 | /docs/source/notebooks/*.ipynb 142 | -------------------------------------------------------------------------------- /.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ioannis-vm/OpenSees_Model_Generator/3adaa456176570dc22f6cd4f621339624cb8577f/.nojekyll -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [1.0.0] - 2025-03-10 11 | 12 | ### Changed 13 | 14 | - **Extensive changes** to the design of the package. Backwards compatibility was **not** maintained. 15 | - Levels no longer serve as repositories of objects. Now the model objects store collections of objects (such as nodes and component assembiles), and they feature a grid system. 
The grid system contains information on level elevations, as well as grids in the X-Y plane, with dedicated methods to retrieve the coordinates of intersection points. This means that objects that previously "belonged" to a specific level now simply belong to the model and are not associated with a particular level. Levels are now only part of the grid system, which can be used to assist element placement. This simplifies the code and allows for component assemblies that span across multiple levels (e.g. a brace spanning two stories or a column spanning two stories without an intermediate connection). 16 | - We now use configuration classes instead of methods with an extremely large number of arguments. We no longer pass functions as function arguments along with dictionaries of additional arguments for those functions, which was extremely cumbersome to define. Now we instantiate an appropriate configuration object and pass that object instead. The function or method can then use the methods of that object and determine how to handle it based on its type. This allows for type-aware definitions and code completion, making the user experience much nicer. 17 | - We now properly utilize OpenSees recorders instead of relying on output commands and storing data in memory. 18 | - Migrated to Ruff for code formatting and checking. 19 | - Stopped using pickle to store objects to disk. Using JSON going forward, which is human-readable and safer. 20 | - Certain terms were updated because they were inaccurate. 21 | - Most "generator" objects were renamed to "Creator" to avoid confusion with the meaning of "generator" in Python. 22 | 23 | ### Removed 24 | 25 | The documentation was taken offline due to the massive changes in syntax, which broke the example code. 26 | I intend to restore it but can't provide a timeline. 27 | 28 | ## [0.2.7] - 2024-08-25 29 | 30 | ### Fixed 31 | 32 | - Fixed bug in applying gravity loads before a time-history or pushover analysis. 33 | - Replaced pushover convergence tolerance with a more appropriate value. 34 | 35 | ## [0.2.6] - 2024-05-01 36 | 37 | ### Fixed 38 | 39 | Fixed node restraints and time-history analysis acceleration inputs for `opensees`. 40 | 41 | ## [0.2.5] - 2024-05-01 42 | 43 | ### Added 44 | 45 | The convergence test tolerance can now be passed as an optional argument for time-history analyses (`test_tolerance`). It defaults to `1e-12`. 46 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenSees Model Generator 2 | ![PyPI version](https://badge.fury.io/py/osmg.svg) 3 | ![Zenodo](https://zenodo.org/badge/DOI/10.5281/zenodo.7536062.svg) 4 | 5 | This Python package aims to help users define, analyze, and post-process 3D models using OpenSees. 6 | The project has recently undergone a massive rewrite, which led to the documentation and unit tests being taken offline. 7 | We intend to reinstate them soon; a minimal usage sketch is provided below in the meantime. 8 | 9 | ### External resources 10 | 11 | - Visit the OpenSees [homepage](https://opensees.berkeley.edu/) to learn more about the program. 12 | - Explore the [OpenSeesWiki](https://opensees.berkeley.edu/wiki/index.php/Main_Page) for additional resources and documentation. 13 | - Find the source code for OpenSeesPy on its [GitHub repository](https://github.com/zhuminjie/OpenSeesPy). 14 | - Access the OpenSeesPy documentation [here](https://openseespydoc.readthedocs.io/en/latest/).
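
### Minimal usage sketch

Until the documentation is restored, the snippet below is a minimal, illustrative sketch of the new grid-system workflow, put together from the source in this repository (`osmg.core.model` and `osmg.core.gridsystem`). The model name, level names, and coordinates are placeholder values, and the units are whatever the user adopts consistently.

```python
from osmg.core.model import Model2D

# Create a 2D frame model; a GridSystem2D instance is attached automatically.
model = Model2D(name='demo_frame', dimensionality='2D Frame')

# Define level elevations and vertical grid lines (placeholder values).
model.grid_system.add_level('Base', 0.00)
model.grid_system.add_level('Level 1', 144.00)
model.grid_system.add_grid('A', 0.00)
model.grid_system.add_grid('B', 360.00)

# Navigate levels and query grid locations.
level = model.grid_system.get_level('Level 1')
print(level.elevation())                         # 144.0
print(model.grid_system.get_grid_location('B'))  # 360.0
```

Component and load-case creation follow the same configuration-object pattern described in the changelog; their interfaces can be found under `src/osmg/creators` and `src/osmg/analysis`.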
15 | -------------------------------------------------------------------------------- /docstrings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Remove type hints from docstrings. 3 | 4 | This script was used to remove all type-hints from the docstrings, 5 | since the type hints exist in the code itself and Sphinx can pick them 6 | up just fine. If something is enclosed in parenthesis inside a 7 | docstring, immediately followed by a colon symbol, the code picks it 8 | up. 9 | """ 10 | 11 | from __future__ import annotations 12 | 13 | import os 14 | import re 15 | from pathlib import Path 16 | 17 | import numpy as np 18 | 19 | 20 | def list_directories(root_dir: str) -> list[Path]: 21 | """ 22 | List directories. 23 | 24 | Returns: 25 | List of directories. 26 | """ 27 | res = [] 28 | for path, directories, _ in os.walk(root_dir): 29 | for directory in directories: 30 | res.append(Path(path) / directory) # noqa: PERF401 31 | return res 32 | 33 | 34 | def list_python_files(directory: str) -> list[Path]: 35 | """ 36 | List all python files. 37 | 38 | List all Python files in the specified directory and 39 | subdirectories. 40 | 41 | Args: 42 | directory (str): The directory to search in. 43 | 44 | Returns: 45 | ------- 46 | list[str]: A list of paths to Python files. 47 | """ 48 | return [str(file) for file in Path(directory).rglob('*.py')] 49 | 50 | 51 | res = list_directories('src/osmg/') 52 | 53 | not_backup = [] 54 | 55 | for thing in res: 56 | if '.~' not in thing: 57 | not_backup.append(thing) # noqa: PERF401 58 | 59 | not_backup.append('src/osmg') 60 | 61 | # find all available filenames 62 | files = {} 63 | for thing in not_backup: 64 | files[thing] = list_python_files(thing) 65 | 66 | pattern = r'\(*?\):' # type: ignore (this is so silly) 67 | 68 | for paths in files.values(): 69 | for path in paths: 70 | contents = Path(path).read_text(encoding='utf-8') 71 | if contents.startswith('"""'): 72 | contents = '\n\n' + contents 73 | contents_spl = np.array(contents.split('"""')) 74 | contents_docstr = contents_spl[1::2] 75 | for thing in contents_docstr: 76 | lines = thing.split('\n') 77 | for line in lines: 78 | if '>>>' in line: 79 | continue 80 | if '...' 
in line: 81 | continue 82 | if '(most recent call last):' in line: 83 | continue 84 | match = re.search(pattern, line) 85 | if match: 86 | print('~~~') # noqa: T201 87 | print(line) # noqa: T201 88 | print('~~~') # noqa: T201 89 | print() # noqa: T201 90 | -------------------------------------------------------------------------------- /envs/osmg.yaml: -------------------------------------------------------------------------------- 1 | name: osmg 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - scikit-geometry 7 | - pandoc 8 | -------------------------------------------------------------------------------- /ignore_words.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ioannis-vm/OpenSees_Model_Generator/3adaa456176570dc22f6cd4f621339624cb8577f/ignore_words.txt -------------------------------------------------------------------------------- /img/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ioannis-vm/OpenSees_Model_Generator/3adaa456176570dc22f6cd4f621339624cb8577f/img/img.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "osmg" 7 | version = "1.0.13" 8 | description = "OpenSees Model Generator" 9 | readme = "README.md" 10 | license = { file = "LICENSE" } 11 | authors = [ 12 | { name = "John Vouvakis Manousakis" } 13 | ] 14 | requires-python = ">=3.11" 15 | dependencies = [ 16 | "descartes>=1.1.0", 17 | "dill>=0.3.7", 18 | "json-tricks>=3.17.3", 19 | "matplotlib>=3.4.1", 20 | "numpy>=1.20.2", 21 | "openpyxl>=3.0.7", 22 | "pandas>=1.2.4", 23 | "plotly>=4.14.3", 24 | "scipy>=1.9.0", 25 | "shapely>=1.7.1", 26 | "tqdm>=2.0.0", 27 | "xlrd>=2.0.1", 28 | "pydantic", 29 | "pyarrow" 30 | ] 31 | classifiers = [ 32 | "Programming Language :: Python :: 3", 33 | "Programming Language :: Python :: 3 :: Only", 34 | "Programming Language :: Python :: 3.12", 35 | ] 36 | keywords = ["seismic", "structural engineering", "OpenSees", "modeling"] 37 | 38 | [project.optional-dependencies] 39 | dev = [ 40 | "codespell", 41 | "coverage", 42 | "jupyter", 43 | "jupytext", 44 | "mypy", 45 | "types-requests", 46 | "nbsphinx", 47 | "pytest", 48 | "pytest-cov", 49 | "pytest-xdist", 50 | "ruff", 51 | "sphinx", 52 | "sphinx-autoapi", 53 | "sphinx-rtd-theme", 54 | "sphinx_autodoc_typehints", 55 | "xdoctest" 56 | ] 57 | 58 | [tool.setuptools] 59 | package-dir = { "" = "src" } 60 | include-package-data = true 61 | zip-safe = false 62 | 63 | [tool.setuptools.packages] 64 | find = { where = ["src"] } 65 | 66 | [tool.setuptools.package-data] 67 | osmg = ["**/*.json"] 68 | 69 | [tool.mypy] 70 | python_version = "3.11" 71 | mypy_path = "src" 72 | check_untyped_defs = true 73 | disallow_any_generics = true 74 | disallow_incomplete_defs = true 75 | ignore_missing_imports = true 76 | implicit_optional = true 77 | show_error_codes = true 78 | strict_equality = true 79 | warn_redundant_casts = true 80 | warn_return_any = true 81 | warn_unreachable = false 82 | warn_unused_configs = true 83 | no_implicit_reexport = true 84 | namespace_packages = false 85 | 86 | [tool.ruff] 87 | line-length = 85 88 | 89 | [tool.ruff.lint] 90 | # Enable all known categories 91 | select = ["ALL"] 92 | ignore = [ 93 | "ANN101", 
"CPY001", "D211", "D212", "Q000", "Q003", "COM812", "D203", 94 | "ISC001", "E501", "ERA001", "PGH003", "FIX002", "TD003", "S101", 95 | "N801", "S311", "G004", "SIM102", "SIM108", "NPY002", "E501", 96 | "T201", "DOC201" 97 | ] 98 | preview = true 99 | 100 | [tool.ruff.lint.per-file-ignores] 101 | "docs/source/notebooks/*" = ["E402"] 102 | "docs/source/*" = ["INP001"] 103 | "src/osmg/tests/*" = ["D103", "PLR2004", "PLR6301"] 104 | 105 | [tool.ruff.lint.pydocstyle] 106 | convention = "google" 107 | 108 | [tool.ruff.lint.pylint] 109 | max-args = 15 110 | max-locals = 50 111 | max-returns = 11 112 | max-branches = 50 113 | max-statements = 150 114 | max-bool-expr = 5 115 | 116 | [tool.ruff.format] 117 | quote-style = "single" 118 | 119 | [tool.codespell] 120 | ignore-words = ["ignore_words.txt"] 121 | skip = ["*.html", "./htmlcov/*", "./docs/build/*"] -------------------------------------------------------------------------------- /run_checks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Spell-check 4 | echo "Spell-checking." 5 | echo 6 | codespell . 7 | if [ $? -ne 0 ]; then 8 | echo "Spell-checking failed." 9 | exit 1 10 | fi 11 | 12 | # # Check formatting with ruff 13 | # echo "Checking formatting with 'ruff format --diff'." 14 | # echo 15 | # ruff format --diff 16 | # if [ $? -ne 0 ]; then 17 | # echo "ruff format failed." 18 | # exit 1 19 | # fi 20 | 21 | # Format code 22 | echo "Formatting with 'ruff format'." 23 | echo 24 | ruff format 25 | if [ $? -ne 0 ]; then 26 | echo "ruff format failed." 27 | exit 1 28 | fi 29 | 30 | # Run ruff for linting 31 | echo "Linting with 'ruff check --fix'." 32 | echo 33 | ruff check --fix --output-format concise 34 | if [ $? -ne 0 ]; then 35 | echo "ruff check failed." 36 | exit 1 37 | fi 38 | 39 | # Run mypy for type checking 40 | echo "Type checking with mypy." 41 | echo 42 | mypy src/osmg 43 | if [ $? -ne 0 ]; then 44 | echo "mypy failed. Exiting." 45 | exit 1 46 | fi 47 | 48 | # Run pytest for testing and generate coverage report 49 | echo "Running unit-tests." 50 | echo 51 | python -m pytest src/osmg/tests --cov=osmg --cov-report html -n auto 52 | if [ $? -ne 0 ]; then 53 | echo "pytest failed. Exiting." 54 | exit 1 55 | fi 56 | 57 | echo "All checks passed successfully." 58 | echo 59 | -------------------------------------------------------------------------------- /section_data/EXCEL_to_JSON.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "d278a23a", 6 | "metadata": {}, 7 | "source": [ 8 | "# Excel to JSON\n", 9 | "The following code converts the AISC shapes database excel file to JSON.\n", 10 | "\n", 11 | "The original file contains two sections per row. The left has\n", 12 | "imperial, and the right metric units, but they do not correspond to\n", 13 | "the exact same section (the metric section dimensions are close to\n", 14 | "the imperial size but rounded up, they are not merely\n", 15 | "converted). The following code reads the imperial sections only.\n", 16 | "\n", 17 | "To decrease the lookup time for the subsequent analysis, we only\n", 18 | "extract data for specific section types." 
19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": null, 24 | "id": "5ef75152", 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "import json\n", 29 | "from pathlib import Path\n", 30 | "\n", 31 | "import pandas as pd" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "id": "7e26a59f", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "contents = pd.read_excel(\n", 42 | " 'aisc-shapes-database-v15.0.xlsx', sheet_name='Database v15.0', usecols='A:CF'\n", 43 | ")" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "id": "e097a2e2", 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "# instantiate an empty list to store the dictionaries\n", 54 | "sections = {}\n", 55 | "for i in range(len(contents)): # for each row\n", 56 | " # turn row into a dictionary\n", 57 | " dct = dict(contents.loc[i])\n", 58 | "\n", 59 | " # we only want data for specific section types\n", 60 | " if dct['Type'] not in {'W', 'HSS'}:\n", 61 | " continue\n", 62 | "\n", 63 | " # filter out key-value pairs\n", 64 | " # where value = '-'\n", 65 | " # and redundant keys\n", 66 | " new_dct = {}\n", 67 | " for key, value in dct.items():\n", 68 | " if value != '–': # noqa: RUF001\n", 69 | " new_dct[key] = value # noqa: PERF403\n", 70 | " # add it to the list\n", 71 | " sections[dct['AISC_Manual_Label']] = new_dct" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": null, 77 | "id": "fc31d454", 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "# save to a JSON file\n", 82 | "with Path('data/sections.json').open('w', encoding='utf-8') as f:\n", 83 | " f.write(json.dumps(sections))" 84 | ] 85 | } 86 | ], 87 | "metadata": { 88 | "kernelspec": { 89 | "display_name": "Python 3", 90 | "language": "python", 91 | "name": "python3" 92 | } 93 | }, 94 | "nbformat": 4, 95 | "nbformat_minor": 5 96 | } -------------------------------------------------------------------------------- /section_data/aisc-shapes-database-v15.0.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ioannis-vm/OpenSees_Model_Generator/3adaa456176570dc22f6cd4f621339624cb8577f/section_data/aisc-shapes-database-v15.0.xlsx -------------------------------------------------------------------------------- /section_data/get_sections: -------------------------------------------------------------------------------- 1 | wget https://www.aisc.org/globalassets/aisc/manual/v15.0-shapes-database/aisc-shapes-database-v15.0.xlsx 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """Setup file.""" 2 | 3 | from setuptools import setup 4 | 5 | if __name__ == '__main__': 6 | setup() 7 | -------------------------------------------------------------------------------- /src/osmg/__init__.py: -------------------------------------------------------------------------------- 1 | """OSMG package.""" 2 | -------------------------------------------------------------------------------- /src/osmg/analysis/__init__.py: -------------------------------------------------------------------------------- 1 | """Structural analysis-related objects.""" 2 | -------------------------------------------------------------------------------- /src/osmg/analysis/common.py: -------------------------------------------------------------------------------- 1 | """Common 
low-level analysis objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | 6 | class ConcentratedValue(tuple[float, ...]): 7 | """Concentrated value, such as a point load or mass.""" 8 | 9 | __slots__ = () 10 | 11 | def __repr__(self) -> str: 12 | """ 13 | Get a string representation. 14 | 15 | Returns: 16 | A simple string representation of the object. 17 | """ 18 | return f"{self.__class__.__name__}({', '.join(map(str, self))})" 19 | 20 | def __add__(self, other: ConcentratedValue) -> ConcentratedValue: 21 | """ 22 | Add two ConcentratedValue instances element-wise. 23 | 24 | Args: 25 | other: Another ConcentratedValue instance. 26 | 27 | Returns: 28 | A new ConcentratedValue instance with the sum of corresponding elements. 29 | """ 30 | if not isinstance(other, ConcentratedValue): 31 | return NotImplemented 32 | return ConcentratedValue(a + b for a, b in zip(self, other)) 33 | 34 | def __mul__(self, scalar: float) -> ConcentratedValue: 35 | """ 36 | Multiply each element of ConcentratedValue by a scalar. 37 | 38 | Args: 39 | scalar: A numeric value to multiply each element by. 40 | 41 | Returns: 42 | A new ConcentratedValue instance with each element scaled. 43 | """ 44 | if not isinstance(scalar, (int, float)): 45 | return NotImplemented 46 | return ConcentratedValue(a * scalar for a in self) 47 | 48 | 49 | class PointLoad(ConcentratedValue): 50 | """Point load.""" 51 | 52 | 53 | class PointMass(ConcentratedValue): 54 | """Point mass.""" 55 | 56 | 57 | class UDL(tuple[float, ...]): 58 | """ 59 | Beamcolumn element UDL. 60 | 61 | Uniformly distributed load expressed in the global coordinate 62 | system of the structure. 63 | """ 64 | 65 | __slots__ = () 66 | 67 | def __repr__(self) -> str: 68 | """ 69 | Get a string representation. 70 | 71 | Returns: 72 | A simple string representation of the object. 73 | """ 74 | return f"{self.__class__.__name__}({', '.join(map(str, self))})" 75 | 76 | def __add__(self, other: UDL) -> UDL: 77 | """ 78 | Add two UDL instances element-wise. 79 | 80 | Args: 81 | other: Another UDL instance. 82 | 83 | Returns: 84 | A new UDL instance with the sum of corresponding elements. 85 | """ 86 | if not isinstance(other, UDL): 87 | return NotImplemented 88 | return UDL(a + b for a, b in zip(self, other)) 89 | 90 | def __mul__(self, scalar: float) -> UDL: 91 | """ 92 | Multiply each element of UDL by a scalar. 93 | 94 | Args: 95 | scalar: A numeric value to multiply each element by. 96 | 97 | Returns: 98 | A new UDL instance with each element scaled. 99 | """ 100 | if not isinstance(scalar, (int, float)): 101 | return NotImplemented 102 | return UDL(a * scalar for a in self) 103 | -------------------------------------------------------------------------------- /src/osmg/analysis/ground_motion_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions. 3 | 4 | Utility functions for handling ground motion files and response 5 | spectra. 6 | """ 7 | 8 | import re 9 | from pathlib import Path 10 | 11 | import numpy as np 12 | import numpy.typing as npt 13 | 14 | numpy_array = npt.NDArray[np.float64] 15 | 16 | 17 | def import_PEER(filename: str) -> numpy_array: # noqa: N802 18 | """ 19 | Import a PEER ground motion. 20 | 21 | Import a ground motion record from a specified PEER ground 22 | motion record file. 23 | Output is a two column matrix of time - acceleration pairs. 24 | Acceleration is in [g] units. 25 | 26 | Returns: 27 | The data in the file. 
28 | """ 29 | # Get all data except for the last line, where it may have fewer 30 | # columns and cause an error 31 | a_g = np.genfromtxt(filename, skip_header=4, skip_footer=1) 32 | # Manually read the last line and append 33 | with Path(filename).open(encoding='utf-8') as file: 34 | lines = file.readlines() 35 | last_line = lines[-1] 36 | last = np.fromstring(last_line, sep=' ') 37 | a_g = np.append(a_g, last) 38 | 39 | # Read metadata 40 | with Path(filename).open(encoding='utf-8') as file: 41 | line_containing_units = 2 42 | line_containing_number_of_points = 3 43 | for i, line in enumerate(file): 44 | if i == line_containing_units: 45 | # Units 46 | units = (line.split(sep=' ')[-1]).strip() 47 | elif i == line_containing_number_of_points: 48 | # Number of points 49 | npts = int(re.sub(r'NPTS=\s+', '', line.split(sep=', ')[0])) 50 | # Time step 51 | tmp = re.sub(r'DT=\s+', '', line.split(sep=', ')[1]) 52 | tmp = re.sub(r'\s* SEC', '', tmp) 53 | tmp = tmp.replace('SEC', '') # some files have no space 54 | tmp = tmp.replace('SE', '') 55 | tmp = tmp.replace('dt=', '') 56 | d_t = float(tmp.strip()) 57 | elif i > line_containing_number_of_points: 58 | break 59 | 60 | # Assert correct number of points and units 61 | assert npts == len( 62 | a_g 63 | ), 'Number of points reported in file does not match recovered points' 64 | assert units == 'G', "Expected file to be in G units, but it isn't" 65 | 66 | # Obtain the corresponding time values 67 | t = np.array([x * d_t for x in range(npts)]) 68 | 69 | # Return data in the form of a matrix 70 | return np.column_stack((t, a_g)) 71 | 72 | 73 | def response_spectrum( 74 | th: numpy_array, dt: float, zeta: float, num_points: int = 200 75 | ) -> numpy_array: 76 | """ 77 | Calculate a response spectrum. 78 | 79 | Calculate the linear response spectrum of an acceleration 80 | time history of fixed time interval dt and values given in vector 81 | th, and damping ratio zeta. 82 | n_Pts is the number of log-spaced points of the response spectrum 83 | 84 | Returns: 85 | The response spectrum. 
86 | """ 87 | T = np.logspace( # noqa: N806 88 | -2, 1, num_points - 1 89 | ) # -1 because we also include PGA @ T=0s 90 | # we may have to upsample the ground motion time history 91 | # to ensure convergence of the central difference method 92 | if dt > 0.1 * T[0]: 93 | t_max = float(len(th)) * dt 94 | upscale = dt / (0.1 * T[0]) 95 | old_ts = np.linspace(0, t_max, num=len(th)) 96 | new_ts = np.linspace(0, t_max, num=int(upscale + 1.0) * len(th)) 97 | th = np.interp(new_ts, old_ts, th) 98 | dt = new_ts[1] - new_ts[0] 99 | assert dt < 0.1 * T[0] 100 | omega = 2 * np.pi / T 101 | c = 2 * zeta * omega 102 | k = omega**2 103 | n = len(th) 104 | # Initial calculations 105 | u = np.full(len(T), 0.00) # initialize arrays 106 | u_prev = np.full(len(T), 0.00) 107 | umax = np.full(len(T), 0.00) 108 | khut = 1.00 / dt**2 + c / (2.0 * dt) # initial calcs 109 | alpha = 1.00 / dt**2 - c / (2.0 * dt) 110 | beta = k - 2.0 / dt**2 111 | for i in range(1, n): 112 | phut = -th[i] - alpha * u_prev - beta * u 113 | u_prev = u 114 | u = phut / khut # update step 115 | # update maximum displacements 116 | umax[np.abs(u) > umax] = np.abs(u[np.abs(u) > umax]) 117 | # Determine pseudo-spectral acceleration 118 | sa = umax * omega**2 119 | # rs = np.column_stack((T, sa)) # not yet 120 | # Include T = 0 s ~ PGA 121 | Ts = np.concatenate((np.array([0.00]), T)) # noqa: N806 122 | sas = np.concatenate((np.array([np.max(np.abs(th))]), sa)) 123 | return np.column_stack((Ts, sas)) 124 | 125 | 126 | def code_spectrum( 127 | t_array: numpy_array, s_s: float, s_1: float, t_long: float = 8.00 128 | ) -> numpy_array: 129 | """ 130 | Generate a simplified ASCE code response spectrum. 131 | 132 | Returns: 133 | The response spectrum. 134 | """ 135 | num_vals = len(t_array) 136 | code_sa = np.full(num_vals, 0.00) 137 | T_short = s_1 / s_s # noqa: N806 138 | T_zero = 0.20 * T_short # noqa: N806 139 | code_sa[t_array <= T_short] = s_s 140 | code_sa[t_array >= t_long] = s_1 * t_long / t_array[t_array >= t_long] ** 2 141 | sel = np.logical_and(t_array > T_short, t_array < t_long) 142 | code_sa[sel] = s_1 / t_array[sel] 143 | code_sa[t_array < T_zero] = s_s * ( 144 | 0.40 + 0.60 * t_array[t_array < T_zero] / T_zero 145 | ) 146 | return np.column_stack((t_array, code_sa)) 147 | 148 | 149 | if __name__ == '__main__': 150 | pass 151 | -------------------------------------------------------------------------------- /src/osmg/analysis/recorders.py: -------------------------------------------------------------------------------- 1 | """Recorder objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass, field 6 | from pathlib import Path 7 | from typing import Literal 8 | 9 | import pandas as pd 10 | 11 | from osmg.core.uid_object import UIDObject 12 | 13 | 14 | @dataclass 15 | class Recorder(UIDObject): 16 | """Recorder base class.""" 17 | 18 | file_name: str 19 | # TODO(JVM): figure out binary format. 20 | 21 | def __post_init__(self) -> None: 22 | """Post-initialization.""" 23 | self._data = None 24 | 25 | def ops_args(self) -> list[object]: # noqa: PLR6301 26 | """Obtain the OpenSees arguments.""" 27 | msg = 'Child classes should implement this.' 28 | raise NotImplementedError(msg) 29 | 30 | def get_data(self) -> pd.DataFrame: # noqa: PLR6301 31 | """Retrieve the data.""" 32 | msg = 'Child classes should implement this.' 
33 | raise NotImplementedError(msg) 34 | 35 | def set_data(self, data: pd.DataFrame) -> None: 36 | """Overwrite the data.""" 37 | self._data = data 38 | 39 | def clear_data(self) -> None: 40 | """Clear data.""" 41 | self._data = None 42 | 43 | 44 | @dataclass 45 | class NodeRecorder(Recorder): 46 | """ 47 | OpenSees Node recorder. 48 | 49 | The Node recorder type records the response of a number of nodes 50 | at every converged step. 51 | 52 | Note: I haven't been able to get the `eigen` type working. 53 | 54 | https://opensees.berkeley.edu/wiki/index.php?title=Node_Recorder 55 | """ 56 | 57 | recorder_type: Literal['Node', 'EnvelopeNode'] 58 | nodes: tuple[int, ...] 59 | dofs: tuple[int, ...] 60 | response_type: Literal[ 61 | 'disp', 'vel', 'accel', 'incrDisp', 'reaction', 'rayleighForces' 62 | ] 63 | number_of_significant_digits: int 64 | output_time: bool | None 65 | delta_t: float | None = field(default=None) 66 | time_series_tag: int | None = field(default=None) 67 | 68 | def ops_args(self) -> list[object]: 69 | """ 70 | Obtain the OpenSees arguments. 71 | 72 | Returns: 73 | The OpenSees arguments. 74 | """ 75 | output: list[object] = [ 76 | self.recorder_type, 77 | '-file', 78 | self.file_name, 79 | '-precision', 80 | self.number_of_significant_digits, 81 | ] 82 | if self.time_series_tag: 83 | output.extend( 84 | [ 85 | '-timeSeries', 86 | self.time_series_tag, 87 | ] 88 | ) 89 | if self.output_time: 90 | output.extend(['-time']) 91 | if self.delta_t: 92 | output.extend(['-dT', self.delta_t]) 93 | output.extend(['-node', *self.nodes]) 94 | output.extend(['-dof', *self.dofs]) 95 | output.append(self.response_type) 96 | return output 97 | 98 | def get_data(self) -> pd.DataFrame: 99 | """ 100 | Retrieve the data. 101 | 102 | Returns: 103 | The data. 104 | """ 105 | if self.recorder_type == 'EnvelopeNode': 106 | raise NotImplementedError 107 | 108 | if self._data is None: 109 | index_col = 0 if self.output_time else None 110 | data = pd.read_csv( 111 | self.file_name, 112 | sep=' ', 113 | index_col=index_col, 114 | header=None, 115 | engine='pyarrow', 116 | ) 117 | data = data.astype(float) 118 | header_data = [(node, dof) for node in self.nodes for dof in self.dofs] 119 | data.columns = pd.MultiIndex.from_tuples( 120 | header_data, names=('node', 'dof') 121 | ) 122 | data.index.name = 'time' if self.output_time else None 123 | self._data = data 124 | return self._data 125 | 126 | 127 | @dataclass 128 | class DriftRecorder(Recorder): 129 | """ 130 | OpenSees Drift recorder. 131 | 132 | The Drift type records the displacement drift between two 133 | nodes. The drift is taken as the ratio between the prescribed 134 | relative displacement and the specified distance between the 135 | nodes. 136 | https://opensees.berkeley.edu/wiki/index.php?title=Node_Recorder 137 | """ 138 | 139 | nodes_i: tuple[int, ...] 140 | nodes_j: tuple[int, ...] 141 | perpendicular_directions: tuple[int, ...] 142 | dofs: tuple[int, ...] 143 | file_name: str 144 | number_of_significant_digits: int 145 | output_time: bool 146 | # delta_t: float | None = field(default=None) 147 | 148 | def ops_args(self) -> list[object]: 149 | """ 150 | Obtain the OpenSees arguments. 151 | 152 | Returns: 153 | The OpenSees arguments. 
154 | """ 155 | output: list[object] = [ 156 | 'Drift', 157 | '-file', 158 | self.file_name, 159 | '-precision', 160 | self.number_of_significant_digits, 161 | ] 162 | if self.output_time: 163 | output.extend(['-time']) 164 | # if self.delta_t: 165 | # output.extend(['-dT', self.delta_t]) 166 | output.extend(['-iNode', *self.nodes_i]) 167 | output.extend(['-jNode', *self.nodes_j]) 168 | output.extend(['-dof', *self.dofs]) 169 | output.extend(['-perpDirn', *self.perpendicular_directions]) 170 | # TODO(JVM): see if delta_t is supported. 171 | return output 172 | 173 | 174 | @dataclass 175 | class ElementRecorder(Recorder): 176 | """ 177 | OpenSees Element recorder. 178 | 179 | The Element recorder type records the response of a number of 180 | elements at every converged step. The response recorded is 181 | element-dependent and also depends on the arguments which are 182 | passed to the setResponse() element method. 183 | https://opensees.berkeley.edu/wiki/index.php?title=Node_Recorder 184 | """ 185 | 186 | recorder_type: Literal['Element', 'EnvelopeElement'] 187 | elements: tuple[int, ...] 188 | element_arguments: tuple[str, ...] 189 | file_name: str 190 | number_of_significant_digits: int | None 191 | output_time: bool 192 | delta_t: float | None = field(default=None) 193 | 194 | def ops_args(self) -> list[object]: 195 | """ 196 | Obtain the OpenSees arguments. 197 | 198 | Returns: 199 | The OpenSees arguments. 200 | """ 201 | output: list[object] = [ 202 | self.recorder_type, 203 | '-file', 204 | self.file_name, 205 | ] 206 | if self.number_of_significant_digits: 207 | output.extend( 208 | [ 209 | '-precision', 210 | self.number_of_significant_digits, 211 | ] 212 | ) 213 | if self.output_time: 214 | output.extend(['-time']) 215 | if self.delta_t: 216 | output.extend(['-dT', self.delta_t]) 217 | output.extend(['-ele', *self.elements]) 218 | output.extend([*self.element_arguments]) 219 | return output 220 | 221 | def get_data(self, *, update_index: bool = True) -> pd.DataFrame: 222 | """ 223 | Retrieve the data. 224 | 225 | Returns: 226 | The data. 
227 | """ 228 | if self.recorder_type == 'EnvelopeElement': 229 | raise NotImplementedError 230 | 231 | if self._data is None: 232 | index_col = 0 if self.output_time else None 233 | data = pd.read_csv( 234 | self.file_name, 235 | sep=' ', 236 | index_col=index_col, 237 | header=None, 238 | engine='pyarrow', 239 | ) 240 | data = data.astype(float) 241 | if update_index: 242 | # get number of dofs 243 | num_dof = int(data.shape[1] / len(self.elements) / 2.0) 244 | # construct header 245 | header_data = [ 246 | (element, station, dof) 247 | for element in self.elements 248 | for station in (0.00, 1.00) 249 | for dof in range(1, num_dof + 1) 250 | ] 251 | data.columns = pd.MultiIndex.from_tuples( 252 | header_data, names=('element', 'station', 'dof') 253 | ) 254 | data.index.name = 'time' if self.output_time else None 255 | self._data = data 256 | return self._data 257 | -------------------------------------------------------------------------------- /src/osmg/analysis/supports.py: -------------------------------------------------------------------------------- 1 | """Node supports.""" 2 | 3 | from __future__ import annotations 4 | 5 | 6 | class FixedSupport(tuple[bool, ...]): 7 | """Fixed support.""" 8 | 9 | __slots__: list[str] = [] 10 | 11 | 12 | class ElasticSupport(tuple[float | bool, ...]): 13 | """Flexible support.""" 14 | 15 | __slots__: list[str] = [] 16 | -------------------------------------------------------------------------------- /src/osmg/core/__init__.py: -------------------------------------------------------------------------------- 1 | """Core OSMG objects.""" 2 | -------------------------------------------------------------------------------- /src/osmg/core/common.py: -------------------------------------------------------------------------------- 1 | """Common definitions.""" 2 | 3 | from __future__ import annotations 4 | 5 | import re 6 | from pprint import pprint 7 | from typing import Hashable, OrderedDict, TypeVar 8 | 9 | import numpy as np 10 | import numpy.typing as npt 11 | 12 | # very big, very small numbers used for 13 | # comparing floats and hashing 14 | EPSILON = 1.00e-6 15 | ALPHA = 1.00e8 16 | 17 | # gravitational acceleration 18 | G_CONST_IMPERIAL = 386.22 # in/s**2 19 | G_CONST_SI = 9.81 # m/s**2 20 | 21 | # quantities to use for extreme stiffnesses 22 | STIFF_ROT = 1.0e15 23 | STIFF = 1.0e10 24 | TINY = 1.0e-12 25 | 26 | NDM: dict[str, int] = { 27 | '1D1DOF': 1, 28 | '2D Truss': 2, 29 | '2D Frame': 2, 30 | '3D Truss': 3, 31 | '3D Frame': 3, 32 | } 33 | NDF: dict[str, int] = { 34 | '1D1DOF': 1, 35 | '2D Truss': 2, 36 | '2D Frame': 3, 37 | '3D Truss': 3, 38 | '3D Frame': 6, 39 | } 40 | 41 | TWO_DIMENSIONAL = 2 42 | THREE_DIMENSIONAL = 3 43 | 44 | numpy_array = npt.NDArray[np.float64] 45 | 46 | 47 | def methods(obj: object) -> list[str]: 48 | """ 49 | Get the methods of an object. 50 | 51 | Returns: 52 | The names of all methods of an object, excluding the dunder 53 | methods. 54 | 55 | Example: 56 | >>> class TestClass: 57 | ... def method_1(self): 58 | ... pass 59 | ... 60 | ... def method_2(self): 61 | ... pass 62 | ... 
63 | >>> obj = TestClass() 64 | >>> methods(obj) 65 | ['method_1', 'method_2'] 66 | """ 67 | object_methods = [ 68 | method_name 69 | for method_name in dir(obj) 70 | if callable(getattr(obj, method_name)) 71 | ] 72 | pattern = r'__.*__' 73 | return [s for s in object_methods if not re.match(pattern, s)] 74 | 75 | 76 | def print_methods(obj: object) -> None: 77 | """Print the methods of an object.""" 78 | object_methods = methods(obj) 79 | pprint(object_methods) # noqa: T203 80 | 81 | 82 | def print_dir(obj: object) -> None: 83 | """Print the entire output of `dir()` of an object.""" 84 | pprint(dir(obj)) # noqa: T203 85 | 86 | 87 | K = TypeVar('K', bound=Hashable) # Represents the key type (must be Hashable) 88 | V = TypeVar('V') # Represents the value type 89 | 90 | 91 | def previous_element(dct: OrderedDict[K, V], key: K) -> V | None: 92 | """ 93 | Get the previous element. 94 | 95 | Returns the value of the element that comes before the given key 96 | in an ordered dictionary. 97 | If the key is not in the dictionary, or if it is the first element 98 | in the dictionary, returns None. 99 | 100 | Arguments: 101 | dct: An ordered dictionary. 102 | key: The key of the element whose previous element we want to 103 | find. 104 | 105 | Returns: 106 | The value of the element that comes before the given key in 107 | the dictionary, or None if there is no such element. 108 | """ 109 | if key in dct: 110 | key_list = list(dct.keys()) 111 | idx = key_list.index(key) 112 | if idx == 0: 113 | result = None 114 | else: 115 | result = dct[key_list[idx - 1]] 116 | else: 117 | result = None 118 | return result 119 | -------------------------------------------------------------------------------- /src/osmg/core/gridsystem.py: -------------------------------------------------------------------------------- 1 | """Grid system.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass, field 6 | from typing import TYPE_CHECKING, Generic, TypeVar 7 | 8 | from osmg.geometry.line import Line 9 | 10 | if TYPE_CHECKING: 11 | from osmg.core.common import numpy_array 12 | 13 | 14 | T = TypeVar('T') 15 | 16 | 17 | class LevelWrapper: 18 | """A wrapper for a level that allows chained navigation. 19 | 20 | Attributes: 21 | system: The grid system that this level belongs to. 22 | level_name: The name of the level. 23 | """ 24 | 25 | def __init__(self, system: BaseGridSystem, level_name: str) -> None: # type: ignore 26 | """ 27 | Initialize a LevelWrapper. 28 | 29 | Args: 30 | system: The grid system containing the levels. 31 | level_name: The name of the level to wrap. 32 | """ 33 | self.system = system 34 | self.level_name = level_name 35 | 36 | def previous(self) -> LevelWrapper: 37 | """ 38 | Navigate to the previous level in a circular fashion. 39 | 40 | Returns: 41 | A LevelWrapper for the previous level. 42 | """ 43 | previous_level = self.system.get_previous_level(self.level_name) 44 | return LevelWrapper(self.system, previous_level) 45 | 46 | def next(self) -> LevelWrapper: 47 | """ 48 | Navigate to the next level in a circular fashion. 49 | 50 | Returns: 51 | A LevelWrapper for the next level. 52 | """ 53 | next_level = self.system.get_next_level(self.level_name) 54 | return LevelWrapper(self.system, next_level) 55 | 56 | def elevation(self) -> float: 57 | """ 58 | Get the elevation of this level. 59 | 60 | Returns: 61 | The elevation of the level as a float. 
62 | """ 63 | return self.system.levels[self.level_name] 64 | 65 | def __repr__(self) -> str: 66 | """ 67 | Represent the LevelWrapper as a string. 68 | 69 | Returns: 70 | A string representation of the LevelWrapper including the 71 | level name and elevation. 72 | """ 73 | return f"LevelWrapper(level_name='{self.level_name}', elevation={self.elevation()})" 74 | 75 | 76 | class GridWrapper(Generic[T]): 77 | """A wrapper for a grid that allows chained navigation.""" 78 | 79 | def __init__(self, system: BaseGridSystem, grid_name: str) -> None: # type: ignore 80 | """ 81 | Initialize a GridWrapper. 82 | 83 | Args: 84 | system: The grid system containing the grids. 85 | grid_name: The name of the grid to wrap. 86 | """ 87 | self.system = system 88 | self.grid_name: str = grid_name 89 | 90 | def previous(self) -> GridWrapper[T]: 91 | """ 92 | Navigate to the previous grid in a circular fashion. 93 | 94 | Returns: 95 | A GridWrapper for the previous grid. 96 | """ 97 | previous_grid = self.system.get_previous_grid(self.grid_name) 98 | return GridWrapper(self.system, previous_grid) 99 | 100 | def next(self) -> GridWrapper[T]: 101 | """ 102 | Navigate to the next grid in a circular fashion. 103 | 104 | Returns: 105 | A GridWrapper for the next grid. 106 | """ 107 | next_grid = self.system.get_next_grid(self.grid_name) 108 | return GridWrapper(self.system, next_grid) 109 | 110 | def data(self) -> T: 111 | """ 112 | Get the data associated with this grid. 113 | 114 | Returns: 115 | The data of the grid. 116 | """ 117 | return self.system.grids[self.grid_name] # type: ignore 118 | 119 | def __repr__(self) -> str: 120 | """ 121 | Represent the GridWrapper as a string. 122 | 123 | Returns: 124 | A string representation of the GridWrapper including the grid name and its data. 125 | """ 126 | return f"GridWrapper(grid_name='{self.grid_name}', data={self.data()})" 127 | 128 | 129 | @dataclass 130 | class BaseGridSystem(Generic[T]): 131 | """Base class for grid systems. 132 | 133 | Attributes: 134 | grids: A dictionary of grids, parameterized by the type `T`. 135 | levels: A dictionary of levels, each represented by a float. 136 | """ 137 | 138 | grids: dict[str, T] = field(default_factory=dict) 139 | levels: dict[str, float] = field(default_factory=dict) 140 | 141 | def add_level(self, name: str, elevation: float) -> None: 142 | """ 143 | Add a level to the dictionary. 144 | 145 | Args: 146 | name: The name of the level. 147 | elevation: The elevation of the level. 148 | """ 149 | self.levels[name] = elevation 150 | 151 | def get_level(self, level_name: str) -> LevelWrapper: 152 | """ 153 | Retrieve a LevelWrapper for a given level. 154 | 155 | Args: 156 | level_name: The name of the level to retrieve. 157 | 158 | Returns: 159 | A LevelWrapper object for the specified level. 160 | 161 | Raises: 162 | ValueError: If the specified level does not exist. 163 | """ 164 | if level_name not in self.levels: 165 | msg = f"Level '{level_name}' does not exist." 166 | raise ValueError(msg) 167 | return LevelWrapper(self, level_name) 168 | 169 | def get_previous_level(self, level_name: str) -> str: 170 | """ 171 | Get the name of the previous level in a circular fashion. 172 | 173 | Args: 174 | level_name: The name of the current level. 175 | 176 | Returns: 177 | The name of the previous level. 178 | 179 | Raises: 180 | ValueError: If the specified level does not exist. 181 | """ 182 | level_names = list(self.levels.keys()) 183 | if level_name not in level_names: 184 | msg = f'Level `{level_name}` does not exist.' 
185 | raise ValueError(msg) 186 | index = level_names.index(level_name) 187 | return level_names[index - 1] 188 | 189 | def get_next_level(self, level_name: str) -> str: 190 | """ 191 | Get the name of the next level in a circular fashion. 192 | 193 | Args: 194 | level_name: The name of the current level. 195 | 196 | Returns: 197 | The name of the next level. 198 | 199 | Raises: 200 | ValueError: If the specified level does not exist. 201 | """ 202 | level_names = list(self.levels.keys()) 203 | if level_name not in level_names: 204 | msg = f'Level `{level_name}` does not exist.' 205 | raise ValueError(msg) 206 | index = level_names.index(level_name) 207 | return level_names[(index + 1) % len(level_names)] 208 | 209 | def add_grid(self, name: str, data: T) -> None: 210 | """ 211 | Add a grid to the dictionary. 212 | 213 | Args: 214 | name: The name of the grid. 215 | data: The data associated with the grid. 216 | """ 217 | self.grids[name] = data 218 | 219 | def get_grid(self, grid_name: str) -> GridWrapper[T]: 220 | """ 221 | Retrieve a GridWrapper for a given grid. 222 | 223 | Args: 224 | grid_name: The name of the grid to retrieve. 225 | 226 | Returns: 227 | A GridWrapper object for the specified grid. 228 | 229 | Raises: 230 | ValueError: If the specified grid does not exist. 231 | """ 232 | if grid_name not in self.grids: 233 | msg = f"Grid '{grid_name}' does not exist." 234 | raise ValueError(msg) 235 | return GridWrapper(self, grid_name) 236 | 237 | def get_previous_grid(self, grid_name: str) -> str: 238 | """ 239 | Get the name of the previous grid in a circular fashion. 240 | 241 | Args: 242 | grid_name: The name of the current grid. 243 | 244 | Returns: 245 | The name of the previous grid. 246 | 247 | Raises: 248 | ValueError: If the specified grid does not exist. 249 | """ 250 | grid_names = list(self.grids.keys()) 251 | if grid_name not in grid_names: 252 | msg = f'Grid `{grid_name}` does not exist.' 253 | raise ValueError(msg) 254 | index = grid_names.index(grid_name) 255 | return grid_names[index - 1] 256 | 257 | def get_next_grid(self, grid_name: str) -> str: 258 | """ 259 | Get the name of the next grid in a circular fashion. 260 | 261 | Args: 262 | grid_name: The name of the current grid. 263 | 264 | Returns: 265 | The name of the next grid. 266 | 267 | Raises: 268 | ValueError: If the specified grid does not exist. 269 | """ 270 | grid_names = list(self.grids.keys()) 271 | if grid_name not in grid_names: 272 | msg = f'Grid `{grid_name}` does not exist.' 273 | raise ValueError(msg) 274 | index = grid_names.index(grid_name) 275 | return grid_names[(index + 1) % len(grid_names)] 276 | 277 | 278 | @dataclass 279 | class GridSystem(BaseGridSystem[Line]): 280 | """ 281 | Grid system using dictionaries for grids and levels. 282 | 283 | Attributes: 284 | grids: A dictionary of `Line` objects representing the grids. 285 | levels: A dictionary of level elevations. 286 | """ 287 | 288 | def add_grid(self, name: str, start: numpy_array, end: numpy_array) -> None: # type: ignore 289 | """ 290 | Add a grid to the dictionary. 291 | 292 | Args: 293 | name: The name of the grid. 294 | start: Starting coordinates of the grid line. 295 | end: Ending coordinates of the grid line. 296 | """ 297 | self.grids[name] = Line(tag=name, start=start, end=end) 298 | 299 | def get_intersection_coordinates( 300 | self, grid_name_1: str, grid_name_2: str 301 | ) -> numpy_array | None: 302 | """ 303 | Find the intersection of two grids. 304 | 305 | Args: 306 | grid_name_1: The name of the first grid. 
307 | grid_name_2: The name of the second grid. 308 | 309 | Returns: 310 | The coordinates of the intersection point if it exists. 311 | 312 | Raises: 313 | ValueError: If either of the specified grids does not exist. 314 | """ 315 | grid_1 = self.grids.get(grid_name_1) 316 | grid_2 = self.grids.get(grid_name_2) 317 | 318 | if grid_1 is None or grid_2 is None: 319 | msg = f"Grids '{grid_name_1}' and '{grid_name_2}' must exist." 320 | raise ValueError(msg) 321 | 322 | return grid_1.intersect(grid_2) 323 | 324 | 325 | @dataclass 326 | class GridSystem2D(BaseGridSystem[float]): 327 | """ 328 | Grid system for 2D models using dictionaries for grids and levels. 329 | 330 | Attributes: 331 | grids: A dictionary of grid coordinates as floats. 332 | levels: A dictionary of level elevations. 333 | """ 334 | 335 | def add_grid(self, name: str, location: float) -> None: 336 | """ 337 | Add a grid to the dictionary. 338 | 339 | Args: 340 | name: The name of the grid. 341 | location: The x or y coordinate of the grid line. 342 | """ 343 | self.grids[name] = location 344 | 345 | def get_grid_location(self, grid_name: str) -> float: 346 | """ 347 | Retrieve the location of a grid by its name. 348 | 349 | Args: 350 | grid_name: The name of the grid. 351 | 352 | Returns: 353 | The location of the grid. 354 | 355 | Raises: 356 | ValueError: If the specified grid does not exist. 357 | """ 358 | if grid_name not in self.grids: 359 | msg = f"Grid '{grid_name}' does not exist." 360 | raise ValueError(msg) 361 | return self.grids[grid_name] 362 | -------------------------------------------------------------------------------- /src/osmg/core/model.py: -------------------------------------------------------------------------------- 1 | """Defines Model objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass, field 6 | from typing import TYPE_CHECKING, Literal 7 | 8 | import numpy as np 9 | 10 | from osmg.core.common import NDM 11 | from osmg.core.gridsystem import GridSystem, GridSystem2D 12 | from osmg.core.osmg_collections import ComponentAssemblyCollection, NodeCollection 13 | from osmg.creators.uid import UIDGenerator 14 | 15 | if TYPE_CHECKING: 16 | from osmg.model_objects.node import Node 17 | 18 | from osmg.core.common import numpy_array 19 | 20 | 21 | @dataclass(repr=False) 22 | class Model: 23 | """ 24 | Base Model object. 25 | 26 | A general representation of a structural model. 27 | 28 | Attributes: 29 | name (str): Name of the model. 30 | grid_system (GridSystem): Grid system of the model. 31 | uid_generator (UIDGenerator): Object for generating unique IDs. 32 | """ 33 | 34 | name: str 35 | dimensionality: Literal['2D Truss', '2D Frame', '3D Truss', '3D Frame'] 36 | uid_generator: UIDGenerator = field(default_factory=UIDGenerator) 37 | nodes: NodeCollection = field(default_factory=NodeCollection) 38 | components: ComponentAssemblyCollection = field( 39 | default_factory=ComponentAssemblyCollection 40 | ) 41 | 42 | def bounding_box(self, padding: float) -> tuple[numpy_array, numpy_array]: 43 | """ 44 | Obtain the axis-aligned bounding box of the building. 45 | 46 | Returns: 47 | Bounding box. 
48 | """ 49 | num_dimensions = NDM[self.dimensionality] 50 | p_min = np.full(num_dimensions, np.inf) 51 | p_max = np.full(num_dimensions, -np.inf) 52 | for node in list(self.nodes.values()): 53 | point: numpy_array = np.array(node.coordinates) 54 | p_min = np.minimum(p_min, point) 55 | p_max = np.maximum(p_max, point) 56 | p_min -= np.full(num_dimensions, padding) 57 | p_max += np.full(num_dimensions, padding) 58 | return p_min, p_max 59 | 60 | def reference_length(self) -> float: 61 | """ 62 | Obtain the largest bounding box dimension. 63 | 64 | (used in graphics) 65 | 66 | Returns: 67 | The largest dimension. 68 | """ 69 | p_min, p_max = self.bounding_box(padding=0.00) 70 | return float(np.max(p_max - p_min)) 71 | 72 | def get_all_nodes( 73 | self, ignore_by_tag: set[str] | None = None 74 | ) -> dict[int, Node]: 75 | """ 76 | Get all nodes in the model. 77 | 78 | Params: 79 | ignore_by_tag: Set of tags of components to ignore. 80 | 81 | Returns: 82 | A dictionary with the nodes. Keys are their UIDs. 83 | """ 84 | # primary nodes 85 | primary_nodes = self.nodes 86 | # internal nodes (of component assemblies) 87 | internal_nodes: dict[int, Node] = {} 88 | components = self.components.values() 89 | for component in components: 90 | if ignore_by_tag and component.tags & ignore_by_tag: 91 | continue 92 | internal_nodes.update(component.internal_nodes) 93 | 94 | all_nodes: dict[int, Node] = {} 95 | all_nodes.update(primary_nodes) 96 | all_nodes.update(internal_nodes) 97 | return all_nodes 98 | 99 | def __repr__(self) -> str: 100 | """Return a string representation of the object.""" 101 | return f'~~~ Model Object: {self.name} ~~~' 102 | 103 | 104 | @dataclass(repr=False) 105 | class Model2D(Model): 106 | """ 107 | 2D Model object. 108 | 109 | A 2D model representation. 110 | 111 | Attributes: 112 | name (str): Name of the model. 113 | grid_system (GridSystem2D): Grid system for the 2D model. 114 | """ 115 | 116 | dimensionality: Literal['2D Truss', '2D Frame'] 117 | grid_system: GridSystem2D = field(default_factory=GridSystem2D) 118 | 119 | def __post_init__(self) -> None: 120 | """ 121 | Post-initialization. 122 | 123 | Raises: 124 | ValueError: If the `dimensionality` assignment is invalid. 125 | """ 126 | if self.dimensionality not in {'2D Truss', '2D Frame'}: 127 | msg = f'Dimensionality `{self.dimensionality}` is not compatible with a `Model2D` object.' 128 | raise ValueError(msg) 129 | 130 | def __repr__(self) -> str: 131 | """Return a string representation of the object.""" 132 | return f'~~~ 2D Model Object: {self.name} ~~~' 133 | 134 | 135 | @dataclass(repr=False) 136 | class Model3D(Model): 137 | """ 138 | 3D Model object. 139 | 140 | A 3D model representation. 141 | 142 | Attributes: 143 | name (str): Name of the model. 144 | grid_system (GridSystem): Grid system for the 3D model. 145 | """ 146 | 147 | dimensionality: Literal['3D Truss', '3D Frame'] 148 | grid_system: GridSystem = field(default_factory=GridSystem) 149 | 150 | def __post_init__(self) -> None: 151 | """ 152 | Post-initialization. 153 | 154 | Raises: 155 | ValueError: If the `dimensionality` assignment is invalid. 156 | """ 157 | if self.dimensionality not in {'3D Truss', '3D Frame'}: 158 | msg = f'Dimensionality `{self.dimensionality}` is not compatible with a `Model3D` object.' 
159 | raise ValueError(msg) 160 | 161 | def __repr__(self) -> str: 162 | """Return a string representation of the object.""" 163 | return f'~~~ 3D Model Object: {self.name} ~~~' 164 | -------------------------------------------------------------------------------- /src/osmg/core/uid_object.py: -------------------------------------------------------------------------------- 1 | """ 2 | UID Object. 3 | 4 | Parent class for all objects that have a unique identifier (UID). 5 | """ 6 | 7 | from dataclasses import dataclass 8 | 9 | from osmg.creators.uid import UIDGenerator 10 | 11 | 12 | @dataclass 13 | class UIDObject: 14 | """Base class for objects with a unique identifier (UID).""" 15 | 16 | uid_generator: UIDGenerator 17 | 18 | def __post_init__(self) -> None: 19 | """Post-initialization.""" 20 | self._uid = self.uid_generator.new(self) 21 | 22 | @property 23 | def uid(self) -> int: 24 | """Get the UID.""" 25 | return self._uid 26 | 27 | def __hash__(self) -> int: 28 | """Return the hash of the object based on its UID.""" 29 | return hash(self.uid) 30 | 31 | def __eq__(self, other: object) -> bool: 32 | """ 33 | Check equality based on the UID. 34 | 35 | Returns: 36 | True if it is equal, False otherwise. 37 | """ 38 | if not isinstance(other, UIDObject): 39 | return False 40 | return self.uid == other.uid 41 | -------------------------------------------------------------------------------- /src/osmg/creators/__init__.py: -------------------------------------------------------------------------------- 1 | """Component creator objects.""" 2 | -------------------------------------------------------------------------------- /src/osmg/creators/section.py: -------------------------------------------------------------------------------- 1 | """objects that create sections.""" 2 | 3 | from __future__ import annotations 4 | 5 | import importlib.resources 6 | import json 7 | from pathlib import Path 8 | from typing import TYPE_CHECKING 9 | 10 | import numpy as np 11 | from pydantic import BaseModel 12 | 13 | from osmg.geometry.mesh_shapes import w_mesh 14 | from osmg.model_objects.section import ElasticSection 15 | 16 | if TYPE_CHECKING: 17 | from osmg.creators.uid import UIDGenerator 18 | 19 | 20 | # Base class for sections 21 | class BaseSectionData(BaseModel): 22 | """Base model for section data.""" 23 | 24 | Type: str 25 | A: float 26 | Ix: float 27 | Iy: float 28 | J: float 29 | W: float 30 | 31 | 32 | # W Section model 33 | class WSectionData(BaseSectionData): 34 | """Model for W sections.""" 35 | 36 | bf: float 37 | d: float 38 | tw: float 39 | tf: float 40 | Zx: float 41 | Zy: float 42 | rx: float 43 | ry: float 44 | 45 | 46 | # HSS Section model 47 | class HSSSectionData(BaseSectionData): 48 | """Model for HSS sections.""" 49 | 50 | t: float 51 | D: float 52 | B: float 53 | 54 | 55 | # Union for supported section types 56 | SectionData = WSectionData | HSSSectionData 57 | 58 | 59 | class SectionDatabase(BaseModel): 60 | """Model for the entire section database.""" 61 | 62 | sections: dict[str, SectionData] 63 | 64 | 65 | class AISC_Database_Section_Creator: 66 | """ 67 | Create frame member sections from a predefined database. 68 | 69 | Force units are `lb` and length units are `in`. 
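# Illustrative sketch (not part of the repository files): loading an elastic
# W section through this creator. The bundled `sections.json` is used when no
# `database_path` is given; the label 'W14X90' and the lb/in modulus values
# are assumptions made only for this example.
from osmg.creators.uid import UIDGenerator
from osmg.creators.section import AISC_Database_Section_Creator

gen = UIDGenerator()
creator = AISC_Database_Section_Creator(gen)
print(creator.get_available()[:5])   # first few available W-shape labels
section = creator.load_elastic_section(
    'W14X90', e_modulus=29_000_000.0, g_modulus=11_200_000.0
)
print(section.weight_per_length())   # sec_w * 1.15 for W shapes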
70 | """ 71 | 72 | def __init__( 73 | self, uid_generator: UIDGenerator, database_path: str | None = None 74 | ) -> None: 75 | """Instantiate object.""" 76 | self.uid_generator = uid_generator 77 | self.database_path = database_path 78 | self._load_database() 79 | 80 | def _load_database(self) -> None: 81 | """Load and validate the JSON database.""" 82 | if self.database_path is None: 83 | with importlib.resources.open_text('osmg.data', 'sections.json') as f: 84 | raw_data = f.read() 85 | else: 86 | with Path(self.database_path).open('r', encoding='utf-8') as f: 87 | raw_data = f.read() 88 | 89 | json_data = json.loads(raw_data) 90 | valid_sections: dict[str, SectionData] = {} 91 | unsupported_types = [] 92 | for name, section in json_data.items(): 93 | section_type = section['Type'] 94 | if section_type == 'W': 95 | valid_sections[name] = WSectionData(**section) 96 | # elif section_type == 'HSS': 97 | # valid_sections[name] = HSSSectionData(**section) 98 | else: 99 | unsupported_types.append(section_type) 100 | if unsupported_types: 101 | print(f'Skipping unsupported section types: {set(unsupported_types)}') # noqa: T201 102 | 103 | self.section_database = SectionDatabase(sections=valid_sections) 104 | 105 | def get_available(self) -> list[str]: 106 | """ 107 | Determine which sections are available in the database. 108 | 109 | Returns: 110 | List of available sections. 111 | """ 112 | return list(self.section_database.sections.keys()) 113 | 114 | def load_elastic_section( 115 | self, section_label: str, e_modulus: float, g_modulus: float 116 | ) -> ElasticSection: 117 | """ 118 | Load a section from the database. 119 | 120 | Args: 121 | section_label: Label of the section to load. 122 | e_modulus: Elastic modulus. 123 | g_modulus: Shear modulus. 124 | 125 | Returns: 126 | ElasticSection: The section. 127 | 128 | Raises: 129 | ValueError: If the provided `section_label` is unavailable. 130 | NotImplementedError: If the section type is unsupported. 131 | """ 132 | section_data = self.section_database.sections.get(section_label) 133 | 134 | if section_data is None: 135 | msg = f'The provided `section_label` is unavailable: {section_label}.' 
136 | raise ValueError(msg) 137 | 138 | if isinstance(section_data, WSectionData): 139 | # Geometry calculations for W sections 140 | outside_shape = w_mesh( 141 | section_data.bf, 142 | section_data.d, 143 | section_data.tw, 144 | section_data.tf, 145 | section_data.A, 146 | ) 147 | bbox = outside_shape.bounding_box() 148 | z_min, y_min, z_max, y_max = bbox.flatten() 149 | snap_points = { 150 | 'centroid': np.array([0.0, 0.0]), 151 | 'top_center': np.array([0.0, -y_max]), 152 | 'top_left': np.array([-z_min, -y_max]), 153 | 'top_right': np.array([-z_max, -y_max]), 154 | 'center_left': np.array([-z_min, 0.0]), 155 | 'center_right': np.array([-z_max, 0.0]), 156 | 'bottom_center': np.array([0.0, -y_min]), 157 | 'bottom_left': np.array([-z_min, -y_min]), 158 | 'bottom_right': np.array([-z_max, -y_min]), 159 | } 160 | 161 | return ElasticSection( 162 | self.uid_generator, 163 | section_label, 164 | e_modulus, 165 | section_data.A, 166 | section_data.Iy, 167 | section_data.Ix, 168 | g_modulus, 169 | section_data.J, 170 | section_data.W / 12.00, # lb/in 171 | outside_shape, 172 | snap_points, 173 | properties=section_data, 174 | ) 175 | msg = f'Section type is unsupported: {section_data.Type}' 176 | raise NotImplementedError(msg) 177 | -------------------------------------------------------------------------------- /src/osmg/creators/uid.py: -------------------------------------------------------------------------------- 1 | """objects that create unique IDs.""" 2 | 3 | from dataclasses import dataclass 4 | from itertools import count 5 | 6 | # from osmg.model_objects.node import Node 7 | 8 | 9 | @dataclass 10 | class UIDGenerator: 11 | """Generates unique identifiers (uids) for various objects.""" 12 | 13 | def new(self, thing: object) -> int: 14 | """ 15 | Generate a new uid for an object based on its category. 16 | 17 | Arguments: 18 | thing: The object for which to generate a uid. 19 | 20 | Returns: 21 | A unique identifier for an object of the given type. 22 | 23 | Raises: 24 | ValueError: If an unknown object class is specified. 
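# Illustrative sketch (not part of the repository files): UIDs are handed out
# per object category, starting from zero. `Node` is defined in
# osmg/model_objects/node.py further below; the coordinates are arbitrary.
from osmg.creators.uid import UIDGenerator
from osmg.model_objects.node import Node

gen = UIDGenerator()
n1 = Node(gen, (0.0, 0.0))
n2 = Node(gen, (144.0, 0.0))
assert (n1.uid, n2.uid) == (0, 1)   # separate counter for the 'NODE' category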
25 | """ 26 | object_type = thing.__class__.__name__ 27 | valid_types = { 28 | 'Node': 'NODE', 29 | 'ElasticSection': 'SECTION', 30 | 'ComponentAssembly': 'COMPONENT', 31 | 'BeamColumnAssembly': 'COMPONENT', 32 | 'BarAssembly': 'COMPONENT', 33 | 'GeomTransf': 'TRANSFORMATION', 34 | 'ElasticBeamColumn': 'ELEMENT', 35 | 'ZeroLength': 'ELEMENT', 36 | 'Bar': 'ELEMENT', 37 | 'TwoNodeLink': 'ELEMENT', 38 | 'NodeRecorder': 'RECORDER', 39 | 'DriftRecorder': 'RECORDER', 40 | 'ElementRecorder': 'RECORDER', 41 | '_TestChild': 'TESTING', 42 | 'Elastic': 'MATERIAL', 43 | 'ElasticPPGap': 'MATERIAL', 44 | 'Steel4': 'MATERIAL', 45 | 'Fatigue': 'MATERIAL', 46 | 'IMKBilin': 'MATERIAL', 47 | 'Pinching4': 'MATERIAL', 48 | 'LeadRubberX': 'ELEMENT', 49 | 'TripleFrictionPendulum': 'ELEMENT', 50 | 'Coulomb': 'FRICTIONMODEL', 51 | } 52 | 53 | if object_type not in valid_types: 54 | msg = f'Unknown object class: {object_type}' 55 | raise ValueError(msg) 56 | 57 | type_category = valid_types[object_type] 58 | if hasattr(self, type_category): 59 | res = next(getattr(self, type_category)) 60 | assert isinstance(res, int) 61 | else: 62 | setattr(self, type_category, count(0)) 63 | res = next(getattr(self, type_category)) 64 | assert isinstance(res, int) 65 | return res 66 | -------------------------------------------------------------------------------- /src/osmg/creators/zerolength.py: -------------------------------------------------------------------------------- 1 | """objects that create ZeroLength elements.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass 6 | from typing import TYPE_CHECKING 7 | 8 | from osmg.model_objects.element import ZeroLength 9 | 10 | if TYPE_CHECKING: 11 | from osmg.core.common import numpy_array 12 | from osmg.creators.material import MaterialCreator 13 | from osmg.creators.uid import UIDGenerator 14 | from osmg.model_objects.uniaxial_material import UniaxialMaterial 15 | from osmg.node import Node 16 | 17 | 18 | @dataclass 19 | class ZeroLengthCreator: 20 | """ 21 | Base class for zero-length creators. 22 | 23 | Handles direction-material assignments using MaterialCreator 24 | objects. 25 | """ 26 | 27 | uid_generator: UIDGenerator 28 | material_creators: dict[int, MaterialCreator] 29 | 30 | def generate(self) -> tuple[list[int], list[UniaxialMaterial]]: 31 | """ 32 | Generate directions and materials. 33 | 34 | Generate directions and materials using the specified material 35 | creators. 36 | 37 | Returns: 38 | directions (list[int]): List of DOF directions. 39 | materials (list[UniaxialMaterial]): Corresponding uniaxial materials. 40 | """ 41 | directions = list(self.material_creators.keys()) 42 | materials = [ 43 | creator.generate() for creator in self.material_creators.values() 44 | ] 45 | return directions, materials 46 | 47 | def define_element( 48 | self, 49 | node_i: Node, 50 | node_j: Node, 51 | x_axis: numpy_array, 52 | y_axis: numpy_array | None, 53 | *, 54 | enable_rayleigh: bool = False, 55 | ) -> ZeroLength: 56 | """ 57 | Define a zerolength element. 58 | 59 | Returns: 60 | The added element. 
61 | """ 62 | directions, materials = self.generate() 63 | return ZeroLength( 64 | uid_generator=self.uid_generator, 65 | nodes=[node_i, node_j], 66 | materials=materials, 67 | directions=directions, 68 | vecx=x_axis, 69 | vecyp=y_axis, 70 | enable_rayleigh=enable_rayleigh, 71 | ) 72 | -------------------------------------------------------------------------------- /src/osmg/geometry/__init__.py: -------------------------------------------------------------------------------- 1 | """Code for performing geometrical calculations.""" 2 | -------------------------------------------------------------------------------- /src/osmg/geometry/line.py: -------------------------------------------------------------------------------- 1 | """Defines :obj:`~osmg.line.Line` objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass, field 6 | 7 | import numpy as np 8 | import numpy.typing as npt 9 | 10 | from osmg.core import common 11 | 12 | numpy_array = npt.NDArray[np.float64] 13 | 14 | 15 | @dataclass 16 | class Line: 17 | """ 18 | Finite-length line segment object. 19 | 20 | Used internally whenever operations involving lines are required. 21 | 22 | Attributes: 23 | ---------- 24 | tag: line tag. 25 | start: starting point. 26 | end: end point. 27 | 28 | """ 29 | 30 | tag: str 31 | start: numpy_array = field(repr=False) 32 | end: numpy_array = field(repr=False) 33 | 34 | def __repr__(self) -> str: 35 | """ 36 | Get string representation. 37 | 38 | Returns: 39 | The string representation of the object. 40 | """ 41 | res = '' 42 | res += 'Line object\n' 43 | res += f' start: {self.start}\n' 44 | res += f' end: {self.end}\n' 45 | return res 46 | 47 | def length(self) -> float: 48 | """ 49 | Obtain the length of the line. 50 | 51 | Example: 52 | >>> from osmg.line import Line 53 | >>> l1 = Line('l1', np.array([0, 0]), np.array([2, 2])) 54 | >>> l1.length() == 2.8284271247461903 55 | True 56 | 57 | Returns: 58 | The string representation of the object. 59 | """ 60 | return float(np.linalg.norm(self.end - self.start)) 61 | 62 | def direction(self) -> numpy_array: 63 | """ 64 | Line direction. 65 | 66 | Returns a unit vector pointing from the start to the end of 67 | the line. 68 | 69 | Example: 70 | >>> from osmg.line import Line 71 | >>> l1 = Line('l1', np.array([0, 0]), np.array([2, 2])) 72 | >>> l1.direction() 73 | array([0.70710678, 0.70710678]) 74 | 75 | Returns: 76 | The string representation of the object. 77 | """ 78 | return (self.end - self.start) / self.length() 79 | 80 | def intersect(self, other: Line) -> numpy_array | None: 81 | """ 82 | Intersection point. 83 | 84 | Calculates the intersection point of this line with another 85 | line. Returns None if the lines don't intersect. Note: 'line' 86 | is actually a finite-length line segment. 87 | 88 | Parameters 89 | ---------- 90 | other: the other line 91 | 92 | Example: 93 | >>> from osmg.line import Line 94 | >>> l1 = Line('l1', np.array([0, 0]), np.array([2, 2])) 95 | >>> l2 = Line('l2', np.array([1, 0]), np.array([1, 3])) 96 | >>> l1.intersect(l2) 97 | array([1., 1.]) 98 | 99 | Returns: 100 | The intersection point if it exists. 
101 | """ 102 | ra_dir = self.direction() 103 | rb_dir = other.direction() 104 | mat: numpy_array = np.array( 105 | [[ra_dir[0], -rb_dir[0]], [ra_dir[1], -rb_dir[1]]] 106 | ) 107 | if np.abs(np.linalg.det(mat)) <= common.EPSILON: 108 | # The lines are parallel 109 | # in this case, we check if they have 110 | # a common starting or ending point 111 | # (we ignore the case of a common segment, 112 | # as it has no practical use for our purposes). 113 | if ( 114 | np.linalg.norm(self.start - other.start) <= common.EPSILON 115 | or np.linalg.norm(self.start - other.end) <= common.EPSILON 116 | ): 117 | return self.start 118 | if ( 119 | np.linalg.norm(self.end - other.start) <= common.EPSILON 120 | or np.linalg.norm(self.end - other.end) <= common.EPSILON 121 | ): 122 | return self.end 123 | return None 124 | # Get the origins 125 | ra_ori = self.start 126 | rb_ori = other.start 127 | # System left-hand-side 128 | bvec: numpy_array = np.array( 129 | [ 130 | [rb_ori[0] - ra_ori[0]], 131 | [rb_ori[1] - ra_ori[1]], 132 | ] 133 | ) 134 | # Solve to get u and v in a vector 135 | uvvec = np.linalg.solve(mat, bvec) 136 | # Terminate if the intersection point 137 | # does not lie on both lines 138 | if uvvec[0] < 0 - common.EPSILON: 139 | return None 140 | if uvvec[1] < 0 - common.EPSILON: 141 | return None 142 | if uvvec[0] > self.length() + common.EPSILON: 143 | return None 144 | if uvvec[1] > other.length() + common.EPSILON: 145 | return None 146 | # Otherwise the point is valid 147 | point = ra_ori + ra_dir * uvvec[0] 148 | return np.array([point[0], point[1]]) 149 | 150 | def intersects_pt(self, point: numpy_array) -> bool: 151 | """ 152 | Check whether the given point pt lies on the line. 153 | 154 | Parameters 155 | ---------- 156 | point: a point 157 | 158 | Returns: True if the point lies on the line, False otherwise 159 | 160 | Example: 161 | >>> from osmg.line import Line 162 | >>> l = Line('my_line', np.array([0, 0]), np.array([1, 1])) 163 | >>> l.intersects_pt(np.array([0.5, 0.5])) 164 | True 165 | >>> l.intersects_pt(np.array([0, 0])) 166 | True 167 | >>> l.intersects_pt(np.array([1, 1])) 168 | True 169 | >>> l.intersects_pt(np.array([2, 2])) 170 | False 171 | 172 | Returns: 173 | Whether the given point pt lies on the line. 174 | 175 | Raises: 176 | ValueError: If the line has zero length. 177 | """ 178 | r_a = self.end - self.start 179 | norm2 = np.dot(r_a, r_a) 180 | if np.abs(norm2) < common.EPSILON: 181 | msg = 'Line has zero length.' 182 | raise ValueError(msg) 183 | r_b = point - self.start 184 | 185 | r_a_3d = np.append(r_a, 0) 186 | r_b_3d = np.append(r_b, 0) 187 | cross = np.linalg.norm(np.cross(r_a_3d, r_b_3d)) 188 | 189 | dot_normalized = np.dot(r_a, r_b) / norm2 # type: ignore 190 | if cross < common.EPSILON: 191 | res = bool(0.00 <= dot_normalized <= 1.00) 192 | else: 193 | res = False 194 | return res 195 | 196 | def point_distance(self, point: numpy_array) -> float | None: 197 | """ 198 | Minimum distance. 199 | 200 | Calculate the minimum distance between the line segment and a 201 | point. If the point falls on the line but is outside of the 202 | line segment, returns None. 203 | 204 | Parameters: 205 | point: the point 206 | 207 | Returns: 208 | The minimum distance. 209 | 210 | Example: 211 | >>> line = Line(tag='line', 212 | ... start=np.array([1, 1]), 213 | ... 
end=np.array([3, 3])) 214 | >>> point = np.array([4, 2]) 215 | >>> line.point_distance(point) 216 | 1.4142135623730951 217 | >>> point = np.array([2, 2]) 218 | >>> line.point_distance(point) 219 | 0.0 220 | >>> point = np.array([0, 0]) 221 | >>> line.point_distance(point) 222 | 223 | >>> point = np.array([4, 4]) 224 | >>> line.point_distance(point) 225 | 226 | """ 227 | r_a = self.end - self.start 228 | r_b = point - self.start 229 | proj_point = (r_b @ r_a) / (r_a @ r_a) * r_a 230 | if self.intersects_pt(proj_point + self.start): 231 | res: float | None = float(np.linalg.norm(r_b - proj_point)) 232 | else: 233 | res = None 234 | return res 235 | 236 | def project(self, point: numpy_array) -> numpy_array | None: 237 | """ 238 | Projection. 239 | 240 | Calculates the projection of a point on the line. 241 | If the projection falls on the line segment, it returns the 242 | projected point, otherwise it returns None. 243 | 244 | Arguments: 245 | point: the point's coordinates 246 | 247 | Example: 248 | >>> line = Line('test', np.array([0, 0]), np.array([10, 0])) 249 | >>> line.project(np.array([5, 0])) 250 | array([5., 0.]) 251 | >>> line.project(np.array([5, 5])) 252 | array([5., 0.]) 253 | >>> line.project(np.array([-5, 5])) 254 | 255 | >>> line.project(np.array([15, 5])) 256 | 257 | Returns: 258 | The projection point if it exists. 259 | """ 260 | r_a = self.end - self.start 261 | r_b = point - self.start 262 | proj_point: numpy_array = (r_b @ r_a) / (r_a @ r_a) * r_a + self.start 263 | if self.intersects_pt(proj_point): 264 | return proj_point 265 | return None 266 | -------------------------------------------------------------------------------- /src/osmg/geometry/mesh_shapes.py: -------------------------------------------------------------------------------- 1 | """Generates meshes for preconfigured sections.""" 2 | 3 | from __future__ import annotations 4 | 5 | import numpy as np 6 | import numpy.typing as npt 7 | 8 | from osmg.geometry.mesh import ( 9 | Edge, 10 | Mesh, 11 | Vertex, 12 | define_halfedges, 13 | obtain_closed_loops, 14 | orient_loops, 15 | sanity_checks, 16 | ) 17 | 18 | numpy_array = npt.NDArray[np.float64] 19 | 20 | 21 | def generate(edges: list[Edge]) -> Mesh: 22 | """ 23 | Generate a mesh from the given edges. 24 | 25 | Returns: 26 | The generated mesh. 27 | """ 28 | halfedges = define_halfedges(edges) 29 | loops = obtain_closed_loops(halfedges) 30 | _, internal, trivial = orient_loops(loops) 31 | sanity_checks(internal, trivial) 32 | return Mesh(internal[0]) 33 | 34 | 35 | def define_edges(vertices: list[Vertex]) -> list[Edge]: 36 | """ 37 | Define edges from an ordered list of vertices. 38 | 39 | Returns: 40 | The defined edges. 41 | """ 42 | n_v = len(vertices) 43 | edges = [] 44 | for i in range(n_v - 1): 45 | v_i = vertices[i] 46 | v_j = vertices[i + 1] 47 | edges.append(Edge(v_i, v_j)) 48 | v_i = vertices[-1] 49 | v_j = vertices[0] 50 | edges.append(Edge(v_i, v_j)) 51 | return edges 52 | 53 | 54 | def w_mesh( 55 | sec_b: float, 56 | sec_h: float, 57 | sec_tw: float, 58 | sec_tf: float, 59 | target_area: float, 60 | ) -> Mesh: 61 | """ 62 | W-section mesh. 63 | 64 | Defines a loop of counterclockwise halfedges that form the shape 65 | of the W section with the specified parameters. The origin 66 | coincides with the centroid. 
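# Illustrative sketch (not part of the repository files): building a W-shaped
# mesh from made-up plate dimensions (not a catalog AISC shape). The target
# area is set slightly above the plain plate area so the fillet approximation
# has some material to distribute.
from osmg.geometry.mesh_shapes import w_mesh

shape = w_mesh(sec_b=10.0, sec_h=20.0, sec_tw=0.4, sec_tf=0.6, target_area=20.0)
z_min, y_min, z_max, y_max = shape.bounding_box().flatten()
print(z_min, y_min, z_max, y_max)   # -5.0 -10.0 5.0 10.0 (half width / depth)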
67 | 68 | Arguments: 69 | sec_b: total width 70 | sec_h: total height 71 | sec_tw: web thickness 72 | sec_tf: flange thickness 73 | target_area: AISC database area to determine fillets, because 74 | trying to do that using `T` doesn't work. 75 | 76 | Returns: 77 | The generated mesh. 78 | """ 79 | area_diff = target_area - (sec_b * sec_tf * 2.0 + (sec_h - 2 * sec_tf) * sec_tw) 80 | if area_diff < 0: 81 | # This happens for W14X426 82 | area_diff = 1e-4 83 | 84 | dist = np.sqrt(area_diff / (2.0**2 - np.pi)) * 0.9565 85 | # note: 0.9565 is a correction factor to account for 86 | # the fact that we approximate the arcs with 87 | # four line segments, thus putting more material in there 88 | k = (sec_b - 2.0 * dist - sec_tw) / 2.0 89 | vertices = [ 90 | Vertex((sec_b / 2.0, sec_h / 2.0)), 91 | Vertex((-sec_b / 2.0, sec_h / 2.0)), 92 | Vertex((-sec_b / 2.0, sec_h / 2.0 - sec_tf)), 93 | Vertex((-sec_b / 2.0 + k, sec_h / 2.0 - sec_tf)), 94 | Vertex( 95 | ( 96 | -sec_b / 2.0 + k + dist * np.cos(3.0 * np.pi / 8.0), 97 | sec_h / 2.0 - sec_tf - dist + dist * np.sin(3.0 * np.pi / 8.0), 98 | ) 99 | ), 100 | Vertex( 101 | ( 102 | -sec_b / 2.0 + k + dist * np.cos(1.0 * np.pi / 4.0), 103 | sec_h / 2.0 - sec_tf - dist + dist * np.sin(1.0 * np.pi / 4.0), 104 | ) 105 | ), 106 | Vertex( 107 | ( 108 | -sec_b / 2.0 + k + dist * np.cos(1.0 * np.pi / 8.0), 109 | sec_h / 2.0 - sec_tf - dist + dist * np.sin(1.0 * np.pi / 8.0), 110 | ) 111 | ), 112 | Vertex((-sec_b / 2.0 + k + dist, sec_h / 2.0 - sec_tf - dist)), 113 | Vertex((-sec_b / 2.0 + k + dist, -sec_h / 2.0 + sec_tf + dist)), 114 | Vertex( 115 | ( 116 | -sec_b / 2.0 + k + dist * np.cos(1.0 * np.pi / 8.0), 117 | -sec_h / 2.0 + sec_tf + dist - dist * np.sin(1.0 * np.pi / 8.0), 118 | ) 119 | ), 120 | Vertex( 121 | ( 122 | -sec_b / 2.0 + k + dist * np.cos(1.0 * np.pi / 4.0), 123 | -sec_h / 2.0 + sec_tf + dist - dist * np.sin(1.0 * np.pi / 4.0), 124 | ) 125 | ), 126 | Vertex( 127 | ( 128 | -sec_b / 2.0 + k + dist * np.cos(3.0 * np.pi / 8.0), 129 | -sec_h / 2.0 + sec_tf + dist - dist * np.sin(3.0 * np.pi / 8.0), 130 | ) 131 | ), 132 | Vertex((-sec_b / 2.0 + k, -sec_h / 2.0 + sec_tf)), 133 | Vertex((-sec_b / 2.0, -(sec_h / 2.0 - sec_tf))), 134 | Vertex((-sec_b / 2.0, -sec_h / 2.0)), 135 | Vertex((sec_b / 2.0, -sec_h / 2.0)), 136 | Vertex((sec_b / 2.0, -(sec_h / 2 - sec_tf))), 137 | Vertex((+sec_b / 2.0 - k, -sec_h / 2.0 + sec_tf)), 138 | Vertex( 139 | ( 140 | +sec_b / 2.0 - k - dist * np.cos(3.0 * np.pi / 8.0), 141 | -sec_h / 2.0 + sec_tf + dist - dist * np.sin(3.0 * np.pi / 8.0), 142 | ) 143 | ), 144 | Vertex( 145 | ( 146 | +sec_b / 2.0 - k - dist * np.cos(1.0 * np.pi / 4.0), 147 | -sec_h / 2.0 + sec_tf + dist - dist * np.sin(1.0 * np.pi / 4.0), 148 | ) 149 | ), 150 | Vertex( 151 | ( 152 | +sec_b / 2.0 - k - dist * np.cos(1.0 * np.pi / 8.0), 153 | -sec_h / 2.0 + sec_tf + dist - dist * np.sin(1.0 * np.pi / 8.0), 154 | ) 155 | ), 156 | Vertex((+sec_b / 2.0 - k - dist, -sec_h / 2.0 + sec_tf + dist)), 157 | Vertex((+sec_b / 2.0 - k - dist, +sec_h / 2.0 - sec_tf - dist)), 158 | Vertex( 159 | ( 160 | +sec_b / 2.0 - k - dist * np.cos(1.0 * np.pi / 8.0), 161 | +sec_h / 2.0 - sec_tf - dist + dist * np.sin(1.0 * np.pi / 8.0), 162 | ) 163 | ), 164 | Vertex( 165 | ( 166 | +sec_b / 2.0 - k - dist * np.cos(1.0 * np.pi / 4.0), 167 | +sec_h / 2.0 - sec_tf - dist + dist * np.sin(1.0 * np.pi / 4.0), 168 | ) 169 | ), 170 | Vertex( 171 | ( 172 | +sec_b / 2.0 - k - dist * np.cos(3.0 * np.pi / 8.0), 173 | +sec_h / 2.0 - sec_tf - dist + dist * np.sin(3.0 * np.pi / 8.0), 174 | ) 175 | ), 
176 | Vertex((+sec_b / 2.0 - k, sec_h / 2.0 - sec_tf)), 177 | Vertex((sec_b / 2.0, sec_h / 2.0 - sec_tf)), 178 | ] 179 | edges = define_edges(vertices) 180 | return generate(edges) 181 | 182 | 183 | def rect_mesh(dim_b: float, dim_h: float) -> Mesh: 184 | """ 185 | Rectangular mesh. 186 | 187 | Defines a loop of counterclockwise halfedges 188 | that form the shape of the rectangular section with 189 | the specified parameters. 190 | The origin coincides with the centroid. 191 | 192 | Arguments: 193 | dim_b: total width 194 | dim_h: total height 195 | 196 | Returns: 197 | The generated mesh. 198 | """ 199 | vertices = [ 200 | Vertex((dim_b / 2.0, dim_h / 2.0)), 201 | Vertex((-dim_b / 2.0, dim_h / 2.0)), 202 | Vertex((-dim_b / 2.0, -dim_h / 2.0)), 203 | Vertex((dim_b / 2.0, -dim_h / 2.0)), 204 | ] 205 | edges = define_edges(vertices) 206 | return generate(edges) 207 | 208 | 209 | def circ_mesh(dim_d: float) -> Mesh: 210 | """ 211 | Circular mesh. 212 | 213 | Defines a loop of counterclockwise halfedges 214 | that form the shape of the circular section with 215 | the specified parameters. 216 | The origin coincides with the centroid. 217 | 218 | Arguments: 219 | dim_d: total diameter 220 | 221 | Returns: 222 | The generated mesh. 223 | """ 224 | radius = dim_d / 2.0 225 | num_vertices = 32 # Number of vertices on the circumference 226 | 227 | angle_increment = 2 * np.pi / num_vertices 228 | 229 | vertices = [] 230 | for i in range(num_vertices): 231 | angle = i * angle_increment 232 | vertices.append(Vertex((radius * np.cos(angle), radius * np.sin(angle)))) 233 | 234 | edges = define_edges(vertices) 235 | return generate(edges) 236 | 237 | 238 | def generic_snap_points(mesh: Mesh) -> dict[str, numpy_array]: 239 | """ 240 | Generate generic snap points for a section object. 241 | 242 | Returns: 243 | The snap points. 244 | """ 245 | bbox = mesh.bounding_box() 246 | z_min, y_min, z_max, y_max = bbox.flatten() 247 | snap_points: dict[str, numpy_array] = {} 248 | snap_points['centroid'] = -np.array([0.0, 0.0]) 249 | snap_points['top_center'] = -np.array([0.0, y_max]) 250 | snap_points['top_left'] = -np.array([z_min, y_max]) 251 | snap_points['top_right'] = -np.array([z_max, y_max]) 252 | snap_points['center_left'] = -np.array([z_min, 0.0]) 253 | snap_points['center_right'] = -np.array([z_max, 0.0]) 254 | snap_points['bottom_center'] = -np.array([0.0, y_min]) 255 | snap_points['bottom_left'] = -np.array([z_min, y_min]) 256 | snap_points['bottom_right'] = -np.array([z_max, y_min]) 257 | return snap_points 258 | -------------------------------------------------------------------------------- /src/osmg/geometry/transformations.py: -------------------------------------------------------------------------------- 1 | """Coordinate transformation operations.""" 2 | 3 | from __future__ import annotations 4 | 5 | import numpy as np 6 | 7 | from osmg.core import common 8 | from osmg.core.common import THREE_DIMENSIONAL, TWO_DIMENSIONAL, numpy_array 9 | 10 | 11 | def rotation_matrix_2d(ang: float) -> numpy_array: 12 | """ 13 | Obtain a 2D transformation matrix. 14 | 15 | Parameters 16 | ---------- 17 | ang: Angle in radians to rotate the matrix by. 18 | 19 | Returns: 20 | ------- 21 | A 2x2 transformation matrix. 22 | 23 | Example: 24 | >>> rotation_matrix_2d(np.pi / 2) 25 | array([[ 6.123234e-17, -1.000000e+00], 26 | [ 1.000000e+00, 6.123234e-17]]) 27 | 28 | Raises: 29 | ------ 30 | TypeError: If `ang` is not a float. 
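# Illustrative sketch (not part of the repository files): the 2D rotation
# matrix rotates vectors counterclockwise, and the angle must be a Python
# float given in radians.
import numpy as np

from osmg.geometry.transformations import rotation_matrix_2d

vec = np.array([1.0, 0.0])
print(rotation_matrix_2d(np.pi / 2.0) @ vec)   # approximately [0. 1.]
# rotation_matrix_2d(90) raises TypeError: the argument is radians as a float,
# not degrees or an int.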
31 | 32 | """ 33 | if not isinstance(ang, float): 34 | msg = 'ang parameter should be a float.' 35 | raise TypeError(msg) 36 | return np.array([[np.cos(ang), -np.sin(ang)], [np.sin(ang), np.cos(ang)]]) 37 | 38 | 39 | def rotation_matrix_3d(axis: numpy_array, theta: float) -> numpy_array: 40 | """ 41 | 3D rotation matrix. 42 | 43 | Returns the rotation matrix associated with counterclockwise 44 | rotation about the given axis by theta radians. 45 | 46 | Parameters 47 | ---------- 48 | axis: 3D vector representing the axis of rotation. 49 | theta: Angle of rotation in radians. 50 | 51 | Returns: 52 | ------- 53 | 3x3 transformation matrix representing the rotation. 54 | 55 | Example: 56 | >>> # this is how to run that function: 57 | >>> res = rotation_matrix_3d(np.array([1, 0, 0]), np.pi/2) 58 | >>> # this is the expected result: 59 | >>> expected_res = np.array( 60 | ... [[ 1.00000000e+00, 0.00000000e+00, -0.00000000e+00], 61 | ... [-0.00000000e+00, 2.22044605e-16, -1.00000000e+00], 62 | ... [ 0.00000000e+00, 1.00000000e+00, 2.22044605e-16]]) 63 | >>> assert np.allclose(res, expected_res) 64 | 65 | """ 66 | v_a = np.cos(theta / 2.0) 67 | v_b, v_c, v_d = -axis * np.sin(theta / 2.0) 68 | v_aa, v_bb, v_cc, v_dd = v_a * v_a, v_b * v_b, v_c * v_c, v_d * v_d 69 | v_bc, v_ad, v_ac, v_ab, v_bd, v_cd = ( 70 | v_b * v_c, 71 | v_a * v_d, 72 | v_a * v_c, 73 | v_a * v_b, 74 | v_b * v_d, 75 | v_c * v_d, 76 | ) 77 | return np.array( 78 | [ 79 | [v_aa + v_bb - v_cc - v_dd, 2 * (v_bc + v_ad), 2 * (v_bd - v_ac)], 80 | [2 * (v_bc - v_ad), v_aa + v_cc - v_bb - v_dd, 2 * (v_cd + v_ab)], 81 | [2 * (v_bd + v_ac), 2 * (v_cd - v_ab), v_aa + v_dd - v_bb - v_cc], 82 | ] 83 | ) 84 | 85 | 86 | def transformation_matrix( 87 | vec_x: numpy_array, vec_y: numpy_array, vec_z: numpy_array 88 | ) -> numpy_array: 89 | """ 90 | Obtain a transformation matrix. 91 | 92 | Returns a transformation matrix that transforms points from 93 | the coordinate system in which the x, y and z axes are expressed, 94 | to the local coordinate system defined by them. 95 | 96 | Arguments: 97 | vec_x: Local x axis expressed in the global system 98 | vec_y: (similar) 99 | vec_z: (similar) 100 | 101 | Returns: 102 | ------- 103 | global to local transformation matrix. 104 | 105 | Note: For orthogonal axes, transpose to obtain the inverse transform. 106 | 107 | Example: 108 | >>> # this is how to run that function: 109 | >>> res = transformation_matrix( 110 | ... np.array([1., 0., 0.]), 111 | ... np.array([0., 1., 0.]), 112 | ... np.array([0., 0., 1.])) 113 | >>> expected_result = np.array(( 114 | ... [[1., 0., 0.], 115 | ... [0., 1., 0.], 116 | ... [0., 0., 1.]])) 117 | >>> assert np.allclose(res, expected_result) 118 | >>> res = transformation_matrix( 119 | ... np.array([1., 0., 0.]), 120 | ... np.array([0., 0., 1.]), 121 | ... np.array([0., 1., 0.])) 122 | >>> expected_result = np.array(( 123 | ... [[1., 0., 0.], 124 | ... [0., 0., 1.], 125 | ... [0., 1., 0.]])) 126 | >>> assert np.allclose(res, expected_result) 127 | 128 | """ 129 | tr_global_to_local: numpy_array = np.vstack((vec_x, vec_y, vec_z)) 130 | return tr_global_to_local 131 | 132 | 133 | def transformation_matrix_2d(vec_x: numpy_array, vec_y: numpy_array) -> numpy_array: 134 | """ 135 | Obtain a transformation matrix in 2D. 136 | 137 | Returns a transformation matrix that transforms points from 138 | the coordinate system in which the x and y axes are expressed, 139 | to the local coordinate system defined by them. 
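# Illustrative sketch (not part of the repository files): expressing a global
# point in a local system whose x axis is rotated 45 degrees counterclockwise.
import numpy as np

from osmg.geometry.transformations import transformation_matrix_2d

half_sqrt2 = np.sqrt(2.0) / 2.0
x_local = np.array([half_sqrt2, half_sqrt2])    # local x axis in global coords
y_local = np.array([-half_sqrt2, half_sqrt2])   # local y axis, 90 deg ccw
t_mat = transformation_matrix_2d(x_local, y_local)
print(t_mat @ np.array([1.0, 1.0]))   # approximately [1.414 0.]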
140 | 141 | Arguments: 142 | vec_x: Local x axis expressed in the global system 143 | vec_y: (similar) 144 | 145 | Returns: 146 | ------- 147 | global to local transformation matrix. 148 | """ 149 | tr_global_to_local: numpy_array = np.vstack((vec_x, vec_y)) 150 | return tr_global_to_local 151 | 152 | 153 | def local_axes_from_points_and_angle( 154 | point_i: numpy_array, point_j: numpy_array, ang: float 155 | ) -> ( 156 | tuple[numpy_array, numpy_array, numpy_array] 157 | | tuple[numpy_array, None, numpy_array] 158 | ): 159 | """ 160 | Calculate local axes from two points and a rotation angle. 161 | 162 | This function computes the local coordinate system for a linear element 163 | defined by a start and end point, optionally applying a rotation angle 164 | around the local x-axis. It supports both 2D and 3D coordinate systems. 165 | In 2D, the y-axis is omitted, and the local coordinate system is defined 166 | in the XZ plane, with the Y axis assumed to point out of the screen. In 3D, 167 | the full local coordinate system is calculated. 168 | 169 | Args: 170 | point_i (numpy_array): The start point of the element. 171 | point_j (numpy_array): The end point of the element. 172 | ang (float): Rotation angle in radians around the local x-axis. 173 | 174 | Returns: 175 | tuple[numpy_array, numpy_array | None, numpy_array]: A tuple containing: 176 | - The local x-axis (numpy_array). 177 | - The local y-axis (numpy_array) or None (in 2D). 178 | - The local z-axis (numpy_array). 179 | 180 | Raises: 181 | ValueError: If the input points do not have the same dimension. 182 | ValueError: If a vertical element in 3D is defined upside down. 183 | ValueError: If the coordinates are not 2D or 3D. 184 | 185 | Example: 186 | For 3D: 187 | >>> point_i = np.array([0, 0, 0]) 188 | >>> point_j = np.array([1, 0, 0]) 189 | >>> ang = 0 190 | >>> local_axes_from_points_and_angle(point_i, point_j, ang) 191 | (array([1., 0., 0.]), array([0., 0., 1.]), array([ 0., -1., 0.])) 192 | 193 | For 2D: 194 | >>> point_i = np.array([0, 0]) 195 | >>> point_j = np.array([1, 0]) 196 | >>> ang = 0 197 | >>> local_axes_from_points_and_angle(point_i, point_j, ang) 198 | (array([1., 0.]), None, array([ 0., -1.])) 199 | """ 200 | if point_i.shape != point_j.shape: 201 | msg = 'Start and end points must have the same dimension.' 202 | raise ValueError(msg) 203 | 204 | # Determine 2D or 3D 205 | dim = point_i.shape[0] 206 | if dim == TWO_DIMENSIONAL: 207 | assert ang == 0.00, 'Angle should be 0.00 in 2D cases.' 208 | return _local_axes_2d(point_i, point_j) 209 | if dim == THREE_DIMENSIONAL: 210 | return _local_axes_3d(point_i, point_j, ang) 211 | msg = 'Only 2D or 3D coordinates are supported.' 212 | raise ValueError(msg) 213 | 214 | 215 | def _local_axes_2d( 216 | point_i: numpy_array, point_j: numpy_array 217 | ) -> tuple[numpy_array, None, numpy_array]: 218 | """ 219 | Compute local axes for a 2D linear element. 220 | 221 | In the 2D case, the local coordinate system is defined in the XZ plane, 222 | with the Y axis pointing out of the screen. This function calculates the 223 | local x-axis based on the direction of the line and derives the z-axis 224 | by rotating the x-axis 90 degrees counterclockwise. 225 | 226 | Args: 227 | point_i (numpy_array): The start point of the element. 228 | point_j (numpy_array): The end point of the element. 229 | ang (float): Rotation angle (not used in the 2D case). 230 | 231 | Returns: 232 | tuple[numpy_array, None, numpy_array]: A tuple containing: 233 | - The local x-axis (numpy_array). 
234 | - None for the y-axis (2D case). 235 | - The local z-axis (numpy_array). 236 | 237 | Example: 238 | >>> point_i = np.array([0, 0]) 239 | >>> point_j = np.array([1, 0]) 240 | >>> _local_axes_2d(point_i, point_j, 0) 241 | (array([1., 0.]), None, array([ 0., -1.])) 242 | """ 243 | # x-axis 244 | x_axis = point_j - point_i 245 | x_axis /= np.linalg.norm(x_axis) 246 | 247 | # z-axis: Rotate x-axis by 90 degrees 248 | z_axis = np.array([-x_axis[1], x_axis[0]]) 249 | z_axis /= np.linalg.norm(z_axis) 250 | 251 | # No y-axis in 2D 252 | return x_axis, None, z_axis 253 | 254 | 255 | def _local_axes_3d( 256 | point_i: numpy_array, point_j: numpy_array, ang: float 257 | ) -> tuple[numpy_array, numpy_array, numpy_array]: 258 | """ 259 | Compute local axes for a 3D linear element. 260 | 261 | In the 3D case, the local coordinate system consists of the x-axis, 262 | y-axis, and z-axis. The function accounts for special cases such as 263 | vertical elements and applies the specified rotation angle around 264 | the x-axis. 265 | 266 | Args: 267 | point_i (numpy_array): The start point of the element. 268 | point_j (numpy_array): The end point of the element. 269 | ang (float): Rotation angle in radians around the local x-axis. 270 | 271 | Returns: 272 | tuple[numpy_array, numpy_array, numpy_array]: A tuple containing: 273 | - The local x-axis (numpy_array). 274 | - The local y-axis (numpy_array). 275 | - The local z-axis (numpy_array). 276 | 277 | Raises: 278 | ValueError: If the element is vertical and defined upside down. 279 | 280 | Example: 281 | >>> point_i = np.array([0, 0, 0]) 282 | >>> point_j = np.array([1, 0, 0]) 283 | >>> ang = np.pi / 4 284 | >>> _local_axes_3d(point_i, point_j, ang) 285 | (array([1., 0., 0.]), array([0., 0., 1.]), array([ 0., -1., 0.])) 286 | """ 287 | # x-axis 288 | x_axis = point_j - point_i 289 | x_axis /= np.linalg.norm(x_axis) 290 | 291 | # Check if the element is vertical 292 | diff = np.abs(np.linalg.norm(x_axis - np.array([0.0, 0.0, -1.0]))) 293 | if diff < common.EPSILON: 294 | # Vertical case 295 | z_axis = np.array([np.cos(ang), np.sin(ang), 0.0]) 296 | y_axis = np.cross(z_axis, x_axis) 297 | else: 298 | # Non-vertical case 299 | diff = np.abs(np.linalg.norm(x_axis - np.array([0.0, 0.0, 1.0]))) 300 | if diff < common.EPSILON: 301 | msg = 'Vertical element defined upside down.' 302 | raise ValueError(msg) 303 | 304 | up_direction = np.array([0.0, 0.0, 1.0]) 305 | # Orthogonalize y-axis with respect to x-axis 306 | y_axis = up_direction - np.dot(up_direction, x_axis) * x_axis 307 | y_axis /= np.linalg.norm(y_axis) 308 | y_axis = np.dot(rotation_matrix_3d(x_axis, ang), y_axis) 309 | 310 | # z-axis 311 | z_axis = np.cross(x_axis, y_axis) 312 | 313 | return x_axis, y_axis, z_axis 314 | 315 | 316 | def offset_transformation_2d( 317 | offset: numpy_array, u_vec: numpy_array, r_angle: float 318 | ) -> numpy_array: 319 | """ 320 | Offset transformation. 321 | 322 | Calculate the displacement at the end of a rigid offset by 323 | specifying the displacement and rotation of the other end. 324 | 325 | A rigid offset connects two nodes and transmits forces between 326 | them, but does not allow any relative displacement or rotation 327 | between the nodes. 328 | 329 | Args: 330 | offset: 331 | Vector pointing from the node of the rigid offset where the 332 | displacement is known to the node where we want to obtain 333 | the displacement. The vector should be given in the global 334 | coordinate system. 
335 | u_vec: 336 | Displacement of the node where the displacement is known, 337 | given in the global coordinate system. 338 | r_angle: 339 | Rotation of the node where the displacement is known, given 340 | as a vector of the form [rx, ry, rz] representing the 341 | rotation around the x, y, and z axes, respectively. 342 | 343 | Returns: 344 | ------- 345 | Displacement at the other end of the rigid offset, 346 | given in the global coordinate system. 347 | 348 | """ 349 | result: numpy_array = u_vec + np.array((-offset[1], offset[0])) * r_angle 350 | return result 351 | 352 | 353 | def offset_transformation_3d( 354 | offset: numpy_array, u_vec: numpy_array, r_vec: numpy_array 355 | ) -> numpy_array: 356 | """ 357 | Offset transformation. 358 | 359 | Calculate the displacement at the end of a rigid offset by 360 | specifying the displacement and rotation of the other end. 361 | 362 | A rigid offset connects two nodes and transmits forces between 363 | them, but does not allow any relative displacement or rotation 364 | between the nodes. 365 | 366 | Arguments: 367 | offset: 368 | Vector pointing from the node of the rigid offset where the 369 | displacement is known to the node where we want to obtain 370 | the displacement. The vector should be given in the global 371 | coordinate system. 372 | u_vec: 373 | Displacement of the node where the displacement is known, 374 | given in the global coordinate system. 375 | r_vec: 376 | Rotation of the node where the displacement is known, given 377 | as a vector of the form [rx, ry, rz] representing the 378 | rotation around the x, y, and z axes, respectively. 379 | 380 | Returns: 381 | ------- 382 | Displacement at the other end of the rigid offset, 383 | given in the global coordinate system. 384 | 385 | Example: 386 | Calculate the displacement of the end of a rigid offset with a 387 | length of 1 meter, given a displacement of [4, 5, 6] and a 388 | rotation of [7, 8, 9] at the other end: 389 | 390 | >>> offset_transformation(np.array([1., 0., 0.]), 391 | ... np.array([0.01, -0.02, 0.005]), 392 | ... np.array([0.0002, -0.0003, 0.0001])) 393 | array([ 0.01 , -0.0199, 0.0053]) 394 | 395 | """ 396 | t_rigid: numpy_array = np.array( 397 | [ 398 | [0.00, +offset[2], -offset[1]], 399 | [-offset[2], 0.00, +offset[0]], 400 | [+offset[1], -offset[0], 0.00], 401 | ] 402 | ) 403 | return u_vec + t_rigid @ r_vec 404 | -------------------------------------------------------------------------------- /src/osmg/get_latest_pypi_version.py: -------------------------------------------------------------------------------- 1 | """Determine the latest version of the osmg package on PyPI.""" 2 | 3 | import argparse 4 | 5 | import requests 6 | 7 | 8 | def get_latest_version(package_name: str) -> str: 9 | """ 10 | Determine the latest version of the osmg package on PyPI. 11 | 12 | Returns: 13 | The latest version. 14 | """ 15 | url = f'https://pypi.org/pypi/{package_name}/json' 16 | 17 | response = requests.get(url, timeout=10) 18 | package_info = response.json() 19 | return str(package_info['info']['version']) 20 | 21 | 22 | if __name__ == '__main__': 23 | parser = argparse.ArgumentParser() 24 | parser.add_argument( 25 | '--package_name', default='osmg', help='Name of the package.' 
26 | ) 27 | args = parser.parse_args() 28 | 29 | package = args.package_name 30 | version = get_latest_version(package) 31 | print(version) # noqa: T201 32 | -------------------------------------------------------------------------------- /src/osmg/graphics/__init__.py: -------------------------------------------------------------------------------- 1 | """Graphics module.""" 2 | -------------------------------------------------------------------------------- /src/osmg/graphics/objects.py: -------------------------------------------------------------------------------- 1 | """Core plotting objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | from math import fabs 6 | 7 | import numpy as np 8 | import plotly.graph_objects as go 9 | 10 | from osmg.core.common import EPSILON 11 | from osmg.geometry.transformations import ( 12 | local_axes_from_points_and_angle, 13 | transformation_matrix, 14 | ) 15 | 16 | 17 | def arrow( 18 | total_length: float = 1.00, 19 | head_length: float = 0.45, 20 | head_width: float = 0.30, 21 | base_width: float = 0.05, 22 | ) -> tuple[ 23 | tuple[tuple[float, float, float], ...], 24 | tuple[tuple[int, int, int], ...], 25 | ]: 26 | """ 27 | Define the vertices and edges of an arrow. 28 | 29 | The tip of the arrow is at the axes origin, and it's pointing 30 | upward. 31 | 32 | Returns: 33 | Tuple containing the vertices and faces of the mesh in the form 34 | of tuples. 35 | """ 36 | tl = total_length 37 | hl = head_length 38 | hhw = head_width / 2.00 39 | bhw = base_width / 2.00 40 | 41 | vertices = ( 42 | (0.0, 0.0, 0.0), 43 | (-hhw, -hhw, -hl), 44 | (+hhw, -hhw, -hl), 45 | (+hhw, +hhw, -hl), 46 | (-hhw, +hhw, -hl), 47 | (-bhw, -bhw, -hl), 48 | (+bhw, -bhw, -hl), 49 | (+bhw, +bhw, -hl), 50 | (-bhw, +bhw, -hl), 51 | (-bhw, -bhw, -tl), 52 | (+bhw, -bhw, -tl), 53 | (+bhw, +bhw, -tl), 54 | (-bhw, +bhw, -tl), 55 | ) 56 | faces = ( 57 | # top part 58 | (0, 1, 2), 59 | (0, 2, 3), 60 | (0, 3, 4), 61 | (0, 4, 1), 62 | # base 63 | (5, 9, 10), 64 | (5, 10, 6), 65 | (6, 10, 11), 66 | (6, 11, 7), 67 | (7, 11, 12), 68 | (7, 12, 8), 69 | (8, 12, 9), 70 | (8, 9, 5), 71 | ) 72 | return vertices, faces 73 | 74 | 75 | def positioned_arrow( 76 | start_location: tuple[float, float, float], 77 | end_location: tuple[float, float, float], 78 | head_length: float = 0.45, 79 | head_width: float = 0.30, 80 | base_width: float = 0.05, 81 | ) -> tuple[ 82 | tuple[tuple[float, float, float], ...], 83 | tuple[tuple[int, int, int], ...], 84 | ]: 85 | """ 86 | Define vertices and faces for a positioned arrow. 87 | 88 | Returns: 89 | Vertices and faces. 90 | 91 | Raises: 92 | ValueError: If the start and end locations are coinciding. 93 | """ 94 | start_vec = np.array(start_location) 95 | end_vec = np.array(end_location) 96 | 97 | arrow_length = float(np.linalg.norm(end_vec - start_vec)) 98 | 99 | # Check for the case where no rotation is required. 100 | if ( 101 | fabs(start_location[0] - end_location[0]) < EPSILON 102 | and fabs(start_location[1] - end_location[1]) < EPSILON 103 | ): 104 | if fabs(start_location[2] - end_location[2]) < EPSILON: 105 | msg = 'Start and end locations should not be the same.' 
106 | raise ValueError(msg) 107 | if end_location[2] > start_location[2]: 108 | vertices, faces = arrow( 109 | -arrow_length, -head_length, head_width, base_width 110 | ) 111 | else: 112 | vertices, faces = arrow( 113 | arrow_length, head_length, head_width, base_width 114 | ) 115 | # translation 116 | vertices = tuple(np.array(vertices) + start_vec) 117 | else: 118 | vertices, faces = arrow(arrow_length, head_length, head_width, base_width) 119 | # Rotation and translation required. 120 | x_axis, y_axis, z_axis = local_axes_from_points_and_angle( 121 | end_vec, start_vec, ang=0.00 122 | ) 123 | assert y_axis is not None 124 | orient_to_x_axis = np.array( 125 | ((0.0, 0.0, 1.0), (0.0, 1.0, 0.0), (-1.0, 0.0, 0.0)) 126 | ) 127 | transformation_mat = transformation_matrix(x_axis, y_axis, z_axis).T 128 | vertices = tuple( 129 | (transformation_mat @ orient_to_x_axis @ np.array(vertices).T).T 130 | + start_vec 131 | ) 132 | 133 | return vertices, faces 134 | 135 | 136 | def main() -> None: 137 | """Use for testing.""" 138 | # Define the vertices of the arrow 139 | 140 | vertices, faces = positioned_arrow( 141 | start_location=(0.00, 0.00, 0.00), 142 | end_location=(0.00, 0.00, 1.00), 143 | head_length=0.2, 144 | head_width=0.2, 145 | base_width=0.05, 146 | ) 147 | 148 | x, y, z = zip(*vertices) 149 | i, j, k = zip(*faces) 150 | 151 | # Create a 3D mesh 152 | mesh = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k, opacity=0.5, color='lightblue') 153 | 154 | # Setup the layout of the scene 155 | layout = go.Layout( 156 | scene={ 157 | 'xaxis': { 158 | 'nticks': 4, 159 | 'range': [-1, 2], 160 | }, 161 | 'yaxis': { 162 | 'nticks': 4, 163 | 'range': [-1, 2], 164 | }, 165 | 'zaxis': { 166 | 'nticks': 4, 167 | 'range': [-1, 2], 168 | }, 169 | } 170 | ) 171 | 172 | # Create a figure and add the mesh 173 | fig = go.Figure(data=[mesh], layout=layout) 174 | 175 | # Show the plot 176 | fig.show() 177 | 178 | 179 | if __name__ == '__main__': 180 | main() 181 | -------------------------------------------------------------------------------- /src/osmg/graphics/visibility.py: -------------------------------------------------------------------------------- 1 | """Model Generator for OpenSees ~ visibility.""" 2 | 3 | from dataclasses import dataclass, field 4 | 5 | 6 | @dataclass 7 | class ElementVisibility: 8 | """ 9 | Element visibility object. 10 | 11 | Controls whether an element is displayed in the plots 12 | and whether it is defined in OpenSees or not 13 | """ 14 | 15 | hidden_when_extruded: bool = field(default=False) 16 | hidden_at_line_plots: bool = field(default=False) 17 | skip_opensees_definition: bool = field(default=False) 18 | hidden_basic_forces: bool = field(default=False) 19 | -------------------------------------------------------------------------------- /src/osmg/model_objects/__init__.py: -------------------------------------------------------------------------------- 1 | """OpenSees Interface Objects.""" 2 | -------------------------------------------------------------------------------- /src/osmg/model_objects/friction_model.py: -------------------------------------------------------------------------------- 1 | """Defines OpenSees frictionModel objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass, field 6 | from typing import Literal 7 | 8 | from osmg.core.uid_object import UIDObject 9 | 10 | 11 | @dataclass 12 | class FrictionModel(UIDObject): 13 | """ 14 | OpenSees frictionModel. 
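# Illustrative sketch (not part of the repository files): a Coulomb friction
# model and the argument list it contributes to an OpenSees command. The name
# and friction coefficient are made-up values.
from osmg.creators.uid import UIDGenerator
from osmg.model_objects.friction_model import Coulomb

gen = UIDGenerator()
friction = Coulomb(gen, 'slider_friction', mu=0.05)
print(friction.ops_args())   # ['Coulomb', 0, 0.05]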
15 | 16 | https://openseespydoc.readthedocs.io/en/latest/src/frictionModel.html 17 | 18 | """ 19 | 20 | name: str 21 | 22 | def ops_args(self) -> list[object]: # noqa: PLR6301 23 | """Obtain the OpenSees arguments.""" 24 | msg = 'Subclasses should implement this.' 25 | raise NotImplementedError(msg) 26 | 27 | 28 | @dataclass 29 | class Coulomb(FrictionModel): 30 | """ 31 | Coulomb. 32 | 33 | https://openseespydoc.readthedocs.io/en/latest/src/Coulomb.html 34 | 35 | """ 36 | 37 | mu: float 38 | 39 | def ops_args(self) -> list[object]: 40 | """ 41 | Obtain the OpenSees arguments. 42 | 43 | Returns: 44 | The OpenSees arguments. 45 | """ 46 | return ['Coulomb', self.uid, self.mu] 47 | -------------------------------------------------------------------------------- /src/osmg/model_objects/node.py: -------------------------------------------------------------------------------- 1 | """Defines :obj:`~osmg.model_objects.node.Node` objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass 6 | from functools import total_ordering 7 | from typing import Self 8 | 9 | from osmg.core.uid_object import UIDObject 10 | 11 | 12 | @total_ordering 13 | @dataclass() 14 | class Node(UIDObject): 15 | """ 16 | OpenSees node. 17 | 18 | https://openseespydoc.readthedocs.io/en/latest/src/node.html?highlight=node 19 | 20 | Attributes: 21 | ---------- 22 | uid_generator: Unique ID generator object. 23 | coordinates: List of node coordinates. 24 | uid: Unique ID of the node, assigned using the generator object. 25 | restraint: List of boolean values identifying whether the 26 | corresponding DOF is restrained. 27 | """ 28 | 29 | coordinates: tuple[float, ...] 30 | 31 | def __le__(self, other: Self) -> bool: 32 | """ 33 | Less or equal determination rule. 34 | 35 | Returns: 36 | The outcome of the less or equal operation. 37 | """ 38 | return self.uid <= other.uid 39 | 40 | def __repr__(self) -> str: 41 | """ 42 | Get string representation. 43 | 44 | Returns: 45 | The string representation of the object. 46 | """ 47 | res = '' 48 | res += 'Node object\n' 49 | res += f' uid: {self.uid}\n' 50 | res += f' coordinates: {self.coordinates}\n' 51 | return res 52 | 53 | def __hash__(self) -> int: 54 | """Return the hash of the object based on its UID.""" 55 | return hash(self.uid) 56 | -------------------------------------------------------------------------------- /src/osmg/model_objects/section.py: -------------------------------------------------------------------------------- 1 | """Defines :obj:`~osmg.model_objects.section.Section` objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass, field 6 | from typing import TYPE_CHECKING, Any 7 | 8 | import numpy as np 9 | 10 | from osmg.core import common 11 | from osmg.core.uid_object import UIDObject 12 | from osmg.geometry import mesh 13 | from osmg.geometry.mesh import Mesh, polygon_area 14 | 15 | if TYPE_CHECKING: 16 | from shapely.geometry import Polygon as shapely_Polygon 17 | 18 | from osmg.core.common import numpy_array 19 | from osmg.model_objects.uniaxial_material import UniaxialMaterial 20 | from osmg.physical_material import PhysicalMaterial 21 | 22 | 23 | @dataclass() 24 | class Section(UIDObject): 25 | """ 26 | Section object. 27 | 28 | The axes are defined in the same way as they are 29 | defined in OpenSees. The colors assigned to 30 | the axes for plotting follow the 31 | AutoCAD convention. 32 | 33 | .. code-block:: python 34 | 35 | y(green) 36 | ^ x(red) 37 | : . 38 | : . 39 | : . 
40 | === 41 | | -------> z (blue) 42 | === 43 | 44 | """ 45 | 46 | name: str 47 | 48 | 49 | @dataclass 50 | class ElasticSection(Section): 51 | """ 52 | Elastic Section Object. 53 | 54 | Attributes: 55 | ---------- 56 | e_mod: Young's modulus. 57 | area: Cross-sectional area. 58 | i_y: Moment of inertia for strong-axis bending. 59 | i_x: Moment of inertia for weak-axis bending. 60 | g_mod: Shear modulus. 61 | j_mod: Torsional moment of inertia. 62 | sec_w: Weight per unit length. 63 | outside_shape: Mesh defining the outside shape of the section. 64 | snap_points: Dictionary containing coordinates of `snap_points` 65 | used by component-generating methods to position components 66 | relative to existing ones. See 67 | :func:`~osmg.creators.component.beam_placement_lookup` for example. 68 | properties: Dictionary containing section properties. 69 | 70 | """ 71 | 72 | e_mod: float 73 | area: float 74 | i_y: float 75 | i_x: float 76 | g_mod: float 77 | j_mod: float 78 | sec_w: float 79 | outside_shape: Mesh | None = field(default=None, repr=False) 80 | snap_points: dict[str, numpy_array] | None = field(default=None, repr=False) 81 | properties: dict[str, Any] | None = field(default=None, repr=False) 82 | 83 | def weight_per_length(self) -> float: 84 | """ 85 | Weight per unit length. 86 | 87 | Returns the weight per length of a section. 88 | For steel W sections, it adds 15% for misc. steel and 89 | connections. 90 | 91 | Returns: 92 | The weight per unit length. 93 | """ 94 | if self.name[0] == 'W': 95 | res = self.sec_w * 1.15 # misc steel and connections 96 | else: 97 | res = self.sec_w 98 | return res 99 | 100 | def __repr__(self) -> str: 101 | """ 102 | Get string representation. 103 | 104 | Returns: 105 | The string representation of the object. 106 | """ 107 | res = '' 108 | res += 'ElasticSection object\n' 109 | res += f'name: {self.name}\n' 110 | res += f'uid: {self.uid}\n' 111 | res += 'Properties:' 112 | res += f' E: {self.e_mod}\n' 113 | res += f' A: {self.area}\n' 114 | res += f' Iy: {self.i_y}\n' 115 | res += f' Ix: {self.i_x}\n' 116 | res += f' G: {self.g_mod}\n' 117 | res += f' J: {self.j_mod}\n' 118 | res += f' W: {self.sec_w}\n' 119 | if self.outside_shape: 120 | res += 'outside_shape: specified\n' 121 | else: 122 | res += 'outside_shape: None\n' 123 | if self.snap_points: 124 | res += 'snap_points: specified\n' 125 | else: 126 | res += 'snap_points: None\n' 127 | return res 128 | 129 | 130 | @dataclass(repr=False) 131 | class SectionComponent: 132 | """ 133 | Part of a section object, having a single material. 134 | 135 | Arguments: 136 | outside_shape: Mesh defining the outside shape 137 | ops_material: OpenSees material 138 | physical_material: Physical material 139 | parent_section: Parent section. 140 | The parent section is assigned automatically by their 141 | parent section iteslf, at its creation time. 142 | 143 | """ 144 | 145 | outside_shape: Mesh 146 | holes: list[Mesh] 147 | ops_material: UniaxialMaterial 148 | physical_material: PhysicalMaterial 149 | parent_section: FiberSection | None = field(default=None) 150 | 151 | def __repr__(self) -> str: 152 | """ 153 | Get string representation. 154 | 155 | Returns: 156 | The string representation of the object. 
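# Illustrative sketch (not part of the repository files): an ElasticSection
# built directly from made-up properties in lb/in units (not a catalog
# shape). Names starting with 'W' pick up the 1.15 factor for miscellaneous
# steel and connections.
from osmg.creators.uid import UIDGenerator
from osmg.model_objects.section import ElasticSection

gen = UIDGenerator()
section = ElasticSection(
    gen,
    'W_demo',
    e_mod=29_000_000.0,
    area=20.0,
    i_y=150.0,
    i_x=1000.0,
    g_mod=11_200_000.0,
    j_mod=4.0,
    sec_w=5.0,
)
print(section.weight_per_length())   # sec_w * 1.15, since the name starts with 'W'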
157 | """ 158 | res = '' 159 | res += 'SectionComponent object\n' 160 | if self.outside_shape: 161 | res += 'outside_shape: specified\n' 162 | else: 163 | res += 'outside_shape: None\n' 164 | if self.holes: 165 | res += 'holes: exist\n' 166 | else: 167 | res += 'holes: no holes\n' 168 | res += f'ops_material: {self.ops_material.name}\n' 169 | res += f'physical_material: {self.physical_material.name}\n' 170 | return res 171 | 172 | def cut_into_tiny_little_pieces(self) -> list[shapely_Polygon]: 173 | """ 174 | Obtain data used to define fibers in OpenSees. 175 | 176 | Returns: 177 | The data. 178 | """ 179 | # if we have an AISC HSS section, we need to discretize in a 180 | # certain way 181 | assert self.parent_section 182 | sec_name = self.parent_section.name 183 | rectangular_sections = 3 184 | circular_sections = 2 185 | if 'HSS' in sec_name and len(sec_name.split('X')) == rectangular_sections: 186 | # rectangular HSS section! 187 | pieces = mesh.subdivide_hss_rect( 188 | self.parent_section.properties['Ht'], 189 | self.parent_section.properties['B'], 190 | self.parent_section.properties['tdes'], 191 | ) 192 | elif 'HSS' in sec_name and len(sec_name.split('X')) == circular_sections: 193 | # circular HSS section! 194 | pieces = mesh.subdivide_hss_circ( 195 | self.parent_section.properties['OD'], 196 | self.parent_section.properties['tdes'], 197 | ) 198 | 199 | else: 200 | # fallback: use the default rectangular mesh chopper 201 | pieces = mesh.subdivide_polygon( 202 | outside=self.outside_shape, 203 | holes=self.holes, 204 | n_x=self.parent_section.n_x, 205 | n_y=self.parent_section.n_y, 206 | ) 207 | 208 | return pieces 209 | 210 | 211 | @dataclass(repr=False) 212 | class FiberSection(Section): 213 | """ 214 | Fiber section object. 215 | 216 | Can consist of multiple materials. 217 | The primary part of the component must have the key "main". 218 | 219 | """ 220 | 221 | outside_shape: Mesh 222 | section_parts: dict[str, SectionComponent] 223 | j_mod: float 224 | snap_points: dict[str, numpy_array] 225 | properties: dict[str, Any] 226 | n_x: int 227 | n_y: int 228 | 229 | def __post_init__(self) -> None: 230 | """Post-initialization.""" 231 | for part in self.section_parts: 232 | self.section_parts[part].parent_section = self 233 | 234 | def __repr__(self) -> str: 235 | """ 236 | Get string representation. 237 | 238 | Returns: 239 | The string representation of the object. 240 | """ 241 | res = '' 242 | res += 'FiberSection object\n' 243 | for part in self.section_parts: 244 | res += part.__repr__() 245 | if self.snap_points: 246 | res += 'snap_points: specified\n' 247 | else: 248 | res += 'snap_points: None\n' 249 | res += f'n_x: {self.n_x}, n_y: {self.n_y}\n' 250 | return res 251 | 252 | def ops_args(self) -> list[object]: 253 | """ 254 | Obtain the OpenSees arguments. 255 | 256 | Returns: 257 | The OpenSees arguments. 258 | """ 259 | return [ 260 | 'Fiber', 261 | self.uid, 262 | '-GJ', 263 | self.j_mod * self.section_parts['main'].physical_material.g_mod, 264 | ] 265 | 266 | def weight_per_length(self) -> float: 267 | """ 268 | Weight per unit length. 269 | 270 | Returns the weight per length of a section. 271 | For steel W sections, it adds 15% for misc. steel and connections. 272 | 273 | Returns: 274 | The weight per unit length. 
275 | """ 276 | if self.name[0] == 'W': 277 | mult = 1.15 # misc steel and connections 278 | else: 279 | mult = 1.00 280 | res = 0.00 281 | for part in self.section_parts.values(): 282 | coordinates: numpy_array = np.array( 283 | [h.vertex.coordinates for h in part.outside_shape.halfedges] 284 | ) 285 | area = polygon_area(coordinates) 286 | for hole in part.holes: 287 | hole_coordinates: numpy_array = np.array( 288 | [h.vertex.coordinates for h in hole.halfedges] 289 | ) 290 | area -= polygon_area(hole_coordinates) 291 | density = part.physical_material.density 292 | # TODO(JVM): units 293 | res += area * density * common.G_CONST_IMPERIAL 294 | return res * mult 295 | -------------------------------------------------------------------------------- /src/osmg/model_objects/uniaxial_material.py: -------------------------------------------------------------------------------- 1 | """Defines OpenSees uniaxialMaterial interface objects.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass, field 6 | from typing import Literal 7 | 8 | from osmg.core.uid_object import UIDObject 9 | 10 | 11 | @dataclass 12 | class UniaxialMaterial(UIDObject): 13 | """ 14 | OpenSees uniaxialMaterial. 15 | 16 | https://openseespydoc.readthedocs.io/en/latest/src/uniaxialMaterial.html 17 | 18 | """ 19 | 20 | name: str 21 | 22 | def ops_args(self) -> list[object]: # noqa: PLR6301 23 | """Obtain the OpenSees arguments.""" 24 | msg = 'Subclasses should implement this.' 25 | raise NotImplementedError(msg) 26 | 27 | 28 | @dataclass 29 | class Elastic(UniaxialMaterial): 30 | """ 31 | OpenSees Elastic. 32 | 33 | https://openseespydoc.readthedocs.io/en/latest/src/ElasticUni.html 34 | 35 | """ 36 | 37 | e_mod: float 38 | 39 | def ops_args(self) -> list[object]: 40 | """ 41 | Obtain the OpenSees arguments. 42 | 43 | Returns: 44 | The OpenSees arguments. 45 | """ 46 | return ['Elastic', self.uid, self.e_mod] 47 | 48 | 49 | @dataclass 50 | class ElasticPPGap(UniaxialMaterial): 51 | """ 52 | OpenSees ElasticPPGap. 53 | 54 | https://opensees.berkeley.edu/wiki/index.php/Elastic-Perfectly_Plastic_Gap_Material 55 | 56 | """ 57 | 58 | e_mod: float 59 | fy: float 60 | gap: float 61 | eta: float = field(default=0.0) 62 | damage: Literal['noDamage', 'damage'] = field(default='noDamage') 63 | 64 | def ops_args(self) -> list[object]: 65 | """ 66 | Obtain the OpenSees arguments. 67 | 68 | Returns: 69 | The OpenSees arguments. 70 | """ 71 | return [ 72 | 'ElasticPPGap', 73 | self.uid, 74 | self.e_mod, 75 | self.fy, 76 | self.gap, 77 | self.eta, 78 | self.damage, 79 | ] 80 | 81 | 82 | @dataclass 83 | class Steel02(UniaxialMaterial): 84 | """ 85 | OpenSees Steel02. 86 | 87 | https://openseespydoc.readthedocs.io/en/latest/src/steel02.html 88 | 89 | """ 90 | 91 | Fy: float 92 | E0: float 93 | G: float 94 | b: float 95 | c_r0: float 96 | c_r1: float 97 | c_r2: float 98 | a1: float | None = field(default=None) 99 | a2: float | None = field(default=None) 100 | a3: float | None = field(default=None) 101 | a4: float | None = field(default=None) 102 | sig_init: float | None = field(default=None) 103 | 104 | def ops_args(self) -> list[object]: 105 | """ 106 | Obtain the OpenSees arguments. 107 | 108 | Returns: 109 | The OpenSees arguments.
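
        Example (a minimal illustrative sketch, not part of the package's
        documentation; ``gen`` is assumed to be a fresh ``UIDGenerator``
        and the numeric values are arbitrary)::

            from osmg.creators.uid import UIDGenerator

            gen = UIDGenerator()
            mat = Steel02(gen, 'steel', Fy=50.0, E0=29000.0, G=11200.0,
                          b=0.01, c_r0=15.0, c_r1=0.925, c_r2=0.15)
            mat.ops_args()
            # -> ['Steel02', mat.uid, 50.0, 29000.0, 0.01, 15.0, 0.925, 0.15]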
110 | """ 111 | args = [ 112 | 'Steel02', 113 | self.uid, 114 | self.Fy, 115 | self.E0, 116 | self.b, 117 | self.c_r0, 118 | self.c_r1, 119 | self.c_r2, 120 | ] 121 | if self.a1: 122 | args.append(self.a1) 123 | if self.a2: 124 | args.append(self.a2) 125 | if self.a3: 126 | args.append(self.a3) 127 | if self.a4: 128 | args.append(self.a4) 129 | if self.sig_init: 130 | args.append(self.sig_init) 131 | 132 | return args 133 | 134 | 135 | @dataclass 136 | class Steel4(UniaxialMaterial): 137 | """ 138 | OpenSees Steel4. 139 | 140 | https://openseespydoc.readthedocs.io/en/latest/src/steel4.html 141 | 142 | """ 143 | 144 | Fy: float 145 | E0: float 146 | b_k: float | None = field(default=None) 147 | R_0: float = field(default=20.00) 148 | r_1: float = field(default=0.90) 149 | r_2: float = field(default=0.15) 150 | 151 | b_kc: float | None = field(default=None) 152 | R_0c: float = field(default=20.00) 153 | r_1c: float = field(default=0.90) 154 | r_2c: float = field(default=0.15) 155 | 156 | b_i: float | None = field(default=None) 157 | b_l: float | None = field(default=None) 158 | rho_i: float | None = field(default=None) 159 | R_i: float | None = field(default=None) 160 | l_yp: float | None = field(default=None) 161 | f_u: float | None = field(default=None) 162 | R_u: float | None = field(default=None) 163 | 164 | f_uc: float | None = field(default=None) 165 | R_uc: float | None = field(default=None) 166 | b_ic: float | None = field(default=None) 167 | b_lc: float | None = field(default=None) 168 | rho_ic: float | None = field(default=None) 169 | R_ic: float | None = field(default=None) 170 | 171 | sig_init: float | None = field(default=None) 172 | cycNum: float | None = field(default=None) # noqa: N815 173 | 174 | def ops_args(self) -> list[object]: # noqa: C901 175 | """ 176 | Obtain the OpenSees arguments. 177 | 178 | Returns: 179 | The OpenSees arguments.
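
        Example (illustrative sketch; ``gen`` is assumed to be a fresh
        ``UIDGenerator`` and the values are arbitrary). With only
        kinematic hardening defined, the optional groups reduce to a
        single ``-kin`` block built from the default parameters::

            mat = Steel4(gen, 'steel4', Fy=50.0, E0=29000.0, b_k=0.003)
            mat.ops_args()
            # -> ['Steel4', mat.uid, 50.0, 29000.0, '-kin', 0.003, 20.0, 0.9, 0.15]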
180 | """ 181 | # non-symmetric behavior 182 | if self.b_kc: 183 | assert self.R_0c is not None 184 | assert self.r_1c is not None 185 | assert self.r_2c is not None 186 | assert self.b_k 187 | asym = True 188 | else: 189 | asym = False 190 | # ultimate strength limit 191 | if self.f_u: 192 | ultimate = True 193 | if asym: 194 | assert self.f_uc is not None 195 | assert self.R_uc is not None 196 | else: 197 | ultimate = False 198 | # isotropic hardening 199 | if self.b_i: 200 | iso = True 201 | assert self.b_l is not None 202 | assert self.rho_i is not None 203 | assert self.R_i is not None 204 | assert self.l_yp is not None 205 | if asym: 206 | assert self.b_lc is not None 207 | assert self.rho_ic is not None 208 | assert self.R_ic is not None 209 | else: 210 | iso = False 211 | # kinematic hardening 212 | if self.b_k: 213 | kinematic = True 214 | assert self.R_0 is not None 215 | assert self.r_1 is not None 216 | assert self.r_2 is not None 217 | if asym: 218 | assert self.R_0c is not None 219 | assert self.r_1c is not None 220 | assert self.r_2c is not None 221 | else: 222 | kinematic = False 223 | 224 | # 225 | # construct argument list 226 | # 227 | 228 | # these are required and will always be there 229 | args = ['Steel4', self.uid, self.Fy, self.E0] 230 | 231 | # optional arguments: 232 | if asym: 233 | args.extend(['-asym']) 234 | if kinematic: 235 | args.extend(['-kin', self.b_k, self.R_0, self.r_1, self.r_2]) 236 | if asym: 237 | args.extend([self.b_kc, self.R_0c, self.r_1c, self.r_2c]) 238 | if iso: 239 | args.extend( 240 | ['-iso', self.b_i, self.rho_i, self.b_l, self.R_i, self.l_yp] 241 | ) 242 | if asym: 243 | args.extend([self.b_ic, self.rho_ic, self.b_lc, self.R_ic]) 244 | if ultimate: 245 | args.extend(['-ult', self.f_u, self.R_u]) 246 | if asym: 247 | args.extend([self.f_uc, self.R_uc]) 248 | if self.sig_init: 249 | args.extend(['-init', self.sig_init]) 250 | if self.cycNum: 251 | args.extend(['-mem', self.cycNum]) 252 | 253 | return args 254 | 255 | 256 | @dataclass 257 | class Bilin(UniaxialMaterial): 258 | """ 259 | OpenSees Bilin Material. 260 | 261 | https://openseespydoc.readthedocs.io/en/latest/src/Bilin.html 262 | 263 | """ 264 | 265 | K0: float 266 | as_Plus: float # noqa: N815 267 | as_Neg: float # noqa: N815 268 | My_Plus: float 269 | My_Neg: float 270 | Lamda_S: float 271 | Lamda_C: float 272 | Lamda_A: float 273 | Lamda_K: float 274 | c_S: float # noqa: N815 275 | c_C: float # noqa: N815 276 | c_A: float # noqa: N815 277 | c_K: float # noqa: N815 278 | theta_p_Plus: float # noqa: N815 279 | theta_p_Neg: float # noqa: N815 280 | theta_pc_Plus: float # noqa: N815 281 | theta_pc_Neg: float # noqa: N815 282 | Res_Pos: float 283 | Res_Neg: float 284 | theta_u_Plus: float # noqa: N815 285 | theta_u_Neg: float # noqa: N815 286 | D_Plus: float 287 | D_Neg: float 288 | nFactor: float # noqa: N815 289 | 290 | def ops_args(self) -> list[object]: 291 | """ 292 | Obtain the OpenSees arguments. 293 | 294 | Returns: 295 | The OpenSees arguments. 
296 | """ 297 | return [ 298 | 'Bilin', 299 | self.uid, 300 | self.K0, 301 | self.as_Plus, 302 | self.as_Neg, 303 | self.My_Plus, 304 | self.My_Neg, 305 | self.Lamda_S, 306 | self.Lamda_C, 307 | self.Lamda_A, 308 | self.Lamda_K, 309 | self.c_S, 310 | self.c_C, 311 | self.c_A, 312 | self.c_K, 313 | self.theta_p_Plus, 314 | self.theta_p_Neg, 315 | self.theta_pc_Plus, 316 | self.theta_pc_Neg, 317 | self.Res_Pos, 318 | self.Res_Neg, 319 | self.theta_u_Plus, 320 | self.theta_u_Neg, 321 | self.D_Plus, 322 | self.D_Neg, 323 | self.nFactor, 324 | ] 325 | 326 | 327 | @dataclass 328 | class IMKBilin(UniaxialMaterial): 329 | """ 330 | OpenSees IMKBilin Material. 331 | 332 | https://portwooddigital.com/2019/12/08/an-update-of-the-imk-models/ 333 | 334 | """ 335 | 336 | K0: float 337 | theta_p_Plus: float # noqa: N815 338 | theta_pc_Plus: float # noqa: N815 339 | theta_u_Plus: float # noqa: N815 340 | My_Plus: float 341 | as_Plus: float # noqa: N815 342 | Res_Pos: float 343 | theta_p_Neg: float # noqa: N815 344 | theta_pc_Neg: float # noqa: N815 345 | theta_u_Neg: float # noqa: N815 346 | My_Neg: float 347 | as_Neg: float # noqa: N815 348 | Res_Neg: float 349 | Lamda_S: float 350 | Lamda_C: float 351 | Lamda_K: float 352 | c_S: float # noqa: N815 353 | c_C: float # noqa: N815 354 | c_K: float # noqa: N815 355 | D_Plus: float 356 | D_Neg: float 357 | 358 | def ops_args(self) -> list[object]: 359 | """ 360 | Obtain the OpenSees arguments. 361 | 362 | Returns: 363 | The OpenSees arguments. 364 | """ 365 | return [ 366 | 'IMKBilin', 367 | self.uid, 368 | self.K0, 369 | self.theta_p_Plus, 370 | self.theta_pc_Plus, 371 | self.theta_u_Plus, 372 | self.My_Plus, 373 | self.as_Plus, 374 | self.Res_Pos, 375 | self.theta_p_Neg, 376 | self.theta_pc_Neg, 377 | self.theta_u_Neg, 378 | self.My_Neg, 379 | self.as_Neg, 380 | self.Res_Neg, 381 | self.Lamda_S, 382 | self.Lamda_C, 383 | self.Lamda_K, 384 | self.c_S, 385 | self.c_C, 386 | self.c_K, 387 | self.D_Plus, 388 | self.D_Neg, 389 | ] 390 | 391 | 392 | @dataclass 393 | class Pinching4(UniaxialMaterial): 394 | """ 395 | OpenSees Pinching4 Material. 396 | 397 | https://openseespydoc.readthedocs.io/en/latest/src/Pinching4.html 398 | 399 | """ 400 | 401 | ePf1: float # noqa: N815 402 | ePf2: float # noqa: N815 403 | ePf3: float # noqa: N815 404 | ePf4: float # noqa: N815 405 | ePd1: float # noqa: N815 406 | ePd2: float # noqa: N815 407 | ePd3: float # noqa: N815 408 | ePd4: float # noqa: N815 409 | eNf1: float # noqa: N815 410 | eNf2: float # noqa: N815 411 | eNf3: float # noqa: N815 412 | eNf4: float # noqa: N815 413 | eNd1: float # noqa: N815 414 | eNd2: float # noqa: N815 415 | eNd3: float # noqa: N815 416 | eNd4: float # noqa: N815 417 | rDispP: float # noqa: N815 418 | fForceP: float # noqa: N815 419 | uForceP: float # noqa: N815 420 | rDispN: float # noqa: N815 421 | fFoceN: float # noqa: N815 422 | uForceN: float # noqa: N815 423 | gK1: float # noqa: N815 424 | gK2: float # noqa: N815 425 | gK3: float # noqa: N815 426 | gK4: float # noqa: N815 427 | gKLim: float # noqa: N815 428 | gD1: float # noqa: N815 429 | gD2: float # noqa: N815 430 | gD3: float # noqa: N815 431 | gD4: float # noqa: N815 432 | gDLim: float # noqa: N815 433 | gF1: float # noqa: N815 434 | gF2: float # noqa: N815 435 | gF3: float # noqa: N815 436 | gF4: float # noqa: N815 437 | gFLim: float # noqa: N815 438 | gE: float # noqa: N815 439 | dmgType: str # noqa: N815 440 | 441 | def ops_args(self) -> list[object]: 442 | """ 443 | Obtain the OpenSees arguments. 
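
        The argument list mirrors the positional order of the dataclass
        fields: the sixteen positive/negative backbone values
        (``ePf1`` through ``eNd4``), the pinching parameters, the
        ``gK``/``gD``/``gF`` degradation coefficients and their limits,
        the energy term ``gE``, and finally ``dmgType``.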
444 | 445 | Returns: 446 | The OpenSees arguments. 447 | """ 448 | return [ 449 | 'Pinching4', 450 | self.uid, 451 | self.ePf1, 452 | self.ePf2, 453 | self.ePf3, 454 | self.ePf4, 455 | self.ePd1, 456 | self.ePd2, 457 | self.ePd3, 458 | self.ePd4, 459 | self.eNf1, 460 | self.eNf2, 461 | self.eNf3, 462 | self.eNf4, 463 | self.eNd1, 464 | self.eNd2, 465 | self.eNd3, 466 | self.eNd4, 467 | self.rDispP, 468 | self.fForceP, 469 | self.uForceP, 470 | self.rDispN, 471 | self.fFoceN, 472 | self.uForceN, 473 | self.gK1, 474 | self.gK2, 475 | self.gK3, 476 | self.gK4, 477 | self.gKLim, 478 | self.gD1, 479 | self.gD2, 480 | self.gD3, 481 | self.gD4, 482 | self.gDLim, 483 | self.gF1, 484 | self.gF2, 485 | self.gF3, 486 | self.gF4, 487 | self.gFLim, 488 | self.gE, 489 | self.dmgType, 490 | ] 491 | 492 | 493 | @dataclass 494 | class Hysteretic(UniaxialMaterial): 495 | """ 496 | OpenSees Hysteretic Material. 497 | 498 | https://openseespydoc.readthedocs.io/en/latest/src/Hysteretic.html 499 | 500 | """ 501 | 502 | p1: tuple[float, float] 503 | p2: tuple[float, float] 504 | p3: tuple[float, float] 505 | n1: tuple[float, float] 506 | n2: tuple[float, float] 507 | n3: tuple[float, float] 508 | pinchX: float # noqa: N815 509 | pinchY: float # noqa: N815 510 | damage1: float 511 | damage2: float 512 | beta: float 513 | 514 | def ops_args(self) -> list[object]: 515 | """ 516 | Obtain the OpenSees arguments. 517 | 518 | Returns: 519 | The OpenSees arguments. 520 | """ 521 | return [ 522 | 'Hysteretic', 523 | self.uid, 524 | *self.p1, 525 | *self.p2, 526 | *self.p3, 527 | *self.n1, 528 | *self.n2, 529 | *self.n3, 530 | self.pinchX, 531 | self.pinchY, 532 | self.damage1, 533 | self.damage2, 534 | self.beta, 535 | ] 536 | 537 | 538 | @dataclass 539 | class Fatigue(UniaxialMaterial): 540 | """ 541 | OpenSees Fatigue Material. 542 | 543 | https://openseespydoc.readthedocs.io/en/latest/src/Fatigue.html 544 | 545 | """ 546 | 547 | predecessor: UniaxialMaterial 548 | e_mod: float = field(default=0.191) 549 | var_m: float = field(default=-0.458) 550 | var_min: float = field(default=-1.0e16) 551 | var_max: float = field(default=+1.0e16) 552 | 553 | def ops_args(self) -> list[object]: 554 | """ 555 | Obtain the OpenSees arguments. 556 | 557 | Returns: 558 | The OpenSees arguments. 559 | """ 560 | return [ 561 | 'Fatigue', 562 | self.uid, 563 | self.predecessor.uid, 564 | '-E0', 565 | self.e_mod, 566 | '-m', 567 | self.var_m, 568 | '-min', 569 | self.var_min, 570 | '-max', 571 | self.var_max, 572 | ] 573 | 574 | 575 | @dataclass 576 | class MaxStrainRange(UniaxialMaterial): 577 | """ 578 | OpenSees MaxStrainRange Material. 579 | 580 | ~not officially added yet~ 581 | 582 | """ 583 | 584 | predecessor: UniaxialMaterial 585 | msr_fracture: float 586 | min_fracture: float | None = field(default=None) 587 | max_fracture: float | None = field(default=None) 588 | tangent_ratio: float | None = field(default=None) 589 | def_coeff: float | None = field(default=None) 590 | node_tags: tuple[int, int] | None = field(default=None) 591 | elements_to_remove: list[int] | None = field(default=None) 592 | 593 | def ops_args(self) -> list[object]: 594 | """ 595 | Obtain the OpenSees arguments. 596 | 597 | Returns: 598 | The OpenSees arguments.
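
        Example (illustrative sketch; ``gen`` is assumed to be a fresh
        ``UIDGenerator`` and ``steel`` any previously defined
        ``UniaxialMaterial``). Only the optional fields that are set
        contribute their flag/value pairs::

            msr = MaxStrainRange(gen, 'fracture', predecessor=steel,
                                 msr_fracture=0.05, elements_to_remove=[101])
            msr.ops_args()
            # -> ['MaxStrainRange', msr.uid, steel.uid, 0.05, '-eleTag', 101]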
599 | """ 600 | args = [ 601 | 'MaxStrainRange', 602 | self.uid, 603 | self.predecessor.uid, 604 | self.msr_fracture, 605 | ] 606 | if self.min_fracture: 607 | args.extend(['-min', self.min_fracture]) 608 | if self.max_fracture: 609 | args.extend(['-max', self.max_fracture]) 610 | if self.tangent_ratio: 611 | args.extend(['-tangentRatio', self.tangent_ratio]) 612 | if self.def_coeff: 613 | args.extend(['-defCoeff', self.def_coeff]) 614 | if self.node_tags: 615 | args.extend(['-nodeTags', *self.node_tags]) 616 | if self.elements_to_remove: 617 | args.extend(['-eleTag', *self.elements_to_remove]) 618 | 619 | return args 620 | 621 | 622 | @dataclass 623 | class MinMax(UniaxialMaterial): 624 | """ 625 | OpenSees MinMax Material. 626 | 627 | https://openseespydoc.readthedocs.io/en/latest/src/MinMax.html 628 | 629 | """ 630 | 631 | predecessor: UniaxialMaterial 632 | min_strain: float 633 | max_strain: float 634 | 635 | def ops_args(self) -> list[object]: 636 | """ 637 | Obtain the OpenSees arguments. 638 | 639 | Returns: 640 | The OpenSees arguments. 641 | """ 642 | return [ 643 | 'MinMax', 644 | self.uid, 645 | self.predecessor.uid, 646 | '-min', 647 | self.min_strain, 648 | '-max', 649 | self.max_strain, 650 | ] 651 | -------------------------------------------------------------------------------- /src/osmg/postprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | """Postprocessing module.""" 2 | -------------------------------------------------------------------------------- /src/osmg/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | """Preprocessing module.""" 2 | -------------------------------------------------------------------------------- /src/osmg/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ioannis-vm/OpenSees_Model_Generator/3adaa456176570dc22f6cd4f621339624cb8577f/src/osmg/py.typed -------------------------------------------------------------------------------- /src/osmg/tests/__init__.py: -------------------------------------------------------------------------------- 1 | """OSMG tests.""" 2 | -------------------------------------------------------------------------------- /src/osmg/tests/analysis/__init__.py: -------------------------------------------------------------------------------- 1 | """Unit tests for the analysis modules.""" 2 | -------------------------------------------------------------------------------- /src/osmg/tests/analysis/test_load_case.py: -------------------------------------------------------------------------------- 1 | """Unit tests for load case and related functions.""" 2 | 3 | import numpy as np 4 | import pandas as pd 5 | import pytest 6 | 7 | from osmg.analysis.load_case import ( 8 | combine, 9 | combine_single, 10 | ensure_minmax_level_exists_or_add, 11 | ) 12 | 13 | 14 | class TestAddMinMaxLevel: 15 | """Tests the `ensure_minmax_level_exists_or_add` function.""" 16 | 17 | def test_ensure_minmax_level_exists_or_add_single_level(self) -> None: 18 | """Test adding 'min/max' level to a single-level MultiIndex DataFrame.""" 19 | test_df = pd.DataFrame( 20 | [[1, 2], [3, 4]], 21 | columns=pd.MultiIndex.from_tuples( 22 | [('a', 'x'), ('a', 'y')], names=['level_1', 'level_2'] 23 | ), 24 | ) 25 | result = ensure_minmax_level_exists_or_add(test_df) 26 | 27 | expected_columns = pd.MultiIndex.from_tuples( 28 | [ 29 | ('a', 'x', 'max'), 30 | ('a', 'x', 'min'), 31 | 
('a', 'y', 'max'), 32 | ('a', 'y', 'min'), 33 | ], 34 | names=['level_1', 'level_2', 'min/max'], 35 | ) 36 | assert result.columns.equals(expected_columns) 37 | assert np.allclose(result.iloc[:, :2].to_numpy(), test_df.to_numpy()) 38 | 39 | def test_ensure_minmax_level_exists_or_add_existing_level(self) -> None: 40 | """Does not alter a DataFrame with an existing 'min/max' level.""" 41 | has_minmax = pd.DataFrame( 42 | [[1, 2], [3, 4]], 43 | columns=pd.MultiIndex.from_tuples( 44 | [('a', 'x', 'max'), ('a', 'y', 'min')], 45 | names=['level_1', 'level_2', 'min/max'], 46 | ), 47 | ) 48 | result = ensure_minmax_level_exists_or_add(has_minmax) 49 | pd.testing.assert_frame_equal(result, has_minmax) 50 | 51 | 52 | class TestCombineSingle: 53 | """Tests the `combine_single` function.""" 54 | 55 | def setup_method(self) -> None: 56 | """Set up common test data.""" 57 | self.df1 = pd.DataFrame( 58 | [[1, 2], [3, 4]], 59 | columns=pd.MultiIndex.from_tuples( 60 | [('a', 'x'), ('a', 'y')], names=['level_1', 'level_2'] 61 | ), 62 | ) 63 | self.df2 = pd.DataFrame( 64 | [[5, 6], [7, 8]], 65 | columns=pd.MultiIndex.from_tuples( 66 | [('a', 'x'), ('a', 'y')], names=['level_1', 'level_2'] 67 | ), 68 | ) 69 | 70 | def test_combine_add(self) -> None: 71 | """Test the 'add' action in combine.""" 72 | result = combine_single(self.df1, self.df2, 'add') 73 | expected = pd.DataFrame( 74 | [[6, 8], [10, 12]], 75 | columns=pd.MultiIndex.from_tuples( 76 | [('a', 'x'), ('a', 'y')], 77 | names=['level_1', 'level_2'], 78 | ), 79 | ) 80 | pd.testing.assert_frame_equal(result, expected) 81 | 82 | def test_combine_envelope(self) -> None: 83 | """Test the 'envelope' action in combine.""" 84 | result = combine_single(self.df1, self.df2, 'envelope') 85 | expected = pd.DataFrame( 86 | [[5, 5, 2, 2], [7, 7, 4, 4]], 87 | columns=pd.MultiIndex.from_tuples( 88 | [ 89 | ('a', 'x', 'max'), 90 | ('a', 'x', 'min'), 91 | ('a', 'y', 'max'), 92 | ('a', 'y', 'min'), 93 | ], 94 | names=['level_1', 'level_2', 'min/max'], 95 | ), 96 | ) 97 | pd.testing.assert_frame_equal(result, expected) 98 | 99 | def test_combine_invalid_action(self) -> None: 100 | """An invalid action raises a ValueError.""" 101 | with pytest.raises( 102 | ValueError, match='Action must be one of `add` or `envelope`.' 
103 | ): 104 | combine_single(self.df1, self.df2, 'invalid') # type: ignore 105 | 106 | def test_combine_different_columns(self) -> None: 107 | """Combining DataFrames with different columns raises an error.""" 108 | df3 = pd.DataFrame( 109 | [[9], [10]], # Adjust to match the number of columns 110 | columns=pd.MultiIndex.from_tuples( 111 | [('b', 'z')], names=['level_1', 'level_2'] 112 | ), 113 | ) 114 | with pytest.raises( 115 | ValueError, match='Cannot align DataFrames with different columns' 116 | ): 117 | combine_single(self.df1, df3, 'add') 118 | 119 | 120 | class TestCombine: 121 | """Tests the `combine` function.""" 122 | 123 | def setup_method(self) -> None: 124 | """Set up common test data.""" 125 | self.df1 = pd.DataFrame( 126 | [[1, 2], [3, 4]], 127 | columns=pd.MultiIndex.from_tuples( 128 | [('a', 'x'), ('a', 'y')], names=['level_1', 'level_2'] 129 | ), 130 | ) 131 | self.df2 = pd.DataFrame( 132 | [[5, 6], [7, 8]], 133 | columns=pd.MultiIndex.from_tuples( 134 | [('a', 'x'), ('a', 'y')], names=['level_1', 'level_2'] 135 | ), 136 | ) 137 | self.df3 = pd.DataFrame( 138 | [[9, 10], [11, 12]], 139 | columns=pd.MultiIndex.from_tuples( 140 | [('a', 'x'), ('a', 'y')], names=['level_1', 'level_2'] 141 | ), 142 | ) 143 | 144 | def test_combine_add(self) -> None: 145 | """Test combining DataFrames with the 'add' action.""" 146 | result = combine([self.df1, self.df2, self.df3], 'add') 147 | expected = pd.DataFrame( 148 | [[15, 18], [21, 24]], 149 | columns=pd.MultiIndex.from_tuples( 150 | [('a', 'x'), ('a', 'y')], 151 | names=['level_1', 'level_2'], 152 | ), 153 | ) 154 | pd.testing.assert_frame_equal(result, expected) 155 | 156 | def test_combine_envelope(self) -> None: 157 | """Test combining DataFrames with the 'envelope' action.""" 158 | result = combine([self.df1, self.df2, self.df3], 'envelope') 159 | expected = pd.DataFrame( 160 | [[9, 9, 5, 2], [11, 11, 7, 4]], 161 | columns=pd.MultiIndex.from_tuples( 162 | [ 163 | ('a', 'x', 'max'), 164 | ('a', 'x', 'min'), 165 | ('a', 'y', 'max'), 166 | ('a', 'y', 'min'), 167 | ], 168 | names=['level_1', 'level_2', 'min/max'], 169 | ), 170 | ) 171 | pd.testing.assert_frame_equal(result, expected) 172 | 173 | def test_combine_insufficient_dataframes(self) -> None: 174 | """Test that combining fewer than two DataFrames raises a ValueError.""" 175 | with pytest.raises( 176 | ValueError, match='At least two DataFrames are required to combine.' 177 | ): 178 | combine([self.df1], 'add') 179 | 180 | def test_combine_different_columns(self) -> None: 181 | """Test combining DataFrames with mismatched columns raises an error.""" 182 | df4 = pd.DataFrame( 183 | [[1], [2]], 184 | columns=pd.MultiIndex.from_tuples( 185 | [('b', 'z')], names=['level_1', 'level_2'] 186 | ), 187 | ) 188 | with pytest.raises( 189 | ValueError, match='Cannot align DataFrames with different columns' 190 | ): 191 | combine([self.df1, self.df2, df4], 'add') 192 | 193 | def test_combine_invalid_action(self) -> None: 194 | """Test that an invalid action raises a ValueError.""" 195 | with pytest.raises( 196 | ValueError, match='Action must be one of `add` or `envelope`.' 
197 | ): 198 | combine([self.df1, self.df2], 'invalid') # type: ignore 199 | -------------------------------------------------------------------------------- /src/osmg/tests/analysis/test_supports.py: -------------------------------------------------------------------------------- 1 | """Unit tests for supports.""" 2 | 3 | from osmg.analysis.supports import ElasticSupport, FixedSupport 4 | 5 | 6 | def test_fixed_support_initialization() -> None: 7 | """Test initialization of FixedSupport. 8 | 9 | Ensures that FixedSupport is correctly initialized with the 10 | specified degrees of freedom restraints. 11 | """ 12 | support = FixedSupport((True, False, True)) 13 | assert isinstance(support, FixedSupport) 14 | assert support == FixedSupport((True, False, True)) 15 | 16 | 17 | def test_elastic_support_initialization() -> None: 18 | """Test initialization of ElasticSupport. 19 | 20 | Ensures that ElasticSupport is correctly initialized with the 21 | specified degrees of freedom restraints. 22 | """ 23 | support = ElasticSupport((10.0, 5.0, 0.0)) 24 | assert isinstance(support, ElasticSupport) 25 | assert support == ElasticSupport((10.0, 5.0, 0.0)) 26 | -------------------------------------------------------------------------------- /src/osmg/tests/core/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for the core modules.""" 2 | -------------------------------------------------------------------------------- /src/osmg/tests/core/test_common.py: -------------------------------------------------------------------------------- 1 | """Unit tests for common definitions.""" 2 | 3 | from collections import OrderedDict 4 | 5 | import pytest 6 | 7 | from osmg.core.common import ( 8 | ALPHA, 9 | EPSILON, 10 | methods, 11 | previous_element, 12 | print_dir, 13 | print_methods, 14 | ) 15 | 16 | 17 | class TestConstants: 18 | """Tests for constants defined in the module.""" 19 | 20 | @staticmethod 21 | def test_constants_values() -> None: 22 | """ 23 | Test the values of constants. 24 | 25 | Ensures constants have the expected values. 26 | """ 27 | assert EPSILON == 1.00e-6, 'EPSILON has an incorrect value' 28 | assert ALPHA == 1.00e8, 'ALPHA has an incorrect value' 29 | 30 | 31 | class TestMethodsFunction: 32 | """Tests for the `methods` function.""" 33 | 34 | @staticmethod 35 | def test_methods_extraction() -> None: 36 | """ 37 | Test that `methods` correctly extracts callable methods. 38 | 39 | Verifies that only non-dunder methods are returned. 40 | """ 41 | 42 | class TestClass: 43 | def method_1(self) -> None: 44 | pass 45 | 46 | def method_2(self) -> None: 47 | pass 48 | 49 | obj = TestClass() 50 | result = methods(obj) 51 | expected = ['method_1', 'method_2'] 52 | assert result == expected, f'Expected {expected}, but got {result}' 53 | 54 | 55 | class TestPrintFunctions: 56 | """Tests for `print_methods` and `print_dir` functions.""" 57 | 58 | @staticmethod 59 | def test_print_methods(capsys: pytest.CaptureFixture[str]) -> None: 60 | """ 61 | Test `print_methods` for correct output. 62 | 63 | Verifies that the correct methods are printed. 
64 | """ 65 | 66 | class TestClass: 67 | def method_1(self) -> None: 68 | pass 69 | 70 | def method_2(self) -> None: 71 | pass 72 | 73 | obj = TestClass() 74 | print_methods(obj) 75 | captured = capsys.readouterr() 76 | assert "['method_1', 'method_2']" in captured.out, 'Output did not match' 77 | 78 | @staticmethod 79 | def test_print_dir(capsys: pytest.CaptureFixture[str]) -> None: 80 | """ 81 | Test `print_dir` for correct output. 82 | 83 | Verifies that `dir` output is printed. 84 | """ 85 | 86 | class TestClass: 87 | def method_1(self) -> None: 88 | pass 89 | 90 | obj = TestClass() 91 | print_dir(obj) 92 | captured = capsys.readouterr() 93 | assert ( 94 | '__class__' in captured.out 95 | ), 'Output did not include expected attributes' 96 | 97 | 98 | class TestPreviousElementFunction: 99 | """Tests for the `previous_element` function.""" 100 | 101 | def test_previous_element_found(self) -> None: 102 | """ 103 | Test `previous_element` for finding the correct previous element. 104 | 105 | Verifies correct value is returned for a valid key. 106 | """ 107 | dct = OrderedDict([(1, 'a'), (2, 'b'), (3, 'c')]) 108 | assert ( 109 | previous_element(dct, 2) == 'a' 110 | ), 'Incorrect previous element for key 2' 111 | assert ( 112 | previous_element(dct, 3) == 'b' 113 | ), 'Incorrect previous element for key 3' 114 | 115 | def test_previous_element_not_found(self) -> None: 116 | """ 117 | Test `previous_element` when no previous element exists. 118 | 119 | Verifies `None` is returned for the first element or non-existent keys. 120 | """ 121 | dct = OrderedDict([(1, 'a'), (2, 'b'), (3, 'c')]) 122 | assert previous_element(dct, 1) is None, 'Expected None for the first key' 123 | assert ( 124 | previous_element(dct, 4) is None 125 | ), 'Expected None for a non-existent key' 126 | -------------------------------------------------------------------------------- /src/osmg/tests/core/test_gridsystem.py: -------------------------------------------------------------------------------- 1 | """Unit tests for gridsystem.""" 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from osmg.core.gridsystem import BaseGridSystem, GridSystem, GridSystem2D 7 | from osmg.geometry.line import Line 8 | 9 | 10 | class TestBaseGridSystem: 11 | """Tests for the BaseGridSystem class.""" 12 | 13 | def test_add_and_retrieve_level(self) -> None: 14 | """Test adding and retrieving a level.""" 15 | grid_system = BaseGridSystem[str]() 16 | grid_system.add_level('Ground Floor', 0.0) 17 | assert grid_system.get_level('Ground Floor').elevation() == 0.0 # type: ignore 18 | 19 | def test_retrieve_nonexistent_level(self) -> None: 20 | """Test retrieving a nonexistent level raises ValueError.""" 21 | grid_system = BaseGridSystem[str]() 22 | with pytest.raises(ValueError, match="Level 'Basement' does not exist."): 23 | grid_system.get_level('Basement').elevation() # type: ignore 24 | 25 | 26 | class TestGridSystem: 27 | """Tests for the GridSystem class.""" 28 | 29 | def test_add_and_retrieve_grid(self) -> None: 30 | """Test adding and retrieving a grid.""" 31 | grid_system = GridSystem() 32 | start = np.array([0.0, 0.0]) 33 | end = np.array([10.0, 0.0]) 34 | grid_system.add_grid('Grid A', start, end) 35 | assert isinstance(grid_system.grids['Grid A'], Line) 36 | assert np.array_equal(grid_system.grids['Grid A'].start, start) 37 | assert np.array_equal(grid_system.grids['Grid A'].end, end) 38 | 39 | def test_find_intersection(self) -> None: 40 | """Test finding the intersection of two grids.""" 41 | grid_system = GridSystem() 42 | 
grid_system.add_grid('Grid A', np.array([0.0, 0.0]), np.array([10.0, 0.0])) 43 | grid_system.add_grid('Grid B', np.array([5.0, -5.0]), np.array([5.0, 5.0])) 44 | intersection = grid_system.get_intersection_coordinates('Grid A', 'Grid B') 45 | assert intersection is not None 46 | assert np.array_equal(intersection, np.array([5.0, 0.0])) 47 | 48 | def test_intersection_with_nonexistent_grid(self) -> None: 49 | """Test finding the intersection with a nonexistent grid raises ValueError.""" 50 | grid_system = GridSystem() 51 | grid_system.add_grid('Grid A', np.array([0.0, 0.0]), np.array([10.0, 0.0])) 52 | with pytest.raises( 53 | ValueError, match="Grids 'Grid A' and 'Grid C' must exist." 54 | ): 55 | grid_system.get_intersection_coordinates('Grid A', 'Grid C') 56 | 57 | 58 | class TestGridSystem2D: 59 | """Tests for the GridSystem2D class.""" 60 | 61 | def test_add_and_retrieve_grid(self) -> None: 62 | """Test adding and retrieving a grid.""" 63 | grid_system = GridSystem2D() 64 | grid_system.add_grid('Grid X', 10.0) 65 | assert grid_system.get_grid_location('Grid X') == 10.0 66 | 67 | def test_retrieve_nonexistent_grid(self) -> None: 68 | """Test retrieving a nonexistent grid raises ValueError.""" 69 | grid_system = GridSystem2D() 70 | with pytest.raises(ValueError, match="Grid 'Grid Y' does not exist."): 71 | grid_system.get_grid_location('Grid Y') 72 | 73 | def test_add_and_retrieve_level(self) -> None: 74 | """Test adding and retrieving a level.""" 75 | grid_system = GridSystem2D() 76 | grid_system.add_level('Ground Floor', 0.0) 77 | assert grid_system.get_level('Ground Floor').elevation() == 0.0 # type: ignore 78 | 79 | def test_retrieve_nonexistent_level(self) -> None: 80 | """Test retrieving a nonexistent level raises ValueError.""" 81 | grid_system = GridSystem2D() 82 | with pytest.raises(ValueError, match="Level 'Roof' does not exist."): 83 | grid_system.get_level('Roof').elevation() # type: ignore 84 | -------------------------------------------------------------------------------- /src/osmg/tests/core/test_uid_object.py: -------------------------------------------------------------------------------- 1 | """Unit tests for UIDObject.""" 2 | 3 | from dataclasses import dataclass 4 | 5 | from osmg.core.uid_object import UIDObject 6 | from osmg.creators.uid import UIDGenerator 7 | 8 | 9 | @dataclass 10 | class _TestChild(UIDObject): 11 | """Test child class inheriting from UIDObject.""" 12 | 13 | name: str 14 | 15 | 16 | class TestUIDObject: 17 | """Unit tests for the UIDObject class.""" 18 | 19 | def setup_method(self) -> None: 20 | """Set up test environment.""" 21 | self.uid_generator = UIDGenerator() 22 | 23 | def test_uid_generation(self) -> None: 24 | """Test that a UID is generated and assigned correctly.""" 25 | child1 = _TestChild(self.uid_generator, name='child1') 26 | child2 = _TestChild(self.uid_generator, name='child2') 27 | 28 | assert child1.uid == 0, 'UID for the first object should be 0.' 29 | assert child2.uid == 1, 'UID for the second object should be 1.' 30 | 31 | def test_unique_uid(self) -> None: 32 | """Test that each object gets a unique UID.""" 33 | child1 = _TestChild(self.uid_generator, name='child1') 34 | child2 = _TestChild(self.uid_generator, name='child2') 35 | child3 = _TestChild(self.uid_generator, name='child3') 36 | 37 | uids = {child1.uid, child2.uid, child3.uid} 38 | assert len(uids) == 3, 'Each object should have a unique UID.' 
39 | 40 | def test_post_init_uid_assignment(self) -> None: 41 | """Test that UID is assigned during post-init.""" 42 | child = _TestChild(self.uid_generator, name='test_child') 43 | assert hasattr(child, 'uid'), "UIDObject should have a 'uid' attribute." 44 | assert child.uid == 0, 'UID should be correctly assigned during post-init.' 45 | -------------------------------------------------------------------------------- /src/osmg/tests/creators/__init__.py: -------------------------------------------------------------------------------- 1 | """Unit tests for Creator objects.""" 2 | -------------------------------------------------------------------------------- /src/osmg/tests/creators/test_uid.py: -------------------------------------------------------------------------------- 1 | """Unit tests for UIDGenerator.""" 2 | 3 | import pytest 4 | 5 | from osmg.creators.uid import UIDGenerator 6 | from osmg.model_objects.node import Node 7 | 8 | 9 | class TestUIDGenerator: 10 | """Tests for the UIDGenerator class.""" 11 | 12 | def setup_method(self) -> None: 13 | """Set up the UIDGenerator instance for testing.""" 14 | self.generator = UIDGenerator() 15 | 16 | def test_generate_unique_ids(self) -> None: 17 | """Test that unique IDs are generated.""" 18 | node1 = Node(self.generator, (0.0, 0.0)) 19 | node2 = Node(self.generator, (0.0, 0.0)) 20 | node3 = Node(self.generator, (0.0, 0.0)) 21 | assert node1.uid == 0 22 | assert node2.uid == 1 23 | assert node3.uid == 2 24 | 25 | def test_invalid_object_type(self) -> None: 26 | """Test behavior when an invalid object type is passed.""" 27 | 28 | class InvalidObject: 29 | pass 30 | 31 | invalid_object = InvalidObject() 32 | with pytest.raises( 33 | ValueError, match=r'Unknown object class: .*InvalidObject.*' 34 | ): 35 | self.generator.new(invalid_object) 36 | -------------------------------------------------------------------------------- /src/osmg/tests/elements/__init__.py: -------------------------------------------------------------------------------- 1 | """Unit tests for osmg elements.""" 2 | -------------------------------------------------------------------------------- /src/osmg/tests/elements/test_node.py: -------------------------------------------------------------------------------- 1 | """Unit tests for the Node class.""" 2 | 3 | from unittest.mock import MagicMock 4 | 5 | from osmg.model_objects.node import Node 6 | 7 | 8 | class TestNode: 9 | """Tests for the Node class.""" 10 | 11 | def setup_method(self) -> None: 12 | """Set up a mock UIDGenerator and other common fixtures.""" 13 | self.mock_uid_generator = MagicMock() 14 | self.mock_uid_generator.new.side_effect = [0, 1, 2] 15 | 16 | def test_node_initialization(self) -> None: 17 | """Test that the Node object initializes correctly.""" 18 | node = Node( 19 | coordinates=(0.0, 0.0, 0.0), uid_generator=self.mock_uid_generator 20 | ) 21 | 22 | assert node.coordinates == (0.0, 0.0, 0.0) 23 | assert node.uid == 0 24 | self.mock_uid_generator.new.assert_called_once_with(node) 25 | 26 | def test_node_uid_generation(self) -> None: 27 | """Test that the UIDGenerator assigns unique IDs to Nodes.""" 28 | node1 = Node( 29 | coordinates=(0.0, 0.0, 0.0), uid_generator=self.mock_uid_generator 30 | ) 31 | node2 = Node( 32 | coordinates=(1.0, 1.0, 1.0), uid_generator=self.mock_uid_generator 33 | ) 34 | node3 = Node( 35 | coordinates=(2.0, 2.0, 2.0), uid_generator=self.mock_uid_generator 36 | ) 37 | 38 | assert node1.uid == 0 39 | assert node2.uid == 1 40 | assert node3.uid == 2 41 | assert 
self.mock_uid_generator.new.call_count == 3 42 | 43 | def test_node_comparison(self) -> None: 44 | """Test the less-or-equal comparison method for Node objects.""" 45 | node1 = Node( 46 | coordinates=(0.0, 0.0, 0.0), uid_generator=self.mock_uid_generator 47 | ) 48 | node2 = Node( 49 | coordinates=(1.0, 1.0, 1.0), uid_generator=self.mock_uid_generator 50 | ) 51 | 52 | assert node1 <= node2 53 | assert not (node2 <= node1) 54 | assert node1 <= node1 # noqa: PLR0124 Reflexive property 55 | 56 | def test_node_repr(self) -> None: 57 | """Test the string representation of the Node object.""" 58 | node = Node( 59 | coordinates=(1.0, 1.0, 1.0), uid_generator=self.mock_uid_generator 60 | ) 61 | 62 | expected_repr = 'Node object\n uid: 0\n coordinates: (1.0, 1.0, 1.0)\n' 63 | assert repr(node) == expected_repr 64 | -------------------------------------------------------------------------------- /src/osmg/tests/test_a.py.inactive: -------------------------------------------------------------------------------- 1 | """Basic Test Suite.""" 2 | 3 | # import pytest 4 | import numpy as np 5 | import numpy.typing as npt 6 | 7 | from osmg import defaults 8 | from osmg.analysis.solver import PushoverAnalysis 9 | from osmg.core.load_case import LoadCase 10 | from osmg.core.model import Model 11 | from osmg.creators.component import BeamColumnCreator 12 | from osmg.creators.query import ElmQuery 13 | from osmg.creators.section import SectionCreator 14 | from osmg.elements.element import ElasticBeamColumn 15 | from osmg.elements.section import ElasticSection 16 | 17 | # from osmg.creators.zerolength import gravity_shear_tab 18 | from osmg.graphics.postprocessing_3d import show_basic_forces, show_deformed_shape 19 | from osmg.graphics.preprocessing_3d import show 20 | from osmg.preprocessing.self_weight_mass import self_mass, self_weight 21 | 22 | nparr = npt.NDArray[np.float64] 23 | 24 | 25 | def test_a() -> None: 26 | """ 27 | Basic functionality tests. 28 | 29 | Simple frame model. 30 | Imperial units. 
31 | 32 | """ 33 | mdl = Model('test_model') 34 | 35 | mcg = BeamColumnCreator(mdl, 'elastic') 36 | secg = SectionCreator(mdl) 37 | query = ElmQuery(mdl) 38 | 39 | mdl.add_level(0, 0.00) 40 | mdl.add_level(1, 15.00 * 12.00) 41 | 42 | defaults.load_default_steel(mdl) 43 | defaults.load_default_fix_release(mdl) 44 | steel_phys_mat = mdl.physical_materials.retrieve_by_attr('name', 'default steel') 45 | 46 | section_type = ElasticSection 47 | element_type = ElasticBeamColumn 48 | sec_collection = mdl.elastic_sections 49 | 50 | mdl.levels.set_active([1]) 51 | 52 | secg.load_aisc_from_database( 53 | 'W', ['W24X131'], 'default steel', 'default steel', section_type 54 | ) 55 | 56 | pt0: nparr = np.array((0.00, 0.00)) 57 | pt1: nparr = np.array((0.00, 25.00 * 12.00)) 58 | 59 | sec = sec_collection.retrieve_by_attr('name', 'W24X131') 60 | 61 | mcg.add_vertical_active( 62 | pt0[0], 63 | pt0[1], 64 | np.zeros(3), 65 | np.zeros(3), 66 | 'Linear', 67 | 1, 68 | sec, 69 | element_type, 70 | 'centroid', 71 | 2.00 * np.pi / 2.00, 72 | ) 73 | 74 | mcg.add_vertical_active( 75 | pt1[0], 76 | pt1[1], 77 | np.zeros(3), 78 | np.zeros(3), 79 | 'Linear', 80 | 1, 81 | sec, 82 | element_type, 83 | 'centroid', 84 | 2.00 * np.pi / 2.00, 85 | ) 86 | 87 | mcg.add_horizontal_active( 88 | pt0[0], 89 | pt0[1], 90 | pt1[0], 91 | pt1[1], 92 | np.array((0.0, 0.0, 0.0)), 93 | np.array((0.0, 0.0, 0.0)), 94 | 'bottom_center', 95 | 'top_center', 96 | 'Linear', 97 | 1, 98 | sec, 99 | element_type, 100 | 'top_center', 101 | method='generate_hinged_component_assembly', 102 | additional_args={ 103 | 'n_x': None, 104 | 'n_y': None, 105 | 'zerolength_gen_i': gravity_shear_tab, 106 | 'zerolength_gen_args_i': { 107 | 'consider_composite': True, 108 | 'section': sec, 109 | 'physical_material': steel_phys_mat, 110 | 'distance': 10.00, 111 | 'n_sub': 1, 112 | }, 113 | 'zerolength_gen_j': gravity_shear_tab, 114 | 'zerolength_gen_args_j': { 115 | 'consider_composite': True, 116 | 'section': sec, 117 | 'physical_material': steel_phys_mat, 118 | 'distance': 10.00, 119 | 'n_sub': 1, 120 | }, 121 | }, 122 | ) 123 | 124 | # fix base 125 | for node in mdl.levels[0].nodes.values(): 126 | node.restraint = [True] * 6 127 | 128 | testcase = LoadCase('test', mdl) 129 | self_weight(mdl, testcase) 130 | self_mass(mdl, testcase) 131 | 132 | show(mdl, testcase) 133 | 134 | control_node = query.search_node_lvl(0.00, 0.00, 1) 135 | 136 | anl = PushoverAnalysis(mdl, {testcase.name: testcase}) 137 | 138 | anl.run('y', [+1.00], control_node, 0.1, loaded_node=control_node) 139 | 140 | show_deformed_shape( 141 | anl, 142 | testcase.name, 143 | anl.results[testcase.name].n_steps_success, 144 | 0.00, 145 | extrude=True, 146 | animation=False, 147 | ) 148 | 149 | show_basic_forces(anl, testcase.name, 0, 1.00, 0.00, 0.00, 0.00, 0.00, 3) 150 | 151 | # zelms = mdl.list_of_zerolength_elements() 152 | # zelm = zelms[0].uid 153 | # res_a = anl.retrieve_release_force_defo(zelm, testcase.name) 154 | 155 | anl.run('y', [-1.00], control_node, 0.1, loaded_node=control_node) 156 | 157 | # deformed_shape(anl, anl.n_steps_success, 0.00, True) 158 | # res_b = anl.retrieve_release_force_defo(zelm, testcase.name) 159 | -------------------------------------------------------------------------------- /src/osmg/tests/test_doc_notebooks.py.inactive: -------------------------------------------------------------------------------- 1 | """ 2 | Test tutorial code. 3 | 4 | Test that the code that generates the tutorial notebooks runs without 5 | producing any errors. 
6 | 7 | """ 8 | 9 | import os 10 | 11 | 12 | def test_2_define_a_model() -> None: 13 | from docs.source.notebooks import doc_2_define_a_model # noqa: PLC0415 14 | 15 | 16 | def test_3_run_an_analysis() -> None: 17 | os.chdir('docs/source/notebooks') 18 | from docs.source.notebooks import doc_3_run_an_analysis # noqa: PLC0415 19 | -------------------------------------------------------------------------------- /src/osmg/tests/test_line.py: -------------------------------------------------------------------------------- 1 | """Unit tests for the Line class.""" 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from osmg.geometry.line import Line 7 | 8 | 9 | class TestLine: 10 | """Tests for the Line class.""" 11 | 12 | def test_length(self) -> None: 13 | """Test the length of a line.""" 14 | line = Line('l1', np.array([0, 0]), np.array([2, 2])) 15 | assert line.length() == pytest.approx(2.8284271247461903) 16 | 17 | def test_direction(self) -> None: 18 | """Test the direction vector of a line.""" 19 | line = Line('l1', np.array([0, 0]), np.array([2, 2])) 20 | np.testing.assert_allclose( 21 | line.direction(), np.array([0.70710678, 0.70710678]) 22 | ) 23 | 24 | def test_intersect(self) -> None: 25 | """Test the intersection of two lines.""" 26 | line1 = Line('l1', np.array([0, 0]), np.array([2, 2])) 27 | line2 = Line('l2', np.array([1, 0]), np.array([1, 3])) 28 | np.testing.assert_allclose(line1.intersect(line2), np.array([1.0, 1.0])) # type: ignore 29 | 30 | def test_intersect_no_intersection(self) -> None: 31 | """Test when two lines do not intersect.""" 32 | line1 = Line('l1', np.array([0, 0]), np.array([1, 1])) 33 | line2 = Line('l2', np.array([2, 2]), np.array([3, 3])) 34 | assert line1.intersect(line2) is None 35 | 36 | def test_intersects_pt(self) -> None: 37 | """Test whether a point lies on the line.""" 38 | line = Line('l1', np.array([0, 0]), np.array([1, 1])) 39 | assert line.intersects_pt(np.array([0.5, 0.5])) is True 40 | assert line.intersects_pt(np.array([0, 0])) is True 41 | assert line.intersects_pt(np.array([1, 1])) is True 42 | assert line.intersects_pt(np.array([2, 2])) is False 43 | 44 | def test_intersects_pt_zero_length(self) -> None: 45 | """Test whether intersects_pt raises an error for zero-length line.""" 46 | line = Line('l1', np.array([0, 0]), np.array([0, 0])) 47 | with pytest.raises(ValueError, match='Line has zero length.'): 48 | line.intersects_pt(np.array([0, 0])) 49 | 50 | def test_point_distance(self) -> None: 51 | """Test the distance from a point to the line.""" 52 | line = Line('l1', np.array([1, 1]), np.array([3, 3])) 53 | assert line.point_distance(np.array([4, 2])) == pytest.approx( 54 | 1.4142135623730951 55 | ) 56 | assert line.point_distance(np.array([2, 2])) == pytest.approx(0.0) 57 | assert line.point_distance(np.array([0, 0])) is None 58 | assert line.point_distance(np.array([4, 4])) is None 59 | 60 | def test_project(self) -> None: 61 | """Test the projection of a point onto the line.""" 62 | line = Line('test', np.array([0, 0]), np.array([10, 0])) 63 | np.testing.assert_allclose( 64 | line.project(np.array([5, 0])), # type: ignore 65 | np.array([5.0, 0.0]), # type: ignore 66 | ) 67 | np.testing.assert_allclose( 68 | line.project(np.array([5, 5])), # type: ignore 69 | np.array([5.0, 0.0]), # type: ignore 70 | ) 71 | assert line.project(np.array([-5, 5])) is None 72 | assert line.project(np.array([15, 5])) is None 73 | -------------------------------------------------------------------------------- /src/osmg/tests/test_mesh.py: 
-------------------------------------------------------------------------------- 1 | """Unit tests for Vertex, Edge, and Halfedge classes.""" 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from osmg.geometry.mesh import Edge, Halfedge, Vertex 7 | 8 | 9 | class TestVertex: 10 | """Tests for the Vertex class.""" 11 | 12 | def test_vertex_initialization(self) -> None: 13 | """Test that a Vertex initializes correctly.""" 14 | v = Vertex((0.0, 0.0)) 15 | assert v.coordinates == (0.0, 0.0) 16 | assert v.edges == [] 17 | assert v.halfedges == [] 18 | assert isinstance(v.uid, int) 19 | 20 | def test_vertex_equality(self) -> None: 21 | """Test equality comparisons for Vertex objects.""" 22 | v1 = Vertex((0, 0)) 23 | v2 = Vertex((1, 1)) 24 | v3 = Vertex((0, 0)) 25 | assert v1 == v1 # noqa: PLR0124 Same object 26 | assert v1 != v2 # Different vertices 27 | assert v1 != v3 # Different UIDs despite same coordinates 28 | 29 | def test_vertex_hash(self) -> None: 30 | """Test that Vertex objects have unique hash values.""" 31 | v1 = Vertex((0, 0)) 32 | v2 = Vertex((1, 1)) 33 | assert hash(v1) != hash(v2) 34 | 35 | 36 | class TestEdge: 37 | """Tests for the Edge class.""" 38 | 39 | def setup_method(self) -> None: 40 | """Set up vertices for Edge tests.""" 41 | self.v1 = Vertex((0.0, 0.0)) 42 | self.v2 = Vertex((1.0, 1.0)) 43 | self.v3 = Vertex((2.0, 2.0)) 44 | 45 | def test_edge_initialization(self) -> None: 46 | """Test that an Edge initializes correctly.""" 47 | e = Edge(self.v1, self.v2) 48 | assert e.v_i == self.v1 49 | assert e.v_j == self.v2 50 | assert e in self.v1.edges 51 | assert e in self.v2.edges 52 | 53 | def test_edge_repr(self) -> None: 54 | """Test the string representation of an Edge.""" 55 | e = Edge(self.v1, self.v2) 56 | assert repr(e) == f'(E{e.uid} @ V{self.v1.uid}, V{self.v2.uid}) ' 57 | 58 | def test_define_halfedge(self) -> None: 59 | """Test defining halfedges for an Edge.""" 60 | e = Edge(self.v1, self.v2) 61 | h1 = e.define_halfedge(self.v1) 62 | h2 = e.define_halfedge(self.v2) 63 | assert h1.vertex == self.v1 64 | assert h2.vertex == self.v2 65 | assert e.h_i == h1 66 | assert e.h_j == h2 67 | 68 | # Test ValueError if halfedge is already defined 69 | with pytest.raises(ValueError, match='Halfedge h_i already defined'): 70 | e.define_halfedge(self.v1) 71 | 72 | def test_other_vertex(self) -> None: 73 | """Test getting the other vertex of an Edge.""" 74 | e = Edge(self.v1, self.v2) 75 | assert e.other_vertex(self.v1) == self.v2 76 | assert e.other_vertex(self.v2) == self.v1 77 | 78 | # Test ValueError if vertex is not part of the Edge 79 | with pytest.raises( 80 | ValueError, match='The edge is not connected to the given vertex' 81 | ): 82 | e.other_vertex(self.v3) 83 | 84 | 85 | class TestHalfedge: 86 | """Tests for the Halfedge class.""" 87 | 88 | def setup_method(self) -> None: 89 | """Set up vertices and edges for Halfedge tests.""" 90 | self.v1 = Vertex((0.0, 0.0)) 91 | self.v2 = Vertex((1.0, 1.0)) 92 | self.edge = Edge(self.v1, self.v2) 93 | 94 | def test_halfedge_initialization(self) -> None: 95 | """Test that a Halfedge initializes correctly.""" 96 | h = Halfedge(self.v1, self.edge) 97 | assert h.vertex == self.v1 98 | assert h.edge == self.edge 99 | assert h.nxt is None 100 | 101 | def test_halfedge_repr(self) -> None: 102 | """Test the string representation of a Halfedge.""" 103 | h1 = Halfedge(self.v1, self.edge) 104 | h2 = Halfedge(self.v2, self.edge) 105 | h1.nxt = h2 106 | assert ( 107 | repr(h1) 108 | == f'(H{h1.uid} from E{h1.edge.uid} to E{h2.edge.uid} 
next H{h2.uid})' 109 | ) 110 | 111 | def test_halfedge_direction(self) -> None: 112 | """Test the direction calculation for a Halfedge.""" 113 | h = Halfedge(self.v1, self.edge) 114 | direction = h.direction() 115 | expected_direction = np.arctan2(1.0, 1.0) # From (0, 0) to (1, 1) 116 | assert direction == pytest.approx(expected_direction) 117 | -------------------------------------------------------------------------------- /src/osmg/tests/test_transformations.py: -------------------------------------------------------------------------------- 1 | """Unit tests for coordinate transformation operations.""" 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from osmg.geometry.transformations import ( 7 | local_axes_from_points_and_angle, 8 | offset_transformation_3d, 9 | rotation_matrix_2d, 10 | rotation_matrix_3d, 11 | transformation_matrix, 12 | ) 13 | 14 | 15 | class TestRotationMatrix2D: 16 | """Tests for the `rotation_matrix_2d` function.""" 17 | 18 | def test_rotation_matrix_2d(self) -> None: 19 | """Test the 2D rotation matrix.""" 20 | result = rotation_matrix_2d(np.pi / 2) 21 | expected = np.array([[0.0, -1.0], [1.0, 0.0]]) 22 | assert np.allclose(result, expected) 23 | 24 | def test_rotation_matrix_2d_invalid_input(self) -> None: 25 | """Test that a TypeError is raised for invalid input.""" 26 | with pytest.raises(TypeError, match='ang parameter should be a float.'): 27 | rotation_matrix_2d('90') # type: ignore # Invalid input type 28 | 29 | 30 | class TestRotationMatrix3D: 31 | """Tests for the `rotation_matrix_3d` function.""" 32 | 33 | def test_rotation_matrix_3d(self) -> None: 34 | """Test the 3D rotation matrix.""" 35 | axis = np.array([1, 0, 0]) 36 | theta = np.pi / 2 37 | result = rotation_matrix_3d(axis, theta) 38 | expected = np.array( 39 | [ 40 | [1.0, 0.0, 0.0], 41 | [0.0, 0.0, -1.0], 42 | [0.0, 1.0, 0.0], 43 | ] 44 | ) 45 | assert np.allclose(result, expected) 46 | 47 | def test_rotation_matrix_3d_invalid_axis(self) -> None: 48 | """Test that the function handles invalid axis input.""" 49 | axis = np.array([0, 0]) # Invalid axis (not 3D) 50 | theta = np.pi / 2 51 | with pytest.raises(ValueError, match='not enough values to unpack'): 52 | rotation_matrix_3d(axis, theta) 53 | 54 | 55 | class TestTransformationMatrix: 56 | """Tests for the `transformation_matrix` function.""" 57 | 58 | def test_transformation_matrix_identity(self) -> None: 59 | """Test the transformation matrix for identity axes.""" 60 | vec_x = np.array([1.0, 0.0, 0.0]) 61 | vec_y = np.array([0.0, 1.0, 0.0]) 62 | vec_z = np.array([0.0, 0.0, 1.0]) 63 | result = transformation_matrix(vec_x, vec_y, vec_z) 64 | expected = np.eye(3) 65 | assert np.allclose(result, expected) 66 | 67 | def test_transformation_matrix_permuted_axes(self) -> None: 68 | """Test the transformation matrix with permuted axes.""" 69 | vec_x = np.array([1.0, 0.0, 0.0]) 70 | vec_y = np.array([0.0, 0.0, 1.0]) 71 | vec_z = np.array([0.0, 1.0, 0.0]) 72 | result = transformation_matrix(vec_x, vec_y, vec_z) 73 | expected = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]) 74 | assert np.allclose(result, expected) 75 | 76 | 77 | class TestLocalAxesFromPointsAndAngle: 78 | """Tests for the `local_axes_from_points_and_angle` function.""" 79 | 80 | def test_local_axes_horizontal(self) -> None: 81 | """Test local axes for a horizontal element.""" 82 | point_i = np.array([0.0, 0.0, 0.0]) 83 | point_j = np.array([1.0, 0.0, 0.0]) 84 | ang = 0.0 85 | result = local_axes_from_points_and_angle(point_i, point_j, ang) 86 | expected_x = 
np.array([1.0, 0.0, 0.0]) 87 | expected_y = np.array([0.0, 0.0, 1.0]) 88 | expected_z = np.array([0.0, -1.0, 0.0]) 89 | assert np.allclose(result[0], expected_x) # type: ignore 90 | assert np.allclose(result[1], expected_y) # type: ignore 91 | assert np.allclose(result[2], expected_z) # type: ignore 92 | 93 | def test_local_axes_vertical(self) -> None: 94 | """Test local axes for a vertical element.""" 95 | point_i = np.array([0.0, 0.0, 0.0]) 96 | point_j = np.array([0.0, 0.0, 1.0]) 97 | ang = np.pi / 4 98 | result = local_axes_from_points_and_angle(point_j, point_i, ang) 99 | expected_x = np.array([0.0, 0.0, -1.0]) 100 | expected_y = np.array([-np.sqrt(2) / 2, np.sqrt(2) / 2, 0.0]) 101 | expected_z = np.array([np.sqrt(2) / 2, np.sqrt(2) / 2, 0.0]) 102 | assert np.allclose(result[0], expected_x) # type: ignore 103 | assert np.allclose(result[1], expected_y) # type: ignore 104 | assert np.allclose(result[2], expected_z) # type: ignore 105 | 106 | def test_local_axes_vertical_upside_down(self) -> None: 107 | """Test that a ValueError is raised for an upside-down vertical element.""" 108 | point_i = np.array([0.0, 0.0, 0.0]) 109 | point_j = np.array([0.0, 0.0, 1.0]) 110 | ang = 0.0 111 | with pytest.raises(ValueError, match='Vertical element defined upside down'): 112 | local_axes_from_points_and_angle(point_i, point_j, ang) 113 | 114 | 115 | class TestOffsetTransformation: 116 | """Tests for the `offset_transformation` function.""" 117 | 118 | def test_offset_transformation(self) -> None: 119 | """Test the offset transformation calculation.""" 120 | offset = np.array([1.0, 0.0, 0.0]) 121 | u_vec = np.array([0.01, -0.02, 0.005]) 122 | r_vec = np.array([0.0002, -0.0003, 0.0001]) 123 | result = offset_transformation_3d(offset, u_vec, r_vec) 124 | expected = np.array([0.01, -0.0199, 0.0053]) 125 | assert np.allclose(result, expected) # type: ignore 126 | -------------------------------------------------------------------------------- /src/osmg/tests/verification/__init__.py: -------------------------------------------------------------------------------- 1 | """Verification tests.""" 2 | -------------------------------------------------------------------------------- /src/osmg/tests/verification/braced_frame.py: -------------------------------------------------------------------------------- 1 | """ 2 | Braced frame design model. 
3 | 4 | Length units: in 5 | Force units: kip 6 | """ 7 | 8 | import numpy as np 9 | 10 | from osmg.analysis.common import UDL, PointLoad, PointMass 11 | from osmg.analysis.load_case import LoadCaseRegistry 12 | from osmg.analysis.supports import FixedSupport 13 | from osmg.core.model import Model2D 14 | from osmg.creators.component import BarGenerator, BeamColumnCreator 15 | from osmg.creators.material import ElasticMaterialCreator 16 | from osmg.creators.section import AISC_Database_Section_Creator 17 | from osmg.graphics.plotly import ( 18 | BasicForceConfiguration, 19 | DeformationConfiguration, 20 | Figure3D, 21 | Figure3DConfiguration, 22 | ) 23 | from osmg.model_objects.node import Node 24 | from osmg.model_objects.section import ElasticSection 25 | 26 | # Instantiate model object 27 | frame = Model2D(name='Gridline B', dimensionality='2D Frame') 28 | 29 | # Add levels and grids 30 | grids = frame.grid_system 31 | grids.add_level('0', 0.00) 32 | grids.add_level('1', 15.00 * 12.00) 33 | grids.add_level('2', (15.00 + 13.00) * 12.00) 34 | grids.add_level('3', (15.00 + 2.0 * 13.00) * 12.00) 35 | grids.add_level('4', (15.00 + 3.0 * 13.00) * 12.00) 36 | grids.add_grid('A', 0.00) 37 | grids.add_grid('B', 25.00 * 12.00) 38 | grids.add_grid('C', 25.00 * 2.0 * 12.00) 39 | grids.add_grid('D', 25.00 * 3.0 * 12.00) 40 | grids.add_grid('E', 25.00 * 4.0 * 12.00) 41 | grids.add_grid('F', 25.00 * 5.0 * 12.00) 42 | grids.add_grid('LC', 25.00 * 6.0 * 12.00) 43 | 44 | # Add primary nodes 45 | for level in ('0', '1', '2', '3', '4'): 46 | for grid in ('A', 'B', 'C', 'D', 'E', 'F', 'LC'): 47 | if (grid == 'A' and level == '4') or (grid == 'F' and level == '4'): 48 | continue 49 | frame.nodes.add( 50 | Node( 51 | uid_generator=frame.uid_generator, 52 | coordinates=( 53 | grids.get_grid_location(grid), 54 | grids.get_level(level).elevation(), 55 | ), 56 | ), 57 | ) 58 | 59 | # Example: find the node at 'Base'-'A' (not used) 60 | found_node = frame.nodes.search_by_coordinates_or_raise( 61 | ( 62 | grids.get_grid('A').data(), 63 | grids.get_level('0').elevation(), 64 | ) 65 | ) 66 | 67 | # Define a common section (not used) 68 | simple_section = ElasticSection( 69 | frame.uid_generator, 70 | 'Test Section', 71 | e_mod=1e3, 72 | area=1e3, 73 | i_y=1.00, 74 | i_x=1.00, 75 | g_mod=1.00, 76 | j_mod=1.00, 77 | sec_w=0.00, 78 | ) 79 | 80 | # Define Steel Young's modulus and shear modulus. 
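# With kip and inch as the working units these moduli are expressed in ksi
# (29000 ksi is roughly 200 GPa and 11500 ksi roughly 79 GPa, typical values
# for structural steel).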
81 | e_modulus = 29000.00 # kip/in2 82 | g_modulus = 11500.00 # kip/in2 83 | 84 | # Define an AISC W section (length unit is `in`) 85 | section_creator = AISC_Database_Section_Creator(frame.uid_generator) 86 | column_section = section_creator.load_elastic_section( 87 | section_label='W14X120', e_modulus=e_modulus, g_modulus=g_modulus 88 | ) 89 | beam_section = section_creator.load_elastic_section( 90 | section_label='W18X119', e_modulus=e_modulus, g_modulus=g_modulus 91 | ) 92 | 93 | 94 | # Add columns 95 | bcg = BeamColumnCreator(frame, 'elastic') 96 | added_columns = [] 97 | for level in ('1', '2', '3', '4'): 98 | for grid in ('A', 'B', 'C', 'D', 'E', 'F'): 99 | if level == '4' and (grid in {'A', 'F'}): 100 | continue 101 | col = bcg.generate_plain_component_assembly( 102 | tags={'Column'}, 103 | node_i=frame.nodes.search_by_coordinates_or_raise( 104 | ( 105 | grids.get_grid(grid).data(), 106 | grids.get_level(level).elevation(), 107 | ) 108 | ), 109 | node_j=frame.nodes.search_by_coordinates_or_raise( 110 | ( 111 | grids.get_grid(grid).data(), 112 | grids.get_level(level).previous().elevation(), 113 | ) 114 | ), 115 | n_sub=1, 116 | eo_i=np.array((0.00, 0.0)), 117 | eo_j=np.array((0.00, 0.0)), 118 | section=column_section, 119 | transf_type='Linear', 120 | ) 121 | added_columns.append(col) 122 | 123 | # Add beams 124 | added_beams = [] 125 | for level in ('1', '2', '3', '4'): 126 | for grid in ('A', 'B', 'C', 'D', 'E'): 127 | if level == '4' and grid in {'A', 'E'}: 128 | continue 129 | beam = bcg.generate_plain_component_assembly( 130 | tags={'Beam'}, 131 | node_i=frame.nodes.search_by_coordinates_or_raise( 132 | ( 133 | grids.get_grid(grid).data(), 134 | grids.get_level(level).elevation(), 135 | ) 136 | ), 137 | node_j=frame.nodes.search_by_coordinates_or_raise( 138 | ( 139 | grids.get_grid(grid).next().data(), 140 | grids.get_level(level).elevation(), 141 | ) 142 | ), 143 | n_sub=1, 144 | eo_i=np.array((0.00, 0.0)), 145 | eo_j=np.array((0.00, 0.0)), 146 | section=beam_section, 147 | transf_type='Linear', 148 | ) 149 | added_beams.append(beam) 150 | 151 | # # Add braces 152 | brg = BarGenerator(frame) 153 | for level_top, level_bottom, grid_top, grid_bottom in zip( 154 | ('1', '1', '1', '1', '2', '2', '3', '4'), 155 | ('0', '0', '0', '0', '1', '1', '2', '3'), 156 | ('C', 'C', 'E', 'E', 'D', 'D', 'C', 'D'), 157 | ('B', 'D', 'D', 'F', 'C', 'E', 'D', 'C'), 158 | ): 159 | brg.add( 160 | tags={'Brace'}, 161 | node_i=frame.nodes.search_by_coordinates_or_raise( 162 | ( 163 | grids.get_grid(grid_top).data(), 164 | grids.get_level(level_top).elevation(), 165 | ) 166 | ), 167 | node_j=frame.nodes.search_by_coordinates_or_raise( 168 | ( 169 | grids.get_grid(grid_bottom).data(), 170 | grids.get_level(level_bottom).elevation(), 171 | ) 172 | ), 173 | eo_i=np.array((0.00, 0.00)), 174 | eo_j=np.array((0.00, 0.00)), 175 | transf_type='Linear', 176 | area=40.00, 177 | material=ElasticMaterialCreator(frame, stiffness=e_modulus).generate(), 178 | outside_shape=None, 179 | weight_per_length=0.00, 180 | ) 181 | 182 | 183 | # Add leaning column 184 | leaning_column_area = 1e4 # in2 185 | for level in ('1', '2', '3', '4'): 186 | brg.add( 187 | tags={'Truss'}, 188 | node_i=frame.nodes.search_by_coordinates_or_raise( 189 | ( 190 | grids.get_grid('LC').data(), 191 | grids.get_level(level).elevation(), 192 | ) 193 | ), 194 | node_j=frame.nodes.search_by_coordinates_or_raise( 195 | ( 196 | grids.get_grid('LC').data(), 197 | grids.get_level(level).previous().elevation(), 198 | ) 199 | ), 200 | eo_i=np.array((0.00, 
0.00)), 201 | eo_j=np.array((0.00, 0.00)), 202 | transf_type='Linear', 203 | area=leaning_column_area, 204 | material=ElasticMaterialCreator(frame, stiffness=e_modulus).generate(), 205 | outside_shape=None, 206 | weight_per_length=0.00, 207 | ) 208 | 209 | 210 | def show_model() -> None: 211 | """Show the model (only).""" 212 | fig = Figure3D(Figure3DConfiguration(ndm=2)) 213 | fig.add_nodes(list(frame.nodes.values()), 'primary') 214 | fig.add_components(list(frame.components.values())) 215 | fig.show() 216 | 217 | 218 | # Create a load case registry 219 | load_case_registry = LoadCaseRegistry(frame) 220 | # Define load cases 221 | lc_modal = load_case_registry.modal['modal_1'] 222 | lc_dead = load_case_registry.dead['dead_1'] 223 | load_cases = (lc_modal, lc_dead) 224 | 225 | # Add fixed supports to each load case 226 | fixed_support = FixedSupport((True, True, True)) 227 | for lc in load_cases: 228 | lc.add_supports_at_level(frame, fixed_support, '0') 229 | 230 | # Add rigid diaphragm 231 | for level in ('1', '2', '3', '4'): 232 | parent_node = frame.nodes.search_by_coordinates_or_raise( 233 | ( 234 | grids.get_grid('LC').data(), 235 | grids.get_level(level).elevation(), 236 | ) 237 | ) 238 | for lc in load_cases: 239 | lc.define_rigid_diaphragm(frame, parent_node) 240 | 241 | 242 | # # Example of how to retrieve a primary node: 243 | # # Locate the node at 'A'-'Level 1' 244 | # frame.nodes.search_by_coordinates_or_raise( 245 | # (grids.get_grid('A').data(), grids.get_level('1').elevation()) 246 | # ) 247 | 248 | # `lc_dead`: Add UDLs to the beams 249 | for beam in added_beams: 250 | lc_dead.load_registry.component_udl[beam.uid] = UDL((0.0, -1.67e-3)) # kip/in 251 | 252 | # `lc_dead`: Add a concentrated point load at 'B'-'Level 4' 253 | lc_dead.load_registry.nodal_loads[ 254 | frame.nodes.search_by_coordinates_or_raise( 255 | ( 256 | grids.get_grid('B').data(), 257 | grids.get_level('4').elevation(), 258 | ) 259 | ).uid 260 | ] = PointLoad( 261 | (2000.0, 0.00, 0.00) # kip 262 | ) 263 | 264 | # `modal`: Add mass on the leaning column.
265 | for level in ('1', '2', '3', '4'): 266 | lc_modal.mass_registry[ 267 | frame.nodes.search_by_coordinates_or_raise( 268 | ( 269 | grids.get_grid('LC').data(), 270 | grids.get_level(level).elevation(), 271 | ) 272 | ).uid 273 | ] = PointMass((+16.0e3 / 386.22, 0.00, 0.00)) 274 | lc_modal.analysis.settings.num_modes = 2 275 | 276 | # # Example: Add an extra recorder 277 | # load_case_registry.dead['dead_1'].analysis.recorders['node_envelope'] = NodeRecorder( 278 | # uid_generator=frame.uid_generator, 279 | # recorder_type='EnvelopeNode', 280 | # nodes=( 281 | # frame.nodes.search_by_coordinates_or_raise( 282 | # ( 283 | # grids.get_grid('A').data(), 284 | # grids.get_level('3').elevation(), 285 | # ) 286 | # ).uid, 287 | # ), 288 | # dofs=(1, 2, 3), 289 | # response_type='disp', 290 | # file_name='envelope', 291 | # output_time=True, 292 | # number_of_significant_digits=6, 293 | # ) 294 | 295 | 296 | # # Example: change num_steps 297 | # load_case_registry.dead['dead_1'].analysis.settings.num_steps = 10 298 | 299 | # Run analysis 300 | load_case_registry.run() 301 | 302 | # result_dir = load_case_registry.dead['dead_1'].analysis.settings.result_directory 303 | # print(f'Result directory `dead_1`: {result_dir}') 304 | # result_dir = load_case_registry.dead['dead_2'].analysis.settings.result_directory 305 | # print(f'Result directory `dead_2`: {result_dir}') 306 | 307 | 308 | # lc_modal.analysis.recorders['default_node'].get_data() 309 | # lc_modal.analysis.recorders[ 310 | # 'default_basic_force' 311 | # ].get_data() 312 | 313 | # combinations happen here. 314 | # combined_displacements = load_case_registry.combine_recorder('default_node') 315 | 316 | # forces = ( 317 | # load_case_registry.dead['dead_1'] 318 | # .analysis.recorders['default_basic_force'] 319 | # .get_data() 320 | # ) 321 | 322 | 323 | # data = ( 324 | # load_case_registry.dead['dead_1'] 325 | # .analysis.recorders['default_basic_force'] 326 | # .get_data() 327 | # ) 328 | 329 | 330 | step = 0 331 | deformation_configuration = DeformationConfiguration( 332 | reference_length=frame.reference_length(), 333 | ndf=3, 334 | ndm=2, 335 | data=lc_dead.analysis.recorders['default_node'].get_data(), 336 | step=step, 337 | # amplification_factor=None, # Figure it out. 338 | amplification_factor=10.00 / 3.06618, # Figure it out. 
339 | ) 340 | basic_force_configuration = BasicForceConfiguration( 341 | reference_length=frame.reference_length(), 342 | ndm=2, 343 | ndf=3, 344 | data=lc_dead.calculate_basic_forces( 345 | 'default_basic_force', 346 | frame.components.get_line_element_lengths(), 347 | ndm=2, 348 | num_stations=12, 349 | ), 350 | step=step, 351 | force_to_length_factor=12 * 12 / 1883.89, 352 | moment_to_length_factor=12 * 12 / 10000.0, 353 | ) 354 | fig = Figure3D(Figure3DConfiguration(ndm=2)) 355 | fig.add_nodes(list(frame.nodes.values()), 'primary', overlay=True) 356 | fig.add_components(list(frame.components.values()), overlay=True) 357 | # fig.add_nodes(list(frame.nodes.values()), 'primary') 358 | # fig.add_components(list(frame.components.values())) 359 | fig.add_nodes(list(frame.nodes.values()), 'primary', deformation_configuration) 360 | fig.add_components(list(frame.components.values()), deformation_configuration) 361 | fig.add_supports(frame.nodes, lc_modal.fixed_supports, symbol_size=12.00) 362 | # fig.add_udl( 363 | # load_case_registry.dead['dead_2'].load_registry.component_udl, 364 | # frame.components, 365 | # force_to_length_factor=2.0, 366 | # offset=0.00, 367 | # ) 368 | # fig.add_loads( 369 | # lc_modal.mass_registry, 370 | # frame.nodes, 371 | # force_to_length_factor=2.00, 372 | # offset=0.0, 373 | # head_length=24.0, 374 | # head_width=24.0, 375 | # base_width=5.0, 376 | # ) 377 | fig.add_basic_forces( 378 | components=list(frame.components.values()), 379 | basic_force_configuration=basic_force_configuration, 380 | ) 381 | fig.show() 382 | -------------------------------------------------------------------------------- /src/osmg/tests/verification/opensees_only/__init__.py: -------------------------------------------------------------------------------- 1 | """Verification scripts using only OpenSees.""" 2 | -------------------------------------------------------------------------------- /src/osmg/tests/verification/opensees_only/openseespy_truss.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simple OpenSees example of a plane truss. 3 | 4 | Adapted from the OpenSeesPy documentation (examples). 5 | 6 | Thu Nov 28 10:19:49 AM PST 2024 7 | 8 | """ 9 | 10 | import openseespy.opensees as ops 11 | 12 | ops.wipe() 13 | ops.model('basic', '-ndm', 2, '-ndf', 2) 14 | 15 | ops.node(1, 0.0, 0.0) 16 | ops.node(2, 144.0, 0.0) 17 | ops.node(3, 168.0, 0.0) 18 | ops.node(4, 72.0, 96.0) 19 | 20 | ops.fix(1, 1, 1) 21 | ops.fix(2, 1, 1) 22 | ops.fix(3, 1, 1) 23 | 24 | ops.uniaxialMaterial('Elastic', 1, 3000.0) 25 | 26 | ops.element('Truss', 1, 1, 4, 10.0, 1) 27 | ops.element('Truss', 2, 2, 4, 5.0, 1) 28 | ops.element('Truss', 3, 3, 4, 5.0, 1) 29 | 30 | ops.timeSeries('Linear', 1) 31 | ops.pattern('Plain', 1, 1) 32 | ops.load(4, 100.0, -50.0) 33 | 34 | # Adding a UDL on a truss member is not supported: 35 | # ops.eleLoad('-ele', 1, '-type', '-beamUniform', 0.00, 0.00, 1.00) 36 | 37 | # Add a recorder for the basic forces. 
38 | ops.recorder( 39 | 'Element', 40 | '-file', 41 | '/tmp/truss_recorder.txt', # noqa: S108 42 | '-time', 43 | '-ele', 44 | *(1, 2, 3), 45 | 'localForce', 46 | ) 47 | 48 | ops.system('BandSPD') 49 | ops.numberer('RCM') 50 | ops.constraints('Plain') 51 | ops.integrator('LoadControl', 1.0) 52 | ops.algorithm('Linear') 53 | ops.analysis('Static') 54 | 55 | ops.analyze(1) 56 | 57 | ux = ops.nodeDisp(4, 1) 58 | uy = ops.nodeDisp(4, 2) 59 | 60 | assert abs(ux - 0.53009277713228375450) < 1e-12 61 | assert abs(uy + 0.17789363846931768864) < 1e-12 62 | -------------------------------------------------------------------------------- /src/osmg/tests/verification/opensees_only/opsvis_frame.py: -------------------------------------------------------------------------------- 1 | """ 2 | Portal frame analysis benchmark file. 3 | 4 | Added: Mon Nov 25 04:41:46 AM PST 2024 5 | 6 | Comes from the 2D portal frame example in `opsvis`: 7 | https://opsvis.readthedocs.io/en/latest/ex_2d_portal_frame.html 8 | """ 9 | 10 | import openseespy.opensees as ops 11 | 12 | ops.wipe() 13 | ops.model('basic', '-ndm', 2, '-ndf', 3) 14 | 15 | column_length, girder_length = 4.0, 6.0 16 | 17 | Acol, Agir = 2.0e-3, 6.0e-3 18 | IzCol, IzGir = 1.6e-5, 5.4e-5 19 | 20 | E = 200.0e9 21 | 22 | Ep = {1: [E, Acol, IzCol], 2: [E, Acol, IzCol], 3: [E, Agir, IzGir]} 23 | 24 | ops.node(1, 0.0, 0.0) 25 | ops.node(2, 0.0, column_length) 26 | ops.node(3, girder_length, 0.0) 27 | ops.node(4, girder_length, column_length) 28 | 29 | ops.fix(1, 1, 1, 1) 30 | ops.fix(3, 1, 1, 0) 31 | 32 | ops.geomTransf('Linear', 1) 33 | 34 | # columns 35 | ops.element('elasticBeamColumn', 1, 1, 2, Acol, E, IzCol, 1) 36 | ops.element('elasticBeamColumn', 2, 3, 4, Acol, E, IzCol, 1) 37 | # girder 38 | ops.element('elasticBeamColumn', 3, 2, 4, Agir, E, IzGir, 1) 39 | 40 | Px = 2.0e3 41 | Wy = -10.0e3 42 | Wx = 0.0 43 | 44 | Ew = {3: ['-beamUniform', Wy, Wx]} 45 | 46 | ops.timeSeries('Constant', 1) 47 | ops.pattern('Plain', 1, 1) 48 | ops.load(2, Px, 0.0, 0.0) 49 | 50 | for etag in Ew: 51 | ops.eleLoad('-ele', etag, '-type', Ew[etag][0], Ew[etag][1], Ew[etag][2]) 52 | 53 | # recorder 54 | ops.recorder( 55 | 'Node', '-file', './disp.txt', '-time', '-node', 4, '-dof', 1, 2, 3, 'disp' 56 | ) 57 | 58 | ops.constraints('Transformation') 59 | ops.numberer('RCM') 60 | ops.system('BandGeneral') 61 | ops.test('NormDispIncr', 1.0e-6, 6, 2) 62 | ops.algorithm('Linear') 63 | ops.integrator('LoadControl', 1) 64 | ops.analysis('Static') 65 | ops.analyze(1) 66 | -------------------------------------------------------------------------------- /src/osmg/tests/verification/test_offset_and_basic_forces.py: -------------------------------------------------------------------------------- 1 | """ 2 | Offset: basic force verification. 3 | 4 | This simple verification check ensures that basic forces are correctly 5 | assigned using a single beamcolumn element in a variety of 6 | arrangements. 
7 | 8 | Written: Thu Nov 28 06:45:08 PM PST 2024 9 | 10 | """ 11 | 12 | from __future__ import annotations 13 | 14 | import numpy as np 15 | 16 | from osmg.analysis.common import PointLoad 17 | from osmg.analysis.load_case import LoadCaseRegistry 18 | from osmg.analysis.supports import FixedSupport 19 | from osmg.core.model import Model2D, Model3D 20 | from osmg.creators.component import BeamColumnCreator 21 | from osmg.graphics.plotly import ( 22 | BasicForceConfiguration, 23 | DeformationConfiguration, 24 | Figure3D, 25 | Figure3DConfiguration, 26 | ) 27 | from osmg.model_objects.node import Node 28 | from osmg.model_objects.section import ElasticSection 29 | 30 | 31 | def no_offset_2d() -> None: 32 | """Axially loaded cantilever column, 2D, no offset.""" 33 | column_model = Model2D(name='Test model', dimensionality='2D Frame') 34 | n1 = Node(uid_generator=column_model.uid_generator, coordinates=((0.00, 0.00))) 35 | n2 = Node(uid_generator=column_model.uid_generator, coordinates=((0.00, 5.00))) 36 | column_model.nodes.add(n1) 37 | column_model.nodes.add(n2) 38 | simple_section = ElasticSection( 39 | column_model.uid_generator, 40 | 'Test Section', 41 | e_mod=1e3, 42 | area=1e3, 43 | i_y=1.00, 44 | i_x=1.00, 45 | g_mod=1.00, 46 | j_mod=1.00, 47 | sec_w=0.00, 48 | ) 49 | bcg = BeamColumnCreator(column_model, 'elastic') 50 | bcg.generate_plain_component_assembly( 51 | tags={'column'}, 52 | node_i=n2, 53 | node_j=n1, 54 | n_sub=1, 55 | eo_i=np.array((0.00, 0.00)), 56 | eo_j=np.array((0.00, 0.00)), 57 | section=simple_section, 58 | transf_type='Linear', 59 | ) 60 | load_case_registry = LoadCaseRegistry(column_model) 61 | fixed_support = FixedSupport((True, True, True)) 62 | load_case_registry.dead['test'].fixed_supports[n1.uid] = fixed_support 63 | load_case_registry.dead['test'].load_registry.nodal_loads[n2.uid] = PointLoad( 64 | (0.00, -10.00, 0.00) # lb 65 | ) 66 | load_case_registry.run() 67 | 68 | axial_df, shear_y_df, shear_z_df, torsion_df, moment_y_df, moment_z_df = ( 69 | load_case_registry.dead['test'].calculate_basic_forces( 70 | 'default_basic_force', 71 | column_model.components.get_line_element_lengths(), 72 | ndm=2, 73 | num_stations=12, 74 | ) 75 | ) 76 | assert np.allclose(axial_df.to_numpy(), -10.00) 77 | assert np.allclose(shear_y_df.to_numpy(), 0.00) 78 | assert np.allclose(shear_z_df.to_numpy(), 0.00) 79 | assert np.allclose(torsion_df.to_numpy(), 0.00) 80 | assert np.allclose(moment_y_df.to_numpy(), 0.00) 81 | assert np.allclose(moment_z_df.to_numpy(), 0.00) 82 | 83 | displacements = ( 84 | load_case_registry.dead['test'].analysis.recorders['default_node'].get_data() 85 | ) 86 | assert np.allclose( 87 | displacements.to_numpy(), np.array((0.0, 0.0, 0.0, 0.0, -5.0e-05, 0.0)) 88 | ) 89 | # plot(column_model, load_case_registry) # appears correct. 
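# Hand check for the assertions above (a quick sketch, assuming simple axial behavior):
# the cantilever carries P = 10.0 in pure compression over L = 5.0 with E * A = 1e3 * 1e3,
# so the expected tip shortening is delta = P * L / (E * A) = 50.0 / 1e6 = 5.0e-5,
# matching the asserted -5.0e-05 vertical displacement; shear, moment, and torsion
# are all zero because the load acts through the element axis (no offset).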
90 | 91 | 92 | def offset_2d() -> None: 93 | """Axially loaded cantilever column, 2D, offset.""" 94 | column_model = Model2D(name='Test model', dimensionality='2D Frame') 95 | n1 = Node(uid_generator=column_model.uid_generator, coordinates=((0.00, 0.00))) 96 | n2 = Node(uid_generator=column_model.uid_generator, coordinates=((0.00, 5.00))) 97 | column_model.nodes.add(n1) 98 | column_model.nodes.add(n2) 99 | simple_section = ElasticSection( 100 | column_model.uid_generator, 101 | 'Test Section', 102 | e_mod=1e3, 103 | area=1e3, 104 | i_y=1.00, 105 | i_x=1.00, 106 | g_mod=1.00, 107 | j_mod=1.00, 108 | sec_w=0.00, 109 | ) 110 | bcg = BeamColumnCreator(column_model, 'elastic') 111 | bcg.generate_plain_component_assembly( 112 | tags={'column'}, 113 | node_i=n2, 114 | node_j=n1, 115 | n_sub=1, 116 | eo_i=np.array((1.00, 0.00)), 117 | eo_j=np.array((1.00, 0.00)), 118 | section=simple_section, 119 | transf_type='Linear', 120 | ) 121 | load_case_registry = LoadCaseRegistry(column_model) 122 | fixed_support = FixedSupport((True, True, True)) 123 | load_case_registry.dead['test'].fixed_supports[n1.uid] = fixed_support 124 | load_case_registry.dead['test'].load_registry.nodal_loads[n2.uid] = PointLoad( 125 | (0.00, -10.00, 0.00) # lb 126 | ) 127 | load_case_registry.run() 128 | 129 | axial_df, shear_y_df, shear_z_df, torsion_df, moment_y_df, moment_z_df = ( 130 | load_case_registry.dead['test'].calculate_basic_forces( 131 | 'default_basic_force', 132 | column_model.components.get_line_element_lengths(), 133 | ndm=2, 134 | num_stations=12, 135 | ) 136 | ) 137 | assert np.allclose(axial_df.to_numpy(), -10.00) 138 | assert np.allclose(shear_y_df.to_numpy(), 0.00) 139 | assert np.allclose(shear_z_df.to_numpy(), 0.00) 140 | assert np.allclose(torsion_df.to_numpy(), 0.00) 141 | assert np.allclose(moment_y_df.to_numpy(), 0.00) 142 | assert np.allclose(moment_z_df.to_numpy(), -10.00) 143 | 144 | displacements = ( 145 | load_case_registry.dead['test'].analysis.recorders['default_node'].get_data() 146 | ) 147 | assert np.allclose( 148 | displacements.to_numpy(), np.array((0.0, 0.0, 0.0, -0.125, -0.05005, 0.05)) 149 | ) 150 | # plot(column_model, load_case_registry) # appears correct. 
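# Hand check for the offset case above (a quick sketch, assuming rigid end offsets):
# the axial load P = 10.0 acts at a lever arm equal to the offset e = 1.0, producing
# a constant moment of magnitude P * e = 10.0 along the member, consistent with the
# asserted moment_z value of -10.0 (the sign follows the local-axis convention),
# while the axial force stays at -10.0 as in the no-offset case.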
151 | 152 | 153 | def no_offset_3d() -> None: 154 | """Axially loaded cantilever column, 3D, no offset.""" 155 | column_model = Model3D(name='Test model', dimensionality='3D Frame') 156 | n1 = Node( 157 | uid_generator=column_model.uid_generator, coordinates=((0.00, 0.00, 0.00)) 158 | ) 159 | n2 = Node( 160 | uid_generator=column_model.uid_generator, coordinates=((0.00, 0.00, 5.00)) 161 | ) 162 | column_model.nodes.add(n1) 163 | column_model.nodes.add(n2) 164 | simple_section = ElasticSection( 165 | column_model.uid_generator, 166 | 'Test Section', 167 | e_mod=1e3, 168 | area=1e3, 169 | i_y=1.00, 170 | i_x=1.00, 171 | g_mod=1.00, 172 | j_mod=1.00, 173 | sec_w=0.00, 174 | ) 175 | bcg = BeamColumnCreator(column_model, 'elastic') 176 | bcg.generate_plain_component_assembly( 177 | tags={'column'}, 178 | node_i=n2, 179 | node_j=n1, 180 | n_sub=1, 181 | eo_i=np.array((0.00, 0.00, 0.0)), 182 | eo_j=np.array((0.00, 0.00, 0.0)), 183 | section=simple_section, 184 | transf_type='Linear', 185 | angle=90.00 / 360.00 * 2.00 * np.pi, 186 | ) 187 | load_case_registry = LoadCaseRegistry(column_model) 188 | fixed_support = FixedSupport((True, True, True, True, True, True)) 189 | load_case_registry.dead['test'].fixed_supports[n1.uid] = fixed_support 190 | load_case_registry.dead['test'].load_registry.nodal_loads[n2.uid] = PointLoad( 191 | (0.00, 0.00, -10.00, 0.00, 0.00, 0.00) # lb 192 | ) 193 | load_case_registry.run() 194 | axial_df, shear_y_df, shear_z_df, torsion_df, moment_y_df, moment_z_df = ( 195 | load_case_registry.dead['test'].calculate_basic_forces( 196 | 'default_basic_force', 197 | column_model.components.get_line_element_lengths(), 198 | ndm=3, 199 | num_stations=12, 200 | ) 201 | ) 202 | assert np.allclose(axial_df.to_numpy(), -10.00) 203 | assert np.allclose(shear_y_df.to_numpy(), 0.00) 204 | assert np.allclose(shear_z_df.to_numpy(), 0.00) 205 | assert np.allclose(torsion_df.to_numpy(), 0.00) 206 | assert np.allclose(moment_y_df.to_numpy(), 0.00) 207 | assert np.allclose(moment_z_df.to_numpy(), 0.00) 208 | 209 | displacements = ( 210 | load_case_registry.dead['test'].analysis.recorders['default_node'].get_data() 211 | ) 212 | assert np.allclose( 213 | displacements.to_numpy(), 214 | np.array((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -5.0e-05, 0.0, 0.0, 0.0)), 215 | ) 216 | # plot(column_model, load_case_registry) # appears correct.
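# Note on the 3D no-offset case above (expected behavior, stated as a sketch): the
# response mirrors the 2D case (axial force -10.0, tip shortening 5.0e-5), with the
# recorded displacement array growing to 2 nodes x 6 DOF = 12 entries; the 90-degree
# `angle` argument only reorients the local y/z axes, so without an offset it leaves
# all basic forces unchanged (every moment remains zero).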
217 | 218 | 219 | def offset_3d() -> None: 220 | """Axially loaded cantilever column, 3D, offset.""" 221 | column_model = Model3D(name='Test model', dimensionality='3D Frame') 222 | n1 = Node( 223 | uid_generator=column_model.uid_generator, coordinates=((0.00, 0.00, 0.00)) 224 | ) 225 | n2 = Node( 226 | uid_generator=column_model.uid_generator, coordinates=((0.00, 0.00, 5.00)) 227 | ) 228 | column_model.nodes.add(n1) 229 | column_model.nodes.add(n2) 230 | simple_section = ElasticSection( 231 | column_model.uid_generator, 232 | 'Test Section', 233 | e_mod=1e3, 234 | area=1e3, 235 | i_y=1.00, 236 | i_x=1.00, 237 | g_mod=1.00, 238 | j_mod=1.00, 239 | sec_w=0.00, 240 | ) 241 | bcg = BeamColumnCreator(column_model, 'elastic') 242 | bcg.generate_plain_component_assembly( 243 | tags={'column'}, 244 | node_i=n2, 245 | node_j=n1, 246 | n_sub=1, 247 | eo_i=np.array((1.00, 0.00, 0.0)), 248 | eo_j=np.array((1.00, 0.00, 0.0)), 249 | section=simple_section, 250 | transf_type='Linear', 251 | angle=90.00 / 360.00 * 2.00 * np.pi, 252 | ) 253 | load_case_registry = LoadCaseRegistry(column_model) 254 | fixed_support = FixedSupport((True, True, True, True, True, True)) 255 | load_case_registry.dead['test'].fixed_supports[n1.uid] = fixed_support 256 | load_case_registry.dead['test'].load_registry.nodal_loads[n2.uid] = PointLoad( 257 | (0.00, 0.00, -10.00, 0.00, 0.00, 0.00) # lb 258 | ) 259 | load_case_registry.run() 260 | axial_df, shear_y_df, shear_z_df, torsion_df, moment_y_df, moment_z_df = ( 261 | load_case_registry.dead['test'].calculate_basic_forces( 262 | 'default_basic_force', 263 | column_model.components.get_line_element_lengths(), 264 | ndm=3, 265 | num_stations=12, 266 | ) 267 | ) 268 | assert np.allclose(axial_df.to_numpy(), -10.00) 269 | assert np.allclose(shear_y_df.to_numpy(), 0.00) 270 | assert np.allclose(shear_z_df.to_numpy(), 0.00) 271 | assert np.allclose(torsion_df.to_numpy(), 0.00) 272 | assert np.allclose(moment_y_df.to_numpy(), 0.00) 273 | assert np.allclose(moment_z_df.to_numpy(), 10.00) 274 | 275 | displacements = ( 276 | load_case_registry.dead['test'].analysis.recorders['default_node'].get_data() 277 | ) 278 | assert np.allclose( 279 | displacements.to_numpy(), 280 | np.array( 281 | (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.125, 0.0, -0.05005, 0.0, -0.05, 0) 282 | ), 283 | ) 284 | # plot(column_model, load_case_registry) # appears correct. 285 | 286 | 287 | def plot(model: Model2D | Model3D, load_case_registry: LoadCaseRegistry) -> None: 288 | """Plot the deformed shape and basic forces.""" 289 | if isinstance(model, Model2D): 290 | ndm = 2 291 | ndf = 3 292 | else: 293 | ndm = 3 294 | ndf = 6 295 | deformation_configuration = DeformationConfiguration( 296 | reference_length=model.reference_length(), 297 | ndf=ndf, 298 | ndm=ndm, 299 | data=load_case_registry.dead['test'] 300 | .analysis.recorders['default_node'] 301 | .get_data(), 302 | step=0, 303 | amplification_factor=None, # Figure it out. 
304 | ) 305 | basic_force_configuration = BasicForceConfiguration( 306 | reference_length=model.reference_length(), 307 | ndf=ndf, 308 | ndm=ndm, 309 | data=load_case_registry.dead['test'].calculate_basic_forces( 310 | 'default_basic_force', 311 | model.components.get_line_element_lengths(), 312 | ndm=ndm, 313 | num_stations=12, 314 | ), 315 | step=-1, 316 | force_to_length_factor=0.1, 317 | moment_to_length_factor=0.1, 318 | ) 319 | 320 | fig = Figure3D(Figure3DConfiguration(ndm=ndm)) # type: ignore 321 | fig.add_nodes(list(model.nodes.values()), 'primary', overlay=True) 322 | fig.add_components(list(model.components.values()), overlay=True) 323 | fig.add_nodes(list(model.nodes.values()), 'primary', deformation_configuration) 324 | fig.add_components(list(model.components.values()), deformation_configuration) 325 | fig.add_supports( 326 | model.nodes, 327 | load_case_registry.dead['test'].fixed_supports, 328 | symbol_size=12.00 / 120.0, 329 | ) 330 | fig.add_udl( 331 | load_case_registry.dead['test'].load_registry.component_udl, 332 | model.components, 333 | force_to_length_factor=0.10, 334 | offset=0.00, 335 | ) 336 | fig.add_loads( 337 | load_case_registry.dead['test'].load_registry.nodal_loads, 338 | model.nodes, 339 | force_to_length_factor=0.10, 340 | offset=0.0, 341 | head_length=24.0 / 120.0, 342 | head_width=24.0 / 120.0, 343 | base_width=5.0 / 120.0, 344 | ) 345 | fig.add_basic_forces( 346 | components=list(model.components.values()), 347 | basic_force_configuration=basic_force_configuration, 348 | ) 349 | fig.show() 350 | --------------------------------------------------------------------------------