├── fluidize ├── adapters │ └── local │ │ ├── simulation.py │ │ ├── __init__.py │ │ ├── adapter.py │ │ └── projects.py ├── core │ ├── modules │ │ ├── tracking │ │ │ └── __init__.py │ │ ├── run │ │ │ ├── node │ │ │ │ ├── methods │ │ │ │ │ ├── base │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── execstrat.py │ │ │ │ │ │ └── Execute.py │ │ │ │ │ └── local │ │ │ │ │ │ ├── execstrat.py │ │ │ │ │ │ └── Environment.py │ │ │ │ ├── __init__.py │ │ │ │ └── node_runner.py │ │ │ ├── project │ │ │ │ ├── __init__.py │ │ │ │ ├── methods │ │ │ │ │ └── local.py │ │ │ │ └── project_runner.py │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── projects │ │ │ └── __init__.py │ │ ├── graph │ │ │ ├── __init__.py │ │ │ ├── nodes │ │ │ │ └── node.py │ │ │ ├── parameters.py │ │ │ ├── edges │ │ │ │ └── edge.py │ │ │ └── process.py │ │ └── execute │ │ │ ├── utilities │ │ │ ├── __init__.py │ │ │ └── resource_builder.py │ │ │ └── __init__.py │ ├── utils │ │ ├── retrieval │ │ │ ├── __init__.py │ │ │ ├── main.py │ │ │ └── handler.py │ │ ├── exceptions │ │ │ └── __init__.py │ │ ├── pathfinder │ │ │ └── methods │ │ │ │ ├── local.py │ │ │ │ └── base.py │ │ └── dataloader │ │ │ ├── loader │ │ │ ├── writer_local.py │ │ │ ├── loader_local.py │ │ │ └── writer_base.py │ │ │ ├── data_writer.py │ │ │ └── data_loader.py │ ├── __init__.py │ ├── types │ │ ├── files.py │ │ ├── __init__.py │ │ ├── execution_models │ │ │ ├── execution_mode.py │ │ │ ├── workflow_context.py │ │ │ ├── __init__.py │ │ │ ├── execution_hints.py │ │ │ └── resource_requirements.py │ │ ├── parameters.py │ │ ├── project.py │ │ ├── graph.py │ │ ├── file_models │ │ │ ├── parameters_model.py │ │ │ ├── metadata_model.py │ │ │ ├── properties_model.py │ │ │ ├── json_file_model_base.py │ │ │ └── file_model_base.py │ │ ├── runs.py │ │ └── node.py │ └── constants.py ├── __init__.py ├── managers │ ├── simulations.py │ ├── runs.py │ ├── __init__.py │ ├── registry.py │ └── project.py ├── client.py └── config.py ├── tests ├── unit │ ├── core │ │ ├── types 
│ │ │ ├── file_models │ │ │ │ └── __init__.py │ │ │ └── test_files.py │ │ ├── __init__.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ └── logger │ │ │ │ └── __init__.py │ │ └── modules │ │ │ ├── __init__.py │ │ │ ├── graph │ │ │ └── __init__.py │ │ │ └── projects │ │ │ └── __init__.py │ ├── __init__.py │ ├── backends │ │ ├── __init__.py │ │ └── local │ │ │ └── __init__.py │ ├── managers │ │ ├── __init__.py │ │ └── test_simulation.py │ └── conftest.py ├── fixtures │ ├── __init__.py │ ├── docker_projects │ │ └── project-1754038373536 │ │ │ ├── runs │ │ │ └── run_1 │ │ │ │ ├── outputs │ │ │ │ ├── node-1754038461760 │ │ │ │ │ └── output.txt │ │ │ │ └── node-1754038465820 │ │ │ │ │ └── output.txt │ │ │ │ ├── parameters.json │ │ │ │ ├── node-1754038461760 │ │ │ │ ├── requirements.txt │ │ │ │ ├── parameters.json │ │ │ │ ├── Dockerfile │ │ │ │ ├── metadata.yaml │ │ │ │ ├── properties.yaml │ │ │ │ ├── main.sh │ │ │ │ └── source │ │ │ │ │ └── main.py │ │ │ │ ├── node-1754038465820 │ │ │ │ ├── requirements.txt │ │ │ │ ├── parameters.json │ │ │ │ ├── Dockerfile │ │ │ │ ├── properties.yaml │ │ │ │ ├── metadata.yaml │ │ │ │ ├── main.sh │ │ │ │ └── source │ │ │ │ │ └── main.py │ │ │ │ ├── metadata.yaml │ │ │ │ └── graph.json │ │ │ ├── node-1754038461760 │ │ │ ├── requirements.txt │ │ │ ├── parameters.json │ │ │ ├── Dockerfile │ │ │ ├── properties.yaml │ │ │ ├── metadata.yaml │ │ │ ├── main.sh │ │ │ └── source │ │ │ │ └── main.py │ │ │ ├── node-1754038465820 │ │ │ ├── requirements.txt │ │ │ ├── parameters.json │ │ │ ├── Dockerfile │ │ │ ├── properties.yaml │ │ │ ├── metadata.yaml │ │ │ ├── main.sh │ │ │ └── source │ │ │ │ └── main.py │ │ │ ├── parameters.json │ │ │ ├── metadata.yaml │ │ │ └── graph.json │ └── sample_projects.py └── integration │ ├── __init__.py │ ├── test_simulations_manager.py │ └── conftest.py ├── docs ├── getting-started │ ├── quickstart.md │ └── examples.md ├── core-modules │ ├── node.md │ ├── projects.md │ ├── run.md │ ├── client.md │ ├── graph.md │ └── 
index.md └── index.md ├── examples └── example-projects │ ├── MUJOCO │ ├── parameters.json │ ├── metadata.yaml │ ├── Mujoco-Simulation │ │ ├── metadata.yaml │ │ ├── properties.yaml │ │ ├── parameters.json │ │ ├── cloudbuild.yaml │ │ ├── Dockerfile │ │ ├── source │ │ │ ├── test_simple.py │ │ │ ├── debug_tutorial.py │ │ │ └── test_pinata.py │ │ ├── main.sh │ │ └── README.md │ └── graph.json │ └── MUJOCO.zip ├── codecov.yaml ├── .devcontainer ├── postCreateCommand.sh └── devcontainer.json ├── .github ├── workflows │ ├── validate-codecov-config.yml │ ├── on-release-main.yml │ └── main.yml └── actions │ └── setup-python-env │ └── action.yml ├── tox.ini ├── Dockerfile ├── .pre-commit-config.yaml ├── LICENSE ├── Makefile ├── mkdocs.yml ├── pyproject.toml └── CONTRIBUTING.md /fluidize/adapters/local/simulation.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fluidize/core/modules/tracking/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fluidize/core/utils/retrieval/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit/core/types/file_models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit/__init__.py: -------------------------------------------------------------------------------- 1 | """Unit tests package.""" 2 | -------------------------------------------------------------------------------- /docs/getting-started/quickstart.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 
-------------------------------------------------------------------------------- /fluidize/core/modules/run/node/methods/base/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit/backends/__init__.py: -------------------------------------------------------------------------------- 1 | """Adapter unit tests.""" 2 | -------------------------------------------------------------------------------- /tests/unit/core/__init__.py: -------------------------------------------------------------------------------- 1 | """Core module unit tests.""" 2 | -------------------------------------------------------------------------------- /tests/unit/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Unit tests for utils 2 | -------------------------------------------------------------------------------- /tests/unit/managers/__init__.py: -------------------------------------------------------------------------------- 1 | """Manager unit tests.""" 2 | -------------------------------------------------------------------------------- /fluidize/core/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core Fluidize Logic 3 | """ 4 | -------------------------------------------------------------------------------- /tests/fixtures/__init__.py: -------------------------------------------------------------------------------- 1 | """Test fixtures and sample data.""" 2 | -------------------------------------------------------------------------------- /tests/integration/__init__.py: -------------------------------------------------------------------------------- 1 | """Integration tests package.""" 2 | -------------------------------------------------------------------------------- /tests/unit/backends/local/__init__.py: 
-------------------------------------------------------------------------------- 1 | """Local adapter unit tests.""" 2 | -------------------------------------------------------------------------------- /tests/unit/core/modules/__init__.py: -------------------------------------------------------------------------------- 1 | """Core modules unit tests.""" 2 | -------------------------------------------------------------------------------- /tests/unit/core/modules/graph/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for graph module.""" 2 | -------------------------------------------------------------------------------- /tests/unit/core/utils/logger/__init__.py: -------------------------------------------------------------------------------- 1 | # Unit tests for logger utilities 2 | -------------------------------------------------------------------------------- /docs/getting-started/examples.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | *Examples coming soon...* 4 | -------------------------------------------------------------------------------- /tests/unit/core/modules/projects/__init__.py: -------------------------------------------------------------------------------- 1 | """Projects module unit tests.""" 2 | -------------------------------------------------------------------------------- /fluidize/core/modules/__init__.py: -------------------------------------------------------------------------------- 1 | """Core modules for Fluidize business logic.""" 2 | -------------------------------------------------------------------------------- /fluidize/core/modules/projects/__init__.py: -------------------------------------------------------------------------------- 1 | """Projects module for local filesystem operations.""" 2 | -------------------------------------------------------------------------------- 
/examples/example-projects/MUJOCO/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": {}, 3 | "parameters": {} 4 | } 5 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/outputs/node-1754038461760/output.txt: -------------------------------------------------------------------------------- 1 | CANDY 2 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/outputs/node-1754038465820/output.txt: -------------------------------------------------------------------------------- 1 | CANDYCANDY 2 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038461760/requirements.txt: -------------------------------------------------------------------------------- 1 | # No external dependencies required 2 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038465820/requirements.txt: -------------------------------------------------------------------------------- 1 | # No external dependencies required 2 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": {}, 3 | "parameters": {} 4 | } 5 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Fluidize-Inc/fluidize-python/HEAD/examples/example-projects/MUJOCO.zip 
-------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": {}, 3 | "parameters": {} 4 | } 5 | -------------------------------------------------------------------------------- /fluidize/core/modules/run/node/__init__.py: -------------------------------------------------------------------------------- 1 | """Node runner module.""" 2 | 3 | from .node_runner import RunJob 4 | 5 | __all__ = ["RunJob"] 6 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038461760/requirements.txt: -------------------------------------------------------------------------------- 1 | # No external dependencies required 2 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038465820/requirements.txt: -------------------------------------------------------------------------------- 1 | # No external dependencies required 2 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038461760/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": {}, 3 | "parameters": [] 4 | } 5 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038465820/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": {}, 3 | "parameters": [] 4 | } 5 | -------------------------------------------------------------------------------- 
/tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038461760/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": {}, 3 | "parameters": [] 4 | } 5 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038465820/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": {}, 3 | "parameters": [] 4 | } 5 | -------------------------------------------------------------------------------- /fluidize/adapters/local/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Local adapter implementation. 3 | """ 4 | 5 | from .adapter import LocalAdapter 6 | 7 | __all__ = ["LocalAdapter"] 8 | -------------------------------------------------------------------------------- /fluidize/core/modules/run/project/__init__.py: -------------------------------------------------------------------------------- 1 | """Project runner module.""" 2 | 3 | from .project_runner import ProjectRunner 4 | 5 | __all__ = ["ProjectRunner"] 6 | -------------------------------------------------------------------------------- /codecov.yaml: -------------------------------------------------------------------------------- 1 | coverage: 2 | range: 70..100 3 | round: down 4 | precision: 1 5 | status: 6 | project: 7 | default: 8 | target: 90% 9 | threshold: 0.5% 10 | -------------------------------------------------------------------------------- /docs/core-modules/node.md: -------------------------------------------------------------------------------- 1 | # Node Module 2 | 3 | ::: fluidize.managers.node.NodeManager 4 | options: 5 | show_source: false 6 | heading_level: 3 7 | show_root_heading: true 8 | -------------------------------------------------------------------------------- 
/fluidize/core/modules/graph/__init__.py: -------------------------------------------------------------------------------- 1 | """Graph module for managing project graphs.""" 2 | 3 | from fluidize.core.modules.graph.processor import GraphProcessor 4 | 5 | __all__ = ["GraphProcessor"] 6 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/metadata.yaml: -------------------------------------------------------------------------------- 1 | project: 2 | description: A MuJoCo simulation project 3 | id: MUJOCO 4 | label: MUJOCO DEMO 5 | location: '' 6 | metadata_version: '1.0' 7 | status: active 8 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/metadata.yaml: -------------------------------------------------------------------------------- 1 | project: 2 | description: '' 3 | id: project-1754038373536 4 | label: SIMPLETEST 5 | location: '' 6 | metadata_version: '1.0' 7 | status: active 8 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038461760/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt . 6 | 7 | RUN pip install -r requirements.txt 8 | 9 | CMD ["python"] 10 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038465820/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt . 
6 | 7 | RUN pip install -r requirements.txt 8 | 9 | CMD ["python"] 10 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038461760/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt . 6 | 7 | RUN pip install -r requirements.txt 8 | 9 | CMD ["python"] 10 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038465820/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt . 6 | 7 | RUN pip install -r requirements.txt 8 | 9 | CMD ["python"] 10 | -------------------------------------------------------------------------------- /fluidize/core/types/files.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | 4 | # Response Model for Getting File Content 5 | class FileMetadata(BaseModel): 6 | path: str 7 | filename: str 8 | size: int 9 | mime_type: str 10 | language: str 11 | -------------------------------------------------------------------------------- /.devcontainer/postCreateCommand.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | # Install uv 4 | curl -LsSf https://astral.sh/uv/install.sh | sh 5 | 6 | # Install Dependencies 7 | uv sync 8 | 9 | # Install pre-commit hooks 10 | uv run pre-commit install --install-hooks 11 | -------------------------------------------------------------------------------- /fluidize/core/modules/run/__init__.py: -------------------------------------------------------------------------------- 1 | """Run module for executing flows and nodes.""" 2 | 3 | from fluidize.core.modules.run.node.node_runner import RunJob 4 | from fluidize.core.modules.run.project.project_runner import ProjectRunner 5 | 6 | __all__ = ["ProjectRunner", "RunJob"] 7 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038465820/properties.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | container_image: python:3.9-slim 3 | image_name: test2-duplicate 4 | source_output_folder: output 5 | last_run: null 6 | node_id: node-1753485831833 7 | status: not_run 8 | should_run: true 9 | simulation_mount_path: source 10 | version: 1.0 11 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038461760/properties.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | container_image: python:3.9-slim 3 | image_name: test1-simple 4 | source_output_folder: source/output 5 | last_run: null 6 | node_id: node-1753485831832 7 | status: not_run 8 | should_run: true 9 | simulation_mount_path: source 10 | version: 1.0 11 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/Mujoco-Simulation/metadata.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | authors: [] 3 | code_url: null 4 | 
date: '2025-08-13' 5 | description: A demo simulation of a humanoid in MuJoCo 6 | id: mujoco-humanoid-demo 7 | metadata_version: '1.0' 8 | mlflow_run_id: null 9 | name: MuJoCo Humanoid Locomotion Demo 10 | paper_url: null 11 | tags: [] 12 | version: '0.1' 13 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/graph.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": [ 3 | { 4 | "id": "Mujoco-Simulation", 5 | "position": { 6 | "x": 150.0, 7 | "y": 100.0 8 | }, 9 | "data": { 10 | "label": "MuJoCo Humanoid Simulation", 11 | "simulation_id": null 12 | }, 13 | "type": "physics-simulation" 14 | } 15 | ], 16 | "edges": [] 17 | } 18 | -------------------------------------------------------------------------------- /fluidize/core/modules/graph/nodes/node.py: -------------------------------------------------------------------------------- 1 | from fluidize.core.types.graph import GraphNode 2 | 3 | 4 | def parse_node_from_json(data: dict) -> list[GraphNode]: 5 | # If "nodes" is missing or not a list, treat it as empty 6 | raw_nodes = data.get("nodes") 7 | if not isinstance(raw_nodes, list): 8 | raw_nodes = [] 9 | return [GraphNode.model_validate(item) for item in raw_nodes] 10 | -------------------------------------------------------------------------------- /docs/core-modules/projects.md: -------------------------------------------------------------------------------- 1 | # Projects Module 2 | 3 | ## Registry 4 | ::: fluidize.managers.registry.RegistryManager 5 | options: 6 | show_source: false 7 | heading_level: 3 8 | show_root_heading: true 9 | 10 | ## Project 11 | ::: fluidize.managers.project.ProjectManager 12 | options: 13 | show_source: false 14 | heading_level: 3 15 | show_root_heading: true 16 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/Mujoco-Simulation/properties.yaml: 
-------------------------------------------------------------------------------- 1 | simulation: 2 | container_image: northamerica-northeast1-docker.pkg.dev/fluidize/fluidize-docker/mujoco-example 3 | last_run: null 4 | node_id: Mujoco-Simulation 5 | properties_version: '1.0' 6 | run_status: NOT_RUN 7 | should_run: true 8 | simulation_mount_path: source 9 | source_output_folder: source/outputs 10 | version: '1.0' 11 | -------------------------------------------------------------------------------- /fluidize/core/modules/graph/parameters.py: -------------------------------------------------------------------------------- 1 | from fluidize.core.types.parameters import Parameter 2 | 3 | 4 | def parse_parameters_from_json(data: dict) -> list[Parameter]: 5 | # If "parameters" is missing or not a list, treat it as empty 6 | raw_params = data.get("parameters") 7 | if not isinstance(raw_params, list): 8 | raw_params = [] 9 | return [Parameter.model_validate(item) for item in raw_params] 10 | -------------------------------------------------------------------------------- /fluidize/core/types/__init__.py: -------------------------------------------------------------------------------- 1 | """Types module for Fluidize core data structures.""" 2 | 3 | from fluidize.core.types.graph import GraphData, GraphEdge, GraphNode 4 | from fluidize.core.types.project import ProjectSummary 5 | from fluidize.core.types.runs import RunFlowPayload 6 | 7 | __all__ = [ 8 | "GraphData", 9 | "GraphEdge", 10 | "GraphNode", 11 | "ProjectSummary", 12 | "RunFlowPayload", 13 | ] 14 | -------------------------------------------------------------------------------- /fluidize/core/modules/graph/edges/edge.py: -------------------------------------------------------------------------------- 1 | from fluidize.core.types.graph import GraphEdge 2 | 3 | 4 | def parse_edge_from_json(data: dict) -> list[GraphEdge]: 5 | # If "edges" is missing or not a list, treat it as empty 6 | raw_edges = data.get("edges") 7 
| if not isinstance(raw_edges, list): 8 | raw_edges = [] 9 | return [GraphEdge.model_validate(item) for item in raw_edges] 10 | 11 | 12 | # ISSUE #8 13 | -------------------------------------------------------------------------------- /.github/workflows/validate-codecov-config.yml: -------------------------------------------------------------------------------- 1 | name: validate-codecov-config 2 | 3 | on: 4 | pull_request: 5 | paths: [codecov.yaml] 6 | push: 7 | branches: [main] 8 | 9 | jobs: 10 | validate-codecov-config: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Validate codecov configuration 15 | run: curl -sSL --fail-with-body --data-binary @codecov.yaml https://codecov.io/validate 16 | -------------------------------------------------------------------------------- /fluidize/core/types/execution_models/execution_mode.py: -------------------------------------------------------------------------------- 1 | """ 2 | Execution modes for Fluidize nodes. 
3 | """ 4 | 5 | from enum import Enum 6 | 7 | 8 | class ExecutionMode(Enum): 9 | """Supported execution modes for Fluidize nodes.""" 10 | 11 | LOCAL_DOCKER = "local_docker" 12 | VM_DOCKER = "vm_docker" 13 | KUBERNETES = "kubernetes" 14 | CLOUD_BATCH = "cloud_batch" 15 | CLUSTER_SLURM = "cluster_slurm" 16 | ARGO_WORKFLOW = "argo_workflow" 17 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | skipsdist = true 3 | envlist = py39, py310, py311, py312, py313 4 | 5 | [gh-actions] 6 | python = 7 | 3.9: py39 8 | 3.10: py310 9 | 3.11: py311 10 | 3.12: py312 11 | 3.13: py313 12 | 13 | [testenv] 14 | passenv = PYTHON_VERSION 15 | allowlist_externals = uv 16 | commands = 17 | uv sync --python {envpython} 18 | uv run python -m pytest --doctest-modules tests --cov --cov-config=pyproject.toml --cov-report=xml 19 | mypy 20 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/Mujoco-Simulation/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "parameters": [ 3 | { 4 | "value": "20.0", 5 | "description": "Control signal strength for bat motor (higher = faster swing, more collision force)", 6 | "type": "text", 7 | "label": "Motor Strength", 8 | "name": "motor_strength", 9 | "latex": null, 10 | "location": [ 11 | "source/pinata_simulation.py" 12 | ], 13 | "options": null, 14 | "scope": "simulation" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038461760/metadata.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | metadata_version: "1.0" 3 | name: test1-simple 4 | id: test1-simple 5 | description: Simple test node that outputs 'CANDY' to output.txt 6 | 
version: "1.0" 7 | date: "2025-08-01" 8 | code_url: null 9 | paper_url: null 10 | authors: 11 | - name: Henry Bae 12 | institution: Harvard University 13 | email: henry@fluidize.ai 14 | tags: 15 | - name: testing 16 | - name: simple 17 | - name: local 18 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038461760/metadata.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | metadata_version: "1.0" 3 | name: test1-simple 4 | id: test1-simple 5 | description: Simple test node that outputs 'CANDY' to output.txt 6 | version: "1.0" 7 | date: "2025-08-01" 8 | code_url: null 9 | paper_url: null 10 | authors: 11 | - name: Henry Bae 12 | institution: Harvard University 13 | email: henry@fluidize.ai 14 | tags: 15 | - name: testing 16 | - name: simple 17 | - name: local 18 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038465820/properties.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | container_image: python:3.9-slim 3 | image_name: test2-duplicate 4 | last_run: null 5 | node_id: node-1754038465820 6 | output_path: /Users/henrybae/Files/GitHub/fluidize_fast_api/fluidize_local/projects/project-1754038373536/runs/run_1/node-1754038465820/output 7 | properties_version: '1.0' 8 | run_status: SUCCESS 9 | should_run: true 10 | simulation_mount_path: source 11 | source_output_folder: output 12 | version: 1.0 13 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Install uv 2 | FROM python:3.12-slim 3 | COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv 4 | 5 | # Change the working directory to the `app` directory 6 | WORKDIR 
/app 7 | 8 | # Copy the lockfile and `pyproject.toml` into the image 9 | COPY uv.lock /app/uv.lock 10 | COPY pyproject.toml /app/pyproject.toml 11 | 12 | # Install dependencies 13 | RUN uv sync --frozen --no-install-project 14 | 15 | # Copy the project into the image 16 | COPY . /app 17 | 18 | # Sync the project 19 | RUN uv sync --frozen 20 | 21 | CMD [ "python", "fluidize/foo.py" ] 22 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038461760/properties.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | container_image: python:3.9-slim 3 | image_name: test1-simple 4 | last_run: null 5 | node_id: node-1754038461760 6 | output_path: /Users/henrybae/Files/GitHub/fluidize_fast_api/fluidize_local/projects/project-1754038373536/runs/run_1/node-1754038461760/source/output 7 | properties_version: '1.0' 8 | run_status: SUCCESS 9 | should_run: true 10 | simulation_mount_path: source 11 | source_output_folder: source/output 12 | version: 1.0 13 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/Mujoco-Simulation/cloudbuild.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - name: 'gcr.io/cloud-builders/docker' 3 | args: 4 | - 'build' 5 | - '-t' 6 | - 'northamerica-northeast1-docker.pkg.dev/fluidize/fluidize-docker/mujoco-example:latest' 7 | - '.' 
8 | - name: 'gcr.io/cloud-builders/docker' 9 | args: 10 | - 'push' 11 | - 'northamerica-northeast1-docker.pkg.dev/fluidize/fluidize-docker/mujoco-example:latest' 12 | 13 | images: 14 | - 'northamerica-northeast1-docker.pkg.dev/fluidize/fluidize-docker/mujoco-example:latest' 15 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038465820/metadata.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | metadata_version: "1.0" 3 | name: test2-duplicate 4 | id: test2-duplicate 5 | description: Takes input from Test1 and duplicates the content (CANDY -> CANDYCANDY) 6 | version: "1.0" 7 | date: "2025-08-01" 8 | code_url: null 9 | paper_url: null 10 | authors: 11 | - name: Henry Bae 12 | institution: Harvard University 13 | email: henry@fluidize.ai 14 | tags: 15 | - name: testing 16 | - name: simple 17 | - name: local 18 | - name: duplicate 19 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038465820/metadata.yaml: -------------------------------------------------------------------------------- 1 | simulation: 2 | metadata_version: "1.0" 3 | name: test2-duplicate 4 | id: test2-duplicate 5 | description: Takes input from Test1 and duplicates the content (CANDY -> CANDYCANDY) 6 | version: "1.0" 7 | date: "2025-08-01" 8 | code_url: null 9 | paper_url: null 10 | authors: 11 | - name: Henry Bae 12 | institution: Harvard University 13 | email: henry@fluidize.ai 14 | tags: 15 | - name: testing 16 | - name: simple 17 | - name: local 18 | - name: duplicate 19 | -------------------------------------------------------------------------------- /fluidize/core/modules/execute/utilities/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Execution Utilities 3 | 4 | Shared utility 
modules for building universal container specifications 5 | from ExecutionContext across all execution methods. 6 | """ 7 | 8 | from .environment_builder import EnvironmentBuilder 9 | from .path_converter import PathConverter 10 | from .resource_builder import ResourceBuilder 11 | from .universal_builder import UniversalContainerBuilder 12 | from .volume_builder import VolumeBuilder 13 | 14 | __all__ = ["EnvironmentBuilder", "PathConverter", "ResourceBuilder", "UniversalContainerBuilder", "VolumeBuilder"] 15 | -------------------------------------------------------------------------------- /fluidize/core/types/parameters.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | This module defines the structure of parameters.json 4 | 5 | """ 6 | 7 | from typing import Optional 8 | 9 | from pydantic import BaseModel 10 | 11 | 12 | class ParameterOption(BaseModel): 13 | value: str 14 | label: str 15 | 16 | 17 | class Parameter(BaseModel): 18 | value: str 19 | description: str 20 | # type specifies the type of the value, e.g. 
"text", "dropdown" 21 | type: str 22 | label: str 23 | name: str 24 | latex: Optional[str] = None 25 | location: Optional[list[str]] = None 26 | options: Optional[list[ParameterOption]] = None 27 | scope: Optional[str] = None 28 | -------------------------------------------------------------------------------- /docs/core-modules/run.md: -------------------------------------------------------------------------------- 1 | # Run Module 2 | 3 | ## Run Management 4 | 5 | ::: fluidize.managers.runs.RunsManager 6 | options: 7 | show_source: false 8 | heading_level: 3 9 | show_root_heading: true 10 | members: 11 | - run_flow 12 | - list 13 | - get_status 14 | 15 | ## Run Execution 16 | 17 | ::: fluidize.core.modules.run.RunJob 18 | options: 19 | show_source: false 20 | heading_level: 3 21 | show_signature: false 22 | show_root_heading: true 23 | 24 | ::: fluidize.core.modules.run.project.ProjectRunner 25 | options: 26 | show_source: false 27 | heading_level: 3 28 | show_root_heading: true 29 | -------------------------------------------------------------------------------- /fluidize/core/types/project.py: -------------------------------------------------------------------------------- 1 | from .file_models.metadata_model import MetadataModel 2 | 3 | # ISSUE 17: ADDING MORE PROPERTIES TO PROJECT SUMMARY 4 | 5 | """ This is the ProjectSummary that is used to communicate back and forth with the frontend. 6 | This is somewhat legacy and all the YAML properties based operations are deal with in ProjectMetadata. 
7 | 8 | """ 9 | 10 | # ISSUE 19 (IMPORTANT) 11 | 12 | 13 | class ProjectSummary(MetadataModel): 14 | metadata_version: str = "1.0" 15 | id: str 16 | label: str = "" 17 | description: str = "" 18 | status: str = "" 19 | location: str = "" 20 | 21 | class Key: 22 | key: str = "project" 23 | metadata_version: str = "1.0" 24 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: "v5.0.0" 4 | hooks: 5 | - id: check-case-conflict 6 | - id: check-merge-conflict 7 | - id: check-toml 8 | - id: check-yaml 9 | - id: check-json 10 | exclude: ^.devcontainer/devcontainer.json 11 | - id: pretty-format-json 12 | exclude: ^.devcontainer/devcontainer.json 13 | args: [--autofix, --no-sort-keys] 14 | - id: end-of-file-fixer 15 | - id: trailing-whitespace 16 | 17 | - repo: https://github.com/astral-sh/ruff-pre-commit 18 | rev: "v0.11.5" 19 | hooks: 20 | - id: ruff 21 | args: [--exit-non-zero-on-fix] 22 | - id: ruff-format 23 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/metadata.yaml: -------------------------------------------------------------------------------- 1 | project: 2 | description: '' 3 | id: project-1754038373536 4 | label: SIMPLETEST 5 | location: '' 6 | metadata_version: '1.0' 7 | status: active 8 | run: 9 | date_created: '2025-08-05T14:13:10.308858' 10 | date_modified: '2025-08-05T14:13:10.308878' 11 | description: Testing the new LocalExecutionManagerNew with utilities 12 | id: project-1754038373536_run_1 13 | metadata_version: '1.0' 14 | mlflow_experiment_id: '335927476181895958' 15 | mlflow_run_id: e89ac2e48be8467088efbf13b64a341f 16 | name: Test New Local Execution 17 | run_folder: run_1 18 | run_number: 1 19 | run_status: SUCCESS 20 | tags: 21 | - test 22 
| - local 23 | - new-utilities 24 | -------------------------------------------------------------------------------- /fluidize/adapters/local/adapter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Local adapter implementation - aggregates all local handlers. 3 | """ 4 | 5 | from typing import Any 6 | 7 | from .graph import GraphHandler 8 | from .projects import ProjectsHandler 9 | from .runs import RunsHandler 10 | 11 | 12 | class LocalAdapter: 13 | """ 14 | Local adapter that provides SDK-compatible interface using local handlers. 15 | """ 16 | 17 | def __init__(self, config: Any) -> None: 18 | """ 19 | Initialize the local adapter with all handlers. 20 | 21 | Args: 22 | config: FluidizeConfig instance 23 | """ 24 | self.config = config 25 | 26 | self.projects = ProjectsHandler(config) 27 | self.graph = GraphHandler() 28 | self.runs = RunsHandler(config) 29 | -------------------------------------------------------------------------------- /fluidize/__init__.py: -------------------------------------------------------------------------------- 1 | """Fluidize Python library for scientific computing pipeline automation.""" 2 | 3 | import fluidize.core.modules.run.node.node_runner 4 | import fluidize.core.modules.run.project.methods.local 5 | 6 | # Auto-register run modules to ensure they are loaded 7 | import fluidize.core.modules.run.project.project_runner 8 | import fluidize.core.utils.dataloader.loader.loader_local 9 | import fluidize.core.utils.dataloader.loader.writer_local 10 | 11 | # Auto-register local handlers when fluidize is imported 12 | # This ensures handlers are available without manual imports 13 | import fluidize.core.utils.pathfinder.methods.local # noqa: F401 14 | 15 | from .client import FluidizeClient 16 | from .config import config 17 | 18 | __version__ = "0.0.2" 19 | __all__ = ["FluidizeClient", "config"] 20 | -------------------------------------------------------------------------------- 
/tests/fixtures/docker_projects/project-1754038373536/node-1754038461760/main.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Test1 Simple Node Script 4 | 5 | echo "=== Test1 Simple Node ===" 6 | echo "Working directory: $(pwd)" 7 | echo "Directory contents:" 8 | ls -la 9 | 10 | echo "=== Environment Variables ===" 11 | echo "Node ID: ${FLUIDIZE_NODE_ID}" 12 | echo "Input Path: ${FLUIDIZE_INPUT_PATH:-/mnt/inputs}" 13 | echo "Output Path: ${FLUIDIZE_OUTPUT_PATH:-/mnt/outputs}" 14 | echo "Execution Mode: ${FLUIDIZE_EXECUTION_MODE}" 15 | 16 | echo "=== Starting Test1 execution ===" 17 | # Run the Python script with environment variable paths (with fallback to hardcoded paths) 18 | python main.py "${FLUIDIZE_INPUT_PATH:-/mnt/inputs}" "${FLUIDIZE_OUTPUT_PATH:-/mnt/outputs}" 19 | 20 | echo "=== Test1 execution completed successfully ===" 21 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038465820/main.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Test2 Duplicate Node Script 4 | 5 | echo "=== Test2 Duplicate Node ===" 6 | echo "Working directory: $(pwd)" 7 | echo "Directory contents:" 8 | ls -la 9 | 10 | echo "=== Environment Variables ===" 11 | echo "Node ID: ${FLUIDIZE_NODE_ID}" 12 | echo "Input Path: ${FLUIDIZE_INPUT_PATH:-/mnt/inputs}" 13 | echo "Output Path: ${FLUIDIZE_OUTPUT_PATH:-/mnt/outputs}" 14 | echo "Execution Mode: ${FLUIDIZE_EXECUTION_MODE}" 15 | 16 | echo "=== Starting Test2 execution ===" 17 | # Run the Python script with environment variable paths (with fallback to hardcoded paths) 18 | python main.py "${FLUIDIZE_INPUT_PATH:-/mnt/inputs}" "${FLUIDIZE_OUTPUT_PATH:-/mnt/outputs}" 19 | 20 | echo "=== Test2 execution completed successfully ===" 21 | -------------------------------------------------------------------------------- 
/tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038461760/main.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Test1 Simple Node Script 4 | 5 | echo "=== Test1 Simple Node ===" 6 | echo "Working directory: $(pwd)" 7 | echo "Directory contents:" 8 | ls -la 9 | 10 | echo "=== Environment Variables ===" 11 | echo "Node ID: ${FLUIDIZE_NODE_ID}" 12 | echo "Input Path: ${FLUIDIZE_INPUT_PATH:-/mnt/inputs}" 13 | echo "Output Path: ${FLUIDIZE_OUTPUT_PATH:-/mnt/outputs}" 14 | echo "Execution Mode: ${FLUIDIZE_EXECUTION_MODE}" 15 | 16 | echo "=== Starting Test1 execution ===" 17 | # Run the Python script with environment variable paths (with fallback to hardcoded paths) 18 | python main.py "${FLUIDIZE_INPUT_PATH:-/mnt/inputs}" "${FLUIDIZE_OUTPUT_PATH:-/mnt/outputs}" 19 | 20 | echo "=== Test1 execution completed successfully ===" 21 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038465820/main.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Test2 Duplicate Node Script 4 | 5 | echo "=== Test2 Duplicate Node ===" 6 | echo "Working directory: $(pwd)" 7 | echo "Directory contents:" 8 | ls -la 9 | 10 | echo "=== Environment Variables ===" 11 | echo "Node ID: ${FLUIDIZE_NODE_ID}" 12 | echo "Input Path: ${FLUIDIZE_INPUT_PATH:-/mnt/inputs}" 13 | echo "Output Path: ${FLUIDIZE_OUTPUT_PATH:-/mnt/outputs}" 14 | echo "Execution Mode: ${FLUIDIZE_EXECUTION_MODE}" 15 | 16 | echo "=== Starting Test2 execution ===" 17 | # Run the Python script with environment variable paths (with fallback to hardcoded paths) 18 | python main.py "${FLUIDIZE_INPUT_PATH:-/mnt/inputs}" "${FLUIDIZE_OUTPUT_PATH:-/mnt/outputs}" 19 | 20 | echo "=== Test2 execution completed successfully ===" 21 | 
-------------------------------------------------------------------------------- /docs/core-modules/client.md: -------------------------------------------------------------------------------- 1 | # Client Module 2 | 3 | The Fluidize Client is the primary interface to create and edit projects. There are two interfaces for this, with more on the way. 4 | 5 | - **Local Mode**: Works with your local device, uses Docker to sequentially execute nodes. 6 | 7 | - **API Mode**: Runs on Fluidize API to manage projects and workflows in the cloud. 8 | 9 | ::: fluidize.client.FluidizeClient 10 | options: 11 | show_source: false 12 | heading_level: 3 13 | show_root_heading: true 14 | members: 15 | - mode 16 | - adapters 17 | - projects 18 | - runs 19 | 20 | ::: fluidize.config.FluidizeConfig 21 | options: 22 | show_source: false 23 | heading_level: 3 24 | show_root_heading: true 25 | members: 26 | - is_local_mode 27 | - is_api_mode 28 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/graph.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": [ 3 | { 4 | "id": "node-1754038461760", 5 | "position": { 6 | "x": -25.5, 7 | "y": -76.5 8 | }, 9 | "data": { 10 | "label": "TEST1", 11 | "simulation_id": "test1-simple" 12 | }, 13 | "type": "defaultsimulation" 14 | }, 15 | { 16 | "id": "node-1754038465820", 17 | "position": { 18 | "x": 42.0, 19 | "y": 63.5 20 | }, 21 | "data": { 22 | "label": "TEST2", 23 | "simulation_id": "test2-duplicate" 24 | }, 25 | "type": "defaultsimulation" 26 | } 27 | ], 28 | "edges": [ 29 | { 30 | "id": "edge-1754038470422", 31 | "source": "node-1754038461760", 32 | "target": "node-1754038465820", 33 | "type": "button" 34 | } 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /.github/actions/setup-python-env/action.yml: 
-------------------------------------------------------------------------------- 1 | name: "Setup Python Environment" 2 | description: "Set up Python environment for the given Python version" 3 | 4 | inputs: 5 | python-version: 6 | description: "Python version to use" 7 | required: true 8 | default: "3.12" 9 | uv-version: 10 | description: "uv version to use" 11 | required: true 12 | default: "0.6.14" 13 | 14 | runs: 15 | using: "composite" 16 | steps: 17 | - uses: actions/setup-python@v5 18 | with: 19 | python-version: ${{ inputs.python-version }} 20 | 21 | - name: Install uv 22 | uses: astral-sh/setup-uv@v6 23 | with: 24 | version: ${{ inputs.uv-version }} 25 | enable-cache: 'true' 26 | cache-suffix: ${{ matrix.python-version }} 27 | 28 | - name: Install Python dependencies 29 | run: uv sync --frozen 30 | shell: bash 31 | -------------------------------------------------------------------------------- /fluidize/core/utils/exceptions/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom exceptions for the Fluidize project. 3 | 4 | This module provides custom exception classes for better error handling 5 | and debugging throughout the Fluidize application. 6 | """ 7 | 8 | 9 | class FluidizeError(Exception): 10 | """Base exception class for all Fluidize-related errors.""" 11 | 12 | pass 13 | 14 | 15 | class ProjectAlreadyExistsError(FluidizeError): 16 | """Raised when attempting to create a project that already exists.""" 17 | 18 | def __init__(self, project_id: str) -> None: 19 | """ 20 | Initialize the exception. 21 | 22 | Args: 23 | project_id: The ID of the project that already exists 24 | """ 25 | super().__init__(f"Project '{project_id}' already exists. 
Use update to modify existing projects.") 26 | self.project_id = project_id 27 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/graph.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": [ 3 | { 4 | "id": "node-1754038461760", 5 | "position": { 6 | "x": -25.5, 7 | "y": -76.5 8 | }, 9 | "data": { 10 | "label": "TEST1", 11 | "simulation_id": "test1-simple" 12 | }, 13 | "type": "defaultsimulation" 14 | }, 15 | { 16 | "id": "node-1754038465820", 17 | "position": { 18 | "x": 42.0, 19 | "y": 63.5 20 | }, 21 | "data": { 22 | "label": "TEST2", 23 | "simulation_id": "test2-duplicate" 24 | }, 25 | "type": "defaultsimulation" 26 | } 27 | ], 28 | "edges": [ 29 | { 30 | "id": "edge-1754038470422", 31 | "source": "node-1754038461760", 32 | "target": "node-1754038465820", 33 | "type": "button" 34 | } 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/Mujoco-Simulation/Dockerfile: -------------------------------------------------------------------------------- 1 | # MuJoCo Simulation Container - With OpenGL support 2 | FROM python:3.9 3 | 4 | # Install system dependencies for OpenGL and MuJoCo 5 | RUN apt-get update && apt-get install -y \ 6 | libosmesa6-dev \ 7 | libgl1-mesa-dev \ 8 | libglfw3 \ 9 | xvfb \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | # Set working directory 13 | WORKDIR /app 14 | 15 | # Install MuJoCo Python package and dependencies 16 | RUN pip install --no-cache-dir \ 17 | mujoco \ 18 | numpy \ 19 | imageio \ 20 | imageio-ffmpeg \ 21 | matplotlib \ 22 | seaborn 23 | 24 | # Set environment variables for MuJoCo 25 | ENV MUJOCO_GL=osmesa 26 | ENV PYTHONPATH=/app/source 27 | 28 | # Make sure main.sh will be executable when mounted 29 | RUN mkdir -p /app/node 30 | 31 | # Entry point - execute main.sh script 32 | CMD ["/bin/bash"] 33 | 
-------------------------------------------------------------------------------- /fluidize/core/utils/pathfinder/methods/local.py: -------------------------------------------------------------------------------- 1 | from upath import UPath 2 | 3 | from fluidize.config import config 4 | from fluidize.core.utils.retrieval.handler import register_handler 5 | 6 | from .base import BasePathFinder 7 | 8 | 9 | class LocalPathFinder(BasePathFinder): 10 | def get_projects_path(self) -> UPath: 11 | """Get the path to the projects directory for local storage""" 12 | return UPath(config.local_projects_path) 13 | 14 | def get_simulations_path(self, sim_global: bool) -> UPath: 15 | return UPath(config.local_simulations_path) 16 | 17 | def get_mlflow_tracking_uri(self) -> str: 18 | """Get the MLFlow tracking URI for local storage""" 19 | return f"file://{config.local_base_path.resolve()}/mlruns" 20 | 21 | 22 | # Auto-register this handler when module is imported 23 | register_handler("pathfinder", "local", LocalPathFinder) 24 | -------------------------------------------------------------------------------- /fluidize/core/constants.py: -------------------------------------------------------------------------------- 1 | """ 2 | Immutable constants for Fluidize file structure and naming conventions. 3 | 4 | These constants define the Fluidize specification and should NEVER be changed 5 | as they ensure compatibility across all Fluidize implementations. 
6 | """ 7 | 8 | 9 | class FileConstants: 10 | """Immutable file naming standards for Fluidize.""" 11 | 12 | # Core file suffixes 13 | GRAPH_SUFFIX = "graph.json" 14 | PARAMETERS_SUFFIX = "parameters.json" 15 | METADATA_SUFFIX = "metadata.yaml" 16 | PROPERTIES_SUFFIX = "properties.yaml" 17 | SIMULATIONS_SUFFIX = "simulations.json" 18 | RETRIEVAL_MODE_SUFFIX = "retrieval_mode.json" 19 | 20 | # Directory structure standards 21 | RUNS_DIR = "runs" 22 | OUTPUTS_DIR = "outputs" 23 | SIMULATIONS_DIR = "simulations" 24 | PROJECTS_DIR = "projects" 25 | 26 | # Run naming convention 27 | RUN_PREFIX = "run_" 28 | 29 | # File encoding 30 | DEFAULT_ENCODING = "utf-8" 31 | -------------------------------------------------------------------------------- /fluidize/core/types/execution_models/workflow_context.py: -------------------------------------------------------------------------------- 1 | """ 2 | Workflow context for DAG-based execution. 3 | """ 4 | 5 | from dataclasses import dataclass, field 6 | from typing import Optional 7 | 8 | from .execution_hints import RetryPolicy 9 | 10 | 11 | @dataclass 12 | class WorkflowContext: 13 | """Context information for workflow execution.""" 14 | 15 | # Workflow identification 16 | workflow_id: str 17 | workflow_name: str 18 | step_name: str 19 | 20 | # Parallel execution 21 | parallel_group: Optional[str] = None 22 | execution_order: int = 0 23 | 24 | # Dependencies and flow 25 | depends_on: list[str] = field(default_factory=list) 26 | 27 | # Retry and error handling 28 | retry_policy: Optional[RetryPolicy] = None 29 | continue_on_failure: bool = False 30 | 31 | # Workflow metadata 32 | workflow_labels: dict[str, str] = field(default_factory=dict) 33 | workflow_annotations: dict[str, str] = field(default_factory=dict) 34 | -------------------------------------------------------------------------------- /fluidize/core/types/execution_models/__init__.py: -------------------------------------------------------------------------------- 1 | 
""" 2 | Execution Models for Universal Container Specifications 3 | 4 | This package provides enhanced context classes that extend beyond the simple 5 | node + prev_node pattern to support complex dependencies, resource requirements, 6 | and advanced scheduling for all execution methods. 7 | """ 8 | 9 | from .container_spec import ContainerSpec, PodSpec, Volume, VolumeMount 10 | from .execution_context import ExecutionContext, create_execution_context 11 | from .execution_hints import ExecutionHints, RetryPolicy 12 | from .execution_mode import ExecutionMode 13 | from .resource_requirements import GPUType, NodePoolType, ResourceRequirements 14 | from .workflow_context import WorkflowContext 15 | 16 | __all__ = [ 17 | "ContainerSpec", 18 | "ExecutionContext", 19 | "ExecutionHints", 20 | "ExecutionMode", 21 | "GPUType", 22 | "NodePoolType", 23 | "PodSpec", 24 | "ResourceRequirements", 25 | "RetryPolicy", 26 | "Volume", 27 | "VolumeMount", 28 | "WorkflowContext", 29 | "create_execution_context", 30 | ] 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Fluidize 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/Mujoco-Simulation/source/test_simple.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Simple test script to verify container execution.""" 3 | 4 | import os 5 | import sys 6 | 7 | print("=== SIMPLE TEST SCRIPT ===") 8 | print(f"Python version: {sys.version}") 9 | print(f"Current working directory: {os.getcwd()}") 10 | print("Environment variables:") 11 | for key, value in os.environ.items(): 12 | if key.startswith("FLUIDIZE") or key.startswith("SIMULATION"): 13 | print(f" {key}: {value}") 14 | 15 | # Test if we can import mujoco 16 | try: 17 | import mujoco 18 | 19 | print(f"✅ MuJoCo imported successfully: version {mujoco.__version__}") 20 | except Exception as e: 21 | print(f"❌ Failed to import MuJoCo: {e}") 22 | 23 | # Create a simple output file 24 | output_path = os.environ.get("SIMULATION_OUTPUT_PATH", "outputs") 25 | os.makedirs(output_path, exist_ok=True) 26 | test_file = os.path.join(output_path, "test_output.txt") 27 | with open(test_file, "w") as f: 28 | f.write("Test completed successfully!\n") 29 | print(f"✅ Created test file: {test_file}") 30 | 31 | print("=== TEST COMPLETED ===") 32 | -------------------------------------------------------------------------------- /docs/core-modules/graph.md: -------------------------------------------------------------------------------- 1 
| # Graph Module 2 | 3 | ## Graph Manager 4 | ::: fluidize.managers.graph.GraphManager 5 | options: 6 | show_source: false 7 | heading_level: 3 8 | show_root_heading: true 9 | members: 10 | - get 11 | - add_node 12 | - update_node_position 13 | - delete_node 14 | - add_edge 15 | - delete_edge 16 | 17 | ## Graph Processor 18 | ::: fluidize.core.modules.graph.GraphProcessor 19 | options: 20 | show_source: false 21 | heading_level: 3 22 | show_root_heading: true 23 | 24 | ## Graph Types 25 | 26 | ::: fluidize.core.types.graph.GraphData 27 | options: 28 | heading_level: 3 29 | show_root_heading: true 30 | extra: 31 | show_attributes: true 32 | 33 | 34 | ::: fluidize.core.types.graph.GraphNode 35 | options: 36 | heading_level: 3 37 | show_root_heading: true 38 | extra: 39 | show_attributes: true 40 | 41 | 42 | ::: fluidize.core.types.graph.GraphEdge 43 | options: 44 | heading_level: 3 45 | show_root_heading: true 46 | extra: 47 | show_attributes: true 48 | -------------------------------------------------------------------------------- /fluidize/managers/simulations.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from fluidize_sdk import FluidizeSDK 4 | 5 | from fluidize.core.types.node import nodeMetadata_simulation 6 | 7 | 8 | class SimulationsManager: 9 | """ 10 | Simulations manager that provides access to the Fluidize simulation library. 11 | """ 12 | 13 | def __init__(self, adapter: Any) -> None: 14 | """ 15 | Args: 16 | adapter: adapter (FluidizeSDK or LocalAdapter) 17 | """ 18 | self._adapter = adapter 19 | # TODO: Fix hardcoding of api_token and remove type ignore 20 | self.fluidize_sdk = FluidizeSDK(api_token="placeholder") # noqa: S106 21 | 22 | def list_simulations(self) -> list[Any]: 23 | """ 24 | List all simulations available in the Fluidize simulation library. 
25 | 26 | Returns: 27 | List of simulation metadata 28 | """ 29 | simulations = self.fluidize_sdk.simulation.list_simulations(sim_global=True) 30 | return [ 31 | nodeMetadata_simulation.from_dict_and_path(data=simulation.model_dump(), path=None) 32 | for simulation in simulations 33 | ] 34 | -------------------------------------------------------------------------------- /fluidize/core/modules/execute/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Execution Module 3 | 4 | This module provides execution clients for different environments: 5 | - Docker SDK for local execution 6 | - VM/SSH clients for remote execution 7 | - Shared utilities for all execution methods 8 | 9 | All clients use proper libraries instead of unsafe command string construction: 10 | - DockerExecutionClient: Uses docker-py SDK 11 | - KubernetesExecutionClient: Uses kubernetes-client library 12 | - VMExecutionClient: Uses shlex.quote() for safe SSH command execution 13 | """ 14 | 15 | from .docker_client import ContainerResult, DockerExecutionClient 16 | from .execution_manager import ExecutionManager 17 | 18 | # from .kubernetes_client import JobResult, KubernetesExecutionClient 19 | from .utilities.environment_builder import EnvironmentBuilder 20 | from .utilities.path_converter import PathConverter 21 | from .utilities.volume_builder import VolumeBuilder 22 | from .vm_client import VMExecutionClient, VMExecutionResult 23 | 24 | __all__ = [ 25 | "ContainerResult", 26 | "DockerExecutionClient", 27 | "EnvironmentBuilder", 28 | "ExecutionManager", 29 | "PathConverter", 30 | "VMExecutionClient", 31 | "VMExecutionResult", 32 | "VolumeBuilder", 33 | # "JobResult", 34 | # "KubernetesExecutionClient", 35 | ] 36 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see 
https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/python 3 | { 4 | "name": "fluidize-python", 5 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 6 | "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye", 7 | "runArgs": [ 8 | // avoid UID/GID remapping under rootless Podman 9 | "--userns=keep-id" 10 | ], 11 | "features": {}, 12 | 13 | // Use 'postCreateCommand' to run commands after the container is created. 14 | "postCreateCommand": "./.devcontainer/postCreateCommand.sh", 15 | 16 | // Configure tool-specific properties. 17 | "customizations": { 18 | "vscode": { 19 | "extensions": ["ms-python.python", "editorconfig.editorconfig"], 20 | "settings": { 21 | "python.testing.pytestArgs": ["tests"], 22 | "python.testing.unittestEnabled": false, 23 | "python.testing.pytestEnabled": true, 24 | "python.defaultInterpreterPath": "/workspaces/fluidize-python/.venv/bin/python", 25 | "python.testing.pytestPath": "/workspaces/fluidize-python/.venv/bin/pytest" 26 | } 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /fluidize/core/types/execution_models/execution_hints.py: -------------------------------------------------------------------------------- 1 | """ 2 | Execution hints and configurations for container execution. 
3 | """ 4 | 5 | from dataclasses import dataclass, field 6 | from typing import Any, Optional 7 | 8 | 9 | @dataclass 10 | class RetryPolicy: 11 | """Retry policy for failed executions.""" 12 | 13 | max_retries: int = 3 14 | backoff_limit: int = 6 15 | restart_policy: str = "OnFailure" # Never, OnFailure, Always 16 | 17 | 18 | @dataclass 19 | class ExecutionHints: 20 | """Execution-specific hints and configurations.""" 21 | 22 | # Platform specifications 23 | platform: Optional[str] = None # linux/amd64, linux/arm64 24 | architecture: Optional[str] = None 25 | 26 | # Security settings 27 | privileged: bool = False 28 | run_as_user: Optional[int] = None 29 | run_as_group: Optional[int] = None 30 | 31 | # Networking 32 | network_mode: str = "default" 33 | dns_policy: str = "ClusterFirst" 34 | 35 | # Container settings 36 | tty: bool = False 37 | stdin: bool = False 38 | 39 | # Timeouts 40 | active_deadline_seconds: Optional[int] = None # Max execution time 41 | termination_grace_period: int = 30 42 | 43 | # Node scheduling 44 | node_selector: dict[str, str] = field(default_factory=dict) 45 | tolerations: list[dict[str, Any]] = field(default_factory=list) 46 | affinity: Optional[dict[str, Any]] = None 47 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/node-1754038461760/source/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Test1 Simple Node 4 | 5 | This script creates an output.txt file containing 'CANDY' in the outputs directory. 6 | It doesn't require any input files. 
7 | 8 | Author: Henry Bae 9 | """ 10 | 11 | import os 12 | import sys 13 | 14 | 15 | def create_candy_output(input_path, output_path): 16 | """ 17 | Create output.txt file with 'CANDY' content 18 | """ 19 | print(f"Input path: {input_path}") 20 | print(f"Output path: {output_path}") 21 | 22 | # Create output directory if it doesn't exist 23 | os.makedirs(output_path, exist_ok=True) 24 | 25 | # Create output.txt with 'CANDY' content 26 | output_file = os.path.join(output_path, "output.txt") 27 | 28 | with open(output_file, "w") as f: 29 | f.write("CANDY") 30 | 31 | print(f"Successfully created {output_file} with content: 'CANDY'") 32 | 33 | return output_file 34 | 35 | 36 | if __name__ == "__main__": 37 | if len(sys.argv) != 3: 38 | print("Usage: python main.py ") 39 | sys.exit(1) 40 | 41 | input_path = sys.argv[1] 42 | output_path = sys.argv[2] 43 | 44 | try: 45 | output_file = create_candy_output(input_path, output_path) 46 | print(f"\n✓ Success! Output file created: {output_file}") 47 | except Exception as e: 48 | print(f"\n✗ Error: {e}") 49 | sys.exit(1) 50 | -------------------------------------------------------------------------------- /tests/integration/test_simulations_manager.py: -------------------------------------------------------------------------------- 1 | """Integration tests for SimulationsManager - tests real API connectivity.""" 2 | 3 | import pytest 4 | 5 | from fluidize.managers.simulations import SimulationsManager 6 | 7 | 8 | class TestSimulationsManagerIntegration: 9 | """Integration test suite for SimulationsManager class.""" 10 | 11 | @pytest.fixture 12 | def mock_adapter(self): 13 | """Create a mock adapter for testing.""" 14 | from unittest.mock import Mock 15 | 16 | adapter = Mock() 17 | return adapter 18 | 19 | def test_list_simulations_integration(self, mock_adapter): 20 | """Integration test that actually calls the API and prints output.""" 21 | 22 | # Create manager without mocking SDK 23 | manager = 
SimulationsManager(mock_adapter) 24 | 25 | # Act - make real API call 26 | result = manager.list_simulations() 27 | 28 | # Assert basic functionality 29 | assert isinstance(result, list) 30 | 31 | # Print results for manual verification 32 | print("\n=== Integration Test Results ===") 33 | print(f"Number of simulations found: {len(result)}") 34 | for sim in result: 35 | print("Simulation details:") 36 | print(f" Name: {sim.name}") 37 | print(f" ID: {sim.id}") 38 | print(f" Description: {sim.description}") 39 | print(f" Version: {sim.version}") 40 | print("\n") 41 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038461760/source/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Test1 Simple Node 4 | 5 | This script creates an output.txt file containing 'CANDY' in the outputs directory. 6 | It doesn't require any input files. 7 | 8 | Author: Henry Bae 9 | """ 10 | 11 | import os 12 | import sys 13 | 14 | 15 | def create_candy_output(input_path, output_path): 16 | """ 17 | Create output.txt file with 'CANDY' content 18 | """ 19 | print(f"Input path: {input_path}") 20 | print(f"Output path: {output_path}") 21 | 22 | # Create output directory if it doesn't exist 23 | os.makedirs(output_path, exist_ok=True) 24 | 25 | # Create output.txt with 'CANDY' content 26 | output_file = os.path.join(output_path, "output.txt") 27 | 28 | with open(output_file, "w") as f: 29 | f.write("CANDY") 30 | 31 | print(f"Successfully created {output_file} with content: 'CANDY'") 32 | 33 | return output_file 34 | 35 | 36 | if __name__ == "__main__": 37 | if len(sys.argv) != 3: 38 | print("Usage: python main.py ") 39 | sys.exit(1) 40 | 41 | input_path = sys.argv[1] 42 | output_path = sys.argv[2] 43 | 44 | try: 45 | output_file = create_candy_output(input_path, output_path) 46 | print(f"\n✓ Success! 
from typing import Optional

import fluidize.core.modules.run.node.methods.local.Environment as LocalEnvironment
from fluidize.core.modules.run.node.methods.base.execstrat import BaseExecutionStrategy

# NOTE(review): `ExecuteNew` is not visible in the repository tree shown for
# methods/local (only execstrat.py and Environment.py) — confirm this module
# exists or whether base/Execute.py was intended.
from fluidize.core.modules.run.node.methods.local.ExecuteNew import LocalExecutionManagerNew


class LocalExecutionStrategy(BaseExecutionStrategy):
    """Execution strategy for running a node on the local machine.

    Pure delegation/glue: plugs the local environment manager and local
    execution manager into the template defined by BaseExecutionStrategy.
    """

    def __init__(  # type: ignore[no-untyped-def]
        self,
        node,
        prev_node,
        project,
        mlflow_tracker=None,
        run_id: Optional[str] = None,
        run_metadata=None,
    ) -> None:
        # All state is stored by the base class; nothing local to add here.
        super().__init__(node, prev_node, project, mlflow_tracker, run_id, run_metadata)

    def _set_environment(self):  # type: ignore[no-untyped-def]
        # Environment manager prepares local files/paths for this node.
        return LocalEnvironment.LocalEnvironmentManager(
            node=self.node,
            prev_node=self.prev_node,
            project=self.project,
        )

    def _load_execution_manager(self):  # type: ignore[no-untyped-def]
        # Execution manager performs the actual local run for this node.
        return LocalExecutionManagerNew(
            self.node,
            self.prev_node,
            self.project,
            self.run_id,
            self.run_metadata,
        )

    def handle_files(self) -> None:
        """Handle file operations for local execution."""
        # For local execution, file handling is done by the environment manager
        # This method can be extended in the future for specific file operations
        pass
from typing import Optional

from upath import UPath

from fluidize.core.modules.run.node.methods.base.Environment import BaseEnvironmentManager
from fluidize.core.types.node import nodeProperties_simulation
from fluidize.core.types.project import ProjectSummary


class LocalEnvironmentManager(BaseEnvironmentManager):
    """Environment manager for nodes executed on the local filesystem.

    Supplies plain-filesystem implementations of the file primitives the
    base class uses for parameter substitution.
    """

    def __init__(
        self, node: nodeProperties_simulation, prev_node: Optional[nodeProperties_simulation], project: ProjectSummary
    ) -> None:
        super().__init__(node, prev_node, project)

    def _get_file_content(self, loc: UPath) -> str:
        """Read and return the entire text content of *loc*."""
        with open(loc) as handle:
            return handle.read()

    def _write_file_content(self, loc: UPath, content: str) -> None:
        """Overwrite *loc* with *content*."""
        with open(loc, "w") as handle:
            handle.write(content)

    def _should_process_file(self, file_path: UPath) -> bool:
        """Decide whether a file is a candidate for parameter substitution.

        A file is skipped when it exceeds 10MB (memory safety), carries a
        known binary extension, or cannot be stat'ed at all.
        """
        # Extensions treated as binary and never rewritten.
        skip_suffixes = {".jpg", ".jpeg", ".png", ".gif", ".pdf", ".zip", ".tar", ".gz", ".exe", ".bin"}
        try:
            small_enough = file_path.stat().st_size <= 10 * 1024 * 1024
        except (OSError, PermissionError):
            # Inaccessible files are silently skipped.
            return False
        return small_enough and file_path.suffix.lower() not in skip_suffixes
class graphNodeData(BaseModel):
    """Extra metadata for a node."""

    label: str  #: Node label.
    simulation_id: Optional[str] = None  #: Simulation ID.


# Default Node Type in Graph
class GraphNode(BaseModel):
    """A node in the graph.

    Attributes:
        id: Unique node ID.
        position: Node position.
        data: Extra metadata.
        type: Renderer/type key.
    """

    id: str  #: Node ID.
    position: Position  #: Node position.
    data: graphNodeData  #: Node data.
    type: str  #: Node type.


# Edge Type in Graph
class GraphEdge(BaseModel):
    """An edge in the graph.

    Attributes:
        id: Unique edge ID.
        source: Source node ID.
        target: Target node ID.
        type: Renderer/type key.
    """

    id: str  #: Edge ID.
    source: str  #: Source node ID.
    target: str  #: Target node ID.
    type: str  #: Edge type.


class GraphData(BaseModel):
    """A graph representation of a project in the `graph.json` file.

    Attributes:
        nodes: List of nodes.
        edges: List of edges.
    """

    nodes: list[GraphNode]  #: List of nodes.
    edges: list[GraphEdge]  #: List of edges.
70 | -------------------------------------------------------------------------------- /.github/workflows/on-release-main.yml: -------------------------------------------------------------------------------- 1 | name: release-main 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | 9 | set-version: 10 | runs-on: ubuntu-24.04 11 | steps: 12 | - uses: actions/checkout@v4 13 | 14 | - name: Export tag 15 | id: vars 16 | run: echo tag=${GITHUB_REF#refs/*/} >> $GITHUB_OUTPUT 17 | if: ${{ github.event_name == 'release' }} 18 | 19 | - name: Update project version 20 | run: | 21 | sed -i "s/^version = \".*\"/version = \"$RELEASE_VERSION\"/" pyproject.toml 22 | env: 23 | RELEASE_VERSION: ${{ steps.vars.outputs.tag }} 24 | if: ${{ github.event_name == 'release' }} 25 | 26 | - name: Upload updated pyproject.toml 27 | uses: actions/upload-artifact@v4 28 | with: 29 | name: pyproject-toml 30 | path: pyproject.toml 31 | 32 | publish: 33 | runs-on: ubuntu-latest 34 | needs: [set-version] 35 | steps: 36 | - name: Check out 37 | uses: actions/checkout@v4 38 | 39 | - name: Set up the environment 40 | uses: ./.github/actions/setup-python-env 41 | 42 | - name: Download updated pyproject.toml 43 | uses: actions/download-artifact@v4 44 | with: 45 | name: pyproject-toml 46 | 47 | - name: Build package 48 | run: uv build 49 | 50 | - name: Publish package 51 | run: uv publish 52 | env: 53 | UV_PUBLISH_TOKEN: ${{ secrets.PYPI_TOKEN }} 54 | 55 | deploy-docs: 56 | needs: publish 57 | runs-on: ubuntu-latest 58 | permissions: 59 | contents: write 60 | steps: 61 | - name: Check out 62 | uses: actions/checkout@v4 63 | 64 | - name: Set up the environment 65 | uses: ./.github/actions/setup-python-env 66 | 67 | - name: Deploy documentation 68 | run: uv run mkdocs gh-deploy --force 69 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: install 2 | install: 
## Install the virtual environment and install the pre-commit hooks 3 | @echo "🚀 Creating virtual environment using uv" 4 | @uv sync 5 | @uv run pre-commit install 6 | 7 | .PHONY: check 8 | check: ## Run code quality tools. 9 | @echo "🚀 Checking lock file consistency with 'pyproject.toml'" 10 | @uv lock --locked 11 | @echo "🚀 Linting code: Running pre-commit" 12 | @uv run pre-commit run -a 13 | @echo "🚀 Static type checking: Running mypy" 14 | @uv run mypy 15 | @echo "🚀 Checking for obsolete dependencies: Running deptry" 16 | @uv run deptry . 17 | 18 | .PHONY: test 19 | test: ## Test the code with pytest 20 | @echo "🚀 Testing code: Running pytest" 21 | @uv run python -m pytest --cov --cov-config=pyproject.toml --cov-report=xml 22 | 23 | .PHONY: build 24 | build: clean-build ## Build wheel file 25 | @echo "🚀 Creating wheel file" 26 | @uvx --from build pyproject-build --installer uv 27 | 28 | .PHONY: clean-build 29 | clean-build: ## Clean build artifacts 30 | @echo "🚀 Removing build artifacts" 31 | @uv run python -c "import shutil; import os; shutil.rmtree('dist') if os.path.exists('dist') else None" 32 | 33 | .PHONY: publish 34 | publish: ## Publish a release to PyPI. 35 | @echo "🚀 Publishing." 36 | @uvx twine upload --repository-url https://upload.pypi.org/legacy/ dist/* 37 | 38 | .PHONY: build-and-publish 39 | build-and-publish: build publish ## Build and publish. 
40 | 41 | .PHONY: docs-test 42 | docs-test: ## Test if documentation can be built without warnings or errors 43 | @uv run mkdocs build -s 44 | 45 | .PHONY: docs 46 | docs: ## Build and serve the documentation 47 | @uv run mkdocs serve 48 | 49 | .PHONY: help 50 | help: 51 | @uv run python -c "import re; \ 52 | [[print(f'\033[36m{m[0]:<20}\033[0m {m[1]}') for m in re.findall(r'^([a-zA-Z_-]+):.*?## (.*)$$', open(makefile).read(), re.M)] for makefile in ('$(MAKEFILE_LIST)').strip().split()]" 53 | 54 | .DEFAULT_GOAL := help 55 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Main 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened, ready_for_review] 9 | 10 | jobs: 11 | quality: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Check out 15 | uses: actions/checkout@v4 16 | 17 | - uses: actions/cache@v4 18 | with: 19 | path: ~/.cache/pre-commit 20 | key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} 21 | 22 | - name: Set up the environment 23 | uses: ./.github/actions/setup-python-env 24 | 25 | - name: Run checks 26 | run: make check 27 | 28 | tests-and-type-check: 29 | runs-on: ubuntu-latest 30 | strategy: 31 | matrix: 32 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] 33 | fail-fast: false 34 | defaults: 35 | run: 36 | shell: bash 37 | steps: 38 | - name: Check out 39 | uses: actions/checkout@v4 40 | 41 | - name: Set up the environment 42 | uses: ./.github/actions/setup-python-env 43 | with: 44 | python-version: ${{ matrix.python-version }} 45 | 46 | - name: Run tests 47 | run: uv run python -m pytest tests --cov --cov-config=pyproject.toml --cov-report=xml 48 | 49 | - name: Check typing 50 | run: uv run mypy 51 | 52 | 53 | - name: Upload coverage reports to Codecov with GitHub Action on Python 3.11 54 | uses: codecov/codecov-action@v4 55 | 
"""
Configurable retrieval mode system for DataLoader.

This module provides a global configuration system for determining
retrieval mode (local/cloud/cluster) that can be customized per application.
"""

from typing import Callable, Optional

# Global mode function - can be overridden by applications
_get_mode_function: Optional[Callable[[], str]] = None


def set_mode_function(func: Callable[[], str]) -> None:
    """
    Set the global function for determining retrieval mode.

    This should be called once at application startup to configure
    how the DataLoader determines whether to use local, cloud, or cluster storage.

    Args:
        func: Function that returns 'local', 'cloud', or 'cluster'

    Example:
        def my_mode_function():
            return "local" if some_condition else "cloud"

        set_mode_function(my_mode_function)
    """
    global _get_mode_function
    _get_mode_function = func


def get_retrieval_mode() -> str:
    """
    Get the current retrieval mode.

    Uses the configured mode function if available, otherwise falls back
    to default Python library logic.

    Returns:
        str: the configured function's result (normally 'local', 'cloud',
        or 'cluster'). The built-in fallback returns 'local' or 'api'
        depending on fluidize.config, and 'local' when that config
        cannot be imported.
    """
    if _get_mode_function:
        return _get_mode_function()

    # Default fallback for Python library
    try:
        from fluidize.config import config

        return "local" if config.is_local_mode() else "api"
    except ImportError:
        # If config not available, default to local
        return "local"


def reset_mode_function() -> None:
    """
    Reset to default mode detection.

    Useful for testing or when switching between different configurations.
    """
    global _get_mode_function
    _get_mode_function = None
25 | """ 26 | run_numbers = [int(p.name.split("_")[1]) for p in runs_path.glob("run_*") if p.is_dir()] 27 | return 1 if not run_numbers else max(run_numbers) + 1 28 | 29 | def _copy_project_contents(self, source_path: Path, destination_path: Path) -> None: 30 | """ 31 | Copy the contents of the project directory to the new run directory (excluding the 'run' directory). 32 | """ 33 | for item in source_path.iterdir(): 34 | if item.name != "runs": 35 | dest_item = destination_path / item.name 36 | if item.is_dir(): 37 | shutil.copytree(item, dest_item) 38 | else: 39 | shutil.copy2(item, dest_item) 40 | 41 | def get_default_execution_strategy(self): # type: ignore[no-untyped-def] 42 | return LocalExecutionStrategy 43 | 44 | 45 | # Auto-register this handler when module is imported 46 | register_handler("project_runner", "local", LocalProjectRunner) 47 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/Mujoco-Simulation/main.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # MuJoCo Simulation Main Execution Script 4 | # This script is executed by the fluidize framework inside the Docker container 5 | 6 | set -e # Exit on any error 7 | 8 | echo "Starting MuJoCo Simulation Setup..." 
9 | 10 | # Python dependencies are already installed in the Docker image 11 | echo "Python dependencies pre-installed in Docker image" 12 | 13 | echo "Environment Variables:" 14 | echo " FLUIDIZE_NODE_PATH: ${FLUIDIZE_NODE_PATH:-not set}" 15 | echo " FLUIDIZE_SIMULATION_PATH: ${FLUIDIZE_SIMULATION_PATH:-not set}" 16 | echo " FLUIDIZE_OUTPUT_PATH: ${FLUIDIZE_OUTPUT_PATH:-not set}" 17 | echo " FLUIDIZE_INPUT_PATH: ${FLUIDIZE_INPUT_PATH:-not set}" 18 | 19 | # Set default paths if environment variables are not set 20 | NODE_PATH="${FLUIDIZE_NODE_PATH:-/app}" 21 | SIMULATION_PATH="${FLUIDIZE_SIMULATION_PATH:-/app/source}" 22 | OUTPUT_PATH="${FLUIDIZE_OUTPUT_PATH:-/app/source/outputs}" 23 | INPUT_PATH="${FLUIDIZE_INPUT_PATH:-}" 24 | 25 | echo "Using paths:" 26 | echo " Node path: $NODE_PATH" 27 | echo " Simulation path: $SIMULATION_PATH" 28 | echo " Output path: $OUTPUT_PATH" 29 | echo " Input path: $INPUT_PATH" 30 | 31 | # Ensure output directory exists 32 | mkdir -p "$OUTPUT_PATH" 33 | 34 | # Change to simulation directory 35 | cd "$SIMULATION_PATH" 36 | 37 | # Set MuJoCo environment variables for headless rendering 38 | export MUJOCO_GL=osmesa 39 | export PYTHONPATH="$SIMULATION_PATH:$PYTHONPATH" 40 | 41 | # Export paths for the Python script 42 | export SIMULATION_OUTPUT_PATH="$OUTPUT_PATH" 43 | export SIMULATION_INPUT_PATH="$INPUT_PATH" 44 | 45 | # Run the MuJoCo simulation 46 | echo "Executing pinata simulation..." 47 | python pinata_simulation.py 48 | 49 | # Check if simulation was successful 50 | if [ $? -eq 0 ]; then 51 | echo "MuJoCo simulation completed successfully!" 52 | echo "Output files generated in: $OUTPUT_PATH" 53 | ls -la "$OUTPUT_PATH" 54 | exit 0 # Explicitly exit with success code 55 | else 56 | echo "MuJoCo simulation failed!" 
from typing import Any, ClassVar

from pydantic import Field, model_validator

from fluidize.core.constants import FileConstants
from fluidize.core.types.parameters import Parameter

from .json_file_model_base import JSONFileModelBase


class ParametersModel(JSONFileModelBase):
    """
    A base model for parameters objects stored in JSON structure.

    This model provides two main functionalities:
    1. A validator to automatically unpack nested data based on a 'key'
       from the subclass's Config.
    2. A method to wrap the model's data back into the nested structure
       for serialization.
    """

    # Fix: the docstring above was previously placed *after* `_filename`,
    # where Python treats it as a discarded string expression rather than
    # the class docstring; it now populates __doc__ as intended.

    # Suffix of the JSON file this model is persisted under.
    _filename: ClassVar[str] = FileConstants.PARAMETERS_SUFFIX

    # Flat list of parameters stored in the file.
    parameters: list[Parameter] = Field(default_factory=list)

    @model_validator(mode="before")
    @classmethod
    def _unpack_and_validate(cls, data: Any) -> Any:
        """
        Unpacks and validates the data against the key
        specified in the subclass's Config.
        """
        if not isinstance(data, dict):
            return data

        config = getattr(cls, "Key", None)
        key = getattr(config, "key", None)

        # If there's no key in the config or the key is not in the data,
        # assume the data is already in the correct, unpacked structure.
        if not key or key not in data:
            return data

        unpacked_data = data[key]
        if not isinstance(unpacked_data, list):
            # If parameters is not a list, treat it as empty
            unpacked_data = []

        # Return data in the format expected by the model
        return {"parameters": unpacked_data}

    def model_dump_wrapped(self) -> dict[str, Any]:
        """Override to avoid double wrapping of parameters key."""
        return {"parameters": [p.model_dump() for p in self.parameters]}

    class Key:
        key = "parameters"
def duplicate_input_content(input_path, output_path):
    """Read output.txt from *input_path*, double its text, write the result.

    Args:
        input_path: Directory expected to contain an ``output.txt`` file.
        output_path: Directory receiving the doubled ``output.txt``
            (created if missing).

    Returns:
        The path of the file written under *output_path*.

    Raises:
        FileNotFoundError: If ``input_path/output.txt`` does not exist.
    """
    print(f"Input path: {input_path}")
    print(f"Output path: {output_path}")

    source_file = os.path.join(input_path, "output.txt")
    if not os.path.exists(source_file):
        raise FileNotFoundError(f"Required input file not found: {source_file}")

    with open(source_file) as fh:
        original = fh.read().strip()
    print(f"Read content from {source_file}: '{original}'")

    # Doubling via repetition is equivalent to content + content.
    doubled = original * 2
    print(f"Duplicated content: '{doubled}'")

    os.makedirs(output_path, exist_ok=True)
    target_file = os.path.join(output_path, "output.txt")
    with open(target_file, "w") as fh:
        fh.write(doubled)
    print(f"Successfully created {target_file} with duplicated content: '{doubled}'")

    return target_file
Duplicated output file created: {output_file}") 64 | except Exception as e: 65 | print(f"\n✗ Error: {e}") 66 | sys.exit(1) 67 | -------------------------------------------------------------------------------- /tests/fixtures/docker_projects/project-1754038373536/runs/run_1/node-1754038465820/source/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Test2 Duplicate Node 4 | 5 | This script reads output.txt from the inputs directory (from Test1) and 6 | duplicates its content, then saves it to output.txt in the outputs directory. 7 | Expected: 'CANDY' -> 'CANDYCANDY' 8 | 9 | Author: Henry Bae 10 | """ 11 | 12 | import os 13 | import sys 14 | 15 | 16 | def duplicate_input_content(input_path, output_path): 17 | """ 18 | Read output.txt from input path, duplicate its content, and save to output path 19 | """ 20 | print(f"Input path: {input_path}") 21 | print(f"Output path: {output_path}") 22 | 23 | # Find output.txt in input directory 24 | input_file = os.path.join(input_path, "output.txt") 25 | 26 | if not os.path.exists(input_file): 27 | raise FileNotFoundError(f"Required input file not found: {input_file}") 28 | 29 | # Read the input content 30 | with open(input_file) as f: 31 | content = f.read().strip() 32 | 33 | print(f"Read content from {input_file}: '{content}'") 34 | 35 | # Duplicate the content 36 | duplicated_content = content + content 37 | print(f"Duplicated content: '{duplicated_content}'") 38 | 39 | # Create output directory if it doesn't exist 40 | os.makedirs(output_path, exist_ok=True) 41 | 42 | # Write duplicated content to output.txt 43 | output_file = os.path.join(output_path, "output.txt") 44 | 45 | with open(output_file, "w") as f: 46 | f.write(duplicated_content) 47 | 48 | print(f"Successfully created {output_file} with duplicated content: '{duplicated_content}'") 49 | 50 | return output_file 51 | 52 | 53 | if __name__ == "__main__": 54 | if len(sys.argv) != 3: 55 | 
print("Usage: python main.py ") 56 | sys.exit(1) 57 | 58 | input_path = sys.argv[1] 59 | output_path = sys.argv[2] 60 | 61 | try: 62 | output_file = duplicate_input_content(input_path, output_path) 63 | print(f"\n✓ Success! Duplicated output file created: {output_file}") 64 | except Exception as e: 65 | print(f"\n✗ Error: {e}") 66 | sys.exit(1) 67 | -------------------------------------------------------------------------------- /fluidize/core/types/file_models/metadata_model.py: -------------------------------------------------------------------------------- 1 | from typing import Any, ClassVar 2 | 3 | from pydantic import model_validator 4 | 5 | from fluidize.core.constants import FileConstants 6 | 7 | from .file_model_base import FileModelBase 8 | 9 | 10 | class MetadataModel(FileModelBase): 11 | _filename: ClassVar[str] = FileConstants.METADATA_SUFFIX 12 | """ 13 | A base model for metadata objects stored in a nested structure. 14 | 15 | This model provides two main functionalities: 16 | 1. A validator to automatically unpack nested data based on a 'key' 17 | and validate its version from the subclass's Config. 18 | 2. A method to wrap the model's data back into the nested structure 19 | for serialization. 20 | """ 21 | 22 | @model_validator(mode="before") 23 | @classmethod 24 | def _unpack_and_validate(cls, data: Any) -> Any: 25 | """ 26 | Unpacks and validates the data against the key and version 27 | specified in the subclass's Config. 28 | """ 29 | if not isinstance(data, dict): 30 | return data 31 | 32 | config = getattr(cls, "Key", None) 33 | key = getattr(config, "key", None) 34 | 35 | # If there's no key in the config or the key is not in the data, 36 | # assume the data is already in the correct, unpacked structure. 37 | if not key or key not in data: 38 | return data 39 | 40 | unpacked_data = data[key] 41 | if not isinstance(unpacked_data, dict): 42 | raise TypeError() 43 | 44 | # If an expected version is defined in the config, validate or inject it. 
import json

import yaml
from upath import UPath

from fluidize.core.utils.retrieval.handler import register_handler

from .writer_base import BaseDataWriter


class LocalDataWriter(BaseDataWriter):
    """
    JSON Data Writer for local filesystem storage.
    Implements primitive operations required by the base class.

    Every primitive returns True on success and False on failure, logging
    the error to stdout; failures never raise to the caller.
    """

    def _ensure_directory_exists(self, dir_path: UPath) -> bool:
        """Create *dir_path* (and parents) if needed; True on success."""
        try:
            # create directory directly on UPath
            dir_path.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            print(f"Error creating directory {dir_path}: {e}")
            return False
        else:
            return True

    def _write_json_file(self, file_path: UPath, data: dict) -> bool:
        """Serialize *data* as pretty-printed JSON to *file_path*."""
        try:
            with file_path.open("w") as f:
                json.dump(data, f, indent=2)
        except Exception as e:
            print(f"Error writing JSON to {file_path}: {e}")
            return False
        else:
            return True

    def _write_text_file(self, file_path: UPath, data: str) -> bool:
        """Write raw text to *file_path*."""
        try:
            with file_path.open("w") as f:
                f.write(data)
        except Exception as e:
            print(f"Error writing text to {file_path}: {e}")
            return False
        else:
            return True

    def _write_yaml(self, file_path: UPath, data: dict) -> bool:
        """Serialize *data* as YAML to *file_path*."""
        # Fix: open() now sits inside the try block, so a failure to open
        # the file returns False like the other _write_* helpers instead
        # of raising past the caller.
        try:
            with file_path.open("w") as f:
                yaml.dump(data, f, default_flow_style=False)
        except Exception as e:
            print(f"Error writing YAML to {file_path}: {e}")
            return False
        else:
            return True


# Auto-register this handler when module is imported
register_handler("datawriter", "local", LocalDataWriter)
3 | """ 4 | 5 | from dataclasses import dataclass 6 | from enum import Enum 7 | 8 | 9 | class NodePoolType(Enum): 10 | """Kubernetes node pool types.""" 11 | 12 | SMALL_CPU = "small-cpu-pool" 13 | MEDIUM_CPU = "medium-cpu-pool" 14 | LARGE_CPU = "large-cpu-pool" 15 | GPU = "gpu-pool" 16 | AUTO = "auto" # Let system decide 17 | 18 | 19 | class GPUType(Enum): 20 | """Supported GPU types.""" 21 | 22 | NVIDIA_TESLA_T4 = "nvidia-tesla-t4" 23 | NVIDIA_TESLA_V100 = "nvidia-tesla-v100" 24 | NVIDIA_A100 = "nvidia-tesla-a100" 25 | 26 | 27 | @dataclass 28 | class ResourceRequirements: 29 | """Resource requirements for container execution.""" 30 | 31 | # CPU resources 32 | cpu_request: str = "100m" # Minimum CPU 33 | cpu_limit: str = "2000m" # Maximum CPU 34 | 35 | # Memory resources 36 | memory_request: str = "256Mi" # Minimum memory 37 | memory_limit: str = "2Gi" # Maximum memory 38 | 39 | # GPU resources 40 | gpu_count: int = 0 41 | gpu_type: GPUType = GPUType.NVIDIA_TESLA_T4 42 | 43 | # Storage resources 44 | disk_size: str = "10Gi" 45 | disk_type: str = "pd-standard" # pd-standard, pd-ssd, pd-balanced 46 | 47 | # Node pool preference 48 | node_pool_preference: NodePoolType = NodePoolType.AUTO 49 | 50 | def requires_gpu(self) -> bool: 51 | """Check if GPU resources are required.""" 52 | return self.gpu_count > 0 53 | 54 | def get_node_pool(self) -> str: 55 | """Get the appropriate node pool name.""" 56 | if self.node_pool_preference == NodePoolType.AUTO: 57 | if self.requires_gpu(): 58 | return NodePoolType.GPU.value 59 | elif self._is_high_cpu(): 60 | return NodePoolType.MEDIUM_CPU.value 61 | else: 62 | return NodePoolType.SMALL_CPU.value 63 | return str(self.node_pool_preference.value) 64 | 65 | def _is_high_cpu(self) -> bool: 66 | """Determine if this is a high CPU workload.""" 67 | # Extract number from CPU limit (e.g., "2000m" -> 2000) 68 | cpu_limit_num = int(self.cpu_limit.replace("m", "")) 69 | return cpu_limit_num > 1000 # More than 1 CPU 70 | 
from collections import deque
from typing import Any, Optional


class ProcessGraph:
    """Traversal helpers for project pipeline graphs."""

    def print_bfs_nodes(self, G: Any, start_node: Optional[Any] = None) -> tuple[list, list]:
        """
        Print nodes in BFS order starting from a specific node.
        Returns two lists: one with node IDs and one with previous node IDs.

        Parameters:
        - G: NetworkX graph
        - start_node: Node to start BFS from. If None, picks the first node in the graph.

        Returns:
        - nodes: List of node IDs in BFS order
        - prev_nodes: List of previous node IDs (matching the index of nodes list)
        """

        if not G.nodes():
            print("Empty graph, no nodes to traverse.")
            return [], []

        # If no start node is provided, use the first node in the graph
        if start_node is None:
            start_node = next(iter(G.nodes()))
            print(f"No start node provided, using first node: {start_node}")

        # Check if the start node exists in the graph
        if start_node not in G:
            print(f"Start node '{start_node}' not found in graph.")
            return [], []

        print(f"BFS traversal starting from node '{start_node}':")

        # Perform BFS traversal. deque + set membership replace the original
        # list-based queue/visited, whose pop(0) and linear scans made the
        # traversal quadratic in the number of nodes. Visit order is unchanged.
        nodes: list = []
        prev_nodes: list = []
        queue: deque = deque([(start_node, None)])  # (node, prev_node)
        visited: set = set()
        enqueued = {start_node}  # prevents duplicate queue entries in O(1)

        while queue:
            node, prev_node = queue.popleft()
            visited.add(node)
            nodes.append(node)
            prev_nodes.append(prev_node)
            print(f"  - Adding node to traversal: {node}, previous node: {prev_node}")

            # Add all unvisited, not-yet-queued neighbors to the queue
            for neighbor in G.neighbors(node):
                if neighbor not in visited and neighbor not in enqueued:
                    enqueued.add(neighbor)
                    queue.append((neighbor, node))
                    print(f"    - Adding neighbor to queue: {neighbor}, will follow {node}")

        return nodes, prev_nodes
- Adding neighbor to queue: {neighbor}, will follow {node}") 54 | 55 | return nodes, prev_nodes 56 | -------------------------------------------------------------------------------- /fluidize/core/utils/retrieval/handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Auto-registration handler system for DataLoader and other handler-based classes. 3 | 4 | Handler classes auto-register themselves when imported, making the system 5 | flexible and easy to extend. 6 | """ 7 | 8 | from typing import Any 9 | 10 | from .main import get_retrieval_mode 11 | 12 | # Global handler registry: handler_type -> mode -> handler_class 13 | _handlers: dict[str, dict[str, type]] = {} 14 | 15 | 16 | def register_handler(handler_type: str, mode: str, handler_class: type) -> None: 17 | """ 18 | Register a handler class for a specific type and mode. 19 | 20 | This is typically called automatically when handler classes are imported. 21 | 22 | Args: 23 | handler_type: The type of handler (e.g., "dataloader", "pathfinder") 24 | mode: The mode this handler supports (e.g., "local", "cloud", "cluster") 25 | handler_class: The handler class to register 26 | """ 27 | if handler_type not in _handlers: 28 | _handlers[handler_type] = {} 29 | _handlers[handler_type][mode] = handler_class 30 | 31 | 32 | def get_handler(handler_type: str, *args: Any, **kwargs: Any) -> Any: 33 | """ 34 | Get a handler instance for the specified type and current retrieval mode. 
def get_handler(handler_type: str, *args: Any, **kwargs: Any) -> Any:
    """
    Get a handler instance for the specified type and current retrieval mode.

    Args:
        handler_type: The type of handler to get (e.g., "dataloader", "pathfinder")
        *args: Positional arguments to pass to the handler constructor
        **kwargs: Keyword arguments to pass to the handler constructor

    Returns:
        An instance of the appropriate handler class

    Raises:
        ValueError: If no handlers are registered for the type or mode is unsupported
    """
    if handler_type not in _handlers:
        # A bare ValueError() gave callers no clue which handler type was missing.
        msg = f"No handlers registered for type '{handler_type}'. Registered types: {sorted(_handlers)}"
        raise ValueError(msg)

    mode = get_retrieval_mode()
    handler_mapping = _handlers[handler_type]

    if mode not in handler_mapping:
        msg = (
            f"No '{handler_type}' handler registered for mode '{mode}'. "
            f"Available modes: {sorted(handler_mapping)}"
        )
        raise ValueError(msg)

    handler_class = handler_mapping[mode]
    return handler_class(*args, **kwargs)


def get_registered_handlers() -> dict[str, dict[str, type]]:
    """
    Get all registered handlers (useful for debugging/inspection).

    Returns:
        A shallow copy of the registry mapping handler types to their
        mode->class mappings (mutating the copy leaves the registry intact).
    """
    return _handlers.copy()
9 | 10 | nav: 11 | - Home: index.md 12 | - Getting Started: 13 | - Quickstart: getting-started/quickstart.md 14 | - Examples: getting-started/examples.md 15 | - Core Modules: 16 | - core-modules/index.md 17 | - Client: core-modules/client.md 18 | - Projects: core-modules/projects.md 19 | - Graph: core-modules/graph.md 20 | - Node: core-modules/node.md 21 | - Run: core-modules/run.md 22 | plugins: 23 | - search 24 | - mkdocstrings: 25 | handlers: 26 | python: 27 | paths: ["fluidize"] 28 | options: 29 | merge_init_into_class: true 30 | show_signature: true 31 | show_signature_annotations: true 32 | members_order: source 33 | docstring_style: google 34 | filters: 35 | - "!^_" 36 | show_source: false 37 | show_root_full_path: false 38 | extra: 39 | show_attributes: true 40 | show_root_heading: true 41 | theme: 42 | name: material 43 | features: 44 | - tabs 45 | - navigation.indexes 46 | palette: 47 | - media: "(prefers-color-scheme: light)" 48 | scheme: default 49 | primary: white 50 | accent: deep orange 51 | toggle: 52 | icon: material/brightness-7 53 | name: Switch to dark mode 54 | - media: "(prefers-color-scheme: dark)" 55 | scheme: slate 56 | primary: black 57 | accent: deep orange 58 | toggle: 59 | icon: material/brightness-4 60 | name: Switch to light mode 61 | icon: 62 | repo: fontawesome/brands/github 63 | 64 | extra: 65 | social: 66 | - icon: fontawesome/brands/github 67 | link: https://github.com/Fluidize-Inc/fluidize-python 68 | - icon: fontawesome/brands/python 69 | link: https://pypi.org/project/fluidize-python 70 | 71 | markdown_extensions: 72 | - toc: 73 | permalink: true 74 | - pymdownx.arithmatex: 75 | generic: true 76 | -------------------------------------------------------------------------------- /docs/core-modules/index.md: -------------------------------------------------------------------------------- 1 | # Core Modules 2 | 3 | The Fluidize library is composed of a set of core modules that provide a high-level interface for managing Fluidize 
resources. These modules are designed to be used together to build and execute scientific computing pipelines. 4 | 5 | ## [Client](client.md) 6 | 7 | The **Fluidize Client** provides a unified, high-level interface for managing Fluidize resources in both local and cloud API modes. It serves as the primary entry point for creating and running pipelines across these environments. 8 | 9 | ## [Projects](projects.md) 10 | 11 | The **Projects** module provides tools for managing project lifecycles: 12 | 13 | - [**Registry Manager**](projects.md#fluidize.managers.registry.RegistryManager): 14 | Handles the user’s complete project registry, with functionality to create, edit, and delete projects. 15 | 16 | - [**Project Manager**](projects.md#fluidize.managers.project.ProjectManager): 17 | Focuses on individual projects, managing the project graph, nodes, and runs, and supporting execution of project-specific workflows. 18 | 19 | ## [Graph](graph.md) 20 | 21 | The **Graph** module provides tools for managing the project graph, which is a representation of the simulation pipeline. 22 | 23 | In a Fluidize project, pipelines are represented as a directed acyclic graph (DAG) where each node represents a module simulation and each edge represents the flow of data between nodes: 24 | 25 | - [**Graph Manager**](graph.md#fluidize.managers.graph.GraphManager): 26 | Manages the project graph, and provides high level functionality to create, edit, and delete nodes and edges. 27 | 28 | - [**Graph Processor**](graph.md#fluidize.managers.graph.graph_processor.GraphProcessor): 29 | Manages specific operations on the graph data structure within the local filesystem. 30 | 31 | ## [Node](node.md) 32 | 33 | The **Node** module provides tools for managing the metadata, properties, and parameters of individual nodes within a project. 
34 | 35 | ## [Run](run.md) 36 | 37 | The **Run** module provides tools for managing simulation pipeline runs within a project: 38 | 39 | - [**Runs Manager**](run.md#fluidize.managers.run.RunsManager): 40 | Manages the high level execution of runs and retrieving run status. 41 | 42 | - [**Project Runner**](run.md#fluidize.core.modules.run.project.ProjectRunner): 43 | Manages the specific execution details of a project pipeline, including environment preparation and node execution order. 44 | -------------------------------------------------------------------------------- /fluidize/core/types/runs.py: -------------------------------------------------------------------------------- 1 | # minor Issue : Addressing the UPath and CloudPath would be nice 2 | from enum import Enum 3 | from pathlib import Path, PurePosixPath 4 | from typing import Optional, Union 5 | 6 | from pydantic import BaseModel, ConfigDict 7 | from upath import UPath 8 | 9 | from .file_models.metadata_model import MetadataModel 10 | from .project import ProjectSummary 11 | 12 | 13 | class RunStatus(str, Enum): 14 | NOT_RUN = "NOT_RUN" 15 | RUNNING = "RUNNING" 16 | FAILED = "FAILED" 17 | SUCCESS = "SUCCESS" 18 | 19 | 20 | # Metadata for a project run in the metadata.yaml file. 
class projectRunMetadata(MetadataModel):
    """Metadata generated for a project run.

    Persisted under the "run" key of a run folder's metadata file (see the
    nested Key class). NOTE(review): the lowerCamel class name is kept —
    renaming would break existing imports and stored metadata consumers.
    """

    metadata_version: str = "1.0"
    run_number: int  # sequential run index within the project
    run_folder: str  # folder name on disk for this run
    name: str
    id: str
    date_created: str
    # No default: callers must pass these explicitly (None is accepted).
    date_modified: Optional[str]
    description: Optional[str]
    tags: Optional[list[str]] = None
    run_status: RunStatus = RunStatus.NOT_RUN
    # Populated only when MLflow tracking is enabled for the run.
    mlflow_run_id: Optional[str] = None
    mlflow_experiment_id: Optional[str] = None

    class Key:
        # Top-level key under which this model is nested when serialized.
        key = "run"
        metadata_version: str = "1.0"

    # use_enum_values=True stores RunStatus as its string value;
    # extra="ignore" tolerates unknown keys from older/newer metadata files.
    model_config = ConfigDict(use_enum_values=True, extra="ignore")


class NodePaths(BaseModel):
    """Paths on the host/node filesystem."""

    # This is required because UPath and CloudPath are not natively supported by Pydantic.
    model_config = ConfigDict(arbitrary_types_allowed=True)
    node_path: Union[Path, UPath]
    simulation_path: Union[Path, UPath]
    input_path: Optional[Union[Path, UPath]] = None  # absent when the node has no upstream input
    output_path: Union[Path, UPath]


class ContainerPaths(BaseModel):
    """Paths inside the container (POSIX-style by construction)."""

    node_path: PurePosixPath
    simulation_path: PurePosixPath
    input_path: Optional[PurePosixPath] = None
    output_path: Optional[PurePosixPath] = None


class RunFlowPayload(BaseModel):
    """User-supplied configuration for a flow run; every field is optional."""

    name: Optional[str] = None
    description: Optional[str] = None
    tags: Optional[list[str]] = None


class RunFlowRequest(BaseModel):
    """Request model for run_flow API calls."""

    project: ProjectSummary
    payload: RunFlowPayload


class RunFlowResponse(BaseModel):
    """Response model for run_flow API calls."""

    flow_status: str
    run_number: int
import asyncio
from typing import Any, Optional, cast

from fluidize.core.types.project import ProjectSummary
from fluidize.core.types.runs import RunFlowPayload
from fluidize.core.utils.retrieval.handler import get_handler


class ProjectRunner:
    """
    Project runner that delegates to a mode-specific implementation.

    Mirrors the DataLoader pattern: get_handler() resolves the concrete
    runner (local, cloud, ...) registered for the current retrieval mode.
    """

    def __init__(self, project: ProjectSummary):
        """
        Args:
            project: The project whose runs this runner manages.
        """
        self.project = project
        # Resolved once; every public method delegates to this handler.
        self.handler = get_handler("project_runner", project)

    def prepare_run_environment(self, metadata: RunFlowPayload) -> int:
        """
        Create a new run folder for the project.

        Args:
            metadata: Run configuration (name/description/tags).

        Returns:
            int: The newly allocated run number.
        """
        run_number = self.handler.prepare_run_environment(metadata)
        return cast(int, run_number)

    async def execute_node(self, node_id: str, prev_node_id: Optional[str] = None, **kwargs: Any) -> dict[str, Any]:
        """
        Execute a single node within the project run.

        The handler call is synchronous, so it is pushed to a worker
        thread to keep the event loop responsive.

        Args:
            node_id: ID of the node to execute.
            prev_node_id: ID of the upstream node, if any.
            **kwargs: Extra arguments forwarded to the handler.

        Returns:
            dict[str, Any]: Execution result from the handler.
        """
        return await asyncio.to_thread(self.handler.execute_node, node_id, prev_node_id=prev_node_id, **kwargs)

    async def execute_flow(self, nodes_to_run: list[str], prev_nodes: list[str], **kwargs: Any) -> list[dict[str, Any]]:
        """
        Execute a flow of nodes in order.

        Args:
            nodes_to_run: Node IDs in execution order.
            prev_nodes: Upstream node ID matching each entry of nodes_to_run.
            **kwargs: Extra arguments forwarded to the handler.

        Returns:
            list[dict[str, Any]]: Execution results for all nodes.

        Raises:
            ValueError: If the two lists differ in length.
        """
        if len(nodes_to_run) != len(prev_nodes):
            msg = "nodes_to_run and prev_nodes must be of the same length"
            raise ValueError(msg)
        results = await self.handler.execute_flow(nodes_to_run, prev_nodes, **kwargs)
        return cast(list[dict[str, Any]], results)
from typing import Any, ClassVar

from pydantic import model_validator

from fluidize.core.constants import FileConstants

from .file_model_base import FileModelBase


# TODO: Think about combining the metadata_model and properties_model somehow? Separated just in case
class PropertiesModel(FileModelBase):
    """
    A base model for properties objects stored in a nested structure.

    This model provides two main functionalities:
    1. A validator to automatically unpack nested data based on a 'key'
       and validate its version from the subclass's Key config.
    2. A method to wrap the model's data back into the nested structure
       for serialization.

    Note: this docstring is now the first statement in the class body so it
    is actually assigned to ``__doc__`` (it previously followed ``_filename``
    and was a no-op string expression).
    """

    _filename: ClassVar[str] = FileConstants.PROPERTIES_SUFFIX

    @model_validator(mode="before")
    @classmethod
    def _unpack_and_validate(cls, data: Any) -> Any:
        """
        Unpacks and validates the data against the key and version
        specified in the subclass's Key config.

        Raises:
            TypeError: If the nested value under the key is not a mapping.
            ValueError: If the file's metadata_version does not match the
                version declared on the subclass.
        """
        if not isinstance(data, dict):
            return data

        config = getattr(cls, "Key", None)
        key = getattr(config, "key", None)

        # If there's no key in the config or the key is not in the data,
        # assume the data is already in the correct, unpacked structure.
        if not key or key not in data:
            return data

        unpacked_data = data[key]
        if not isinstance(unpacked_data, dict):
            # A bare TypeError() gave no hint about which key/model was malformed.
            msg = f"{cls.__name__}: expected a mapping under key '{key}', got {type(unpacked_data).__name__}"
            raise TypeError(msg)

        # If an expected version is defined in the config, validate or inject it.
        expected_version = getattr(config, "metadata_version", None)
        if expected_version is not None:
            # If the file has a version, it must match.
            if "metadata_version" in unpacked_data:
                file_version = unpacked_data.get("metadata_version")
                if file_version != expected_version:
                    msg = (
                        f"{cls.__name__}: metadata_version mismatch under key '{key}': "
                        f"expected {expected_version!r}, found {file_version!r}"
                    )
                    raise ValueError(msg)
            # If the file has no version, inject the expected one.
            else:
                unpacked_data["metadata_version"] = expected_version

        return unpacked_data
3 | """ 4 | 5 | from typing import Any, Optional 6 | 7 | from fluidize.core.modules.projects.processor import ProjectProcessor 8 | from fluidize.core.types.project import ProjectSummary 9 | 10 | 11 | class ProjectsHandler: 12 | """Handles project operations for local mode - matches SDK interface.""" 13 | 14 | def __init__(self, config: Any) -> None: 15 | """ 16 | Initialize the projects handler. 17 | 18 | Args: 19 | config: FluidizeConfig instance 20 | """ 21 | self.config = config 22 | self.processor = ProjectProcessor() 23 | 24 | def delete(self, project_id: str) -> dict: 25 | """ 26 | Delete a project based on its ID. 27 | 28 | Args: 29 | project_id: The project ID to delete 30 | 31 | Returns: 32 | Dict indicating success 33 | """ 34 | self.processor.delete_project(project_id) 35 | return {"success": True, "message": f"Project {project_id} deleted"} 36 | 37 | def list(self) -> list[ProjectSummary]: 38 | """ 39 | Get a summary of all projects. 40 | 41 | Returns: 42 | List of project summaries 43 | """ 44 | return self.processor.get_projects() 45 | 46 | def retrieve(self, project_id: str) -> ProjectSummary: 47 | """ 48 | Get a project by its ID. 49 | 50 | Args: 51 | project_id: The project ID to retrieve 52 | 53 | Returns: 54 | ProjectSummary (project data) 55 | """ 56 | return self.processor.get_project(project_id) 57 | 58 | def upsert( 59 | self, 60 | *, 61 | id: str, # noqa: A002 62 | description: Optional[str] = None, 63 | label: Optional[str] = None, 64 | location: Optional[str] = None, 65 | metadata_version: Optional[str] = "1.0", 66 | status: Optional[str] = None, 67 | **kwargs: Any, 68 | ) -> ProjectSummary: 69 | """ 70 | Create or update a project. 
from typing import Optional

from fluidize.core.modules.run.node.methods.base.execstrat import BaseExecutionStrategy
from fluidize.core.modules.tracking.mlflow_tracker import MLFlowTracker
from fluidize.core.types import node
from fluidize.core.types.project import ProjectSummary


class RunJob:
    """
    A job that runs a single node's simulation via a pluggable execution strategy.

    The strategy instance is built up front from `strategyClass`, so run()
    is a thin orchestration layer: prepare, execute, handle files.
    """

    def __init__(
        self,
        project: ProjectSummary,
        strategyClass: type[BaseExecutionStrategy],
        nodeProperties_simulation: node.nodeProperties_simulation,
        prev_nodeProperties_simulation: Optional[node.nodeProperties_simulation] = None,
        mlflow_tracker: Optional[MLFlowTracker] = None,
        run_id: Optional[str] = None,
        run_metadata: Optional[object] = None,
    ):
        """
        Args:
            project: The project this node belongs to
            strategyClass: The strategy class to use for execution
            nodeProperties_simulation: The node properties to run
            prev_nodeProperties_simulation: The previous node properties (optional)
            mlflow_tracker: The MLflow tracker (optional)
            run_id: The run ID (optional)
            run_metadata: The run metadata (optional)
        """
        self.project = project
        self.nodeProperties_simulation = nodeProperties_simulation
        self.prev_nodeProperties_simulation = prev_nodeProperties_simulation
        self.mlflow_tracker = mlflow_tracker
        self.run_id = run_id
        self.run_metadata = run_metadata

        # The strategy owns every execution detail; RunJob only sequences it.
        self.strategy = strategyClass(
            node=self.nodeProperties_simulation,
            prev_node=self.prev_nodeProperties_simulation,
            project=self.project,
            mlflow_tracker=self.mlflow_tracker,
            run_id=self.run_id,
            run_metadata=self.run_metadata,
        )

    def run(self) -> None:
        """Run the node end to end; any failure is logged and re-raised."""
        node_id = self.nodeProperties_simulation.node_id
        print(f"\n=== Starting run for node: {node_id} ===")
        try:
            print("1. Preparing environment...")
            self.strategy.prepare_environment()

            print("2. Executing simulation...")
            result = self.strategy.execute_simulation()

            print("3. Handling files...")
            self.strategy.handle_files()

            print(f"=== Run completed for node: {node_id} with result: {result} ===\n")
        except Exception as e:
            print(f"ERROR during run execution: {e!s}")
            raise
Handling files...") 64 | self.strategy.handle_files() 65 | 66 | print(f"=== Run completed for node: {self.nodeProperties_simulation.node_id} with result: {result} ===\n") 67 | except Exception as e: 68 | print(f"ERROR during run execution: {e!s}") 69 | raise 70 | -------------------------------------------------------------------------------- /fluidize/managers/runs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project-scoped runs manager for user-friendly run operations. 3 | """ 4 | 5 | from typing import Any 6 | 7 | from upath import UPath 8 | 9 | from fluidize.core.types.project import ProjectSummary 10 | from fluidize.core.types.runs import RunFlowPayload, projectRunMetadata 11 | 12 | 13 | class RunsManager: 14 | """ 15 | Runs manager for a specific project. 16 | 17 | Provides run operations like executing workflows without requiring 18 | project context on each method call. 19 | """ 20 | 21 | def __init__(self, adapter: Any, project: ProjectSummary) -> None: 22 | """ 23 | Args: 24 | adapter: adapter (FluidizeSDK or LocalAdapter) 25 | project: The project this runs manager is bound to 26 | """ 27 | self.adapter = adapter 28 | self.project = project 29 | 30 | def run_flow(self, payload: RunFlowPayload) -> dict[str, Any]: 31 | """ 32 | Execute a flow run for this project. 33 | 34 | Args: 35 | payload: Run configuration (name, description, tags) 36 | 37 | Returns: 38 | Dictionary with flow_status and run_number 39 | """ 40 | return self.adapter.runs.run_flow(project=self.project, payload=payload) # type: ignore[no-any-return] 41 | 42 | def list_runs(self) -> list[str]: 43 | """ 44 | List all runs for this project. 45 | 46 | Returns: 47 | List of run identifiers for this project 48 | """ 49 | return self.adapter.runs.list_runs(self.project) # type: ignore[no-any-return] 50 | 51 | def get_metadata(self, run_number: int) -> projectRunMetadata: 52 | """ 53 | Get the metadata of a specific run for this project. 
from typing import Any

from upath import UPath

from fluidize.core.types.project import ProjectSummary
from fluidize.core.utils.retrieval.handler import get_handler


class DataWriter:
    """Facade over the mode-specific "datawriter" handler.

    Each public classmethod resolves the handler registered for the current
    retrieval mode, forwards the call, and normalizes the return type.
    """

    def __init__(self) -> None:
        pass

    @classmethod
    def _get_handler(cls) -> Any:
        # Resolved on every call so a mode switch takes effect immediately.
        return get_handler("datawriter")

    @classmethod
    def write_json_for_project(cls, project: ProjectSummary, suffix: str, data: dict) -> bool:
        """Write `data` as JSON for the given `project` file suffix."""
        handler = cls._get_handler()
        return bool(handler.write_json_for_project(project, suffix, data))

    @classmethod
    def write_json(cls, filepath: UPath, data: dict) -> bool:
        """Write JSON data to `filepath`, handling root-level files correctly.

        Note: the legacy `mode` parameter mentioned in older docs has been
        removed from this API.
        """
        handler = cls._get_handler()
        return bool(handler.write_json(filepath, data))

    @classmethod
    def write_yaml(cls, filepath: UPath, data: dict) -> bool:
        """Write `data` as YAML to `filepath`; True on success."""
        handler = cls._get_handler()
        return bool(handler.write_yaml(filepath, data))

    @classmethod
    def write_text(cls, filepath: UPath, data: str) -> bool:
        """Write text `data` to `filepath`; True on success."""
        handler = cls._get_handler()
        return bool(handler.write_text(filepath, data))

    @classmethod
    def create_directory(cls, directory_path: UPath) -> bool:
        """Create `directory_path` (and any missing parents); True on success.

        The active handler decides whether this targets local or cluster
        storage.
        """
        handler = cls._get_handler()
        return bool(handler.create_directory(directory_path))

    @classmethod
    def save_simulation(cls, simulation: Any, sim_global: bool = True) -> dict[Any, Any]:
        """Persist a simulation object; returns the handler's status dict."""
        handler = cls._get_handler()
        return dict(handler.save_simulation(simulation, sim_global))
from typing import Any

from upath import UPath

from fluidize.core.types.file_models.metadata_model import MetadataModel
from fluidize.core.types.file_models.properties_model import PropertiesModel
from fluidize.core.types.project import ProjectSummary
from fluidize.core.utils.retrieval.handler import get_handler


class DataLoader:
    """Facade over the mode-specific "dataloader" handler.

    Each public classmethod resolves the handler registered for the current
    retrieval mode, forwards the call, and normalizes the return type
    (list/dict/bool) at the boundary.
    """

    def __init__(self) -> None:
        pass

    @classmethod
    def _get_handler(cls) -> Any:
        # Resolved on every call so a mode switch takes effect immediately.
        return get_handler("dataloader")

    @classmethod
    def list_directories(cls, path: UPath) -> list[UPath]:
        """Immediate subdirectories of `path`."""
        handler = cls._get_handler()
        return list(handler.list_directories(path))

    @classmethod
    def list_files(cls, path: UPath) -> list[UPath]:
        """Files directly under `path`."""
        handler = cls._get_handler()
        return list(handler.list_files(path))

    @classmethod
    def copy_directory(cls, source: UPath, destination: UPath) -> None:
        """Recursively copy `source` to `destination`."""
        cls._get_handler().copy_directory(source, destination)
        return None

    @classmethod
    def load_json(cls, filepath: UPath) -> dict:
        """Parse the JSON file at `filepath` into a dict."""
        handler = cls._get_handler()
        return dict(handler.load_json(filepath))

    @classmethod
    def load_yaml(cls, filepath: UPath) -> dict:
        """Parse the YAML file at `filepath` into a dict."""
        handler = cls._get_handler()
        return dict(handler.load_yaml(filepath))

    @classmethod
    def load_for_project(cls, project: ProjectSummary, suffix: str) -> dict:
        """Load the project's data file identified by `suffix`."""
        handler = cls._get_handler()
        return dict(handler.load_for_project(project, suffix))

    @classmethod
    def delete_directory_for_project(cls, project: ProjectSummary, folder_name: str) -> None:
        """Delete one named folder inside the project's directory."""
        cls._get_handler().delete_directory_for_project(project, folder_name)
        return None

    @classmethod
    def delete_entire_project_folder(cls, project: ProjectSummary) -> None:
        """Delete the project's whole directory tree."""
        cls._get_handler().delete_entire_project_folder(project)
        return None

    @classmethod
    def load_node_parameters(cls, path: UPath) -> dict:
        """Load a node's parameters file from `path`."""
        handler = cls._get_handler()
        return dict(handler.load_node_parameters(path))

    @classmethod
    def list_runs(cls, project: ProjectSummary) -> list[str]:
        """Identifiers of all runs recorded for `project`."""
        handler = cls._get_handler()
        return list(handler.list_runs(project))

    @classmethod
    def list_metadatas(cls, path: UPath, objectType: type) -> list[MetadataModel]:
        """Metadata models of type `objectType` found under `path`."""
        handler = cls._get_handler()
        return list(handler.list_metadatas(path, objectType))

    @classmethod
    def list_properties(cls, path: UPath, objectType: type) -> list[PropertiesModel]:
        """Properties models of type `objectType` found under `path`."""
        handler = cls._get_handler()
        return list(handler.list_properties(path, objectType))

    @classmethod
    def list_simulations(cls, sim_global: bool = True) -> list[Any]:
        """Available simulations (global registry when `sim_global` is True)."""
        handler = cls._get_handler()
        return list(handler.list_simulations(sim_global))

    @classmethod
    def check_file_exists(cls, filepath: UPath) -> bool:
        """True when `filepath` exists according to the active handler."""
        handler = cls._get_handler()
        return bool(handler.check_file_exists(filepath))

    @classmethod
    def remove_directory(cls, dirpath: UPath) -> None:
        """Recursively delete `dirpath`."""
        cls._get_handler().remove_directory(dirpath)
        return None
14 | """ 15 | 16 | def _get_file_content(self, filepath: Path) -> str: 17 | """Load raw file content from the local filesystem as a string.""" 18 | with open(filepath, encoding="utf-8") as f: 19 | return f.read() 20 | 21 | def _file_exists(self, filepath: Path) -> bool: 22 | """Check if a file exists in the local filesystem.""" 23 | return filepath.exists() and filepath.is_file() 24 | 25 | def _directory_exists(self, dirpath: Path) -> bool: 26 | """Check if a directory exists in the local filesystem.""" 27 | return dirpath.exists() and dirpath.is_dir() 28 | 29 | def _list_directory(self, dirpath: Path) -> list[UPath]: 30 | """List contents of a directory in the local filesystem.""" 31 | return [UPath(item) for item in dirpath.iterdir()] 32 | 33 | def _is_directory(self, path: Path) -> bool: 34 | """Check if a path is a directory in the local filesystem.""" 35 | return path.is_dir() 36 | 37 | def copy_directory(self, source: Path, destination: Path) -> None: 38 | """Copy a directory in the local filesystem.""" 39 | shutil.copytree(source, destination) 40 | 41 | def remove_directory(self, dirpath: Path) -> None: 42 | """Remove a directory in the local filesystem.""" 43 | shutil.rmtree(dirpath) 44 | 45 | def _create_directory(self, dirpath: Path) -> None: 46 | """Create a directory in the local filesystem.""" 47 | dirpath.mkdir(parents=True, exist_ok=True) 48 | 49 | def _glob(self, path: Path, pattern: str) -> list[UPath]: 50 | """List files matching `pattern` under the given path on local filesystem.""" 51 | return [UPath(p) for p in path.glob(pattern)] 52 | 53 | def _cat_files(self, paths: list[UPath]) -> dict[str, bytes]: 54 | """Read multiple files' content in a batch from the local filesystem.""" 55 | contents = {} 56 | for path in paths: 57 | try: 58 | # UPath objects from local glob will have a 'path' attribute that is a Path object 59 | with open(path, "rb") as f: 60 | # Use the string representation of the path as the key for consistency 61 | 
contents[str(path)] = f.read() 62 | except FileNotFoundError: 63 | # Handle cases where a file might disappear between glob and read 64 | # Skip files that can't be read instead of adding None values 65 | continue 66 | except Exception as e: 67 | # Handle other potential reading errors 68 | print(f"Error reading file {path}: {e}") 69 | # Skip files that can't be read instead of adding None values 70 | continue 71 | return contents 72 | 73 | 74 | # Auto-register this handler when module is imported 75 | register_handler("dataloader", "local", LocalDataLoader) 76 | -------------------------------------------------------------------------------- /tests/unit/conftest.py: -------------------------------------------------------------------------------- 1 | """Shared fixtures and configuration for unit tests.""" 2 | 3 | import tempfile 4 | from collections.abc import Generator 5 | from pathlib import Path 6 | from unittest.mock import Mock, patch 7 | 8 | import pytest 9 | from upath import UPath 10 | 11 | # Import local handlers to ensure they auto-register 12 | from fluidize.config import FluidizeConfig 13 | from fluidize.core.types.project import ProjectSummary 14 | 15 | 16 | @pytest.fixture 17 | def temp_dir() -> Generator[Path, None, None]: 18 | """Create a temporary directory for testing filesystem operations.""" 19 | with tempfile.TemporaryDirectory() as tmpdir: 20 | yield Path(tmpdir) 21 | 22 | 23 | @pytest.fixture 24 | def mock_config(temp_dir: Path) -> FluidizeConfig: 25 | """Create a mock configuration for local mode testing.""" 26 | config = FluidizeConfig(mode="local") 27 | config.local_base_path = temp_dir 28 | config.local_projects_path = temp_dir / "projects" 29 | config.local_simulations_path = temp_dir / "simulations" 30 | 31 | # Ensure directories exist 32 | config.local_projects_path.mkdir(parents=True, exist_ok=True) 33 | config.local_simulations_path.mkdir(parents=True, exist_ok=True) 34 | 35 | return config 36 | 37 | 38 | @pytest.fixture 39 | def 
sample_project() -> ProjectSummary: 40 | """Create a sample project for testing.""" 41 | return ProjectSummary( 42 | id="test-project-001", 43 | label="Test Project", 44 | description="A sample project for testing", 45 | status="active", 46 | location="/test/location", 47 | metadata_version="1.0", 48 | ) 49 | 50 | 51 | @pytest.fixture 52 | def sample_project_minimal() -> ProjectSummary: 53 | """Create a minimal sample project for testing.""" 54 | return ProjectSummary(id="minimal-project", metadata_version="1.0") 55 | 56 | 57 | @pytest.fixture 58 | def mock_path_finder() -> Generator[Mock, None, None]: 59 | """Mock PathFinder for consistent testing.""" 60 | with patch("fluidize.core.utils.pathfinder.path_finder.PathFinder._get_handler") as mock_handler_getter: 61 | mock_handler = Mock() 62 | mock_handler_getter.return_value = mock_handler 63 | yield mock_handler 64 | 65 | 66 | @pytest.fixture 67 | def mock_data_loader() -> Generator[Mock, None, None]: 68 | """Mock DataLoader for filesystem operation testing.""" 69 | with patch("fluidize.core.utils.dataloader.data_loader.DataLoader._get_handler") as mock_handler_getter: 70 | mock_handler = Mock() 71 | mock_handler_getter.return_value = mock_handler 72 | yield mock_handler 73 | 74 | 75 | @pytest.fixture 76 | def mock_data_writer() -> Generator[Mock, None, None]: 77 | """Mock DataWriter for filesystem write operation testing.""" 78 | with patch("fluidize.core.utils.dataloader.data_writer.DataWriter") as mock_writer: 79 | yield mock_writer 80 | 81 | 82 | @pytest.fixture 83 | def setup_project_paths(temp_dir: Path, sample_project: ProjectSummary) -> dict[str, UPath]: 84 | """Set up project directory structure for testing.""" 85 | projects_path = UPath(temp_dir / "projects") 86 | project_path = projects_path / sample_project.id 87 | metadata_path = project_path / "metadata.yaml" 88 | 89 | projects_path.mkdir(parents=True, exist_ok=True) 90 | project_path.mkdir(parents=True, exist_ok=True) 91 | 92 | return 
{"projects_path": projects_path, "project_path": project_path, "metadata_path": metadata_path} 93 | -------------------------------------------------------------------------------- /fluidize/core/modules/run/node/methods/base/execstrat.py: -------------------------------------------------------------------------------- 1 | import time 2 | from abc import ABC, abstractmethod 3 | from typing import Any, Optional 4 | 5 | from fluidize.core.types.node import nodeProperties_simulation 6 | from fluidize.core.types.project import ProjectSummary 7 | 8 | 9 | class BaseExecutionStrategy(ABC): 10 | def __init__( 11 | self, 12 | node: nodeProperties_simulation, 13 | prev_node: Optional[nodeProperties_simulation], 14 | project: ProjectSummary, 15 | mlflow_tracker: Any = None, 16 | run_id: Optional[str] = None, 17 | run_metadata: Any = None, 18 | ) -> None: 19 | self.node = node 20 | self.prev_node = prev_node 21 | self.project = project 22 | self.mlflow_tracker = mlflow_tracker 23 | self.run_id = run_id 24 | self.run_metadata = run_metadata 25 | 26 | def set_context( 27 | self, 28 | nodeProperties_simulation: nodeProperties_simulation, 29 | prev_nodeProperties_simulation: Optional[nodeProperties_simulation], 30 | project: ProjectSummary, 31 | ) -> None: 32 | """Update the context for reusing the strategy instance""" 33 | self.node = nodeProperties_simulation 34 | self.prev_node = prev_nodeProperties_simulation 35 | self.project = project 36 | 37 | @abstractmethod 38 | def _set_environment(self) -> Any: 39 | """Load the environment for the node execution""" 40 | pass 41 | 42 | @abstractmethod 43 | def _load_execution_manager(self) -> Any: 44 | """Load the execution manager for the node""" 45 | pass 46 | 47 | def prepare_environment(self) -> None: 48 | self.env_manager = self._set_environment() 49 | 50 | # Load the parameters 51 | simulation_params, properties_params = self.env_manager.load_node_parameters() 52 | 53 | # Process the parameters with the environment manager 54 | 
self.env_manager.process_parameters(simulation_params, properties_params) 55 | 56 | def execute_simulation(self) -> Any: 57 | # Track execution time 58 | start_time = time.time() 59 | 60 | self.env_manager = self._load_execution_manager() 61 | result = self.env_manager.execute() 62 | 63 | # Log execution metrics to MLFlow 64 | if self.mlflow_tracker: 65 | try: 66 | execution_time = time.time() - start_time 67 | self.mlflow_tracker.log_metrics({ 68 | "execution_time_seconds": execution_time, 69 | }) 70 | 71 | # Log execution mode 72 | execution_mode = self.__class__.__name__.replace("ExecutionStrategy", "").lower() 73 | self.mlflow_tracker.log_tag("execution_mode", execution_mode) 74 | 75 | # Log result status 76 | if isinstance(result, str): 77 | success = "success" in result.lower() 78 | self.mlflow_tracker.log_tag("execution_result", "success" if success else "failure") 79 | elif isinstance(result, bool): 80 | self.mlflow_tracker.log_tag("execution_result", "success" if result else "failure") 81 | 82 | except Exception as e: 83 | # Don't fail the execution if MLFlow logging fails 84 | print(f"MLFlow logging failed: {e}") 85 | 86 | return result 87 | 88 | @abstractmethod 89 | def handle_files(self) -> None: 90 | """Handle file operations for the execution.""" 91 | pass 92 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "fluidize" 3 | version = "0.0.2" 4 | description = "Python package for automatic generation of scientific computing software pipelines." 
5 | authors = [{ name = "Henry Bae", email = "henry@fluidize.ai" }] 6 | readme = "README.md" 7 | keywords = ['python'] 8 | requires-python = ">=3.9,<4.0" 9 | classifiers = [ 10 | "Intended Audience :: Developers", 11 | "Programming Language :: Python", 12 | "Programming Language :: Python :: 3", 13 | "Programming Language :: Python :: 3.9", 14 | "Programming Language :: Python :: 3.10", 15 | "Programming Language :: Python :: 3.11", 16 | "Programming Language :: Python :: 3.12", 17 | "Programming Language :: Python :: 3.13", 18 | "Topic :: Software Development :: Libraries :: Python Modules", 19 | ] 20 | dependencies = [ 21 | "asciitree>=0.3.3", 22 | "docker>=7.1.0", 23 | "fluidize-sdk>=0.7.0", 24 | "jinja2>=3.1.6", 25 | "mlflow>=3.1.4", 26 | "networkx>=3.2.1", 27 | "pydantic>=2.11.7", 28 | "pyyaml>=6.0.2", 29 | "universal-pathlib>=0.2.6", 30 | ] 31 | 32 | [project.urls] 33 | Homepage = "https://Fluidize-Inc.github.io/fluidize-python/" 34 | Repository = "https://github.com/Fluidize-Inc/fluidize-python" 35 | Documentation = "https://Fluidize-Inc.github.io/fluidize-python/" 36 | 37 | [dependency-groups] 38 | dev = [ 39 | "pytest>=7.2.0", 40 | "pre-commit>=2.20.0", 41 | "tox-uv>=1.11.3", 42 | "deptry>=0.23.0", 43 | "mypy>=0.991", 44 | "pytest-cov>=4.0.0", 45 | "ruff>=0.11.5", 46 | "mkdocs>=1.4.2", 47 | "mkdocs-material>=8.5.10", 48 | "mkdocstrings[python]>=0.26.1", 49 | "types-networkx>=3.4.2.20250509", 50 | "types-pyyaml>=6.0.12.20250809", 51 | ] 52 | 53 | [build-system] 54 | requires = ["hatchling"] 55 | build-backend = "hatchling.build" 56 | 57 | [tool.hatch.build.targets.wheel] 58 | packages = ["fluidize"] 59 | 60 | [tool.mypy] 61 | files = ["fluidize"] 62 | disallow_untyped_defs = true 63 | disallow_any_unimported = true 64 | no_implicit_optional = true 65 | check_untyped_defs = true 66 | warn_return_any = true 67 | warn_unused_ignores = true 68 | show_error_codes = true 69 | 70 | [tool.pytest.ini_options] 71 | testpaths = ["tests"] 72 | 73 | [tool.ruff] 74 | 
target-version = "py39" 75 | line-length = 120 76 | fix = true 77 | exclude = [ 78 | "tests/fixtures/", 79 | "utils", 80 | "examples", 81 | ] 82 | 83 | [tool.ruff.lint] 84 | select = [ 85 | # flake8-2020 86 | "YTT", 87 | # flake8-bandit 88 | "S", 89 | # flake8-bugbear 90 | "B", 91 | # flake8-builtins 92 | "A", 93 | # flake8-comprehensions 94 | "C4", 95 | # flake8-debugger 96 | "T10", 97 | # flake8-simplify 98 | "SIM", 99 | # isort 100 | "I", 101 | # mccabe 102 | "C90", 103 | # pycodestyle 104 | "E", "W", 105 | # pyflakes 106 | "F", 107 | # pygrep-hooks 108 | "PGH", 109 | # pyupgrade 110 | "UP", 111 | # ruff 112 | "RUF", 113 | # tryceratops 114 | "TRY", 115 | ] 116 | ignore = [ 117 | # LineTooLong 118 | "E501", 119 | # DoNotAssignLambda 120 | "E731", 121 | # Use `X | Y` for type annotations (not compatible with Python 3.9) 122 | "UP007", 123 | ] 124 | 125 | [tool.ruff.lint.per-file-ignores] 126 | "tests/*" = ["S101"] 127 | 128 | [tool.ruff.format] 129 | preview = true 130 | 131 | [tool.coverage.report] 132 | skip_empty = true 133 | 134 | [tool.coverage.run] 135 | branch = true 136 | source = ["fluidize"] 137 | 138 | [tool.deptry] 139 | extend_exclude = ["utils", "examples"] 140 | -------------------------------------------------------------------------------- /tests/unit/managers/test_simulation.py: -------------------------------------------------------------------------------- 1 | """Unit tests for Simulations Manager - high-level simulation library interface.""" 2 | 3 | from unittest.mock import Mock, patch 4 | 5 | import pytest 6 | 7 | from fluidize.managers.simulations import SimulationsManager 8 | 9 | 10 | class TestSimulationsManager: 11 | """Test suite for SimulationsManager class.""" 12 | 13 | @pytest.fixture 14 | def mock_adapter(self): 15 | """Create a mock adapter for testing.""" 16 | adapter = Mock() 17 | return adapter 18 | 19 | @pytest.fixture 20 | def simulations_manager(self, mock_adapter): 21 | """Create a SimulationsManager instance for 
testing.""" 22 | with patch("fluidize.managers.simulations.FluidizeSDK"): 23 | return SimulationsManager(mock_adapter) 24 | 25 | @patch("fluidize.managers.simulations.FluidizeSDK") 26 | def test_init(self, mock_sdk_class, mock_adapter): 27 | """Test SimulationsManager initialization.""" 28 | manager = SimulationsManager(mock_adapter) 29 | 30 | assert manager._adapter is mock_adapter 31 | assert manager.fluidize_sdk is not None 32 | mock_sdk_class.assert_called_once() 33 | 34 | @patch("fluidize.managers.simulations.FluidizeSDK") 35 | def test_list_simulations_returns_list(self, mock_sdk_class, simulations_manager): 36 | """Test that list_simulations returns a list.""" 37 | # Arrange 38 | mock_sdk_instance = mock_sdk_class.return_value 39 | simulations_manager.fluidize_sdk = mock_sdk_instance 40 | # Create a mock simulation object with model_dump method 41 | mock_simulation = Mock() 42 | mock_simulation.model_dump.return_value = { 43 | "name": "Test Simulation", 44 | "id": "sim_001", 45 | "description": "A test simulation", 46 | "date": "2024-01-01", 47 | "version": "1.0.0", 48 | "authors": [], 49 | "tags": [], 50 | } 51 | mock_sdk_instance.simulation.list_simulations.return_value = [mock_simulation] 52 | 53 | # Act 54 | result = simulations_manager.list_simulations() 55 | 56 | # Assert 57 | assert isinstance(result, list) 58 | mock_sdk_instance.simulation.list_simulations.assert_called_once_with(sim_global=True) 59 | 60 | @patch("fluidize.managers.simulations.FluidizeSDK") 61 | def test_list_simulations_empty_list(self, mock_sdk_class, simulations_manager): 62 | """Test that list_simulations handles empty results.""" 63 | # Arrange 64 | mock_sdk_instance = mock_sdk_class.return_value 65 | simulations_manager.fluidize_sdk = mock_sdk_instance 66 | mock_sdk_instance.simulation.list_simulations.return_value = [] 67 | 68 | # Act 69 | result = simulations_manager.list_simulations() 70 | 71 | # Assert 72 | assert result == [] 73 | 
mock_sdk_instance.simulation.list_simulations.assert_called_once_with(sim_global=True) 74 | 75 | @patch("fluidize.managers.simulations.FluidizeSDK") 76 | def test_list_simulations_sdk_delegation(self, mock_sdk_class, simulations_manager): 77 | """Test that list_simulations properly delegates to SDK.""" 78 | # Arrange 79 | mock_sdk_instance = mock_sdk_class.return_value 80 | simulations_manager.fluidize_sdk = mock_sdk_instance 81 | mock_sdk_instance.simulation.list_simulations.return_value = [] 82 | 83 | # Act 84 | simulations_manager.list_simulations() 85 | 86 | # Assert 87 | mock_sdk_instance.simulation.list_simulations.assert_called_once_with(sim_global=True) 88 | -------------------------------------------------------------------------------- /examples/example-projects/MUJOCO/Mujoco-Simulation/source/debug_tutorial.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Debug script to test the exact tutorial MJCF""" 3 | 4 | import os 5 | 6 | import mujoco 7 | 8 | # Set environment for headless rendering 9 | os.environ["MUJOCO_GL"] = "osmesa" 10 | 11 | # EXACT tutorial MJCF from your message 12 | MJCF = """ 13 | 14 | 15 | 17 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | """ 59 | 60 | 61 | def main(): 62 | print("🧪 Testing exact tutorial MJCF...") 63 | 64 | try: 65 | model = mujoco.MjModel.from_xml_string(MJCF) 66 | data = mujoco.MjData(model) 67 | 68 | print("✅ Model created successfully") 69 | print(f"📊 Bodies: {model.nbody}, Geoms: {model.ngeom}") 70 | print(f"🔗 Tendons: {model.ntendon}") 71 | 72 | # Reset and step forward to see initial state 73 | mujoco.mj_resetData(model, data) 74 | mujoco.mj_forward(model, data) 75 | 76 | print(f"Initial pinata position: {data.body('box_and_sphere').xpos}") 77 | print(f"Initial wire length: 
{data.tendon('wire').length[0]:.3f}m") 78 | 79 | # Step simulation a few times to see if pinata falls 80 | data.ctrl = 20 # Set motor control 81 | 82 | for i in range(100): 83 | mujoco.mj_step(model, data) 84 | if i % 20 == 0: 85 | pos = data.body("box_and_sphere").xpos 86 | wire_len = data.tendon("wire").length[0] 87 | print(f"Step {i:3d}: pinata z={pos[2]:.3f}, wire_len={wire_len:.3f}") 88 | 89 | final_pos = data.body("box_and_sphere").xpos 90 | print(f"\nFinal pinata position: {final_pos}") 91 | 92 | if final_pos[2] < -0.4: 93 | print("❌ PINATA FELL TO GROUND!") 94 | else: 95 | print("✅ Pinata stayed suspended") 96 | 97 | return 0 98 | 99 | except Exception as e: 100 | print(f"❌ Error: {e}") 101 | return 1 102 | 103 | 104 | if __name__ == "__main__": 105 | exit(main()) 106 | -------------------------------------------------------------------------------- /fluidize/core/types/node.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | This module defines the format for files within individual nodes in the graph. 4 | # TODO: Make more clear documentation. 

"""

import datetime
from typing import Optional, Union

from pydantic import BaseModel, ConfigDict, computed_field

from .file_models.metadata_model import MetadataModel
from .file_models.parameters_model import ParametersModel
from .file_models.properties_model import PropertiesModel
from .runs import RunStatus

# ISSUE #32
""" Properties.yaml.simulations"""


class nodeProperties_simulation(PropertiesModel):
    """Per-node properties persisted in the node's properties.yaml (key: "simulation")."""

    properties_version: str = "1.0"  # schema version of this properties block
    # node_id is now a computed field based on folder name (see below)
    container_image: str  # image used to run this node
    simulation_mount_path: str  # where the node folder is mounted inside the container
    source_output_folder: str = "output"  # Where simulation creates files
    should_run: bool = True  # Add default value
    last_run: Optional[str] = None  # timestamp of last run; None when never run
    run_status: RunStatus = RunStatus.NOT_RUN
    version: Union[str, float] = "1.0"  # Accept both string and float

    @computed_field
    def node_id(self) -> str:
        """Node ID is always the folder name where this node exists"""
        return self.directory.name

    @computed_field
    def output_path(self) -> str:
        # Absolute path to the node's output folder, derived from the file's
        # directory (requires the model to have been loaded from disk).
        return str(self.directory / self.source_output_folder)

    model_config = ConfigDict(
        use_enum_values=True,  # Use enum values in serialization
        extra="ignore",  # Ignore extra fields
    )

    class Key:
        # Top-level key this model is nested under when serialized
        # (consumed by the file-model base's model_dump_wrapped()).
        key = "simulation"
        # Extra fields won't trigger validation errors
        extra = "ignore"


""" Metadata.yaml"""
""" This file contains metadata about the node, including

name: str = display name of the node
description: str = description of the source code
version: str = version of the node
authors: list[author] = list of authors of the node
tags: list[str] = list of tags associated with the node
code_url: str = URL to the source code of the node
paper_url: Optional[str] = URL to the paper associated with the node

"""


class author(BaseModel):
    """A single author entry in a node's metadata."""

    name: str
    institution: str
    email: Optional[str] = None  # Optional email field for the author


# More will be done here later
class tag(BaseModel):
    """A display tag attached to a node."""

    name: str
    description: Optional[str] = None  # Optional description for the tag
    color: Optional[str] = None  # Optional color for the tag, e.g., hex code
    icon: Optional[str] = None  # Optional icon for the tag, e.g., an emoji or icon name


# ISSUE #32
class nodeMetadata_simulation(MetadataModel):
    """Node metadata persisted in metadata.yaml (key: "simulation")."""

    metadata_version: str = "1.0"
    name: str
    # This is the simulation ID - stays constant throughout
    id: str
    description: str
    date: Optional[datetime.date]  # required field, but None is accepted
    version: str
    authors: list[author]
    tags: list[tag]
    code_url: Optional[str] = None
    paper_url: Optional[str] = None
    mlflow_run_id: Optional[str] = None  # set once the node has an MLflow run

    class Key:
        key = "simulation"
        metadata_version = "1.0"


class nodeParameters_simulation(ParametersModel):
    """
    Parameters configuration for a simulation node.

    Handles loading and saving of parameters.json files with the structure:
    {"parameters": [list of Parameter objects]}
    """

    class Key:
        key = "parameters"
from dataclasses import dataclass
from typing import TYPE_CHECKING, ClassVar, Optional

if TYPE_CHECKING:
    from fluidize.core.types.execution_models import ExecutionContext


@dataclass
class ResourceSpec:
    """Resource specification for container execution.

    ``requests``/``limits`` use Kubernetes resource keys and quantity
    strings (e.g. {"cpu": "2", "memory": "8Gi"}).
    """

    requests: dict[str, str]
    limits: dict[str, str]
    node_selector: Optional[dict[str, str]] = None
    tolerations: Optional[list[dict]] = None


class ResourceBuilder:
    """
    Builds resource specifications for container execution.

    Simple approach: read from Terraform outputs or use minimal defaults.
    """

    # Minimal default if no Terraform outputs available
    DEFAULT_RESOURCES: ClassVar[dict[str, dict[str, str]]] = {
        "requests": {"cpu": "1", "memory": "4Gi"},
        "limits": {"cpu": "2", "memory": "8Gi"},
    }

    @staticmethod
    def build_resource_spec(context: "ExecutionContext") -> ResourceSpec:
        """
        Build resource specification from context.

        Args:
            context: Execution context

        Returns:
            ResourceSpec with resource requirements
        """
        # Use explicit requirements if provided
        if context.resource_requirements:
            return ResourceSpec(
                requests={
                    "cpu": context.resource_requirements.cpu_request,
                    "memory": context.resource_requirements.memory_request,
                },
                limits={
                    "cpu": context.resource_requirements.cpu_limit,
                    "memory": context.resource_requirements.memory_limit,
                },
                node_selector={"cloud.google.com/gke-nodepool": context.get_node_pool()},
            )

        # Otherwise use defaults (copies so callers can't mutate the class default)
        return ResourceSpec(
            requests=ResourceBuilder.DEFAULT_RESOURCES["requests"].copy(),
            limits=ResourceBuilder.DEFAULT_RESOURCES["limits"].copy(),
        )

    @staticmethod
    def _k8s_memory_to_docker(quantity: str) -> str:
        """Convert a Kubernetes memory quantity to Docker CLI format.

        Handles the binary suffixes Ki/Mi/Gi and the decimal suffixes K/M/G;
        a plain byte count (no suffix) is returned unchanged.
        """
        # Two-character binary suffixes must be tested before the
        # single-character decimal ones ("Ki" before "K", etc.).
        suffix_map = {"Ki": "k", "Mi": "m", "Gi": "g", "K": "k", "M": "m", "G": "g"}
        for k8s_suffix, docker_suffix in suffix_map.items():
            if quantity.endswith(k8s_suffix):
                return quantity[: -len(k8s_suffix)] + docker_suffix
        return quantity

    @staticmethod
    def build_docker_resource_args(resource_spec: ResourceSpec) -> list[str]:
        """
        Convert resource specification to Docker CLI arguments.

        Only limits are translated (Docker has no notion of requests).

        Args:
            resource_spec: Resource specification

        Returns:
            List of Docker resource arguments
        """
        args: list[str] = []

        # Memory limit: translate the Kubernetes quantity to Docker's
        # lowercase single-letter suffix format.
        if "memory" in resource_spec.limits:
            args.extend(["--memory", ResourceBuilder._k8s_memory_to_docker(resource_spec.limits["memory"])])

        # CPU limit: Docker's --cpus accepts (fractional) CPU counts directly.
        if "cpu" in resource_spec.limits:
            args.extend(["--cpus", resource_spec.limits["cpu"]])

        return args
**Project-Scoped Managers** - Bound to specific projects 16 | - `GraphManager`: Manages nodes and edges within a project's computational graph 17 | - `RunsManager`: Executes and monitors workflow runs for a project 18 | 19 | Design Pattern: 20 | The module uses a wrapper pattern where global managers return entity 21 | objects that provide convenient access to scoped operations:: 22 | 23 | client.projects (Projects) 24 | └── .create() / .get() → Project entity 25 | ├── .graph (GraphManager) - Computational graph operations 26 | └── .runs (RunsManager) - Workflow execution operations 27 | 28 | Usage Examples: 29 | Basic project workflow:: 30 | 31 | >>> from fluidize import FluidizeClient 32 | >>> client = FluidizeClient(mode="local") 33 | 34 | # Global manager creates project 35 | >>> project = client.projects.create( 36 | ... project_id="cfd-sim", 37 | ... label="CFD Simulation", 38 | ... description="Fluid dynamics analysis" 39 | ... ) 40 | 41 | # Project entity provides scoped managers 42 | >>> node = project.graph.add_node(node_data) 43 | >>> result = project.runs.run_flow(payload) 44 | 45 | Managing multiple projects:: 46 | 47 | >>> # List all projects 48 | >>> projects = client.projects.list() 49 | >>> for p in projects: 50 | ... print(f"{p.id}: {p.label}") 51 | 52 | >>> # Update existing project 53 | >>> updated = client.projects.update( 54 | ... "cfd-sim", 55 | ... status="completed" 56 | ... ) 57 | 58 | Graph operations:: 59 | 60 | >>> project = client.projects.get("my-project") 61 | >>> graph_data = project.graph.get() 62 | >>> project.graph.add_node(simulation_node) 63 | >>> project.graph.add_edge(connection_edge) 64 | >>> ascii_viz = project.graph.show() 65 | 66 | Running workflows:: 67 | 68 | >>> project = client.projects.get("my-project") 69 | >>> payload = RunFlowPayload( 70 | ... name="simulation-run", 71 | ... description="CFD analysis run" 72 | ... 
) 73 | >>> result = project.runs.run_flow(payload) 74 | >>> status = project.runs.get_status(result["run_number"]) 75 | 76 | File Structure: 77 | - `projects.py`: Global project CRUD operations (Projects class) 78 | - `project_manager.py`: Single project entity with sub-managers (Project class) 79 | - `graph.py`: Project-scoped graph operations (GraphManager class) 80 | - `runs.py`: Project-scoped run operations (RunsManager class) 81 | 82 | Threading and adapter Support: 83 | All managers are thread-safe and support both local filesystem and 84 | cloud API adapters through the underlying adapter pattern. The choice 85 | of adapter is transparent to the manager classes. 86 | 87 | See Also: 88 | - :class:`~fluidize.managers.registry.RegistryManager`: Global project manager 89 | - :class:`~fluidize.managers.project.ProjectManager`: Project entity wrapper 90 | - :class:`~fluidize.managers.graph.GraphManager`: Graph operations 91 | - :class:`~fluidize.managers.runs.RunsManager`: Run operations 92 | """ 93 | -------------------------------------------------------------------------------- /fluidize/core/types/file_models/json_file_model_base.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any, TypeVar, Union 4 | 5 | from pydantic import BaseModel, ConfigDict, PrivateAttr, ValidationError 6 | from upath import UPath 7 | 8 | T = TypeVar("T", bound="JSONFileModelBase") 9 | 10 | 11 | class JSONFileModelBase(BaseModel): 12 | _filepath: Union[UPath, None] = PrivateAttr(default=None) 13 | 14 | @property 15 | def filepath(self) -> UPath: 16 | """Return the exact path to the model file. Raises if not set.""" 17 | if not self._filepath: 18 | raise ValueError() 19 | return self._filepath 20 | 21 | @property 22 | def directory(self) -> UPath: 23 | """Return the folder containing the model file. 
Raises if filepath not set.""" 24 | fp = self.filepath 25 | return fp.parent 26 | 27 | @classmethod 28 | def from_file(cls: type[T], directory: Union[str, UPath]) -> T: 29 | from fluidize.core.utils.dataloader.data_loader import DataLoader 30 | 31 | filename = getattr(cls, "_filename", None) 32 | if not filename: 33 | raise TypeError() 34 | 35 | path = UPath(directory) / filename 36 | data = DataLoader.load_json(path) 37 | 38 | if not data: 39 | raise FileNotFoundError() 40 | 41 | try: 42 | instance = cls.model_validate(data) 43 | except ValidationError: 44 | raise 45 | except Exception as e: 46 | raise ValueError() from e 47 | else: 48 | instance._filepath = path 49 | return instance 50 | 51 | @classmethod 52 | def from_dict_and_path(cls: type[T], data: dict, path: UPath) -> T: 53 | """Creates a model instance from a dictionary and a path, without reading the file again.""" 54 | if not data: 55 | raise ValueError() 56 | 57 | try: 58 | instance = cls.model_validate(data) 59 | except ValidationError: 60 | raise 61 | except Exception as e: 62 | raise ValueError() from e 63 | else: 64 | instance._filepath = path 65 | return instance 66 | 67 | def model_dump_wrapped(self) -> dict[str, Any]: 68 | config = getattr(self, "Key", None) 69 | key = getattr(config, "key", None) 70 | 71 | if not key: 72 | return self.model_dump() 73 | 74 | return {key: self.model_dump(mode="json")} 75 | 76 | def save(self, directory: UPath | None = None) -> None: 77 | from fluidize.core.utils.dataloader.data_loader import DataLoader 78 | from fluidize.core.utils.dataloader.data_writer import DataWriter 79 | 80 | if directory: 81 | filename = getattr(self.__class__, "_filename", None) 82 | if not filename: 83 | raise TypeError() 84 | self._filepath = UPath(directory) / filename 85 | 86 | if not self._filepath: 87 | raise ValueError() 88 | 89 | # Load existing data to preserve other keys, if the file already exists. 
class FileModelBase(BaseModel):
    """Base class for pydantic models persisted as YAML files on disk.

    Subclasses are expected to provide a ``_filename`` class attribute naming
    the backing file, and may provide an inner ``Key`` class with a ``key``
    attribute to nest the serialized payload under that key (see
    ``model_dump_wrapped``).
    """

    # Path of the backing file; populated by from_file/from_dict_and_path/save.
    _filepath: Union[UPath, None] = PrivateAttr(default=None)

    @property
    def filepath(self) -> UPath:
        """Return the exact path to the model file. Raises if not set."""
        if not self._filepath:
            raise ValueError("filepath has not been set on this model instance")
        return self._filepath

    @property
    def directory(self) -> UPath:
        """Return the folder containing the model file. Raises if filepath not set."""
        fp = self.filepath
        return fp.parent

    @classmethod
    def from_file(cls: type[T], directory: Union[str, UPath]) -> T:
        """Load and validate an instance from ``directory / cls._filename``.

        Args:
            directory: Folder containing the model file.

        Returns:
            A validated model instance with its filepath recorded.

        Raises:
            TypeError: If the subclass does not define ``_filename``.
            FileNotFoundError: If the file is missing or empty.
            ValidationError: If the file contents fail pydantic validation.
            ValueError: If model construction fails for any other reason.
        """
        from fluidize.core.utils.dataloader.data_loader import DataLoader

        filename = getattr(cls, "_filename", None)
        if not filename:
            raise TypeError(f"{cls.__name__} must define a _filename class attribute")

        path = UPath(directory) / filename
        data = DataLoader.load_yaml(path)

        if not data:
            raise FileNotFoundError(f"No data found at {path}")

        try:
            instance = cls.model_validate(data)
        except ValidationError:
            # Preserve pydantic's detailed validation error for callers.
            raise
        except Exception as e:
            raise ValueError(f"Failed to construct {cls.__name__} from {path}") from e
        else:
            instance._filepath = path
            return instance

    @classmethod
    def from_dict_and_path(cls: type[T], data: Any, path: Optional[UPath]) -> T:
        """Creates a model instance from a dictionary and a path, without reading the file again.

        Raises:
            ValueError: If ``data`` is empty, or construction fails for a
                reason other than pydantic validation.
            ValidationError: If the data fails pydantic validation.
        """
        if not data:
            raise ValueError(f"No data provided to build {cls.__name__}")

        try:
            instance = cls.model_validate(data)
        except ValidationError:
            raise
        except Exception as e:
            raise ValueError(f"Failed to construct {cls.__name__} from provided data") from e
        else:
            instance._filepath = path
            return instance

    def model_dump_wrapped(self) -> dict[str, Any]:
        """Serialize the model, nested under the subclass's ``Key.key`` if defined."""
        config = getattr(self, "Key", None)
        key = getattr(config, "key", None)

        if not key:
            return self.model_dump()

        return {key: self.model_dump(mode="json")}

    def save(self, directory: UPath | None = None) -> None:
        """Write the model to its YAML file, merging over any existing keys.

        Args:
            directory: Optional new directory; when given, the filepath is
                re-derived as ``directory / cls._filename`` before writing.

        Raises:
            TypeError: If ``directory`` is given but ``_filename`` is undefined.
            ValueError: If no filepath is set and no directory is given.
        """
        from fluidize.core.utils.dataloader.data_loader import DataLoader
        from fluidize.core.utils.dataloader.data_writer import DataWriter

        if directory:
            filename = getattr(self.__class__, "_filename", None)
            if not filename:
                raise TypeError(f"{type(self).__name__} must define a _filename class attribute")
            self._filepath = UPath(directory) / filename

        if not self._filepath:
            raise ValueError("Cannot save: no filepath set and no directory provided")

        # Load existing data to preserve other keys, if the file already exists.
        # Pass a new UPath object to avoid issues with object caching if it's the same file.
        existing_data = DataLoader.load_yaml(UPath(self._filepath))

        new_data = self.model_dump_wrapped()
        existing_data.update(new_data)

        DataWriter.write_yaml(self._filepath, existing_data)

    def edit(self, **kwargs: Any) -> None:
        """Set the given attributes and persist the model in one call.

        Raises:
            AttributeError: If any keyword does not name an existing attribute.
        """
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise AttributeError(f"{type(self).__name__} has no attribute {key!r}")
        self.save()

    model_config = ConfigDict(arbitrary_types_allowed=True)
ProjectSummary) -> UPath: 34 | """Get the path to the runs directory for a specific project""" 35 | project_path = self.get_project_path(project) 36 | return project_path / "runs" 37 | 38 | def get_run_path(self, project: ProjectSummary, run_number: Optional[int] = None) -> UPath: 39 | """Get the path to a specific run within a project""" 40 | return self.get_runs_path(project) / f"run_{run_number}" 41 | 42 | def get_node_path(self, project: ProjectSummary, node_id: str, run_number: Optional[int] = None) -> UPath: 43 | """Get the directory for a specific node. 44 | 45 | If run_number is provided, it will return the path to the node corresponding to that run. 46 | If run_number is None, it will return the path to the node in the editor. 47 | 48 | """ 49 | if run_number is not None: 50 | return self.get_runs_path(project) / f"run_{run_number}" / node_id 51 | # Without the run number it's the editor run path 52 | return self.get_project_path(project) / node_id 53 | 54 | def get_node_parameters_path( 55 | self, project: ProjectSummary, node_id: str, run_number: Optional[int] = None 56 | ) -> UPath: 57 | """Get the path to the parameters file for a specific node. 58 | 59 | If run_number is provided, it will return the path to the node parameters corresponding to that run. 60 | If run_number is None, it will return the path to the node parameters in the editor. 61 | 62 | """ 63 | return self.get_node_path(project, node_id, run_number) / "parameters.json" 64 | 65 | def get_properties_path(self, project: ProjectSummary, node_id: str, run_number: Optional[int] = None) -> UPath: 66 | return self.get_node_path(project, node_id, run_number) / "properties.yaml" 67 | 68 | def get_node_output_path(self, project: ProjectSummary, run_number: int, node_id: str) -> UPath: 69 | """ 70 | Get the path to the output directory for a specific node in a run. 
71 | 72 | Args: 73 | project: The project containing the run 74 | run_number: The run number 75 | node_id: ID of the node 76 | 77 | Returns: 78 | Path to the node's output directory: {project_path}/runs/run_{run_number}/outputs/{node_id}/ 79 | """ 80 | return self.get_run_path(project, run_number) / FileConstants.OUTPUTS_DIR / node_id 81 | 82 | @abstractmethod 83 | def get_mlflow_tracking_uri(self) -> str: 84 | """Get the MLFlow tracking URI for this storage backend""" 85 | pass 86 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to `fluidize-python` 2 | 3 | Contributions are welcome, and they are greatly appreciated! 4 | Every little bit helps, and credit will always be given. 5 | 6 | You can contribute in many ways: 7 | 8 | # Types of Contributions 9 | 10 | ## Report Bugs 11 | 12 | Report bugs at https://github.com/Fluidize-Inc/fluidize-python/issues 13 | 14 | If you are reporting a bug, please include: 15 | 16 | - Your operating system name and version. 17 | - Any details about your local setup that might be helpful in troubleshooting. 18 | - Detailed steps to reproduce the bug. 19 | 20 | ## Fix Bugs 21 | 22 | Look through the GitHub issues for bugs. 23 | Anything tagged with "bug" and "help wanted" is open to whoever wants to implement a fix for it. 24 | 25 | ## Implement Features 26 | 27 | Look through the GitHub issues for features. 28 | Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. 29 | 30 | ## Write Documentation 31 | 32 | fluidize-python could always use more documentation, whether as part of the official docs, in docstrings, or even on the web in blog posts, articles, and such. 33 | 34 | ## Submit Feedback 35 | 36 | The best way to send feedback is to file an issue at https://github.com/Fluidize-Inc/fluidize-python/issues. 
37 | 38 | If you are proposing a new feature: 39 | 40 | - Explain in detail how it would work. 41 | - Keep the scope as narrow as possible, to make it easier to implement. 42 | - Remember that this is a volunteer-driven project, and that contributions 43 | are welcome :) 44 | 45 | # Get Started! 46 | 47 | Ready to contribute? Here's how to set up `fluidize-python` for local development. 48 | Please note this documentation assumes you already have `uv` and `Git` installed and ready to go. 49 | 50 | 1. Fork the `fluidize-python` repo on GitHub. 51 | 52 | 2. Clone your fork locally: 53 | 54 | ```bash 55 | cd 56 | git clone git@github.com:YOUR_NAME/fluidize-python.git 57 | ``` 58 | 59 | 3. Now we need to install the environment. Navigate into the directory 60 | 61 | ```bash 62 | cd fluidize-python 63 | ``` 64 | 65 | Then, install and activate the environment with: 66 | 67 | ```bash 68 | uv sync 69 | ``` 70 | 71 | 4. Install pre-commit to run linters/formatters at commit time: 72 | 73 | ```bash 74 | uv run pre-commit install 75 | ``` 76 | 77 | 5. Create a branch for local development: 78 | 79 | ```bash 80 | git checkout -b name-of-your-bugfix-or-feature 81 | ``` 82 | 83 | Now you can make your changes locally. 84 | 85 | 6. Don't forget to add test cases for your added functionality to the `tests` directory. 86 | 87 | 7. When you're done making changes, check that your changes pass the formatting tests. 88 | 89 | ```bash 90 | make check 91 | ``` 92 | 93 | Now, validate that all unit tests are passing: 94 | 95 | ```bash 96 | make test 97 | ``` 98 | 99 | 9. Before raising a pull request you should also run tox. 100 | This will run the tests across different versions of Python: 101 | 102 | ```bash 103 | tox 104 | ``` 105 | 106 | This requires you to have multiple versions of python installed. 107 | This step is also triggered in the CI/CD pipeline, so you could also choose to skip this step locally. 108 | 109 | 10. 
class SampleProjects:
    """Collection of sample project data for testing."""

    @staticmethod
    def standard_project() -> ProjectSummary:
        """Standard project with all fields populated."""
        fields = {
            "id": "sample-project-001",
            "label": "Sample Project",
            "description": "A comprehensive sample project for testing purposes",
            "status": "active",
            "location": "/sample/project/location",
            "metadata_version": "1.0",
        }
        return ProjectSummary(**fields)

    @staticmethod
    def minimal_project() -> ProjectSummary:
        """Minimal project with only required fields."""
        return ProjectSummary(id="minimal-001", metadata_version="1.0")

    @staticmethod
    def project_with_special_chars() -> ProjectSummary:
        """Project with special characters for edge case testing."""
        return ProjectSummary(
            id="special-chars-project",
            label="Test Project with Special Chars: !@#$%^&*()",
            description="Description with\nnewlines and\ttabs and 'quotes'",
            status="test-status",
            location="/path/with spaces/and-special-chars",
            metadata_version="1.0",
        )

    @staticmethod
    def projects_for_listing() -> list[ProjectSummary]:
        """Multiple projects for testing list operations."""
        rows = [
            ("list-project-001", "First Project", "First project in the list", "active", "/list/test/1"),
            ("list-project-002", "Second Project", "Second project in the list", "pending", "/list/test/2"),
            ("list-project-003", "Third Project", "Third project in the list", "completed", "/list/test/3"),
        ]
        return [
            ProjectSummary(
                id=pid,
                label=label,
                description=desc,
                status=status,
                location=loc,
                metadata_version="1.0",
            )
            for pid, label, desc, status, loc in rows
        ]

    @staticmethod
    def project_update_data() -> dict:
        """Sample data for project updates."""
        return {
            "label": "Updated Project Label",
            "description": "This project has been updated",
            "status": "updated",
            "location": "/updated/location",
        }

    @staticmethod
    def project_creation_params() -> dict:
        """Parameters for project creation testing."""
        return {
            "project_id": "creation-test-001",
            "label": "Created Project",
            "description": "Project created via parameters",
            "location": "/created/project/path",
            "status": "created",
        }

    @staticmethod
    def invalid_project_data() -> list[dict]:
        """Invalid project data for error testing.

        Each entry is malformed in a different way so error paths can be
        exercised one at a time.
        """
        missing_id = {"label": "No ID Project", "description": "This project has no ID", "metadata_version": "1.0"}
        bad_version = {"id": "invalid-version", "metadata_version": "999.0"}
        empty_id = {"id": "", "metadata_version": "1.0"}
        return [missing_id, bad_version, empty_id]
-------------------------------------------------------------------------------- /fluidize/client.py: -------------------------------------------------------------------------------- 1 | """ 2 | Fluidize Python Client - High-level interface for the Fluidize Engine and API. 3 | """ 4 | 5 | from pathlib import Path 6 | from typing import Any, Literal, Optional 7 | 8 | from fluidize_sdk import FluidizeSDK 9 | 10 | import fluidize.core.utils.dataloader.loader.loader_local 11 | import fluidize.core.utils.dataloader.loader.writer_local 12 | 13 | # Ensure handlers are registered (redundant safety check) 14 | import fluidize.core.utils.pathfinder.methods.local # noqa: F401 15 | 16 | from .adapters.local import LocalAdapter 17 | from .config import FluidizeConfig 18 | from .managers.registry import RegistryManager 19 | 20 | 21 | class FluidizeClient: 22 | """ 23 | High-level client for interacting with Fluidize. 24 | 25 | This client provides an intuitive interface for managing projects, 26 | nodes, and running simulation flows. It supports two modes: 27 | 28 | - API mode: Connects to the Fluidize cloud API 29 | - Local mode: Works with local Fluidize engine installation 30 | 31 | Configuration is handled automatically through environment variables 32 | and the FluidizeConfig class. 33 | """ 34 | 35 | def __init__(self, mode: Literal["local", "api", "auto"] = "auto", base_path: Optional[Path] = None): 36 | """ 37 | Args: 38 | mode: Operation mode - "local", "api", or "auto" for environment detection 39 | base_path: Optional custom base path for local mode. 
If None, uses ~/.fluidize 40 | Config will handle all other settings via environment variables 41 | """ 42 | # Config handles all configuration logic 43 | self.config = FluidizeConfig(mode, base_path) 44 | 45 | # Check Docker availability for local mode 46 | if self.config.is_local_mode(): 47 | self.config.warn_if_docker_unavailable() 48 | 49 | # Initialize the appropriate adapter based on mode 50 | self._adapter = self._initialize_adapter() 51 | 52 | # Initialize resource managers 53 | self.projects = RegistryManager(self._adapter) 54 | 55 | def _initialize_adapter(self) -> Any: 56 | """Initialize the appropriate adapter based on the mode. 57 | 58 | Returns: 59 | Any: The initialized adapter 60 | """ 61 | if self.config.is_api_mode(): 62 | return self._initialize_api_adapter() 63 | else: 64 | return self._initialize_local_adapter() 65 | 66 | def _initialize_api_adapter(self) -> FluidizeSDK: 67 | """Initialize the API adapter using FluidizeSDK. 68 | 69 | Returns: 70 | FluidizeSDK: The initialized API adapter 71 | """ 72 | if not self.config.api_key: 73 | msg = "API mode requires an API key. Set the FLUIDIZE_API_KEY environment variable." 74 | raise ValueError(msg) 75 | 76 | return FluidizeSDK( 77 | api_token=self.config.api_key, 78 | ) 79 | 80 | def _initialize_local_adapter(self) -> LocalAdapter: 81 | """Initialize the local adapter. 82 | 83 | Returns: 84 | LocalAdapter: The initialized local adapter 85 | """ 86 | return LocalAdapter(self.config) 87 | 88 | @property 89 | def mode(self) -> str: 90 | """Get the current operation mode. 91 | 92 | Returns: 93 | str: The current operation mode 94 | """ 95 | return self.config.mode 96 | 97 | @property 98 | def adapter(self) -> Any: 99 | """Access the underlying adapter for advanced operations. 100 | 101 | Returns: 102 | Any: The underlying adapter 103 | """ 104 | return self._adapter 105 | 106 | def __repr__(self) -> str: 107 | """Return a string representation of the client. 
class FluidizeConfig:
    """Lightweight configuration for fluidize library.

    Handles mode switching between local and API operations,
    and manages paths and settings for both modes.
    """

    def __init__(self, mode: Literal["local", "api", "auto"] = "auto", base_path: Optional[Path] = None):
        """Initialize configuration with specified mode.

        Args:
            mode: Operation mode - "local", "api", or "auto" for environment detection
            base_path: Optional custom base path for local mode. If None, uses ~/.fluidize
        """
        self.mode = self._resolve_mode(mode)

        # Filesystem layout used when operating in local mode.
        root = Path.home() / ".fluidize" if base_path is None else base_path
        self.local_base_path = root
        self.local_projects_path = root / "projects"
        self.local_simulations_path = root / "simulations"

        # API configuration (used when mode="api").
        self.api_key = os.getenv("FLUIDIZE_API_KEY")

        # Local mode needs its directory tree to exist up front.
        if self.mode == "local":
            self._ensure_local_directories()

    def _resolve_mode(self, mode: Literal["local", "api", "auto"]) -> Literal["local", "api"]:
        """Resolve the actual mode from the given mode parameter.

        Args:
            mode: The requested mode

        Returns:
            The resolved mode (either "local" or "api")
        """
        if mode != "auto":
            return mode
        # Auto-detect from environment variable; anything other than "api"
        # (case-insensitive) falls back to local.
        env_mode = os.getenv("FLUIDIZE_MODE", "local").lower()
        return "api" if env_mode == "api" else "local"

    def _ensure_local_directories(self) -> None:
        """Ensure local directories exist for local mode operations."""
        for directory in (self.local_base_path, self.local_projects_path, self.local_simulations_path):
            directory.mkdir(parents=True, exist_ok=True)

    def is_local_mode(self) -> bool:
        """Check if running in local mode."""
        return self.mode == "local"

    def is_api_mode(self) -> bool:
        """Check if running in API mode."""
        return self.mode == "api"

    def check_docker_available(self) -> bool:
        """Check if Docker is available and running.

        Returns:
            True if Docker is available, False otherwise
        """
        # No docker executable on PATH means no Docker at all.
        docker_path = shutil.which("docker")
        if not docker_path:
            return False

        try:
            # 'docker --version' confirms the binary is installed and responsive.
            completed = subprocess.run(  # noqa: S603
                [docker_path, "--version"],
                capture_output=True,
                text=True,
                timeout=5,
                check=False,
            )
        except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
            return False
        return completed.returncode == 0

    def warn_if_docker_unavailable(self) -> None:
        """Issue a warning if Docker is not available for local runs.

        Returns:
            None
        """
        if self.check_docker_available():
            return
        warnings.warn(
            "Docker is not available. Local simulation runs will not be possible. "
            "Please install and start Docker to enable local execution.",
            UserWarning,
            stacklevel=2,
        )


# Default global config instance
config = FluidizeConfig()
33 | 34 | Args: 35 | project_id: Unique identifier for the project 36 | label: Display label for the project 37 | description: Project description 38 | location: Project location 39 | status: Project status 40 | 41 | Returns: 42 | Created project wrapped in Project class 43 | 44 | Raises: 45 | ProjectAlreadyExistsError: If a project with the same ID already exists 46 | """ 47 | # Check if project already exists 48 | try: 49 | self.get(project_id) 50 | # If we get here, project exists - raise error 51 | raise ProjectAlreadyExistsError(project_id) 52 | except FileNotFoundError: 53 | # Project doesn't exist, proceed with creation 54 | pass 55 | 56 | project_summary = self.adapter.projects.upsert( 57 | id=project_id, 58 | label=label, 59 | description=description, 60 | location=location, 61 | status=status, 62 | ) 63 | return ProjectManager(self.adapter, project_summary) 64 | 65 | # - [ ] ISSUE #1: Project not found error should be put out when invalid project is put with get 66 | def get(self, project_id: str) -> ProjectManager: 67 | """ 68 | Get a project by ID. 69 | 70 | Args: 71 | project_id: The project ID 72 | 73 | Returns: 74 | Project wrapped in Project class 75 | """ 76 | project_summary = self.adapter.projects.retrieve(project_id) 77 | return ProjectManager(self.adapter, project_summary) 78 | 79 | def list(self) -> list[ProjectManager]: 80 | """ 81 | List all projects. 82 | 83 | Returns: 84 | List of projects wrapped in Project class 85 | """ 86 | project_summaries = self.adapter.projects.list() 87 | return [ProjectManager(self.adapter, summary) for summary in project_summaries] 88 | 89 | def update( 90 | self, 91 | project_id: str, 92 | label: Optional[str] = None, 93 | description: Optional[str] = None, 94 | location: Optional[str] = None, 95 | status: Optional[str] = None, 96 | ) -> ProjectManager: 97 | """ 98 | Update an existing project. 
99 | 100 | Args: 101 | project_id: The project ID to update 102 | label: New label 103 | description: New description 104 | location: New location 105 | status: New status 106 | 107 | Returns: 108 | Updated project wrapped in Project class 109 | """ 110 | # Build update data, only include non-None values 111 | update_data = {"id": project_id} 112 | if label is not None: 113 | update_data["label"] = label 114 | if description is not None: 115 | update_data["description"] = description 116 | if location is not None: 117 | update_data["location"] = location 118 | if status is not None: 119 | update_data["status"] = status 120 | 121 | project_summary = self.adapter.projects.upsert(**update_data) 122 | return ProjectManager(self.adapter, project_summary) 123 | -------------------------------------------------------------------------------- /tests/integration/conftest.py: -------------------------------------------------------------------------------- 1 | """Shared fixtures and configuration for integration tests.""" 2 | 3 | import tempfile 4 | from collections.abc import Generator 5 | from pathlib import Path 6 | 7 | import pytest 8 | 9 | # Import local handlers to ensure they auto-register 10 | from fluidize.adapters.local.adapter import LocalAdapter 11 | from fluidize.client import FluidizeClient 12 | from fluidize.config import FluidizeConfig 13 | from fluidize.managers.registry import RegistryManager 14 | 15 | 16 | @pytest.fixture 17 | def integration_temp_dir() -> Generator[Path, None, None]: 18 | """Create a temporary directory for integration testing.""" 19 | with tempfile.TemporaryDirectory() as tmpdir: 20 | yield Path(tmpdir) 21 | 22 | 23 | @pytest.fixture 24 | def integration_config(integration_temp_dir: Path) -> FluidizeConfig: 25 | """Create a configuration for integration testing with real filesystem operations.""" 26 | config = FluidizeConfig(mode="local") 27 | config.local_base_path = integration_temp_dir 28 | config.local_projects_path = 
@pytest.fixture(autouse=True)
def setup_integration_config(integration_temp_dir: Path):
    """Set up configuration paths for integration tests."""
    # NOTE: this mutates the module-level `config` singleton that production
    # code imports, so it is autouse to guarantee every test in this suite
    # sees the temp-dir layout rather than ~/.fluidize.
    from fluidize.config import config

    # Store original values to restore later
    original_mode = config.mode
    original_base_path = config.local_base_path
    original_projects_path = config.local_projects_path
    original_simulations_path = config.local_simulations_path

    # Configure the global config instance for testing
    config.mode = "local"
    config.local_base_path = integration_temp_dir
    config.local_projects_path = integration_temp_dir / "projects"
    config.local_simulations_path = integration_temp_dir / "simulations"

    # Create directories
    config.local_projects_path.mkdir(parents=True, exist_ok=True)
    config.local_simulations_path.mkdir(parents=True, exist_ok=True)

    try:
        yield config
    finally:
        # Restore original values
        # finally guarantees restoration even if the test body raises,
        # so one failing test cannot leak temp paths into the next.
        config.mode = original_mode
        config.local_base_path = original_base_path
        config.local_projects_path = original_projects_path
        config.local_simulations_path = original_simulations_path


@pytest.fixture
def local_adapter(integration_config: FluidizeConfig) -> LocalAdapter:
    """Create a LocalAdapter for integration testing."""
    return LocalAdapter(integration_config)


@pytest.fixture
def client() -> FluidizeClient:
    """Create a full Client for end-to-end integration testing."""
    # mode="local" keeps end-to-end tests off the network/API path.
    return FluidizeClient(mode="local")


@pytest.fixture
def projects_manager(local_adapter: LocalAdapter) -> RegistryManager:
    """Create a Projects manager for integration testing."""
    return RegistryManager(local_adapter)


@pytest.fixture
def sample_projects_data() -> list[dict]:
    """Sample project data for integration testing."""
    # Includes one fully-populated, one alternate-status, and one
    # all-empty-optional-fields project to cover the common shapes.
    return [
        {
            "id": "integration-project-1",
            "label": "Integration Test Project 1",
            "description": "First project for integration testing",
            "status": "active",
            "location": "/integration/test/1",
        },
        {
            "id": "integration-project-2",
            "label": "Integration Test Project 2",
            "description": "Second project for integration testing",
            "status": "pending",
            "location": "/integration/test/2",
        },
        {
            "id": "integration-project-minimal",
            "label": "",
            "description": "",
            "status": "",
            "location": "",
        },
    ]
11 | 12 | This library marks our first step toward AI-orchestrated scientific computing. By standardizing tools and practices within our framework, AI agents can automatically build, configure, and execute computational pipelines across domains and simulation platforms. 13 | 14 | Our goal is to improve today’s simulation tools so AI can assist researchers and scientists in accelerating the pace of innovation and scientific discovery. 15 | 16 | ## Installation 17 | 18 | ### Prerequisites: 19 | 20 | - Python 3.9+ 21 | - Docker Desktop (for local execution). Download and install Docker Desktop from https://docs.docker.com/desktop/. 22 | 23 | After installation, verify with: 24 | ```bash 25 | docker --version 26 | ``` 27 | 28 | 29 | 30 | ### From PyPI 31 | ```bash 32 | pip install fluidize 33 | ``` 34 | 35 | ### From Source 36 | ```bash 37 | git clone https://github.com/Fluidize-Inc/fluidize-python.git 38 | cd fluidize-python 39 | make install 40 | ``` 41 | 42 | ## Run Examples 43 | 44 | Example projects are located in this folder: [examples/](https://github.com/Fluidize-Inc/fluidize-python/tree/main/examples). There you can find a [Jupyter Notebook](https://github.com/Fluidize-Inc/fluidize-python/blob/main/examples/demo.ipynb) of a simple simulation. 45 | 46 | ## Architecture 47 | 48 | At Fluidize, we believe strong organization leads to better reproducibility and scalability. 49 | 50 | We treat each simulation pipeline as an individual project. Within projects, each pipeline is treated as a DAG (directed acyclic graph), where nodes represent individual pieces of scientific software (e.g. inputs, solvers, visualization tools, etc.) and edges represent data flow between nodes. 51 | 52 | 53 | ### Nodes 54 | Nodes are the foundational building blocks of simulation pipelines.
Each node represents a computational unit with: 55 | 56 | | File | Purpose | 57 | |------|---------| 58 | | `properties.yaml` | Container configuration, working directory, and output paths | 59 | | `metadata.yaml` | Node description, version, authors, and repository URL | 60 | | `Dockerfile` | Environment setup and dependency installation | 61 | | `parameters.json` | Tunable parameters for experiments | 62 | | `main.sh` | Execution script for the source code | 63 | | `source/` | Original scientific computing code | 64 | 65 | **Key Features:**
66 | - Predictable input/output paths
67 | - Modular and extensible design
68 | - No source code modification required
69 | - Automated node generation support (Public launch soon) 70 | 71 | 72 | ### Projects 73 | 74 | Projects store a simple data layer for managing individual modules within a pipeline. 75 | 76 | | File | Purpose | 77 | |------|---------| 78 | | `graph.json` | Node (scientific software) and edge (data flow) definitions | 79 | | `metadata.yaml` | Project description and configuration | 80 | 81 | 82 | ### Runs 83 | 84 | Pipelines can be executed both locally and on the cloud. Local execution is handled by Docker engine. Cloud execution is routed through our API, and uses the Kubernetes engine with Argo Workflow Manager. 85 | 86 | ## Contributing 87 | 88 | We would love to collaborate with you! Please see our [Contributing Guide](https://github.com/Fluidize-Inc/fluidize-python/blob/main/CONTRIBUTING.md) for details. 89 | 90 | Also - we would love to help streamline your pipeline! Please reach out to us at [founders@fluidize.ai](mailto:founders@fluidize.ai). 91 | 92 | ## License 93 | 94 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
#!/usr/bin/env python
"""
Quick test script for the pinata simulation setup.

Validates that all required packages import, the environment is configured
for headless rendering, and MuJoCo can build and step a minimal model.
Exit code 0 means the environment is ready; 1 means at least one check failed.
"""

import os
import sys


def test_imports():
    """Test if all required packages can be imported.

    Returns:
        bool: True if every required package imports successfully.
    """
    try:
        import numpy as np

        print(f"✅ NumPy {np.__version__}")
    except ImportError as e:
        print(f"❌ NumPy import failed: {e}")
        return False

    try:
        import matplotlib

        matplotlib.use("Agg")  # Set headless backend before pyplot is imported
        import matplotlib.pyplot as plt  # noqa: F401 - imported to verify pyplot loads

        print(f"✅ Matplotlib {matplotlib.__version__}")
    except ImportError as e:
        print(f"❌ Matplotlib import failed: {e}")
        return False

    try:
        import seaborn as sns

        print(f"✅ Seaborn {sns.__version__}")
    except ImportError as e:
        print(f"❌ Seaborn import failed: {e}")
        return False

    try:
        import imageio

        print(f"✅ ImageIO {imageio.__version__}")
    except ImportError as e:
        print(f"❌ ImageIO import failed: {e}")
        return False

    try:
        import mujoco

        print(f"✅ MuJoCo {mujoco.__version__}")
    except ImportError as e:
        print(f"❌ MuJoCo import failed: {e}")
        return False

    return True


def test_environment():
    """Test environment setup (env vars and writable output directory).

    Returns:
        bool: True if the output directory can be created and written to.
    """
    print("\n🔍 Environment Check:")
    print(f"Python version: {sys.version}")
    print(f"MUJOCO_GL: {os.environ.get('MUJOCO_GL', 'not set')}")
    print(f"Output path: {os.environ.get('SIMULATION_OUTPUT_PATH', 'not set')}")

    # Test output directory creation
    output_path = os.environ.get("SIMULATION_OUTPUT_PATH", "test_outputs")
    test_file = os.path.join(output_path, "test.txt")
    try:
        os.makedirs(output_path, exist_ok=True)
        with open(test_file, "w") as f:
            f.write("Test file created successfully")
    except OSError as e:
        # Report the failure like every other check instead of crashing with a traceback.
        print(f"❌ Cannot write to output directory: {output_path} ({e})")
        return False

    if os.path.exists(test_file):
        print(f"✅ Output directory writable: {output_path}")
        os.remove(test_file)
        return True
    else:
        print(f"❌ Cannot write to output directory: {output_path}")
        return False


def test_mujoco_basic():
    """Test basic MuJoCo functionality (model compile + forward kinematics).

    Returns:
        bool: True if a minimal model compiles and mj_forward runs.
    """
    try:
        import mujoco

        # Minimal but valid MJCF model (reconstructed — the original XML was
        # lost in transit): a single sphere in the world body.
        simple_xml = """
        <mujoco>
          <worldbody>
            <body>
              <geom type="sphere" size="0.1"/>
            </body>
          </worldbody>
        </mujoco>
        """

        model = mujoco.MjModel.from_xml_string(simple_xml)
        data = mujoco.MjData(model)

        # Test forward kinematics
        mujoco.mj_forward(model, data)

        print("✅ MuJoCo basic functionality working")
        return True

    except Exception as e:
        print(f"❌ MuJoCo test failed: {e}")
        return False


def main():
    """Run all environment checks.

    Returns:
        int: Process exit code — 0 if everything passed, 1 otherwise.
    """
    print("🧪 Testing Pinata Simulation Environment")
    print("=" * 50)

    success = True

    # Test imports
    print("\n📦 Testing imports...")
    if not test_imports():
        success = False

    # Test environment
    print("\n🌍 Testing environment...")
    if not test_environment():
        success = False

    # Test MuJoCo
    print("\n🎯 Testing MuJoCo...")
    if not test_mujoco_basic():
        success = False

    print("\n" + "=" * 50)
    if success:
        print("🎉 All tests passed! Ready for pinata simulation.")
        return 0
    else:
        print("❌ Some tests failed. Check the environment.")
        return 1


if __name__ == "__main__":
    exit_code = main()
    sys.exit(exit_code)
```bash 43 | docker run -v $(pwd):/app mujoco-simulation:latest 44 | ``` 45 | 46 | The simulation will automatically: 47 | 1. Create output directories (`source/outputs/`) 48 | 2. Run the pinata simulation 49 | 3. Generate all visualization and data files 50 | 4. Print a summary of created files 51 | 52 | ### Expected Outputs 53 | ``` 54 | source/outputs/ 55 | ├── videos/ 56 | │ └── pinata_simulation_[timestamp].mp4 57 | ├── plots/ 58 | │ └── comprehensive_analysis_[timestamp].png 59 | ├── data/ 60 | │ ├── simulation_data_[timestamp].npz 61 | │ └── sensor_data_[timestamp].csv 62 | └── logs/ 63 | └── simulation_log_[timestamp].txt 64 | ``` 65 | 66 | ## Technical Details 67 | 68 | ### Model Specifications 69 | - **Physics**: RK4 integrator with 2ms timestep 70 | - **Rendering**: OpenGL with anti-aliasing and shadows 71 | - **Contact**: Realistic contact dynamics with visualization 72 | - **Materials**: Custom textures and lighting 73 | 74 | ### Data Collection 75 | - Full 6-DOF IMU data (acceleration + angular velocity) 76 | - Pinata position and velocity tracking 77 | - Bat joint angles and velocities 78 | - Tendon length measurements 79 | - Energy calculations 80 | 81 | ### Analysis Features 82 | - Time-domain analysis of all sensor channels 83 | - Frequency-domain analysis (FFT) of acceleration 84 | - Phase space plots for trajectory analysis 85 | - Energy conservation tracking 86 | - Statistical summaries 87 | 88 | ## Dependencies 89 | 90 | All dependencies are pre-installed in the Docker container: 91 | - `mujoco` - Physics simulation 92 | - `numpy` - Numerical computing 93 | - `matplotlib` - Plotting 94 | - `seaborn` - Enhanced plot styling 95 | - `imageio` - Video generation 96 | - `imageio-ffmpeg` - MP4 encoding 97 | 98 | ## Files 99 | 100 | - `pinata_simulation.py` - Main simulation script 101 | - `test_pinata.py` - Environment validation script 102 | - `main.sh` - Docker entry point 103 | - `Dockerfile` - Container specification 104 | 105 | ## Customization 
class ProjectManager:
    """
    Project manager that wraps project data and provides access to scoped managers.

    Provides convenient access to graph and runs operations for this specific project.
    """

    def __init__(self, adapter: Any, project_summary: ProjectSummary) -> None:
        """
        Args:
            adapter: adapter (FluidizeSDK or LocalAdapter)
            project_summary: The underlying project data
        """
        self._adapter = adapter
        self._project_summary = project_summary
        # Scoped managers are constructed lazily, on first property access.
        self._graph: Optional[GraphManager] = None
        self._runs: Optional[RunsManager] = None

    @property
    def graph(self) -> GraphManager:
        """Graph manager scoped to this project, created on first access."""
        if self._graph is None:
            self._graph = GraphManager(self._adapter, self._project_summary)
        return self._graph

    @property
    def runs(self) -> RunsManager:
        """Runs manager scoped to this project, created on first access."""
        if self._runs is None:
            self._runs = RunsManager(self._adapter, self._project_summary)
        return self._runs

    # --- Delegation to the underlying ProjectSummary --------------------------

    @property
    def id(self) -> str:
        """The project ID."""
        return self._project_summary.id

    @property
    def label(self) -> Optional[str]:
        """The project label."""
        return self._project_summary.label

    @property
    def description(self) -> Optional[str]:
        """The project description."""
        return self._project_summary.description

    @property
    def location(self) -> Optional[str]:
        """The project location."""
        return self._project_summary.location

    @property
    def status(self) -> Optional[str]:
        """The project status."""
        return self._project_summary.status

    @property
    def metadata_version(self) -> str:
        """The project metadata version."""
        return self._project_summary.metadata_version

    @property
    def created_at(self) -> Optional[str]:
        """The project creation timestamp, or None if the summary lacks one."""
        return getattr(self._project_summary, "created_at", None)

    @property
    def updated_at(self) -> Optional[str]:
        """The project update timestamp, or None if the summary lacks one."""
        return getattr(self._project_summary, "updated_at", None)

    def to_dict(self) -> dict[str, Any]:
        """
        Convert project to dictionary representation.

        Returns:
            Dictionary representation of the project
        """
        field_names = (
            "id",
            "label",
            "description",
            "location",
            "status",
            "metadata_version",
            "created_at",
            "updated_at",
        )
        return {name: getattr(self, name) for name in field_names}

    def __str__(self) -> str:
        label_text = self.label or "No label"
        return f"Project {self.id}: {label_text}"
class BaseExecutionManager(ABC):
    """Template-method base for running a single simulation node.

    Subclasses implement :meth:`_execute_node`; this base computes host- and
    container-side paths, validates prerequisites, and tracks run status on
    the node via ``node.edit(run_status=...)``.
    """

    def __init__(
        self, node: nodeProperties_simulation, prev_node: Optional[nodeProperties_simulation], project: ProjectSummary
    ) -> None:
        """
        Args:
            node: The simulation node to execute
            prev_node: The upstream node whose outputs feed this node, if any
            project: The project this run belongs to
        """
        self.node = node
        self.prev_node = prev_node
        self.project = project
        self.node_paths, self.container_paths = self._get_paths_for_run()

    def _get_paths_for_run(self) -> tuple[NodePaths, ContainerPaths]:
        """Get all paths required for single-container execution.

        Returns:
            Tuple of (NodePaths, ContainerPaths) holding the host-side and
            in-container paths for this node's run.
        """
        node_path = self.node.directory
        prev_node_path = self.prev_node.directory if self.prev_node else None

        container_node_path = PurePosixPath(f"/mnt/{self.node.node_id!s}")
        # container_prev_node_path = PurePosixPath(f"/mnt/{self.prev_node.node_id}") if self.prev_node else None

        # Input comes from the previous node's outputs: run path -> outputs / previous node_id
        node_input_path = (
            prev_node_path.parent / "outputs" / f"{self.prev_node.node_id!s}"
            if self.prev_node and prev_node_path
            else None
        )
        container_node_input_path = PurePosixPath("/mnt/inputs") if self.prev_node else None

        # Output path setup: host side under <run>/outputs/<node_id>,
        # container side under the node mount's source output folder.
        node_output_path = node_path.parent / "outputs" / f"{self.node.node_id!s}"
        container_node_output_path = container_node_path / self.node.source_output_folder

        node_paths = NodePaths(
            node_path=node_path,
            simulation_path=node_path / self.node.simulation_mount_path,
            input_path=node_input_path,
            output_path=node_output_path,
        )

        container_paths = ContainerPaths(
            node_path=container_node_path,
            simulation_path=container_node_path / self.node.simulation_mount_path,
            input_path=container_node_input_path,
            output_path=container_node_output_path,
        )

        return node_paths, container_paths

    def _check_main_script_exists(self) -> bool:
        """Check if the main.sh entry script exists in the node's directory."""
        main_script_path = self.node.directory / "main.sh"
        return DataLoader.check_file_exists(main_script_path)

    def _initialize_run(self) -> bool:
        """Initialize the run: verify main.sh exists and create the output directory.

        Returns:
            bool: True if the node is ready to execute, False otherwise.
        """
        # Check if main.sh exists
        if not self._check_main_script_exists():
            print(f"❌ [BaseExecutionManager] main.sh not found in {self.node.directory}")
            return False

        # Create output directory if it doesn't exist
        try:
            self.node_paths.output_path.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            # Surface the failure instead of swallowing it silently.
            print(f"❌ [BaseExecutionManager] Could not create output directory {self.node_paths.output_path}: {e!s}")
            return False

        return True

    @abstractmethod
    def _execute_node(self) -> Any:
        """Execute the node's main script. Must be implemented by subclasses.

        Returns:
            Any: the literal string "success" on success; any other value is
            treated as a failure by :meth:`execute`.
        """
        pass

    def execute(self) -> bool:
        """Execute the simulation and return True only if successful.

        Updates the node's run status (RUNNING / SUCCESS / FAILED) as a
        side effect.
        """
        if not self._initialize_run():
            self.node.edit(run_status=RunStatus.FAILED)
            return False

        # Execute the node's main script
        try:
            self.node.edit(run_status=RunStatus.RUNNING)
            result = self._execute_node()

            # _execute_node signals success with the literal string "success".
            if result == "success":
                self.node.edit(run_status=RunStatus.SUCCESS)
                return True
            else:
                print(f"❌ [BaseExecutionManager] Execution failed: {result}")
                self.node.edit(run_status=RunStatus.FAILED)
                return False

        except Exception as e:
            print(f"💥 [BaseExecutionManager] Exception during execution: {e!s}")
            self.node.edit(run_status=RunStatus.FAILED)
            return False
class TestFileMetadata:
    """Test suite for FileMetadata model."""

    @staticmethod
    def _sample_kwargs() -> dict:
        """Canonical keyword arguments for a fully-populated FileMetadata."""
        return {
            "path": "/path/to/file",
            "filename": "test.py",
            "size": 1024,
            "mime_type": "text/x-python",
            "language": "python",
        }

    def test_file_metadata_creation(self):
        """Test FileMetadata creation with all fields."""
        kwargs = self._sample_kwargs()
        metadata = FileMetadata(**kwargs)

        for field, expected in kwargs.items():
            assert getattr(metadata, field) == expected

    def test_file_metadata_validation(self):
        """Test FileMetadata field validation."""
        # Each construction below omits required fields and must be rejected.
        with pytest.raises(ValidationError):
            FileMetadata()

        with pytest.raises(ValidationError):
            FileMetadata(path="/path/to/file")

        with pytest.raises(ValidationError):
            # Missing size, mime_type, language
            FileMetadata(path="/path/to/file", filename="test.py")

    def test_file_metadata_serialization(self):
        """Test FileMetadata serialization to dict."""
        metadata = FileMetadata(**self._sample_kwargs())
        assert metadata.model_dump() == self._sample_kwargs()

    def test_file_metadata_from_dict(self):
        """Test FileMetadata creation from dictionary."""
        data = self._sample_kwargs()
        metadata = FileMetadata(**data)

        for field, expected in data.items():
            assert getattr(metadata, field) == expected

    def test_file_metadata_json_serialization(self):
        """Test FileMetadata JSON serialization."""
        metadata = FileMetadata(**self._sample_kwargs())
        json_str = metadata.model_dump_json()

        # Should be valid JSON containing all fields (pydantic v2 compact form).
        expected_fragments = (
            '"path":"/path/to/file"',
            '"filename":"test.py"',
            '"size":1024',
            '"mime_type":"text/x-python"',
            '"language":"python"',
        )
        for fragment in expected_fragments:
            assert fragment in json_str

    def test_file_metadata_edge_cases(self):
        """Test FileMetadata with edge cases."""
        # Empty strings and zero size are valid, if unusual.
        empty = FileMetadata(path="", filename="", size=0, mime_type="", language="")
        assert empty.path == ""
        assert empty.filename == ""
        assert empty.size == 0
        assert empty.mime_type == ""
        assert empty.language == ""

        # Very long paths and large sizes round-trip unchanged.
        long_path = "/very/long/path/" + "a" * 1000
        big = FileMetadata(
            path=long_path, filename="test.py", size=999999999, mime_type="text/x-python", language="python"
        )
        assert big.path == long_path
        assert big.size == 999999999

    def test_file_metadata_type_validation(self):
        """Test FileMetadata type validation."""
        # A non-numeric string for size must be rejected.
        with pytest.raises(ValidationError):
            FileMetadata(
                path="/path/to/file",
                filename="test.py",
                size="invalid",  # Should be int
                mime_type="text/x-python",
                language="python",
            )

        # Negative size is accepted (the model places no lower bound on it).
        negative = FileMetadata(
            path="/path/to/file", filename="test.py", size=-1, mime_type="text/x-python", language="python"
        )
        assert negative.size == -1
class BaseDataWriter(ABC):
    """
    Base class for all data writers. Implements higher-level operations using primitive
    methods that must be implemented by subclasses.
    """

    @staticmethod
    def _as_upath(filepath: Union[str, UPath]) -> UPath:
        """Coerce a str or UPath into a UPath, leaving an existing UPath untouched."""
        return filepath if isinstance(filepath, UPath) else UPath(filepath)

    def write_json_for_project(self, project: ProjectSummary, suffix: str, data: dict) -> None:
        """
        Writes the given data to JSON for the given project.

        Args:
            project: Project whose directory the file is written under
            suffix: Path of the file relative to the project directory
            data: JSON-serializable data to write
        """
        project_path = PathFinder.get_project_path(project)
        # build file path using UPath
        file_path: UPath = project_path / suffix
        self._ensure_directory_exists(file_path.parent)
        self._write_json_file(file_path, data)

    def write_json(self, filepath: Union[str, UPath], data: dict) -> None:
        """
        Writes JSON data to the specified file path, creating parent directories.

        Args:
            filepath: Path to the JSON file
            data: JSON-serializable data to write
        """
        path = self._as_upath(filepath)
        self._ensure_directory_exists(path.parent)
        self._write_json_file(path, data)

    def write_yaml(self, filepath: Union[str, UPath], data: dict) -> None:
        """
        Writes YAML data to the specified file path, creating parent directories.

        Args:
            filepath: Path to the YAML file
            data: Data to write in YAML format

        Returns:
            None
        """
        path = self._as_upath(filepath)
        self._ensure_directory_exists(path.parent)
        self._write_yaml(path, data)

    def write_text(self, filepath: Union[str, UPath], data: str) -> None:
        """
        Writes text data to the specified file path, creating parent directories.

        Args:
            filepath: Path to the text file
            data: Text data to write

        Returns:
            None
        """
        path = self._as_upath(filepath)
        self._ensure_directory_exists(path.parent)
        self._write_text_file(path, data)

    def create_directory(self, directory_path: UPath) -> bool:
        """
        Creates a directory along with any necessary parent directories.

        Args:
            directory_path: Path to the directory to create

        Returns:
            bool: True if successful, False otherwise
        """
        return self._ensure_directory_exists(directory_path)

    # TODO
    # def save_simulation(self, simulation, sim_global) -> dict:
    #     # Get simulation path
    #     sim_path = PathFinder.get_simulations_path(sim_global) / simulation.name

    #     # Create simulation directory
    #     self._ensure_directory_exists(sim_path)

    #     # Generate all node files
    #     generator = GenerateNode()
    #     file_definitions = generator.generate_node_files(simulation)

    #     # Write all files using the appropriate writer method
    #     for file_def in file_definitions:
    #         file_path = sim_path / file_def['filename']
    #         if not self._write_text_file(file_path, file_def['content']):
    #             raise Exception(f"Failed to write file: {file_path}")

    #     return {
    #         "status": "success",
    #         "simulation_name": simulation.name,
    #         "files_written": [f['filename'] for f in file_definitions]
    #     }

    # Abstract primitive operations that must be implemented by subclasses
    @abstractmethod
    def _ensure_directory_exists(self, dir_path: UPath) -> bool:
        """
        Ensures that the specified directory exists.
        Creates it if it doesn't exist.

        Returns:
            bool: True if successful, False otherwise
        """
        pass

    @abstractmethod
    def _write_json_file(self, file_path: UPath, data: dict) -> bool:
        """
        Writes JSON data to the specified file path.

        Returns:
            bool: True if successful, False otherwise
        """
        pass

    @abstractmethod
    def _write_text_file(self, file_path: UPath, data: str) -> bool:
        """
        Writes text data to the specified file path.

        Returns:
            bool: True if successful, False otherwise
        """
        pass

    @abstractmethod
    def _write_yaml(self, file_path: UPath, data: dict) -> bool:
        """
        Writes YAML data to the specified file path.

        Returns:
            bool: True if successful, False otherwise
        """
        pass