├── README.md ├── implementation_plan.md ├── old ├── download_sam2_checkpoint.py ├── src │ ├── ai │ │ └── sam_segmentation.py │ ├── models │ │ └── threestudio_generator.py │ └── workflow │ │ └── image_to_model_pipeline.py └── test_sam2_segmentation.py ├── requirements.txt ├── rtfmd ├── README.md ├── decisions │ ├── ai-driven-code-generation.md │ └── export-formats.md ├── files │ └── src │ │ ├── ai │ │ └── ai_service.py.md │ │ ├── main.py.md │ │ ├── models │ │ └── code_generator.py.md │ │ └── nlp │ │ └── parameter_extractor.py.md └── knowledge │ ├── ai │ └── natural-language-processing.md │ ├── nlp │ └── parameter-extraction.md │ └── openscad │ ├── export-formats.md │ ├── openscad-basics.md │ └── primitive-testing.md ├── scad └── simple_cube.scad ├── src ├── __init__.py ├── __pycache__ │ └── __init__.cpython-312.pyc ├── ai │ ├── ai_service.py │ ├── gemini_api.py │ └── venice_api.py ├── config.py ├── main.py ├── main.py.new ├── main_remote.py ├── models │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-312.pyc │ │ └── code_generator.cpython-312.pyc │ ├── code_generator.py │ ├── cuda_mvs.py │ └── scad_templates │ │ └── basic_shapes.scad ├── nlp │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-312.pyc │ │ └── parameter_extractor.cpython-312.pyc │ └── parameter_extractor.py ├── openscad_wrapper │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-312.pyc │ │ └── wrapper.cpython-312.pyc │ └── wrapper.py ├── printer_discovery │ ├── __init__.py │ └── printer_discovery.py ├── remote │ ├── connection_manager.py │ ├── cuda_mvs_client.py │ ├── cuda_mvs_server.py │ └── error_handling.py ├── testing │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-312.pyc │ │ ├── primitive_tester.cpython-312.pyc │ │ └── test_primitives.cpython-312.pyc │ ├── primitive_tester.py │ └── test_primitives.py ├── utils │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-312.pyc │ │ ├── stl_exporter.cpython-312.pyc │ │ └── stl_validator.cpython-312.pyc │ ├── cad_exporter.py │ ├── format_validator.py │ ├── stl_exporter.py │ ├── stl_repair.py │ └── stl_validator.py ├── visualization │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-312.pyc │ │ └── renderer.cpython-312.pyc │ ├── headless_renderer.py │ ├── renderer.py │ └── web_interface.py └── workflow │ ├── image_approval.py │ └── multi_view_to_model_pipeline.py ├── test_complete_workflow.py ├── test_cuda_mvs.py ├── test_gemini_api.py ├── test_image_approval.py ├── test_image_approval_workflow.py ├── test_image_to_model_pipeline.py ├── test_model_selection.py ├── test_multi_view_pipeline.py ├── test_primitives.sh ├── test_rabbit_direct.py ├── test_remote_cuda_mvs.py └── test_venice_example.py /implementation_plan.md: -------------------------------------------------------------------------------- 1 | # Implementation Plan: OpenSCAD-MCP-Server with AI-Driven 3D Modeling 2 | 3 | ## 1. 
Project Structure Updates 4 | 5 | ### 1.1 New Modules 6 | ``` 7 | src/ 8 | ├── ai/ 9 | │ ├── venice_api.py # Venice.ai API client 10 | │ └── sam_segmentation.py # SAM2 integration 11 | ├── models/ 12 | │ └── threestudio_generator.py # threestudio integration 13 | └── workflow/ 14 | └── image_to_model_pipeline.py # Workflow orchestration 15 | ``` 16 | 17 | ### 1.2 Dependencies 18 | Add to requirements.txt: 19 | ``` 20 | # Image Generation - Venice.ai API 21 | # (using existing requests and python-dotenv) 22 | 23 | # Object Segmentation - SAM2 24 | torch>=2.0.0 25 | torchvision>=0.15.0 26 | opencv-python>=4.7.0 27 | segment-anything>=1.0 28 | 29 | # 3D Model Creation - threestudio 30 | ninja>=1.11.0 31 | pytorch3d>=0.7.4 32 | trimesh>=3.21.0 33 | ``` 34 | 35 | ## 2. Component Implementation 36 | 37 | ### 2.1 Venice.ai API Integration 38 | - Create `VeniceImageGenerator` class in `venice_api.py` 39 | - Implement authentication with API key 40 | - Add image generation with Flux model 41 | - Support image downloading and storage 42 | 43 | ### 2.2 SAM2 Integration 44 | - Create `SAMSegmenter` class in `sam_segmentation.py` 45 | - Implement model loading with PyTorch 46 | - Add object segmentation from images 47 | - Support mask generation and visualization 48 | 49 | ### 2.3 threestudio Integration 50 | - Create `ThreeStudioGenerator` class in `threestudio_generator.py` 51 | - Implement 3D model generation from masked images 52 | - Support model export in formats compatible with OpenSCAD 53 | - Add preview image generation 54 | 55 | ### 2.4 OpenSCAD Integration 56 | - Extend `OpenSCADWrapper` with methods to: 57 | - Import 3D models from threestudio 58 | - Generate parametric modifications 59 | - Create multi-angle previews 60 | - Export in various formats 61 | 62 | ### 2.5 Workflow Orchestration 63 | - Create `ImageToModelPipeline` class to coordinate the workflow: 64 | 1. Generate image with Venice.ai API 65 | 2. Segment object with SAM2 66 | 3. Create 3D model with threestudio 67 | 4. Import into OpenSCAD for parametric editing 68 | 69 | ## 3. MCP Tool Integration 70 | 71 | Add new MCP tools to main.py: 72 | - `generate_image_from_text`: Generate images using Venice.ai 73 | - `segment_object_from_image`: Segment objects using SAM2 74 | - `generate_3d_model_from_image`: Create 3D models using threestudio 75 | - `generate_model_from_text`: End-to-end pipeline from text to 3D model 76 | 77 | ## 4. Hardware Requirements 78 | 79 | - SAM2: NVIDIA GPU with 6GB+ VRAM 80 | - threestudio: NVIDIA GPU with 6GB+ VRAM 81 | - Consider implementing fallback options for environments with limited GPU resources 82 | 83 | ## 5. 
Implementation Phases 84 | 85 | ### Phase 1: Basic Integration 86 | - Implement Venice.ai API client 87 | - Set up SAM2 with basic segmentation 88 | - Create threestudio wrapper with minimal functionality 89 | - Extend OpenSCAD wrapper for model import 90 | 91 | ### Phase 2: Workflow Orchestration 92 | - Implement the full pipeline 93 | - Add MCP tools for each component 94 | - Create end-to-end workflow tool 95 | 96 | ### Phase 3: Optimization and Refinement 97 | - Optimize for performance 98 | - Add error handling and recovery 99 | - Implement corrective cycle for mesh modification 100 | - Add user interface improvements 101 | -------------------------------------------------------------------------------- /old/download_sam2_checkpoint.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import logging 4 | import requests 5 | import argparse 6 | from pathlib import Path 7 | from tqdm import tqdm 8 | 9 | # Configure logging 10 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 11 | logger = logging.getLogger(__name__) 12 | 13 | # SAM2 checkpoint URLs 14 | CHECKPOINT_URLS = { 15 | "vit_h": "https://dl.fbaipublicfiles.com/segment_anything_2/sam2_vit_h.pth", 16 | "vit_l": "https://dl.fbaipublicfiles.com/segment_anything_2/sam2_vit_l.pth", 17 | "vit_b": "https://dl.fbaipublicfiles.com/segment_anything_2/sam2_vit_b.pth" 18 | } 19 | 20 | # Checkpoint sizes (approximate, in MB) 21 | CHECKPOINT_SIZES = { 22 | "vit_h": 2560, # 2.5 GB 23 | "vit_l": 1250, # 1.2 GB 24 | "vit_b": 380 # 380 MB 25 | } 26 | 27 | def download_checkpoint(model_type="vit_b", output_dir="models"): 28 | """ 29 | Download SAM2 checkpoint. 30 | 31 | Args: 32 | model_type: Model type to download (vit_h, vit_l, vit_b) 33 | output_dir: Directory to save the checkpoint 34 | 35 | Returns: 36 | Path to the downloaded checkpoint 37 | """ 38 | if model_type not in CHECKPOINT_URLS: 39 | raise ValueError(f"Invalid model type: {model_type}. 
Available types: {list(CHECKPOINT_URLS.keys())}") 40 | 41 | url = CHECKPOINT_URLS[model_type] 42 | output_path = os.path.join(output_dir, f"sam2_{model_type}.pth") 43 | 44 | # Create output directory if it doesn't exist 45 | os.makedirs(output_dir, exist_ok=True) 46 | 47 | # Check if checkpoint already exists 48 | if os.path.exists(output_path): 49 | logger.info(f"Checkpoint already exists at {output_path}") 50 | return output_path 51 | 52 | # Download checkpoint 53 | logger.info(f"Downloading SAM2 checkpoint ({model_type}) from {url}") 54 | logger.info(f"Approximate size: {CHECKPOINT_SIZES[model_type]} MB") 55 | 56 | try: 57 | # Stream download with progress bar 58 | response = requests.get(url, stream=True) 59 | response.raise_for_status() 60 | 61 | # Get total file size 62 | total_size = int(response.headers.get('content-length', 0)) 63 | 64 | # Create progress bar 65 | with open(output_path, 'wb') as f, tqdm( 66 | desc=f"Downloading {model_type}", 67 | total=total_size, 68 | unit='B', 69 | unit_scale=True, 70 | unit_divisor=1024, 71 | ) as pbar: 72 | for chunk in response.iter_content(chunk_size=8192): 73 | if chunk: 74 | f.write(chunk) 75 | pbar.update(len(chunk)) 76 | 77 | logger.info(f"Checkpoint downloaded to {output_path}") 78 | return output_path 79 | 80 | except requests.exceptions.RequestException as e: 81 | logger.error(f"Error downloading checkpoint: {str(e)}") 82 | # Remove partial download if it exists 83 | if os.path.exists(output_path): 84 | os.remove(output_path) 85 | raise 86 | except KeyboardInterrupt: 87 | logger.info("Download interrupted by user") 88 | # Remove partial download if it exists 89 | if os.path.exists(output_path): 90 | os.remove(output_path) 91 | sys.exit(1) 92 | 93 | def main(): 94 | """Main function to parse arguments and download checkpoint.""" 95 | parser = argparse.ArgumentParser(description="Download SAM2 checkpoint") 96 | parser.add_argument("--model_type", type=str, default="vit_b", choices=list(CHECKPOINT_URLS.keys()), 97 | help="Model type to download (vit_h, vit_l, vit_b). Default: vit_b (smallest)") 98 | parser.add_argument("--output_dir", type=str, default="models", 99 | help="Directory to save the checkpoint") 100 | 101 | args = parser.parse_args() 102 | 103 | # Print model information 104 | logger.info(f"Selected model: {args.model_type}") 105 | logger.info(f"Approximate sizes: vit_h: 2.5 GB, vit_l: 1.2 GB, vit_b: 380 MB") 106 | 107 | try: 108 | checkpoint_path = download_checkpoint(args.model_type, args.output_dir) 109 | logger.info(f"Checkpoint ready at: {checkpoint_path}") 110 | except Exception as e: 111 | logger.error(f"Failed to download checkpoint: {str(e)}") 112 | sys.exit(1) 113 | 114 | if __name__ == "__main__": 115 | main() 116 | -------------------------------------------------------------------------------- /old/src/ai/sam_segmentation.py: -------------------------------------------------------------------------------- 1 | """ 2 | SAM2 (Segment Anything Model 2) integration for object segmentation. 3 | """ 4 | 5 | import os 6 | import cv2 7 | import numpy as np 8 | import logging 9 | from typing import Dict, Any, List, Tuple, Optional 10 | from pathlib import Path 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | class SAMSegmenter: 15 | """ 16 | Wrapper for Segment Anything Model 2 (SAM2) for object segmentation. 
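
    Example (illustrative only; assumes the segment-anything-2 package is installed and a
    checkpoint has been downloaded with download_sam2_checkpoint.py; the image path is a
    placeholder):

        segmenter = SAMSegmenter(
            model_type="vit_b",
            checkpoint_path="models/sam2_vit_b.pth",
            use_gpu=True,
        )
        result = segmenter.segment_image("input.png", points=[(512, 512)])
        print(result["mask_paths"], result["scores"])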
17 | """ 18 | 19 | def __init__(self, model_type: str = "vit_h", checkpoint_path: Optional[str] = None, 20 | use_gpu: bool = True, output_dir: str = "output/masks"): 21 | """ 22 | Initialize the SAM2 segmenter. 23 | 24 | Args: 25 | model_type: SAM2 model type ("vit_h", "vit_l", "vit_b") 26 | checkpoint_path: Path to model checkpoint 27 | use_gpu: Whether to use GPU for inference 28 | output_dir: Directory to store segmentation results 29 | """ 30 | self.model_type = model_type 31 | self.checkpoint_path = checkpoint_path 32 | self.use_gpu = use_gpu 33 | self.output_dir = output_dir 34 | 35 | # Create output directory if it doesn't exist 36 | os.makedirs(output_dir, exist_ok=True) 37 | 38 | # Model will be initialized on first use to avoid loading it unnecessarily 39 | self.model = None 40 | self.predictor = None 41 | 42 | def _initialize_model(self) -> None: 43 | """ 44 | Initialize the SAM2 model. 45 | 46 | Note: This requires PyTorch and the segment-anything-2 package to be installed. 47 | """ 48 | try: 49 | # Import here to avoid dependency issues if SAM2 is not installed 50 | import torch 51 | from segment_anything_2 import sam_model_registry, SamPredictor 52 | 53 | if not self.checkpoint_path: 54 | raise ValueError("SAM2 checkpoint path is required") 55 | 56 | # Check if checkpoint exists 57 | if not os.path.exists(self.checkpoint_path): 58 | raise FileNotFoundError(f"SAM2 checkpoint not found at {self.checkpoint_path}") 59 | 60 | # Determine device 61 | device = "cuda" if self.use_gpu and torch.cuda.is_available() else "cpu" 62 | 63 | # Load SAM2 model 64 | self.model = sam_model_registry[self.model_type](checkpoint=self.checkpoint_path) 65 | self.model.to(device=device) 66 | self.predictor = SamPredictor(self.model) 67 | 68 | logger.info(f"Initialized SAM2 model ({self.model_type}) on {device}") 69 | except ImportError as e: 70 | logger.error(f"Required packages not installed: {str(e)}") 71 | raise 72 | except Exception as e: 73 | logger.error(f"Error initializing SAM2 model: {str(e)}") 74 | raise 75 | 76 | def segment_image(self, image_path: str, points: Optional[List[Tuple[int, int]]] = None, 77 | output_dir: Optional[str] = None) -> Dict[str, Any]: 78 | """ 79 | Segment objects in an image using SAM2. 
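
        Example (illustrative; the image path and point coordinates are placeholders):

            result = segmenter.segment_image("photo.png", points=[(120, 200), (180, 240)])
            best_idx = result["scores"].index(max(result["scores"]))
            best_mask_path = result["mask_paths"][best_idx]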
 80 | 
 81 |         Args:
 82 |             image_path: Path to input image
 83 |             points: Optional list of (x, y) points to guide segmentation
 84 |             output_dir: Optional directory to save segmentation results
 85 | 
 86 |         Returns:
 87 |             Dictionary containing segmentation masks and metadata
 88 |         """
 89 |         # Initialize model if not already initialized
 90 |         if self.model is None:
 91 |             self._initialize_model()
 92 | 
 93 |         try:
 94 |             # Load image
 95 |             image = cv2.imread(image_path)
 96 |             if image is None:
 97 |                 raise ValueError(f"Could not load image from {image_path}")
 98 | 
 99 |             image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
100 | 
101 |             # Set image in predictor
102 |             self.predictor.set_image(image)
103 | 
104 |             # Generate masks
105 |             if points:
106 |                 # Convert points to numpy arrays
107 |                 import numpy as np
108 |                 point_coords = np.array(points)
109 |                 point_labels = np.ones(len(points))
110 | 
111 |                 # Generate masks from points
112 |                 masks, scores, logits = self.predictor.predict(
113 |                     point_coords=point_coords,
114 |                     point_labels=point_labels,
115 |                     multimask_output=True
116 |                 )
117 |             else:
118 |                 # Automatic segmentation (using center point)
119 |                 h, w = image.shape[:2]
120 |                 center_point = np.array([[w//2, h//2]])
121 |                 center_label = np.array([1])
122 | 
123 |                 masks, scores, logits = self.predictor.predict(
124 |                     point_coords=center_point,
125 |                     point_labels=center_label,
126 |                     multimask_output=True
127 |                 )
128 | 
129 |             # Use provided output directory or default
130 |             output_dir = output_dir or os.path.join(self.output_dir, Path(image_path).stem)
131 |             os.makedirs(output_dir, exist_ok=True)
132 | 
133 |             # Process results
134 |             masked_images = []
135 |             for i, mask in enumerate(masks):
136 |                 # Apply mask to image
137 |                 masked_image = self._apply_mask_to_image(image, mask)
138 | 
139 |                 # Save masked image
140 |                 output_path = os.path.join(output_dir, f"mask_{i}.png")
141 |                 cv2.imwrite(output_path, cv2.cvtColor(masked_image, cv2.COLOR_RGB2BGR))
142 | 
143 |                 masked_images.append(output_path)
144 | 
145 |             # Convert numpy arrays to lists; use the key names consumed by the pipeline and tests
146 |             result = {
147 |                 "image_path": image_path,
148 |                 "mask_paths": masked_images,
149 |                 "scores": scores.tolist(),
150 |                 "num_masks": len(masks)
151 |             }
152 | 
153 |             return result
154 |         except Exception as e:
155 |             logger.error(f"Error segmenting image: {str(e)}")
156 |             raise
157 | 
158 |     def _apply_mask_to_image(self, image: np.ndarray, mask: np.ndarray) -> np.ndarray:
159 |         """
160 |         Apply mask to image, keeping only the masked region.
161 | 
162 |         Args:
163 |             image: Input image as numpy array
164 |             mask: Binary mask as numpy array
165 | 
166 |         Returns:
167 |             Masked image as numpy array
168 |         """
169 |         # Create a copy of the image
170 |         masked_image = image.copy()
171 | 
172 |         # Apply mask
173 |         masked_image[~mask] = [0, 0, 0]  # Set background to black
174 | 
175 |         return masked_image
176 | 
177 |     def segment_with_auto_points(self, image_path: str, num_points: int = 5,
178 |                                 output_dir: Optional[str] = None) -> Dict[str, Any]:
179 |         """
180 |         Segment image using automatically generated points with SAM2.
181 | 182 | Args: 183 | image_path: Path to input image 184 | num_points: Number of points to generate 185 | output_dir: Optional directory to save segmentation results 186 | 187 | Returns: 188 | Dictionary containing segmentation masks and metadata 189 | """ 190 | # Load image 191 | image = cv2.imread(image_path) 192 | if image is None: 193 | raise ValueError(f"Could not load image from {image_path}") 194 | 195 | h, w = image.shape[:2] 196 | 197 | # Generate points in a grid pattern 198 | points = [] 199 | rows = int(np.sqrt(num_points)) 200 | cols = num_points // rows 201 | 202 | for i in range(rows): 203 | for j in range(cols): 204 | x = int(w * (j + 0.5) / cols) 205 | y = int(h * (i + 0.5) / rows) 206 | points.append((x, y)) 207 | 208 | # Segment with generated points 209 | return self.segment_image(image_path, points, output_dir) 210 | -------------------------------------------------------------------------------- /old/src/models/threestudio_generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | threestudio integration for 3D model generation from images. 3 | """ 4 | 5 | import os 6 | import subprocess 7 | import logging 8 | import json 9 | import tempfile 10 | from typing import Dict, Any, List, Optional 11 | from pathlib import Path 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | class ThreeStudioGenerator: 16 | """ 17 | Wrapper for threestudio for 3D model generation from images. 18 | """ 19 | 20 | def __init__(self, threestudio_path: str, output_dir: str = "output/models"): 21 | """ 22 | Initialize the threestudio generator. 23 | 24 | Args: 25 | threestudio_path: Path to threestudio installation 26 | output_dir: Directory to store output files 27 | """ 28 | self.threestudio_path = threestudio_path 29 | self.output_dir = output_dir 30 | 31 | # Create output directory if it doesn't exist 32 | os.makedirs(output_dir, exist_ok=True) 33 | 34 | # Validate threestudio installation 35 | self._validate_installation() 36 | 37 | def _validate_installation(self) -> None: 38 | """ 39 | Validate threestudio installation. 40 | 41 | Raises: 42 | FileNotFoundError: If threestudio installation is not found 43 | """ 44 | if not os.path.exists(self.threestudio_path): 45 | raise FileNotFoundError(f"threestudio not found at {self.threestudio_path}") 46 | 47 | # Check for required files 48 | required_files = ["launch.py", "README.md"] 49 | for file in required_files: 50 | if not os.path.exists(os.path.join(self.threestudio_path, file)): 51 | raise FileNotFoundError(f"Required file {file} not found in threestudio directory") 52 | 53 | def generate_model_from_image(self, image_path: str, method: str = "zero123", 54 | num_iterations: int = 5000, export_format: str = "obj", 55 | config_overrides: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 56 | """ 57 | Generate a 3D model from an image using threestudio. 58 | 59 | Args: 60 | image_path: Path to input image 61 | method: Method to use ("zero123", "sjc", "magic3d", etc.) 
62 | num_iterations: Number of training iterations 63 | export_format: Format to export ("obj", "glb", "ply") 64 | config_overrides: Optional configuration overrides 65 | 66 | Returns: 67 | Dictionary containing paths to generated model files 68 | """ 69 | try: 70 | # Create a unique ID for this generation 71 | model_id = Path(image_path).stem 72 | 73 | # Create a temporary config file 74 | config_file = self._create_config_file(image_path, method, num_iterations, config_overrides) 75 | 76 | # Run threestudio 77 | output_dir = os.path.join(self.output_dir, model_id) 78 | os.makedirs(output_dir, exist_ok=True) 79 | 80 | cmd = [ 81 | "python", "launch.py", 82 | "--config", config_file, 83 | "--train", 84 | "--gpu", "0", 85 | "--output_dir", output_dir 86 | ] 87 | 88 | logger.info(f"Running threestudio with command: {' '.join(cmd)}") 89 | 90 | # Execute in threestudio directory 91 | process = subprocess.Popen( 92 | cmd, 93 | cwd=self.threestudio_path, 94 | stdout=subprocess.PIPE, 95 | stderr=subprocess.PIPE, 96 | text=True 97 | ) 98 | 99 | # Wait for process to complete 100 | stdout, stderr = process.communicate() 101 | 102 | if process.returncode != 0: 103 | logger.error(f"Error running threestudio: {stderr}") 104 | raise RuntimeError(f"threestudio failed with exit code {process.returncode}") 105 | 106 | # Export model 107 | exported_files = self._export_model(output_dir, export_format) 108 | 109 | return { 110 | "model_id": model_id, 111 | "output_dir": output_dir, 112 | "exported_files": exported_files, 113 | "preview_images": self._get_preview_images(output_dir) 114 | } 115 | except Exception as e: 116 | logger.error(f"Error generating 3D model with threestudio: {str(e)}") 117 | raise 118 | 119 | def _create_config_file(self, image_path: str, method: str, num_iterations: int, 120 | config_overrides: Optional[Dict[str, Any]] = None) -> str: 121 | """ 122 | Create a configuration file for threestudio. 123 | 124 | Args: 125 | image_path: Path to input image 126 | method: Method to use 127 | num_iterations: Number of training iterations 128 | config_overrides: Optional configuration overrides 129 | 130 | Returns: 131 | Path to the created configuration file 132 | """ 133 | # Base configuration 134 | config = { 135 | "method": method, 136 | "image_path": os.path.abspath(image_path), 137 | "num_iterations": num_iterations, 138 | "save_interval": 1000, 139 | "export_interval": 1000 140 | } 141 | 142 | # Apply overrides 143 | if config_overrides: 144 | config.update(config_overrides) 145 | 146 | # Write to temporary file 147 | fd, config_file = tempfile.mkstemp(suffix=".json") 148 | with os.fdopen(fd, 'w') as f: 149 | json.dump(config, f, indent=2) 150 | 151 | return config_file 152 | 153 | def _export_model(self, output_dir: str, export_format: str) -> List[str]: 154 | """ 155 | Export the model in the specified format. 
156 | 157 | Args: 158 | output_dir: Directory containing the model 159 | export_format: Format to export 160 | 161 | Returns: 162 | List of paths to exported files 163 | """ 164 | # Find the latest checkpoint 165 | checkpoints_dir = os.path.join(output_dir, "checkpoints") 166 | if not os.path.exists(checkpoints_dir): 167 | raise FileNotFoundError(f"Checkpoints directory not found: {checkpoints_dir}") 168 | 169 | # Get the latest checkpoint 170 | checkpoints = sorted([f for f in os.listdir(checkpoints_dir) if f.endswith(".ckpt")]) 171 | if not checkpoints: 172 | raise FileNotFoundError("No checkpoints found") 173 | 174 | latest_checkpoint = os.path.join(checkpoints_dir, checkpoints[-1]) 175 | 176 | # Export command 177 | cmd = [ 178 | "python", "launch.py", 179 | "--config", os.path.join(output_dir, "config.yaml"), 180 | "--export", 181 | "--gpu", "0", 182 | "--checkpoint", latest_checkpoint, 183 | "--export_format", export_format 184 | ] 185 | 186 | logger.info(f"Exporting model with command: {' '.join(cmd)}") 187 | 188 | # Execute in threestudio directory 189 | process = subprocess.Popen( 190 | cmd, 191 | cwd=self.threestudio_path, 192 | stdout=subprocess.PIPE, 193 | stderr=subprocess.PIPE, 194 | text=True 195 | ) 196 | 197 | # Wait for process to complete 198 | stdout, stderr = process.communicate() 199 | 200 | if process.returncode != 0: 201 | logger.error(f"Error exporting model: {stderr}") 202 | raise RuntimeError(f"Model export failed with exit code {process.returncode}") 203 | 204 | # Find exported files 205 | exports_dir = os.path.join(output_dir, "exports") 206 | if not os.path.exists(exports_dir): 207 | raise FileNotFoundError(f"Exports directory not found: {exports_dir}") 208 | 209 | exported_files = [os.path.join(exports_dir, f) for f in os.listdir(exports_dir)] 210 | 211 | return exported_files 212 | 213 | def _get_preview_images(self, output_dir: str) -> List[str]: 214 | """ 215 | Get paths to preview images. 216 | 217 | Args: 218 | output_dir: Directory containing the model 219 | 220 | Returns: 221 | List of paths to preview images 222 | """ 223 | # Find preview images 224 | previews_dir = os.path.join(output_dir, "images") 225 | if not os.path.exists(previews_dir): 226 | return [] 227 | 228 | preview_images = [os.path.join(previews_dir, f) for f in os.listdir(previews_dir) 229 | if f.endswith(".png") or f.endswith(".jpg")] 230 | 231 | return sorted(preview_images) 232 | -------------------------------------------------------------------------------- /old/src/workflow/image_to_model_pipeline.py: -------------------------------------------------------------------------------- 1 | """ 2 | Workflow orchestration for the image-to-model pipeline. 3 | """ 4 | 5 | import os 6 | import logging 7 | import uuid 8 | from typing import Dict, Any, List, Optional, Tuple 9 | from pathlib import Path 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | class ImageToModelPipeline: 14 | """ 15 | Orchestrates the workflow from text prompt to 3D model: 16 | 1. Generate image with Venice.ai 17 | 2. Segment object with SAM2 18 | 3. Create 3D model with threestudio 19 | 4. Convert to OpenSCAD for parametric editing 20 | """ 21 | 22 | def __init__(self, 23 | venice_generator, 24 | sam_segmenter, 25 | threestudio_generator, 26 | openscad_wrapper, 27 | output_dir: str = "output/pipeline"): 28 | """ 29 | Initialize the pipeline. 
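
        Example (a sketch only; constructor arguments for the collaborators are elided here
        and depend on their own configuration):

            pipeline = ImageToModelPipeline(
                venice_generator=VeniceImageGenerator(...),
                sam_segmenter=SAMSegmenter(...),
                threestudio_generator=ThreeStudioGenerator(...),
                openscad_wrapper=OpenSCADWrapper(...),
            )
            result = pipeline.generate_model_from_text("a parametric gear")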
30 | 31 | Args: 32 | venice_generator: Instance of VeniceImageGenerator 33 | sam_segmenter: Instance of SAMSegmenter 34 | threestudio_generator: Instance of ThreeStudioGenerator 35 | openscad_wrapper: Instance of OpenSCADWrapper 36 | output_dir: Directory to store output files 37 | """ 38 | self.venice_generator = venice_generator 39 | self.sam_segmenter = sam_segmenter 40 | self.threestudio_generator = threestudio_generator 41 | self.openscad_wrapper = openscad_wrapper 42 | self.output_dir = output_dir 43 | 44 | # Create output directories 45 | os.makedirs(os.path.join(output_dir, "images"), exist_ok=True) 46 | os.makedirs(os.path.join(output_dir, "masks"), exist_ok=True) 47 | os.makedirs(os.path.join(output_dir, "models"), exist_ok=True) 48 | os.makedirs(os.path.join(output_dir, "scad"), exist_ok=True) 49 | 50 | def generate_model_from_text(self, prompt: str, 51 | venice_params: Optional[Dict[str, Any]] = None, 52 | sam_params: Optional[Dict[str, Any]] = None, 53 | threestudio_params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 54 | """ 55 | Generate a 3D model from a text prompt. 56 | 57 | Args: 58 | prompt: Text description for image generation 59 | venice_params: Optional parameters for Venice.ai 60 | sam_params: Optional parameters for SAM2 61 | threestudio_params: Optional parameters for threestudio 62 | 63 | Returns: 64 | Dictionary containing paths to generated files and metadata 65 | """ 66 | try: 67 | # Generate a unique ID for this pipeline run 68 | pipeline_id = str(uuid.uuid4()) 69 | logger.info(f"Starting pipeline {pipeline_id} for prompt: {prompt}") 70 | 71 | # Step 1: Generate image with Venice.ai 72 | image_path = os.path.join(self.output_dir, "images", f"{pipeline_id}.png") 73 | venice_result = self._generate_image(prompt, image_path, venice_params) 74 | 75 | # Step 2: Segment object with SAM2 76 | masks_dir = os.path.join(self.output_dir, "masks", pipeline_id) 77 | sam_result = self._segment_image(image_path, masks_dir, sam_params) 78 | 79 | # Get the best mask (highest score or first mask if no scores) 80 | if "scores" in sam_result and sam_result["scores"]: 81 | best_mask_idx = sam_result["scores"].index(max(sam_result["scores"])) 82 | best_mask_path = sam_result["mask_paths"][best_mask_idx] 83 | else: 84 | # If no scores available, use the first mask 85 | best_mask_path = sam_result["mask_paths"][0] if sam_result.get("mask_paths") else None 86 | 87 | if not best_mask_path: 88 | raise ValueError("No valid mask generated from segmentation") 89 | 90 | # Step 3: Create 3D model with threestudio 91 | threestudio_result = self._generate_3d_model(best_mask_path, threestudio_params) 92 | 93 | # Step 4: Convert to OpenSCAD for parametric editing 94 | scad_result = self._convert_to_openscad(threestudio_result["exported_files"][0], pipeline_id) 95 | 96 | # Compile results 97 | result = { 98 | "pipeline_id": pipeline_id, 99 | "prompt": prompt, 100 | "image": venice_result, 101 | "segmentation": sam_result, 102 | "model_3d": threestudio_result, 103 | "openscad": scad_result 104 | } 105 | 106 | logger.info(f"Pipeline {pipeline_id} completed successfully") 107 | return result 108 | except Exception as e: 109 | logger.error(f"Error in pipeline: {str(e)}") 110 | raise 111 | 112 | def _generate_image(self, prompt: str, output_path: str, 113 | params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 114 | """ 115 | Generate image with Venice.ai. 
116 | 117 | Args: 118 | prompt: Text description for image generation 119 | output_path: Path to save the generated image 120 | params: Optional parameters for Venice.ai 121 | 122 | Returns: 123 | Dictionary containing image data and metadata 124 | """ 125 | logger.info(f"Generating image for prompt: {prompt}") 126 | 127 | # Default parameters 128 | default_params = { 129 | "model": "fluently-xl", # Default to fastest model 130 | "width": 1024, 131 | "height": 1024 132 | } 133 | 134 | # Merge with provided parameters 135 | if params: 136 | default_params.update(params) 137 | 138 | # Generate image 139 | result = self.venice_generator.generate_image( 140 | prompt=prompt, 141 | output_path=output_path, 142 | **default_params 143 | ) 144 | 145 | logger.info(f"Image generated: {output_path}") 146 | return result 147 | 148 | def _segment_image(self, image_path: str, output_dir: str, 149 | params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 150 | """ 151 | Segment object with SAM2. 152 | 153 | Args: 154 | image_path: Path to input image 155 | output_dir: Directory to save segmentation results 156 | params: Optional parameters for SAM2 157 | 158 | Returns: 159 | Dictionary containing segmentation masks and metadata 160 | """ 161 | logger.info(f"Segmenting image: {image_path}") 162 | 163 | # Segment image with SAM2 164 | # Check if points are provided in params 165 | points = params.get("points") if params else None 166 | 167 | if points: 168 | result = self.sam_segmenter.segment_image( 169 | image_path=image_path, 170 | points=points, 171 | output_dir=output_dir 172 | ) 173 | else: 174 | # Use automatic point generation 175 | result = self.sam_segmenter.segment_with_auto_points( 176 | image_path=image_path, 177 | output_dir=output_dir 178 | ) 179 | 180 | logger.info(f"Image segmented, {result.get('num_masks', 0)} masks generated") 181 | return result 182 | 183 | def _generate_3d_model(self, image_path: str, 184 | params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 185 | """ 186 | Generate 3D model with threestudio. 187 | 188 | Args: 189 | image_path: Path to input image 190 | params: Optional parameters for threestudio 191 | 192 | Returns: 193 | Dictionary containing paths to generated model files 194 | """ 195 | logger.info(f"Generating 3D model from image: {image_path}") 196 | 197 | # Default parameters 198 | default_params = { 199 | "method": "zero123", 200 | "num_iterations": 5000, 201 | "export_format": "obj" 202 | } 203 | 204 | # Merge with provided parameters 205 | if params: 206 | default_params.update(params) 207 | 208 | # Generate 3D model 209 | result = self.threestudio_generator.generate_model_from_image( 210 | image_path=image_path, 211 | **default_params 212 | ) 213 | 214 | logger.info(f"3D model generated: {result['exported_files']}") 215 | return result 216 | 217 | def _convert_to_openscad(self, model_path: str, model_id: str) -> Dict[str, Any]: 218 | """ 219 | Convert 3D model to OpenSCAD format. 
220 | 221 | Args: 222 | model_path: Path to input model 223 | model_id: Unique identifier for the model 224 | 225 | Returns: 226 | Dictionary containing paths to generated files 227 | """ 228 | logger.info(f"Converting model to OpenSCAD: {model_path}") 229 | 230 | # Generate OpenSCAD code for importing the model 231 | scad_code = f"""// Generated OpenSCAD code for model {model_id} 232 | // Imported from {os.path.basename(model_path)} 233 | 234 | // Parameters 235 | scale_factor = 1.0; 236 | position_x = 0; 237 | position_y = 0; 238 | position_z = 0; 239 | rotation_x = 0; 240 | rotation_y = 0; 241 | rotation_z = 0; 242 | 243 | // Import and transform the model 244 | translate([position_x, position_y, position_z]) 245 | rotate([rotation_x, rotation_y, rotation_z]) 246 | scale(scale_factor) 247 | import("{model_path}"); 248 | """ 249 | 250 | # Save SCAD code to file 251 | scad_file = self.openscad_wrapper.generate_scad(scad_code, model_id) 252 | 253 | # Generate previews 254 | previews = self.openscad_wrapper.generate_multi_angle_previews(scad_file) 255 | 256 | return { 257 | "scad_file": scad_file, 258 | "previews": previews, 259 | "model_path": model_path 260 | } 261 | -------------------------------------------------------------------------------- /old/test_sam2_segmentation.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import logging 4 | import argparse 5 | from pathlib import Path 6 | from typing import Dict, Any, List, Optional, Tuple 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | logger = logging.getLogger(__name__) 11 | 12 | # Add project root to path 13 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 14 | 15 | # Import SAM2 segmenter and config 16 | from src.ai.sam_segmentation import SAMSegmenter 17 | from src.config import SAM2_CHECKPOINT_PATH, SAM2_MODEL_TYPE, SAM2_USE_GPU, MASKS_DIR 18 | 19 | def test_sam2_segmentation(image_path: str, output_dir: Optional[str] = None, use_auto_points: bool = True): 20 | """ 21 | Test SAM2 segmentation on an image. 
22 | 23 | Args: 24 | image_path: Path to the input image 25 | output_dir: Directory to save segmentation results (default: config.MASKS_DIR) 26 | use_auto_points: Whether to use automatic point generation 27 | """ 28 | # Validate image path 29 | if not os.path.exists(image_path): 30 | logger.error(f"Image not found: {image_path}") 31 | return 32 | 33 | # Use default output directory if not provided 34 | if not output_dir: 35 | output_dir = os.path.join(MASKS_DIR, Path(image_path).stem) 36 | 37 | # Create output directory 38 | os.makedirs(output_dir, exist_ok=True) 39 | 40 | logger.info(f"Testing SAM2 segmentation on image: {image_path}") 41 | logger.info(f"Model type: {SAM2_MODEL_TYPE}") 42 | logger.info(f"Checkpoint path: {SAM2_CHECKPOINT_PATH}") 43 | logger.info(f"Using GPU: {SAM2_USE_GPU}") 44 | 45 | try: 46 | # Initialize SAM2 segmenter 47 | logger.info("Initializing SAM2 segmenter...") 48 | sam_segmenter = SAMSegmenter( 49 | model_type=SAM2_MODEL_TYPE, 50 | checkpoint_path=SAM2_CHECKPOINT_PATH, 51 | use_gpu=SAM2_USE_GPU, 52 | output_dir=output_dir 53 | ) 54 | 55 | # Perform segmentation 56 | if use_auto_points: 57 | logger.info("Using automatic point generation") 58 | result = sam_segmenter.segment_with_auto_points(image_path) 59 | else: 60 | # Use center point of the image for manual point 61 | logger.info("Using manual center point") 62 | import cv2 63 | image = cv2.imread(image_path) 64 | h, w = image.shape[:2] 65 | center_point = (w // 2, h // 2) 66 | result = sam_segmenter.segment_image(image_path, points=[center_point]) 67 | 68 | # Print results 69 | logger.info(f"Segmentation completed with {result.get('num_masks', 0)} masks") 70 | 71 | if result.get('mask_paths'): 72 | logger.info(f"Mask paths: {result.get('mask_paths')}") 73 | 74 | return result 75 | 76 | except Exception as e: 77 | logger.error(f"Error in SAM2 segmentation: {str(e)}") 78 | import traceback 79 | traceback.print_exc() 80 | return None 81 | 82 | if __name__ == "__main__": 83 | # Parse command line arguments 84 | parser = argparse.ArgumentParser(description="Test SAM2 segmentation") 85 | parser.add_argument("image_path", help="Path to the input image") 86 | parser.add_argument("--output-dir", help="Directory to save segmentation results") 87 | parser.add_argument("--manual-points", action="store_true", help="Use manual center point instead of auto points") 88 | 89 | args = parser.parse_args() 90 | 91 | # Run test 92 | test_sam2_segmentation( 93 | args.image_path, 94 | args.output_dir, 95 | not args.manual_points 96 | ) 97 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Core dependencies 2 | fastapi>=0.95.0 3 | uvicorn>=0.21.0 4 | pydantic>=2.0.0 5 | python-multipart>=0.0.6 6 | 7 | # MCP SDK 8 | git+https://github.com/modelcontextprotocol/python-sdk.git 9 | 10 | # Image processing 11 | pillow>=9.5.0 12 | opencv-python>=4.7.0 13 | 14 | # HTTP client 15 | requests>=2.28.0 16 | httpx>=0.24.0 17 | 18 | # Utilities 19 | python-dotenv>=1.0.0 20 | pyyaml>=6.0 21 | jinja2>=3.1.2 22 | numpy>=1.24.0 23 | uuid>=1.30.0 24 | tqdm>=4.65.0 25 | 26 | # Image Generation - Venice.ai API (optional) 27 | # (using existing requests and python-dotenv) 28 | 29 | # Image Generation - Google Gemini API 30 | google-generativeai>=0.3.0 31 | 32 | # Network and Service Discovery 33 | zeroconf>=0.39.0 34 | aiohttp>=3.8.4 35 | 36 | # 3D Reconstruction - CUDA Multi-View Stereo 37 | open3d>=0.17.0 38 | 
trimesh>=3.21.0 39 | pyrender>=0.1.45 40 | 41 | # Remote Processing 42 | fastapi-utils>=0.2.1 43 | python-jose>=3.3.0 # For JWT authentication 44 | aiofiles>=23.1.0 45 | 46 | # For development 47 | pytest>=7.3.1 48 | black>=23.3.0 49 | isort>=5.12.0 50 | pytest-asyncio>=0.21.0 51 | 52 | # Deprecated dependencies (kept for reference) 53 | # segment-anything-2>=1.0 54 | # torch>=2.0.0 55 | # torchvision>=0.15.0 56 | # pytorch3d>=0.7.4 57 | # ninja>=1.11.0 58 | -------------------------------------------------------------------------------- /rtfmd/README.md: -------------------------------------------------------------------------------- 1 | # Reasoning Trace Framework for OpenSCAD MCP Server 2 | 3 | This directory contains the Reasoning Trace Framework (RTF) documentation for the OpenSCAD MCP Server project. The RTF provides insight into the design decisions, mental models, and reasoning processes behind the implementation. 4 | 5 | ## Directory Structure 6 | 7 | - `/rtfmd/files/` - Shadow file system mirroring the actual source code structure 8 | - Contains `.md` files with the same names as their corresponding source files 9 | - Each file documents the reasoning behind the implementation 10 | 11 | - `/rtfmd/knowledge/` - Domain knowledge documentation 12 | - `/openscad/` - Knowledge about OpenSCAD and 3D modeling 13 | - `/ai/` - Knowledge about AI and natural language processing 14 | - `/nlp/` - Knowledge about natural language parameter extraction 15 | 16 | - `/rtfmd/decisions/` - Architectural decision records 17 | - Documents major design decisions and their rationales 18 | 19 | ## How to Use This Documentation 20 | 21 | 1. Start with the `/rtfmd/files/src/main.py.md` file to understand the overall architecture 22 | 2. Explore specific components through their corresponding `.md` files 23 | 3. Refer to the knowledge directory for domain-specific information 24 | 4. Review the decisions directory for major architectural decisions 25 | 26 | ## Tags Used 27 | 28 | - `` - File metadata including author, timestamp, version, etc. 29 | - `` - Documents the exploration process and alternatives considered 30 | - `` - Explains the mental model used in the implementation 31 | - `` - Identifies design patterns used 32 | - `` - Documents trade-offs considered and choices made 33 | - `` - References to domain knowledge required 34 | - `` - Acknowledges technical debt and future improvements 35 | - `` - References to related knowledge documents 36 | 37 | ## Contributing 38 | 39 | When modifying the codebase, please update the corresponding RTF documentation to reflect your reasoning and design decisions. 40 | -------------------------------------------------------------------------------- /rtfmd/decisions/ai-driven-code-generation.md: -------------------------------------------------------------------------------- 1 | # AI-Driven Code Generation for OpenSCAD 2 | 3 | 4 | author: devin-ai-integration 5 | timestamp: 2025-03-21T01:30:00Z 6 | version: 1.0.0 7 | tags: [ai, code-generation, openscad, architecture-decision] 8 | 9 | 10 | ## Decision Context 11 | 12 | The OpenSCAD MCP Server requires a mechanism to translate natural language descriptions into valid OpenSCAD code. This architectural decision record documents the approach chosen for implementing AI-driven code generation. 13 | 14 | ## Options Considered 15 | 16 | ### Option 1: Template-Based Approach 17 | 18 | A simple approach using predefined templates with parameter substitution. 
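
For illustration, a minimal sketch of what this option looks like (the template and parameter names here are hypothetical, not taken from the codebase):

```python
from string import Template

# One template per supported primitive; parameters are substituted directly.
CUBE_TEMPLATE = Template("cube([$width, $depth, $height], center=true);")

def render_cube(width: float, depth: float, height: float) -> str:
    """Return OpenSCAD code for a centered cube with the given dimensions (mm)."""
    return CUBE_TEMPLATE.substitute(width=width, depth=depth, height=height)

print(render_cube(10, 20, 5))  # cube([10, 20, 5], center=true);
```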
19 | 20 | **Pros:** 21 | - Simple implementation 22 | - Predictable output 23 | - Low computational requirements 24 | 25 | **Cons:** 26 | - Limited flexibility 27 | - Cannot handle complex or novel descriptions 28 | - Requires manual creation of templates for each shape type 29 | 30 | ### Option 2: Full Machine Learning Approach 31 | 32 | Using embeddings and neural networks to generate OpenSCAD code directly. 33 | 34 | **Pros:** 35 | - Highly flexible 36 | - Can handle novel descriptions 37 | - Potential for more natural interaction 38 | 39 | **Cons:** 40 | - High computational requirements 41 | - Requires training data 42 | - Less predictable output 43 | - Harder to debug and maintain 44 | 45 | ### Option 3: Hybrid Pattern Matching with Contextual Rules 46 | 47 | Combining pattern matching for parameter extraction with rule-based code generation. 48 | 49 | **Pros:** 50 | - Good balance of flexibility and predictability 51 | - Moderate computational requirements 52 | - Easier to debug and maintain 53 | - Can be extended with more sophisticated ML in the future 54 | 55 | **Cons:** 56 | - More complex than pure template approach 57 | - Less flexible than full ML approach 58 | - Requires careful design of rules and patterns 59 | 60 | ## Decision 61 | 62 | **Chosen Option: Option 3 - Hybrid Pattern Matching with Contextual Rules** 63 | 64 | The hybrid approach was selected because it provides a good balance of flexibility, maintainability, and computational efficiency. It allows for handling a wide range of natural language descriptions while maintaining predictable output and being easier to debug than a full ML approach. 65 | 66 | ## Implementation Details 67 | 68 | The implementation consists of two main components: 69 | 70 | 1. **Parameter Extractor**: Uses regex patterns and contextual rules to extract parameters from natural language descriptions. 71 | 72 | 2. **Code Generator**: Translates extracted parameters into OpenSCAD code using a combination of templates and programmatic generation. 73 | 74 | The `AIService` class provides the bridge between these components, handling the overall flow from natural language to code. 
75 | 76 | ```python 77 | class AIService: 78 | def __init__(self, templates_dir, model_config=None): 79 | self.templates_dir = templates_dir 80 | self.model_config = model_config or {} 81 | self.templates = self._load_templates() 82 | 83 | def generate_openscad_code(self, context): 84 | description = context.get("description", "") 85 | parameters = context.get("parameters", {}) 86 | 87 | # Parse the description to identify key components 88 | components = self._parse_description(description) 89 | 90 | # Generate code based on identified components 91 | code = self._generate_code_from_components(components, parameters) 92 | 93 | return code 94 | ``` 95 | 96 | ## Consequences 97 | 98 | ### Positive 99 | 100 | - More flexible code generation than a pure template approach 101 | - Better maintainability than a full ML approach 102 | - Lower computational requirements 103 | - Easier to debug and extend 104 | - Can handle a wide range of natural language descriptions 105 | 106 | ### Negative 107 | 108 | - More complex implementation than a pure template approach 109 | - Requires careful design of patterns and rules 110 | - May still struggle with very complex or ambiguous descriptions 111 | 112 | ### Neutral 113 | 114 | - Will require ongoing maintenance as new shape types and features are added 115 | - May need to be extended with more sophisticated ML techniques in the future 116 | 117 | ## Follow-up Actions 118 | 119 | - Implement unit tests for the AI service 120 | - Create a comprehensive set of test cases for different description types 121 | - Document the pattern matching rules and code generation logic 122 | - Consider adding a feedback mechanism to improve the system over time 123 | -------------------------------------------------------------------------------- /rtfmd/decisions/export-formats.md: -------------------------------------------------------------------------------- 1 | # Decision: Export Format Selection 2 | 3 | 4 | author: devin-ai-integration 5 | timestamp: 2025-03-21T12:00:00Z 6 | version: 1.0.0 7 | tags: [export-formats, 3d-printing, decision, prusa, bambu] 8 | 9 | 10 | ## Context 11 | 12 | The OpenSCAD MCP Server needs to export 3D models in formats that: 13 | 1. Preserve parametric properties 14 | 2. Support metadata 15 | 3. Are compatible with Prusa and Bambu printers 16 | 4. Avoid limitations of STL format 17 | 18 | 19 | We evaluated multiple export formats: 20 | - STL: Traditional format but lacks metadata 21 | - CSG: OpenSCAD's native format, fully parametric 22 | - SCAD: Source code, fully parametric 23 | - 3MF: Modern format with metadata support 24 | - AMF: XML-based format with metadata 25 | - DXF/SVG: 2D formats for laser cutting 26 | 27 | 28 | ## Decision 29 | 30 | We will use **3MF as the primary export format** with AMF as a secondary option. 31 | CSG and SCAD will be supported for users who want to modify the models in OpenSCAD. 32 | 33 | 34 | The ideal export format should: 35 | - Maintain all design parameters 36 | - Include metadata about the model 37 | - Be widely supported by popular slicers 38 | - Have a clean, standardized specification 39 | - Support multiple objects and materials 40 | 41 | 42 | ## Rationale 43 | 44 | 45 | Modern 3D printing workflows favor formats that preserve more information than just geometry. The industry is shifting from STL to more capable formats like 3MF. 
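
As a concrete illustration (a sketch, not the project's actual exporter), OpenSCAD's command line can emit these formats directly; the target format is inferred from the output file extension, assuming the installed OpenSCAD build includes 3MF/AMF support:

```python
import subprocess

def export_model(scad_file: str, output_file: str) -> None:
    """Render a .scad file to the format implied by output_file (e.g. .3mf, .amf, .stl)."""
    subprocess.run(["openscad", "-o", output_file, scad_file], check=True)

# Example invocation; the output path is a placeholder.
export_model("scad/simple_cube.scad", "output/simple_cube.3mf")
```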
46 | 47 | 48 | - **3MF** is supported by both Prusa and Bambu printer software 49 | - **3MF** includes support for metadata, colors, and materials 50 | - **3MF** has a cleaner specification than STL 51 | - **AMF** offers similar advantages but with less widespread adoption 52 | - **CSG/SCAD** formats maintain full parametric properties but only within OpenSCAD 53 | 54 | 55 | We considered making STL an option for broader compatibility, but this would compromise our goal of preserving parametric properties. The benefits of 3MF outweigh the minor compatibility issues that might arise. 56 | 57 | 58 | ## Consequences 59 | 60 | **Positive:** 61 | - Better preservation of model information 62 | - Improved compatibility with modern printer software 63 | - Future-proof approach as 3MF adoption increases 64 | 65 | **Negative:** 66 | - Slightly more complex implementation than STL 67 | - May require validation to ensure proper format compliance 68 | 69 | 70 | We will need to implement validation for 3MF and AMF files to ensure they meet specifications. This adds complexity but is necessary for reliability. 71 | 72 | 73 | 74 | - [OpenSCAD Export Formats](/rtfmd/knowledge/openscad/export-formats.md) 75 | - [OpenSCAD Basics](/rtfmd/knowledge/openscad/openscad-basics.md) 76 | 77 | -------------------------------------------------------------------------------- /rtfmd/files/src/ai/ai_service.py.md: -------------------------------------------------------------------------------- 1 | 2 | author: devin-ai-integration 3 | timestamp: 2025-03-21T01:30:00Z 4 | version: 1.0.0 5 | related-files: [/src/models/code_generator.py] 6 | prompt: "Implement AI-driven code generator for OpenSCAD" 7 | 8 | 9 | 10 | The AI service component was designed to provide natural language processing capabilities for generating OpenSCAD code from user descriptions. Initial approaches considered: 11 | 12 | 1. Using a template-based approach with predefined patterns 13 | 2. Implementing a full NLP pipeline with custom entity extraction 14 | 3. Creating a hybrid approach that combines pattern matching with contextual understanding 15 | 16 | The hybrid approach was selected as it provides flexibility while maintaining performance. 17 | 18 | 19 | 20 | The AI service operates on the concept of "component identification" - breaking down natural language descriptions into geometric primitives, operations, features, and modifiers. This mental model aligns with how OpenSCAD itself works, where complex models are built from primitive shapes and CSG operations. 21 | 22 | 23 | 24 | The implementation uses the Strategy pattern for different parsing strategies and the Factory pattern for code generation. These patterns allow for extensibility as new shape types or operations are added. 25 | 26 | 27 | 28 | Options considered: 29 | 1. Full machine learning approach with embeddings and neural networks 30 | 2. Rule-based pattern matching with regular expressions 31 | 3. 
Hybrid approach with pattern matching and contextual rules 32 | 33 | The hybrid approach was chosen because: 34 | - Lower computational requirements than full ML 35 | - More flexible than pure rule-based systems 36 | - Easier to debug and maintain 37 | - Can be extended with more sophisticated ML in the future 38 | 39 | 40 | 41 | The implementation required understanding of: 42 | - OpenSCAD's modeling paradigm (CSG operations) 43 | - Common 3D modeling terminology 44 | - Natural language processing techniques 45 | - Regular expression pattern matching 46 | 47 | 48 | 49 | [OpenSCAD Basics](/rtfmd/knowledge/openscad/openscad-basics.md) - Last updated 2025-03-21 50 | [Natural Language Processing](/rtfmd/knowledge/ai/natural-language-processing.md) - Last updated 2025-03-21 51 | 52 | -------------------------------------------------------------------------------- /rtfmd/files/src/main.py.md: -------------------------------------------------------------------------------- 1 | 2 | author: devin-ai-integration 3 | timestamp: 2025-03-21T01:30:00Z 4 | version: 1.0.0 5 | related-files: [/src/ai/ai_service.py, /src/models/code_generator.py, /src/nlp/parameter_extractor.py] 6 | prompt: "Build an MCP server for OpenSCAD" 7 | 8 | 9 | 10 | The main application was designed to implement a Model Context Protocol (MCP) server for OpenSCAD integration. Several approaches were considered: 11 | 12 | 1. Using a standalone server with direct OpenSCAD CLI calls 13 | 2. Implementing a web service with REST API 14 | 3. Creating an MCP-compliant server with FastAPI 15 | 16 | The MCP-compliant FastAPI approach was selected for its alignment with the project requirements and modern API design. 17 | 18 | 19 | 20 | The main application operates on a "tool-based MCP service" paradigm, where each capability is exposed as an MCP tool that can be called by AI assistants. This mental model aligns with the MCP specification and provides a clean separation of concerns. 21 | 22 | 23 | 24 | The implementation uses the Facade pattern to provide a simple interface to the complex subsystems (parameter extraction, code generation, OpenSCAD wrapper, etc.). This pattern simplifies the client interface and decouples the subsystems from clients. 25 | 26 | 27 | 28 | Options considered: 29 | 1. Monolithic application with tightly coupled components 30 | 2. Microservices architecture with separate services 31 | 3. 
Modular monolith with clear component boundaries 32 | 33 | The modular monolith approach was chosen because: 34 | - Simpler deployment and operation 35 | - Lower latency for inter-component communication 36 | - Easier to develop and debug 37 | - Still maintains good separation of concerns 38 | 39 | 40 | 41 | The implementation required understanding of: 42 | - Model Context Protocol (MCP) specification 43 | - FastAPI framework 44 | - OpenSCAD command-line interface 45 | - 3D modeling and printing workflows 46 | 47 | 48 | 49 | The current implementation has some limitations: 50 | - In-memory storage of models (not persistent) 51 | - Basic error handling 52 | - Limited printer discovery capabilities 53 | 54 | Future improvements planned: 55 | - Persistent storage for models 56 | - Enhanced error handling and reporting 57 | - More robust printer discovery and management 58 | 59 | 60 | 61 | [OpenSCAD Basics](/rtfmd/knowledge/openscad/openscad-basics.md) - Last updated 2025-03-21 62 | [AI-Driven Code Generation](/rtfmd/decisions/ai-driven-code-generation.md) - Last updated 2025-03-21 63 | 64 | -------------------------------------------------------------------------------- /rtfmd/files/src/models/code_generator.py.md: -------------------------------------------------------------------------------- 1 | 2 | author: devin-ai-integration 3 | timestamp: 2025-03-21T01:30:00Z 4 | version: 1.0.0 5 | related-files: [/src/ai/ai_service.py, /src/nlp/parameter_extractor.py] 6 | prompt: "Implement AI-driven code generator for OpenSCAD" 7 | 8 | 9 | 10 | The code generator was designed to translate natural language descriptions and extracted parameters into valid OpenSCAD code. Several approaches were considered: 11 | 12 | 1. Direct string manipulation for code generation 13 | 2. Template-based approach with parameter substitution 14 | 3. Modular approach with separate modules for different shape types 15 | 16 | The modular approach was selected for its maintainability and extensibility. 17 | 18 | 19 | 20 | The code generator operates on a "shape-to-module" mapping paradigm, where each identified shape type corresponds to a specific OpenSCAD module. This mental model allows for clean separation of concerns and makes it easy to add new shape types. 21 | 22 | 23 | 24 | The implementation uses the Factory pattern for code generation, where different shape types are mapped to different module generators. This pattern allows for easy extension with new shape types. 25 | 26 | 27 | 28 | Options considered: 29 | 1. Generating raw OpenSCAD primitives directly 30 | 2. Using a library of pre-defined modules 31 | 3. 
Hybrid approach with both primitives and modules 32 | 33 | The library approach was chosen because: 34 | - More maintainable and readable code 35 | - Easier to implement complex shapes 36 | - Better parameter handling 37 | - More consistent output 38 | 39 | 40 | 41 | The implementation required understanding of: 42 | - OpenSCAD syntax and semantics 43 | - Constructive Solid Geometry (CSG) operations 44 | - Parametric modeling concepts 45 | - 3D geometry fundamentals 46 | 47 | 48 | 49 | The current implementation has some limitations: 50 | - Limited support for complex nested operations 51 | - No support for custom user-defined modules 52 | - Basic error handling for invalid parameters 53 | 54 | Future improvements planned: 55 | - Enhanced error handling with meaningful messages 56 | - Support for user-defined modules 57 | - More sophisticated CSG operation chaining 58 | 59 | 60 | 61 | [OpenSCAD Basics](/rtfmd/knowledge/openscad/openscad-basics.md) - Last updated 2025-03-21 62 | [AI-Driven Code Generation](/rtfmd/decisions/ai-driven-code-generation.md) - Last updated 2025-03-21 63 | 64 | -------------------------------------------------------------------------------- /rtfmd/files/src/nlp/parameter_extractor.py.md: -------------------------------------------------------------------------------- 1 | 2 | author: devin-ai-integration 3 | timestamp: 2025-03-21T01:30:00Z 4 | version: 1.0.0 5 | related-files: [/src/models/code_generator.py] 6 | prompt: "Enhance parameter extractor with expanded shape recognition" 7 | 8 | 9 | 10 | The parameter extractor was designed to parse natural language descriptions and extract structured parameters for 3D model generation. Several approaches were considered: 11 | 12 | 1. Using a full NLP pipeline with named entity recognition 13 | 2. Implementing regex-based pattern matching 14 | 3. Creating a hybrid approach with contextual understanding 15 | 16 | The regex-based approach with contextual enhancements was selected for its balance of simplicity and effectiveness. 17 | 18 | 19 | 20 | The parameter extractor operates on a "pattern recognition and extraction" paradigm, where common phrases and patterns in natural language are mapped to specific parameter types. This mental model allows for intuitive parameter extraction from diverse descriptions. 21 | 22 | 23 | 24 | The implementation uses the Strategy pattern for different parameter extraction strategies based on shape type. This pattern allows for specialized extraction logic for each shape type while maintaining a consistent interface. 25 | 26 | 27 | 28 | Options considered: 29 | 1. Machine learning-based approach with trained models 30 | 2. Pure regex pattern matching 31 | 3. 
Hybrid approach with contextual rules 32 | 33 | The regex approach with contextual rules was chosen because: 34 | - Simpler implementation with good accuracy 35 | - No training data required 36 | - Easier to debug and maintain 37 | - More predictable behavior 38 | 39 | 40 | 41 | The implementation required understanding of: 42 | - Natural language processing concepts 43 | - Regular expression pattern matching 44 | - 3D modeling terminology 45 | - Parameter types for different geometric shapes 46 | 47 | 48 | 49 | The current implementation has some limitations: 50 | - Limited support for complex nested descriptions 51 | - Regex patterns may need maintenance as language evolves 52 | - Only supports millimeters as per project requirements 53 | 54 | Future improvements planned: 55 | - Enhanced contextual understanding 56 | - Support for more complex descriptions 57 | - Better handling of ambiguous parameters 58 | 59 | 60 | 61 | [Parameter Extraction](/rtfmd/knowledge/nlp/parameter-extraction.md) - Last updated 2025-03-21 62 | [Natural Language Processing](/rtfmd/knowledge/ai/natural-language-processing.md) - Last updated 2025-03-21 63 | 64 | -------------------------------------------------------------------------------- /rtfmd/knowledge/ai/natural-language-processing.md: -------------------------------------------------------------------------------- 1 | # Natural Language Processing for 3D Modeling 2 | 3 | 4 | author: devin-ai-integration 5 | timestamp: 2025-03-21T01:30:00Z 6 | version: 1.0.0 7 | tags: [nlp, 3d-modeling, parameter-extraction, pattern-matching] 8 | 9 | 10 | ## Overview 11 | 12 | Natural Language Processing (NLP) techniques can be applied to extract 3D modeling parameters and intentions from user descriptions. This knowledge document outlines approaches for translating natural language into structured data for 3D model generation. 13 | 14 | ## Approaches 15 | 16 | ### Pattern Matching 17 | 18 | Regular expression pattern matching is effective for identifying: 19 | 20 | - Dimensions and measurements 21 | - Shape types and primitives 22 | - Operations (union, difference, etc.) 23 | - Transformations (rotate, scale, etc.) 
24 | - Material properties and colors 25 | 26 | Example patterns: 27 | ```python 28 | # Dimension pattern 29 | dimension_pattern = r'(\d+(?:\.\d+)?)\s*(mm|cm|m|inch|in)' 30 | 31 | # Shape pattern 32 | shape_pattern = r'\b(cube|box|sphere|ball|cylinder|tube|cone|pyramid)\b' 33 | ``` 34 | 35 | ### Contextual Understanding 36 | 37 | Beyond simple pattern matching, contextual understanding involves: 38 | 39 | - Identifying relationships between objects 40 | - Understanding relative positioning 41 | - Resolving ambiguous references 42 | - Maintaining dialog state for multi-turn interactions 43 | 44 | ### Hybrid Approaches 45 | 46 | Combining pattern matching with contextual rules provides: 47 | 48 | - Better accuracy than pure pattern matching 49 | - Lower computational requirements than full ML approaches 50 | - More maintainable and debuggable systems 51 | - Flexibility to handle diverse descriptions 52 | 53 | ## Parameter Extraction 54 | 55 | Key parameters to extract include: 56 | 57 | - **Dimensions**: Width, height, depth, radius, diameter 58 | - **Positions**: Coordinates, relative positions 59 | - **Operations**: Boolean operations, transformations 60 | - **Features**: Holes, fillets, chamfers, text 61 | - **Properties**: Color, material, finish 62 | 63 | ## Implementation Considerations 64 | 65 | - **Ambiguity Resolution**: Handle cases where measurements could apply to multiple dimensions 66 | - **Default Values**: Provide sensible defaults for unspecified parameters 67 | - **Unit Conversion**: Convert between different measurement units 68 | - **Error Handling**: Gracefully handle unparseable or contradictory descriptions 69 | - **Dialog Management**: Maintain state for multi-turn interactions to refine models 70 | 71 | ## Evaluation Metrics 72 | 73 | Effective NLP for 3D modeling can be evaluated by: 74 | 75 | - **Accuracy**: Correctness of extracted parameters 76 | - **Completeness**: Percentage of required parameters successfully extracted 77 | - **Robustness**: Ability to handle diverse phrasings and descriptions 78 | - **User Satisfaction**: Subjective evaluation of the resulting models 79 | -------------------------------------------------------------------------------- /rtfmd/knowledge/nlp/parameter-extraction.md: -------------------------------------------------------------------------------- 1 | # Parameter Extraction for 3D Modeling 2 | 3 | 4 | author: devin-ai-integration 5 | timestamp: 2025-03-21T01:30:00Z 6 | version: 1.0.0 7 | tags: [parameter-extraction, nlp, 3d-modeling, regex] 8 | 9 | 10 | ## Overview 11 | 12 | Parameter extraction is the process of identifying and extracting structured data from natural language descriptions of 3D models. This is a critical component in translating user intentions into actionable modeling parameters. 
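As a concrete illustration, the goal is to turn a free-form request such as "a 20mm wide box, 10mm tall" into a structured parameter dictionary. The sketch below is a minimal, self-contained example; the function name and patterns are illustrative only and are not the project's actual extractor API.

```python
import re

def extract_basic_parameters(description: str) -> dict:
    """Minimal illustration: map a free-form description to structured parameters."""
    text = description.lower()
    shape_match = re.search(r'\b(cube|box|sphere|ball|cylinder|tube|cone)\b', text)
    dimensions = re.findall(r'(\d+(?:\.\d+)?)\s*(mm|cm|m|inch|in)?', text)
    return {
        "shape": shape_match.group(1) if shape_match else None,
        "dimensions": [float(value) for value, _unit in dimensions],
    }

# extract_basic_parameters("a 20mm wide box, 10mm tall")
# -> {'shape': 'box', 'dimensions': [20.0, 10.0]}
```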
13 | 14 | ## Extraction Techniques 15 | 16 | ### Regular Expression Patterns 17 | 18 | Regular expressions provide a powerful way to extract parameters: 19 | 20 | ```python 21 | # Extract dimensions with units 22 | dimension_pattern = r'(\d+(?:\.\d+)?)\s*(mm|cm|m|inch|in)' 23 | 24 | # Extract color information 25 | color_pattern = r'\b(red|green|blue|yellow|black|white|purple|orange|brown)\b' 26 | 27 | # Extract shape type 28 | shape_pattern = r'\b(cube|box|sphere|ball|cylinder|tube|cone|pyramid)\b' 29 | ``` 30 | 31 | ### Contextual Parameter Association 32 | 33 | After extracting raw values, they must be associated with the correct parameter: 34 | 35 | ```python 36 | def associate_dimension(value, description): 37 | """Associate a dimension value with the correct parameter based on context.""" 38 | if "width" in description or "wide" in description: 39 | return ("width", value) 40 | elif "height" in description or "tall" in description: 41 | return ("height", value) 42 | elif "depth" in description or "deep" in description: 43 | return ("depth", value) 44 | elif "radius" in description: 45 | return ("radius", value) 46 | elif "diameter" in description: 47 | return ("radius", value / 2) # Convert diameter to radius 48 | else: 49 | return ("unknown", value) 50 | ``` 51 | 52 | ### Default Parameters 53 | 54 | Provide sensible defaults for unspecified parameters: 55 | 56 | ```python 57 | default_parameters = { 58 | "cube": { 59 | "width": 10, 60 | "height": 10, 61 | "depth": 10, 62 | "center": True 63 | }, 64 | "sphere": { 65 | "radius": 10, 66 | "segments": 32 67 | }, 68 | "cylinder": { 69 | "radius": 5, 70 | "height": 10, 71 | "center": True, 72 | "segments": 32 73 | } 74 | } 75 | ``` 76 | 77 | ## Parameter Types 78 | 79 | Common parameter types to extract include: 80 | 81 | - **Dimensions**: Width, height, depth, radius, diameter 82 | - **Positions**: X, Y, Z coordinates 83 | - **Angles**: Rotation angles 84 | - **Counts**: Number of sides, segments, iterations 85 | - **Booleans**: Center, solid/hollow 86 | - **Colors**: RGB values or named colors 87 | - **Operations**: Union, difference, intersection 88 | - **Transformations**: Translate, rotate, scale, mirror 89 | 90 | ## Challenges and Solutions 91 | 92 | ### Ambiguity 93 | 94 | When parameters are ambiguous, use contextual clues or ask clarifying questions: 95 | 96 | ```python 97 | def resolve_ambiguity(value, possible_parameters, description): 98 | """Resolve ambiguity between possible parameters.""" 99 | # Try to resolve using context 100 | for param in possible_parameters: 101 | if param in description: 102 | return param 103 | 104 | # If still ambiguous, return a question to ask 105 | return f"Is {value} the {' or '.join(possible_parameters)}?" 
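
# Illustrative calls (hypothetical inputs):
#   resolve_ambiguity(20, ["height", "width"], "set the height to 20")  ->  "height"
#   resolve_ambiguity(20, ["height", "width"], "make it 20")            ->  "Is 20 the height or width?"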
106 | ``` 107 | 108 | ### Unit Conversion 109 | 110 | Convert all measurements to a standard unit (millimeters): 111 | 112 | ```python 113 | def convert_to_mm(value, unit): 114 | """Convert a value from the given unit to millimeters.""" 115 | if unit in ["mm", "millimeter", "millimeters"]: 116 | return value 117 | elif unit in ["cm", "centimeter", "centimeters"]: 118 | return value * 10 119 | elif unit in ["m", "meter", "meters"]: 120 | return value * 1000 121 | elif unit in ["in", "inch", "inches"]: 122 | return value * 25.4 123 | else: 124 | return value # Assume mm if unit is unknown 125 | ``` 126 | 127 | ### Dialog State Management 128 | 129 | Maintain state across multiple interactions: 130 | 131 | ```python 132 | class DialogState: 133 | def __init__(self): 134 | self.shape_type = None 135 | self.parameters = {} 136 | self.questions = [] 137 | self.confirmed = False 138 | 139 | def add_parameter(self, name, value): 140 | self.parameters[name] = value 141 | 142 | def add_question(self, question): 143 | self.questions.append(question) 144 | 145 | def is_complete(self): 146 | """Check if all required parameters are present.""" 147 | if not self.shape_type: 148 | return False 149 | 150 | required_params = self.get_required_parameters() 151 | return all(param in self.parameters for param in required_params) 152 | 153 | def get_required_parameters(self): 154 | """Get the required parameters for the current shape type.""" 155 | if self.shape_type == "cube": 156 | return ["width", "height", "depth"] 157 | elif self.shape_type == "sphere": 158 | return ["radius"] 159 | elif self.shape_type == "cylinder": 160 | return ["radius", "height"] 161 | else: 162 | return [] 163 | ``` 164 | 165 | ## Best Practices 166 | 167 | - Start with simple pattern matching and add complexity as needed 168 | - Provide sensible defaults for all parameters 169 | - Use contextual clues to resolve ambiguity 170 | - Maintain dialog state for multi-turn interactions 171 | - Convert all measurements to a standard unit 172 | - Validate extracted parameters for reasonableness 173 | - Handle errors gracefully with helpful messages 174 | -------------------------------------------------------------------------------- /rtfmd/knowledge/openscad/export-formats.md: -------------------------------------------------------------------------------- 1 | # OpenSCAD Export Formats 2 | 3 | 4 | author: devin-ai-integration 5 | timestamp: 2025-03-21T12:00:00Z 6 | version: 1.0.0 7 | tags: [openscad, export-formats, 3d-printing, prusa, bambu] 8 | 9 | 10 | ## Overview 11 | 12 | OpenSCAD supports exporting 3D models in various formats, each with different capabilities for preserving parametric properties and metadata. This document focuses on formats suitable for Prusa and Bambu printers, with an emphasis on alternatives to STL. 
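All of the formats described below can be produced headlessly through OpenSCAD's command-line interface, which is the approach recommended in the implementation notes at the end of this document. As a minimal sketch (assuming only that the `openscad` binary is on the PATH), an export can be scripted like this:

```python
import subprocess

def export_with_openscad(scad_file: str, output_file: str) -> None:
    """Export a .scad file via the OpenSCAD CLI; the target format is inferred
    from the output file extension (.3mf, .amf, .csg, ...)."""
    subprocess.run(["openscad", "-o", output_file, scad_file], check=True)

# export_with_openscad("scad/simple_cube.scad", "output/models/simple_cube.3mf")
```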
13 | 14 | ## Recommended Formats 15 | 16 | ### 3MF (3D Manufacturing Format) 17 | 18 | 3MF is a modern replacement for STL that addresses many of its limitations: 19 | 20 | - **Metadata Support**: Includes model information, materials, colors 21 | - **Compact Size**: More efficient encoding than STL 22 | - **Multiple Objects**: Can contain multiple parts in a single file 23 | - **Printer Compatibility**: Widely supported by Prusa and Bambu printers 24 | - **Implementation**: ZIP archive containing XML files 25 | 26 | ```openscad 27 | // Export to 3MF from command line 28 | // openscad -o model.3mf model.scad 29 | ``` 30 | 31 | ### AMF (Additive Manufacturing File Format) 32 | 33 | AMF is another modern format that supports: 34 | 35 | - **Material Information**: Material properties and colors 36 | - **Curved Surfaces**: Better representation than STL's triangles 37 | - **Metadata**: Design information and parameters 38 | - **Implementation**: XML-based format 39 | 40 | ```openscad 41 | // Export to AMF from command line 42 | // openscad -o model.amf model.scad 43 | ``` 44 | 45 | ### CSG (Constructive Solid Geometry) 46 | 47 | CSG is OpenSCAD's native format: 48 | 49 | - **Fully Parametric**: Preserves all construction operations 50 | - **Editable**: Can be reopened and modified in OpenSCAD 51 | - **Implementation**: OpenSCAD's internal representation 52 | 53 | ```openscad 54 | // Export to CSG from command line 55 | // openscad -o model.csg model.scad 56 | ``` 57 | 58 | ### SCAD (OpenSCAD Source Code) 59 | 60 | The original SCAD file preserves all parametric properties: 61 | 62 | - **Complete Parameterization**: All variables and relationships 63 | - **Code Structure**: Modules, functions, and comments 64 | - **Implementation**: Text file with OpenSCAD code 65 | 66 | ## Printer Compatibility 67 | 68 | ### Prusa Printers 69 | 70 | Prusa printers work well with: 71 | 72 | - **3MF**: Full support in PrusaSlicer 73 | - **AMF**: Good support for materials and colors 74 | - **STL**: Supported but with limitations 75 | 76 | ### Bambu Printers 77 | 78 | Bambu printers work best with: 79 | 80 | - **3MF**: Preferred format for Bambu Lab software 81 | - **AMF**: Well supported 82 | - **STL**: Basic support 83 | 84 | ## Implementation Notes 85 | 86 | When implementing export functionality: 87 | 88 | 1. Use OpenSCAD's command-line interface for reliable exports 89 | 2. Add metadata to 3MF and AMF files for better organization 90 | 3. Test exported files with actual printer software 91 | 4. Validate files before sending to printers 92 | -------------------------------------------------------------------------------- /rtfmd/knowledge/openscad/openscad-basics.md: -------------------------------------------------------------------------------- 1 | # OpenSCAD Basics 2 | 3 | 4 | author: devin-ai-integration 5 | timestamp: 2025-03-21T01:30:00Z 6 | version: 1.0.0 7 | tags: [openscad, 3d-modeling, csg, parametric-design] 8 | 9 | 10 | ## Overview 11 | 12 | OpenSCAD is a programmer's solid 3D CAD modeler that uses a scripting language to define 3D objects. Unlike traditional CAD software that focuses on interactive modeling, OpenSCAD emphasizes programmatic and parametric design. 13 | 14 | ## Key Concepts 15 | 16 | ### Constructive Solid Geometry (CSG) 17 | 18 | OpenSCAD uses CSG operations to create complex models by combining simpler primitives: 19 | 20 | - **Union**: Combines multiple objects (`union() { ... }`) 21 | - **Difference**: Subtracts one object from another (`difference() { ... 
}`) 22 | - **Intersection**: Creates an object from the overlapping portions of other objects (`intersection() { ... }`) 23 | 24 | ### Primitive Shapes 25 | 26 | OpenSCAD provides several built-in primitive shapes: 27 | 28 | - **Cube**: `cube([width, depth, height], center=true/false)` 29 | - **Sphere**: `sphere(r=radius, $fn=segments)` 30 | - **Cylinder**: `cylinder(h=height, r=radius, center=true/false, $fn=segments)` 31 | - **Polyhedron**: For complex shapes with defined faces 32 | 33 | ### Transformations 34 | 35 | Objects can be transformed using: 36 | 37 | - **Translate**: `translate([x, y, z]) { ... }` 38 | - **Rotate**: `rotate([x_deg, y_deg, z_deg]) { ... }` 39 | - **Scale**: `scale([x, y, z]) { ... }` 40 | - **Mirror**: `mirror([x, y, z]) { ... }` 41 | 42 | ### Parametric Design 43 | 44 | OpenSCAD excels at parametric design: 45 | 46 | - Variables can define dimensions and relationships 47 | - Modules can create reusable components with parameters 48 | - Mathematical expressions can define complex relationships 49 | 50 | ## Command Line Usage 51 | 52 | OpenSCAD can be run headless using command-line options: 53 | 54 | - Generate STL: `openscad -o output.stl input.scad` 55 | - Pass parameters: `openscad -D "width=10" -D "height=20" -o output.stl input.scad` 56 | - Generate PNG preview: `openscad --camera=0,0,0,0,0,0,50 --imgsize=800,600 -o preview.png input.scad` 57 | 58 | ## Best Practices 59 | 60 | - Use modules for reusable components 61 | - Parameterize designs for flexibility 62 | - Use descriptive variable names 63 | - Comment code for clarity 64 | - Organize complex designs hierarchically 65 | - Use $fn judiciously for performance 66 | - Ensure models are manifold (watertight) for 3D printing 67 | -------------------------------------------------------------------------------- /rtfmd/knowledge/openscad/primitive-testing.md: -------------------------------------------------------------------------------- 1 | # Testing OpenSCAD Primitives 2 | 3 | 4 | author: devin-ai-integration 5 | timestamp: 2025-03-21T12:00:00Z 6 | version: 1.0.0 7 | tags: [openscad, primitives, testing, 3d-modeling] 8 | 9 | 10 | ## Overview 11 | 12 | Testing OpenSCAD primitives is essential to ensure that the MCP server can reliably generate and export 3D models. This document outlines approaches for programmatically testing primitives and validating their exports. 
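The sections below describe the strategy in detail. As a minimal, self-contained sketch of the core idea (looping primitives over export formats and recording success), something like the following can be used; it assumes the `openscad` binary is on the PATH and a generated `.scad` file per primitive, and it is deliberately simpler than the project's `PrimitiveTester` shown later in this document:

```python
import os
import subprocess

def smoke_test(scad_files: dict, formats=("3mf", "amf", "csg")) -> dict:
    """scad_files maps a primitive name to a .scad path; returns {(name, fmt): ok}."""
    results = {}
    os.makedirs("test_output", exist_ok=True)
    for name, scad_file in scad_files.items():
        for fmt in formats:
            out = os.path.join("test_output", f"{name}.{fmt}")
            proc = subprocess.run(["openscad", "-o", out, scad_file], capture_output=True)
            results[(name, fmt)] = (proc.returncode == 0
                                    and os.path.exists(out)
                                    and os.path.getsize(out) > 0)
    return results

# smoke_test({"cube": "scad/simple_cube.scad"})
```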
13 | 14 | ## Primitive Types 15 | 16 | The OpenSCAD MCP Server supports these primitive types: 17 | 18 | - **Basic Shapes**: cube, sphere, cylinder 19 | - **Complex Shapes**: cone, torus, hexagonal_prism 20 | - **Containers**: hollow_box, rounded_box, tube 21 | - **Text**: 3D text with customizable parameters 22 | 23 | ## Testing Approach 24 | 25 | ### Parameter Testing 26 | 27 | Each primitive should be tested with: 28 | 29 | - **Default Parameters**: Ensure the primitive renders correctly with default values 30 | - **Boundary Values**: Test minimum and maximum reasonable values 31 | - **Special Cases**: Test cases like zero dimensions, negative values 32 | 33 | ### Export Testing 34 | 35 | For each primitive and parameter set: 36 | 37 | - **Export Formats**: Test export to 3MF, AMF, CSG, and SCAD formats 38 | - **Format Validation**: Ensure exported files meet format specifications 39 | - **Metadata Preservation**: Verify that parametric properties are preserved 40 | 41 | ### Integration Testing 42 | 43 | Test the full pipeline: 44 | 45 | - **Natural Language → Parameters**: Test parameter extraction 46 | - **Parameters → OpenSCAD Code**: Test code generation 47 | - **OpenSCAD Code → Export**: Test file export 48 | - **Export → Printer**: Test compatibility with printer software 49 | 50 | ## Validation Criteria 51 | 52 | Exported models should meet these criteria: 53 | 54 | - **Manifold**: Models should be watertight (no holes in the mesh) 55 | - **Valid Format**: Files should validate against format specifications 56 | - **Metadata**: Should contain relevant model metadata 57 | - **Render Performance**: Models should render efficiently 58 | 59 | ## Implementation 60 | 61 | The `PrimitiveTester` class implements this testing approach: 62 | 63 | ```python 64 | # Example usage 65 | tester = PrimitiveTester(code_generator, cad_exporter) 66 | results = tester.test_all_primitives() 67 | 68 | # Test specific primitives 69 | cube_results = tester.test_primitive("cube") 70 | ``` 71 | 72 | ## Printer Compatibility Tests 73 | 74 | Before sending to physical printers: 75 | 76 | 1. Import exports into PrusaSlicer or Bambu Studio 77 | 2. Check for import warnings or errors 78 | 3. Verify that models slice correctly 79 | 4. Test prints with simple examples 80 | -------------------------------------------------------------------------------- /scad/simple_cube.scad: -------------------------------------------------------------------------------- 1 | // Simple cube without dependencies 2 | cube([20, 20, 20], center=true); 3 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/__init__.py -------------------------------------------------------------------------------- /src/__pycache__/__init__.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/__pycache__/__init__.cpython-312.pyc -------------------------------------------------------------------------------- /src/ai/gemini_api.py: -------------------------------------------------------------------------------- 1 | """ 2 | Google Gemini API integration for image generation. 
3 | """ 4 | 5 | import os 6 | import logging 7 | import base64 8 | from typing import Dict, Any, List, Optional 9 | from io import BytesIO 10 | from PIL import Image 11 | import requests 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | class GeminiImageGenerator: 16 | """ 17 | Wrapper for Google Gemini API for generating images. 18 | """ 19 | 20 | def __init__(self, api_key: str, output_dir: str = "output/images"): 21 | """ 22 | Initialize the Gemini image generator. 23 | 24 | Args: 25 | api_key: Google Gemini API key 26 | output_dir: Directory to store generated images 27 | """ 28 | self.api_key = api_key 29 | self.output_dir = output_dir 30 | self.base_url = "https://generativelanguage.googleapis.com/v1beta" 31 | 32 | # Create output directory if it doesn't exist 33 | os.makedirs(output_dir, exist_ok=True) 34 | 35 | def generate_image(self, prompt: str, model: str = "gemini-2.0-flash-exp-image-generation", 36 | output_path: Optional[str] = None, **kwargs) -> Dict[str, Any]: 37 | """ 38 | Generate an image using Google Gemini API. 39 | 40 | Args: 41 | prompt: Text description for image generation 42 | model: Gemini model to use 43 | output_path: Path to save the generated image 44 | **kwargs: Additional parameters for Gemini API 45 | 46 | Returns: 47 | Dictionary containing image data and metadata 48 | """ 49 | logger.info(f"Generating image with prompt: {prompt}") 50 | 51 | try: 52 | # Prepare the request payload 53 | payload = { 54 | "contents": [ 55 | { 56 | "parts": [ 57 | {"text": prompt} 58 | ] 59 | } 60 | ], 61 | "generationConfig": { 62 | "responseModalities": ["Text", "Image"] 63 | } 64 | } 65 | 66 | # Add any additional parameters 67 | for key, value in kwargs.items(): 68 | if key not in payload: 69 | payload[key] = value 70 | 71 | # Make API request 72 | response = requests.post( 73 | f"{self.base_url}/models/{model}:generateContent", 74 | headers={ 75 | "Content-Type": "application/json", 76 | "x-goog-api-key": self.api_key 77 | }, 78 | json=payload 79 | ) 80 | 81 | # Check for errors 82 | response.raise_for_status() 83 | result = response.json() 84 | 85 | # Extract image data 86 | image_data = None 87 | for part in result["candidates"][0]["content"]["parts"]: 88 | if "inlineData" in part: 89 | image_data = base64.b64decode(part["inlineData"]["data"]) 90 | break 91 | 92 | if not image_data: 93 | raise ValueError("No image was generated in the response") 94 | 95 | # Save image if output_path is provided 96 | if not output_path: 97 | # Generate output path if not provided 98 | os.makedirs(self.output_dir, exist_ok=True) 99 | output_path = os.path.join(self.output_dir, f"{prompt[:20].replace(' ', '_')}.png") 100 | 101 | # Save image 102 | image = Image.open(BytesIO(image_data)) 103 | image.save(output_path) 104 | 105 | logger.info(f"Image saved to {output_path}") 106 | 107 | return { 108 | "prompt": prompt, 109 | "model": model, 110 | "local_path": output_path, 111 | "image_data": image_data 112 | } 113 | 114 | except Exception as e: 115 | logger.error(f"Error generating image: {str(e)}") 116 | raise 117 | 118 | def generate_multiple_views(self, prompt: str, num_views: int = 4, 119 | base_image_path: Optional[str] = None, 120 | output_dir: Optional[str] = None) -> List[Dict[str, Any]]: 121 | """ 122 | Generate multiple views of the same 3D object. 
123 | 124 | Args: 125 | prompt: Text description of the object 126 | num_views: Number of views to generate 127 | base_image_path: Optional path to a base image 128 | output_dir: Directory to save the generated images 129 | 130 | Returns: 131 | List of dictionaries containing image data and metadata 132 | """ 133 | if not output_dir: 134 | output_dir = os.path.join(self.output_dir, prompt[:20].replace(' ', '_')) 135 | 136 | os.makedirs(output_dir, exist_ok=True) 137 | 138 | # View directions to include in prompts 139 | view_directions = [ 140 | "front view", "side view from the right", 141 | "side view from the left", "back view", 142 | "top view", "bottom view", "45-degree angle view" 143 | ] 144 | 145 | results = [] 146 | 147 | # Generate images for each view direction 148 | for i in range(min(num_views, len(view_directions))): 149 | view_prompt = f"{prompt} - {view_directions[i]}, same object, consistent style and details" 150 | 151 | # Generate the image 152 | output_path = os.path.join(output_dir, f"view_{i+1}.png") 153 | result = self.generate_image(view_prompt, output_path=output_path) 154 | 155 | # Add view direction to result 156 | result["view_direction"] = view_directions[i] 157 | result["view_index"] = i + 1 158 | 159 | results.append(result) 160 | 161 | return results 162 | -------------------------------------------------------------------------------- /src/ai/venice_api.py: -------------------------------------------------------------------------------- 1 | """ 2 | Venice.ai API client for image generation using the Flux model. 3 | """ 4 | 5 | import os 6 | import requests 7 | import logging 8 | from typing import Dict, Any, Optional, List, Tuple 9 | from pathlib import Path 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | # Venice.ai model mapping and descriptions 14 | VENICE_MODELS = { 15 | # Model name: (aliases, description) 16 | "fluently-xl": ( 17 | ["fast", "quick", "fastest", "speed", "rapid", "efficient"], 18 | "Fastest model (2.30s) with good quality" 19 | ), 20 | "flux-dev": ( 21 | ["high quality", "detailed", "hq", "best quality", "premium"], 22 | "High-quality model with detailed results" 23 | ), 24 | "flux-dev-uncensored": ( 25 | ["uncensored", "unfiltered", "unrestricted"], 26 | "Uncensored version of the flux-dev model" 27 | ), 28 | "stable-diffusion-3.5": ( 29 | ["stable diffusion", "sd3", "sd3.5", "standard"], 30 | "Stable Diffusion 3.5 model" 31 | ), 32 | "pony-realism": ( 33 | ["realistic", "realism", "pony", "photorealistic"], 34 | "Specialized model for realistic outputs" 35 | ), 36 | "lustify-sdxl": ( 37 | ["stylized", "artistic", "creative", "lustify"], 38 | "Artistic stylization model" 39 | ), 40 | } 41 | 42 | class VeniceImageGenerator: 43 | """Client for Venice.ai's image generation API.""" 44 | 45 | def __init__(self, api_key: str, output_dir: str = "output/images"): 46 | """ 47 | Initialize the Venice.ai API client. 
48 | 49 | Args: 50 | api_key: API key for Venice.ai 51 | output_dir: Directory to store generated images 52 | """ 53 | self.api_key = api_key 54 | if not self.api_key: 55 | logger.warning("No Venice.ai API key provided") 56 | 57 | # API endpoint from documentation 58 | self.base_url = "https://api.venice.ai/api/v1" 59 | self.api_endpoint = f"{self.base_url}/image/generate" 60 | self.output_dir = output_dir 61 | 62 | # Create output directory if it doesn't exist 63 | os.makedirs(output_dir, exist_ok=True) 64 | 65 | def map_model_preference(self, preference: str) -> str: 66 | """ 67 | Map a natural language preference to a Venice.ai model name. 68 | 69 | Args: 70 | preference: Natural language description of desired model 71 | 72 | Returns: 73 | Name of the matching Venice.ai model 74 | """ 75 | if not preference or preference.lower() in ["default", "fluently-xl", "fluently xl"]: 76 | return "fluently-xl" 77 | 78 | preference = preference.lower() 79 | 80 | # Check for exact matches first 81 | for model_name in VENICE_MODELS: 82 | if model_name.lower() == preference: 83 | return model_name 84 | 85 | # Check for keyword matches 86 | for model_name, (aliases, _) in VENICE_MODELS.items(): 87 | for alias in aliases: 88 | if alias in preference: 89 | return model_name 90 | 91 | # Default to fluently-xl if no match found 92 | return "fluently-xl" 93 | 94 | def generate_image(self, prompt: str, model: str = "fluently-xl", 95 | width: int = 1024, height: int = 1024, 96 | output_path: Optional[str] = None) -> Dict[str, Any]: 97 | """ 98 | Generate an image using Venice.ai's API. 99 | 100 | Args: 101 | prompt: Text description for image generation 102 | model: Model to use - can be a specific model name or natural language description: 103 | - "fluently-xl" (default): Fastest model (2.30s) with good quality 104 | - "flux-dev": High-quality model with detailed results 105 | - "flux-dev-uncensored": Uncensored version of the flux-dev model 106 | - "stable-diffusion-3.5": Stable Diffusion 3.5 model 107 | - "pony-realism": Specialized model for realistic outputs 108 | - "lustify-sdxl": Artistic stylization model 109 | - Or use natural language like "high quality", "fastest", "realistic", etc. 
110 | width: Image width 111 | height: Image height 112 | output_path: Optional path to save the generated image 113 | 114 | Returns: 115 | Dictionary containing image data and metadata 116 | """ 117 | if not self.api_key: 118 | raise ValueError("Venice.ai API key is required") 119 | 120 | # Map the model preference to a specific model name 121 | mapped_model = self.map_model_preference(model) 122 | 123 | # Prepare request payload 124 | payload = { 125 | "model": mapped_model, 126 | "prompt": prompt, 127 | "height": height, 128 | "width": width, 129 | "steps": 20, 130 | "return_binary": False, 131 | "hide_watermark": True, # Remove watermark as requested 132 | "format": "png", 133 | "embed_exif_metadata": False 134 | } 135 | 136 | # Set up headers with API key 137 | headers = { 138 | "Authorization": f"Bearer {self.api_key}", 139 | "Content-Type": "application/json" 140 | } 141 | 142 | try: 143 | # Make API request 144 | logger.info(f"Sending request to {self.api_endpoint}") 145 | response = requests.post( 146 | self.api_endpoint, 147 | json=payload, 148 | headers=headers 149 | ) 150 | 151 | # Check response status 152 | if response.status_code != 200: 153 | error_msg = f"Error generating image: {response.status_code} - {response.text}" 154 | logger.error(error_msg) 155 | return {"error": error_msg} 156 | 157 | # Process response 158 | result = response.json() 159 | 160 | # Add the mapped model to the result 161 | result["model"] = mapped_model 162 | 163 | # Generate output path if not provided 164 | if not output_path: 165 | # Create a filename based on the prompt 166 | filename = f"{prompt[:20].replace(' ', '_')}_{mapped_model}.png" 167 | output_path = os.path.join(self.output_dir, filename) 168 | 169 | # Save image if images array is in the result 170 | if "images" in result and len(result["images"]) > 0: 171 | image_url = result["images"][0] 172 | self._download_image(image_url, output_path) 173 | result["local_path"] = output_path 174 | result["image_url"] = image_url 175 | 176 | return result 177 | except requests.exceptions.RequestException as e: 178 | logger.error(f"Error generating image with Venice.ai: {str(e)}") 179 | raise 180 | 181 | def _download_image(self, image_url: str, output_path: str) -> None: 182 | """ 183 | Download image from URL and save to local path. 
184 | 185 | Args: 186 | image_url: URL of the image to download 187 | output_path: Path to save the downloaded image 188 | """ 189 | try: 190 | response = requests.get(image_url, stream=True) 191 | response.raise_for_status() 192 | 193 | # Ensure directory exists 194 | os.makedirs(os.path.dirname(output_path), exist_ok=True) 195 | 196 | with open(output_path, 'wb') as f: 197 | for chunk in response.iter_content(chunk_size=8192): 198 | f.write(chunk) 199 | 200 | logger.info(f"Image saved to {output_path}") 201 | except Exception as e: 202 | logger.error(f"Error downloading image: {str(e)}") 203 | raise 204 | -------------------------------------------------------------------------------- /src/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Dict, Any 3 | from dotenv import load_dotenv 4 | 5 | # Load environment variables from .env file 6 | load_dotenv() 7 | 8 | # Base directories 9 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 10 | OUTPUT_DIR = os.path.join(BASE_DIR, "output") 11 | 12 | # Output subdirectories 13 | IMAGES_DIR = os.path.join(OUTPUT_DIR, "images") 14 | MULTI_VIEW_DIR = os.path.join(OUTPUT_DIR, "multi_view") 15 | APPROVED_IMAGES_DIR = os.path.join(OUTPUT_DIR, "approved_images") 16 | MODELS_DIR = os.path.join(OUTPUT_DIR, "models") 17 | SCAD_DIR = os.path.join(BASE_DIR, "scad") 18 | 19 | # Google Gemini API configuration 20 | GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "") # Set via environment variable 21 | GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta" 22 | GEMINI_MODEL = "gemini-2.0-flash-exp-image-generation" # Default model for image generation 23 | 24 | # CUDA Multi-View Stereo configuration (local) 25 | CUDA_MVS_PATH = os.getenv("CUDA_MVS_PATH", os.path.join(BASE_DIR, "cuda-mvs")) 26 | CUDA_MVS_USE_GPU = os.getenv("CUDA_MVS_USE_GPU", "False").lower() == "true" # Default to CPU for macOS compatibility 27 | 28 | # Remote CUDA Multi-View Stereo configuration 29 | REMOTE_CUDA_MVS = { 30 | # General settings 31 | "ENABLED": os.getenv("REMOTE_CUDA_MVS_ENABLED", "True").lower() == "true", 32 | "USE_LAN_DISCOVERY": os.getenv("REMOTE_CUDA_MVS_USE_LAN_DISCOVERY", "True").lower() == "true", 33 | 34 | # Server connection 35 | "SERVER_URL": os.getenv("REMOTE_CUDA_MVS_SERVER_URL", ""), # Empty means use LAN discovery 36 | "API_KEY": os.getenv("REMOTE_CUDA_MVS_API_KEY", ""), 37 | "DISCOVERY_PORT": int(os.getenv("REMOTE_CUDA_MVS_DISCOVERY_PORT", "8765")), 38 | 39 | # Connection parameters 40 | "CONNECTION_TIMEOUT": int(os.getenv("REMOTE_CUDA_MVS_CONNECTION_TIMEOUT", "10")), 41 | "UPLOAD_CHUNK_SIZE": int(os.getenv("REMOTE_CUDA_MVS_UPLOAD_CHUNK_SIZE", "1048576")), # 1MB 42 | "DOWNLOAD_CHUNK_SIZE": int(os.getenv("REMOTE_CUDA_MVS_DOWNLOAD_CHUNK_SIZE", "1048576")), # 1MB 43 | 44 | # Retry and error handling 45 | "MAX_RETRIES": int(os.getenv("REMOTE_CUDA_MVS_MAX_RETRIES", "3")), 46 | "BASE_RETRY_DELAY": float(os.getenv("REMOTE_CUDA_MVS_BASE_RETRY_DELAY", "1.0")), 47 | "MAX_RETRY_DELAY": float(os.getenv("REMOTE_CUDA_MVS_MAX_RETRY_DELAY", "60.0")), 48 | "JITTER_FACTOR": float(os.getenv("REMOTE_CUDA_MVS_JITTER_FACTOR", "0.1")), 49 | 50 | # Health check 51 | "HEALTH_CHECK_INTERVAL": int(os.getenv("REMOTE_CUDA_MVS_HEALTH_CHECK_INTERVAL", "60")), 52 | "CIRCUIT_BREAKER_THRESHOLD": int(os.getenv("REMOTE_CUDA_MVS_CIRCUIT_BREAKER_THRESHOLD", "5")), 53 | "CIRCUIT_BREAKER_RECOVERY_TIMEOUT": float(os.getenv("REMOTE_CUDA_MVS_CIRCUIT_BREAKER_RECOVERY_TIMEOUT", "30.0")), 54 | 55 | 
# Processing parameters 56 | "DEFAULT_RECONSTRUCTION_QUALITY": os.getenv("REMOTE_CUDA_MVS_DEFAULT_QUALITY", "normal"), # low, normal, high 57 | "DEFAULT_OUTPUT_FORMAT": os.getenv("REMOTE_CUDA_MVS_DEFAULT_FORMAT", "obj"), 58 | "WAIT_FOR_COMPLETION": os.getenv("REMOTE_CUDA_MVS_WAIT_FOR_COMPLETION", "True").lower() == "true", 59 | "POLL_INTERVAL": int(os.getenv("REMOTE_CUDA_MVS_POLL_INTERVAL", "5")), 60 | 61 | # Output directories 62 | "OUTPUT_DIR": MODELS_DIR, 63 | "IMAGES_DIR": IMAGES_DIR, 64 | "MULTI_VIEW_DIR": MULTI_VIEW_DIR, 65 | "APPROVED_IMAGES_DIR": APPROVED_IMAGES_DIR, 66 | } 67 | 68 | # Venice.ai API configuration (optional) 69 | VENICE_API_KEY = os.getenv("VENICE_API_KEY", "") # Set via environment variable 70 | VENICE_BASE_URL = "https://api.venice.ai/api/v1" 71 | VENICE_MODEL = "fluently-xl" # Default model for fastest image generation (2.30s) 72 | 73 | # Image approval configuration 74 | IMAGE_APPROVAL = { 75 | "ENABLED": os.getenv("IMAGE_APPROVAL_ENABLED", "True").lower() == "true", 76 | "AUTO_APPROVE": os.getenv("IMAGE_APPROVAL_AUTO_APPROVE", "False").lower() == "true", 77 | "APPROVAL_TIMEOUT": int(os.getenv("IMAGE_APPROVAL_TIMEOUT", "300")), # 5 minutes 78 | "MIN_APPROVED_IMAGES": int(os.getenv("IMAGE_APPROVAL_MIN_IMAGES", "3")), 79 | "APPROVED_IMAGES_DIR": APPROVED_IMAGES_DIR, 80 | } 81 | 82 | # Multi-view to model pipeline configuration 83 | MULTI_VIEW_PIPELINE = { 84 | "DEFAULT_NUM_VIEWS": int(os.getenv("MULTI_VIEW_DEFAULT_NUM_VIEWS", "4")), 85 | "MIN_NUM_VIEWS": int(os.getenv("MULTI_VIEW_MIN_NUM_VIEWS", "3")), 86 | "MAX_NUM_VIEWS": int(os.getenv("MULTI_VIEW_MAX_NUM_VIEWS", "8")), 87 | "VIEW_ANGLES": [0, 90, 180, 270], # Default view angles (degrees) 88 | "OUTPUT_DIR": MULTI_VIEW_DIR, 89 | } 90 | 91 | # Natural language processing configuration for MCP 92 | NLP = { 93 | "ENABLE_INTERACTIVE_PARAMS": os.getenv("NLP_ENABLE_INTERACTIVE_PARAMS", "True").lower() == "true", 94 | "PARAM_EXTRACTION_PROMPT_TEMPLATE": """ 95 | Extract the following parameters from the user's request for 3D model generation: 96 | 97 | 1. Object description 98 | 2. Number of views requested (default: 4) 99 | 3. Reconstruction quality (low, normal, high) 100 | 4. Output format (obj, ply, stl, scad) 101 | 5. Any specific view angles mentioned 102 | 103 | If a parameter is not specified, return the default value or leave blank. 104 | Format the response as a JSON object. 
105 | 106 | User request: {user_request} 107 | """, 108 | } 109 | 110 | # Deprecated configurations (moved to old folder) 111 | # These are kept for reference but not used in the new workflow 112 | DEPRECATED = { 113 | "SAM2_CHECKPOINT_PATH": os.getenv("SAM2_CHECKPOINT_PATH", os.path.join(BASE_DIR, "models", "sam2_vit_b.pth")), 114 | "SAM2_MODEL_TYPE": os.getenv("SAM2_MODEL_TYPE", "vit_b"), 115 | "SAM2_USE_GPU": os.getenv("SAM2_USE_GPU", "False").lower() == "true", 116 | "THREESTUDIO_PATH": os.path.join(BASE_DIR, "threestudio") 117 | } 118 | 119 | # Create necessary directories 120 | for directory in [OUTPUT_DIR, IMAGES_DIR, MULTI_VIEW_DIR, APPROVED_IMAGES_DIR, MODELS_DIR, SCAD_DIR]: 121 | os.makedirs(directory, exist_ok=True) 122 | -------------------------------------------------------------------------------- /src/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/models/__init__.py -------------------------------------------------------------------------------- /src/models/__pycache__/__init__.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/models/__pycache__/__init__.cpython-312.pyc -------------------------------------------------------------------------------- /src/models/__pycache__/code_generator.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/models/__pycache__/code_generator.cpython-312.pyc -------------------------------------------------------------------------------- /src/models/cuda_mvs.py: -------------------------------------------------------------------------------- 1 | """ 2 | CUDA Multi-View Stereo wrapper for 3D reconstruction from multiple images. 3 | """ 4 | 5 | import os 6 | import subprocess 7 | import logging 8 | import json 9 | from typing import Dict, Any, List, Optional 10 | from pathlib import Path 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | class CUDAMultiViewStereo: 15 | """ 16 | Wrapper for CUDA Multi-View Stereo for 3D reconstruction from multiple images. 17 | """ 18 | 19 | def __init__(self, cuda_mvs_path: str, output_dir: str = "output/models"): 20 | """ 21 | Initialize the CUDA MVS wrapper. 22 | 23 | Args: 24 | cuda_mvs_path: Path to CUDA MVS installation 25 | output_dir: Directory to store output files 26 | """ 27 | self.cuda_mvs_path = cuda_mvs_path 28 | self.output_dir = output_dir 29 | 30 | # Create output directory if it doesn't exist 31 | os.makedirs(output_dir, exist_ok=True) 32 | 33 | # Validate installation 34 | self._validate_installation() 35 | 36 | def _validate_installation(self) -> None: 37 | """ 38 | Validate CUDA MVS installation. 
39 | 40 | Raises: 41 | FileNotFoundError: If CUDA MVS installation is not found 42 | """ 43 | if not os.path.exists(self.cuda_mvs_path): 44 | raise FileNotFoundError(f"CUDA MVS not found at {self.cuda_mvs_path}") 45 | 46 | # Check for required executables 47 | required_files = ["app_patch_match_mvs"] 48 | for file in required_files: 49 | exec_path = os.path.join(self.cuda_mvs_path, "build", file) 50 | if not os.path.exists(exec_path): 51 | raise FileNotFoundError(f"Required executable {file} not found at {exec_path}") 52 | 53 | def generate_model_from_images(self, image_paths: List[str], 54 | camera_params: Optional[Dict[str, Any]] = None, 55 | output_name: str = "model") -> Dict[str, Any]: 56 | """ 57 | Generate a 3D model from multiple images using CUDA MVS. 58 | 59 | Args: 60 | image_paths: List of paths to input images 61 | camera_params: Optional camera parameters 62 | output_name: Name for the output files 63 | 64 | Returns: 65 | Dictionary containing paths to generated model files 66 | """ 67 | try: 68 | # Create a unique directory for this reconstruction 69 | model_dir = os.path.join(self.output_dir, output_name) 70 | os.makedirs(model_dir, exist_ok=True) 71 | 72 | # Create a camera parameters file if provided 73 | params_file = None 74 | if camera_params: 75 | params_file = os.path.join(model_dir, "camera_params.json") 76 | with open(params_file, 'w') as f: 77 | json.dump(camera_params, f, indent=2) 78 | 79 | # Generate camera parameters if not provided 80 | if not params_file: 81 | params_file = self._generate_camera_params(image_paths, model_dir) 82 | 83 | # Generate point cloud 84 | point_cloud_file = os.path.join(model_dir, f"{output_name}.ply") 85 | 86 | # Run CUDA MVS 87 | cmd = [ 88 | os.path.join(self.cuda_mvs_path, "build", "app_patch_match_mvs"), 89 | "--image_dir", os.path.dirname(image_paths[0]), 90 | "--camera_params", params_file, 91 | "--output_file", point_cloud_file 92 | ] 93 | 94 | logger.info(f"Running CUDA MVS with command: {' '.join(cmd)}") 95 | 96 | process = subprocess.Popen( 97 | cmd, 98 | stdout=subprocess.PIPE, 99 | stderr=subprocess.PIPE, 100 | text=True 101 | ) 102 | 103 | # Wait for process to complete 104 | stdout, stderr = process.communicate() 105 | 106 | if process.returncode != 0: 107 | logger.error(f"Error running CUDA MVS: {stderr}") 108 | raise RuntimeError(f"CUDA MVS failed with exit code {process.returncode}") 109 | 110 | # Check if output file was created 111 | if not os.path.exists(point_cloud_file): 112 | raise FileNotFoundError(f"Output point cloud file not found at {point_cloud_file}") 113 | 114 | return { 115 | "model_id": output_name, 116 | "output_dir": model_dir, 117 | "point_cloud_file": point_cloud_file, 118 | "camera_params_file": params_file, 119 | "input_images": image_paths 120 | } 121 | 122 | except Exception as e: 123 | logger.error(f"Error generating 3D model with CUDA MVS: {str(e)}") 124 | raise 125 | 126 | def _generate_camera_params(self, image_paths: List[str], model_dir: str) -> str: 127 | """ 128 | Generate camera parameters from images. 
129 | 130 | Args: 131 | image_paths: List of paths to input images 132 | model_dir: Directory to save parameter file 133 | 134 | Returns: 135 | Path to camera parameters file 136 | """ 137 | # This is a simplified version for demonstration 138 | # In a real implementation, this would use SfM or camera estimation 139 | 140 | params = [] 141 | for i, img_path in enumerate(image_paths): 142 | # Extract image dimensions 143 | from PIL import Image 144 | with Image.open(img_path) as img: 145 | width, height = img.size 146 | 147 | # Generate simple camera parameters 148 | # In reality, these would be estimated from the images 149 | # or provided by the user 150 | params.append({ 151 | "image_id": i, 152 | "image_name": os.path.basename(img_path), 153 | "width": width, 154 | "height": height, 155 | "camera": { 156 | "model": "PINHOLE", 157 | "focal_length": min(width, height), 158 | "principal_point": [width / 2, height / 2], 159 | "rotation": [1, 0, 0, 0, 1, 0, 0, 0, 1], 160 | "translation": [0, 0, 0] 161 | } 162 | }) 163 | 164 | # Write parameters to file 165 | params_file = os.path.join(model_dir, "camera_params.json") 166 | with open(params_file, 'w') as f: 167 | json.dump(params, f, indent=2) 168 | 169 | return params_file 170 | 171 | def convert_ply_to_obj(self, ply_file: str, output_dir: Optional[str] = None) -> str: 172 | """ 173 | Convert PLY point cloud to OBJ mesh. 174 | 175 | Args: 176 | ply_file: Path to input PLY file 177 | output_dir: Directory to save output OBJ file 178 | 179 | Returns: 180 | Path to output OBJ file 181 | """ 182 | # In a real implementation, this would use a mesh reconstruction library 183 | # such as Open3D or PyMeshLab to convert the point cloud to a mesh 184 | 185 | if not output_dir: 186 | output_dir = os.path.dirname(ply_file) 187 | 188 | # Generate output file path 189 | obj_file = os.path.join(output_dir, f"{Path(ply_file).stem}.obj") 190 | 191 | logger.info(f"Converting PLY to OBJ: {ply_file} -> {obj_file}") 192 | 193 | # This is a placeholder for the actual conversion 194 | # In a real implementation, you would use a library like Open3D: 195 | # import open3d as o3d 196 | # pcd = o3d.io.read_point_cloud(ply_file) 197 | # mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd)[0] 198 | # o3d.io.write_triangle_mesh(obj_file, mesh) 199 | 200 | # For now, we'll just create a dummy OBJ file 201 | with open(obj_file, 'w') as f: 202 | f.write(f"# Converted from {os.path.basename(ply_file)}\n") 203 | f.write("# This is a placeholder OBJ file\n") 204 | f.write("v 0 0 0\n") 205 | f.write("v 1 0 0\n") 206 | f.write("v 0 1 0\n") 207 | f.write("f 1 2 3\n") 208 | 209 | return obj_file 210 | -------------------------------------------------------------------------------- /src/models/scad_templates/basic_shapes.scad: -------------------------------------------------------------------------------- 1 | // Basic Shapes Library for OpenSCAD MCP Server 2 | // Contains reusable modules for common geometric patterns 3 | 4 | // Basic cube with optional center parameter 5 | module parametric_cube(width=10, depth=10, height=10, center=false) { 6 | cube([width, depth, height], center=center); 7 | } 8 | 9 | // Basic sphere with customizable segments 10 | module parametric_sphere(radius=10, segments=32) { 11 | $fn = segments; 12 | sphere(r=radius); 13 | } 14 | 15 | // Basic cylinder with customizable segments 16 | module parametric_cylinder(radius=10, height=20, center=false, segments=32) { 17 | $fn = segments; 18 | cylinder(h=height, r=radius, center=center); 19 | } 
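// Illustrative usage of the primitives above (examples only, not part of the library):
//   parametric_cube(width=20, depth=10, height=5, center=true);
//   parametric_sphere(radius=8, segments=64);
//   parametric_cylinder(radius=5, height=30);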
20 | 
21 | // Hollow box with customizable wall thickness
22 | module hollow_box(width=30, depth=20, height=15, thickness=2) {
23 |     difference() {
24 |         cube([width, depth, height]);
25 |         translate([thickness, thickness, thickness])
26 |             cube([width - 2*thickness, depth - 2*thickness, height - thickness]);
27 |     }
28 | }
29 | 
30 | // Rounded box with customizable corner radius
31 | module rounded_box(width=30, depth=20, height=15, radius=3, segments=32) {
32 |     $fn = segments;
33 |     hull() {
34 |         translate([radius, radius, radius])
35 |             sphere(r=radius);
36 | 
37 |         translate([width-radius, radius, radius])
38 |             sphere(r=radius);
39 | 
40 |         translate([radius, depth-radius, radius])
41 |             sphere(r=radius);
42 | 
43 |         translate([width-radius, depth-radius, radius])
44 |             sphere(r=radius);
45 | 
46 |         translate([radius, radius, height-radius])
47 |             sphere(r=radius);
48 | 
49 |         translate([width-radius, radius, height-radius])
50 |             sphere(r=radius);
51 | 
52 |         translate([radius, depth-radius, height-radius])
53 |             sphere(r=radius);
54 | 
55 |         translate([width-radius, depth-radius, height-radius])
56 |             sphere(r=radius);
57 |     }
58 | }
59 | 
60 | // Rounded hollow box (container with rounded corners)
61 | module rounded_container(width=30, depth=20, height=15, radius=3, thickness=2, segments=32) {
62 |     $fn = segments;
63 |     difference() {
64 |         rounded_box(width, depth, height, radius, segments);
65 |         translate([thickness, thickness, thickness])
66 |             rounded_box(
67 |                 width - 2*thickness,
68 |                 depth - 2*thickness,
69 |                 height - thickness + 0.01, // Slight overlap to ensure clean difference
70 |                 radius - thickness > 0 ? radius - thickness : 0.1,
71 |                 segments
72 |             );
73 |     }
74 | }
75 | 
76 | // Tube (hollow cylinder)
77 | module tube(outer_radius=10, inner_radius=8, height=20, center=false, segments=32) {
78 |     $fn = segments;
79 |     difference() {
80 |         cylinder(h=height, r=outer_radius, center=center);
81 |         cylinder(h=height+0.01, r=inner_radius, center=center);
82 |     }
83 | }
84 | 
85 | // Cone
86 | module cone(bottom_radius=10, top_radius=0, height=20, center=false, segments=32) {
87 |     $fn = segments;
88 |     cylinder(h=height, r1=bottom_radius, r2=top_radius, center=center);
89 | }
90 | 
91 | // Wedge (triangular prism)
92 | module wedge(width=20, depth=20, height=10) {
93 |     polyhedron(
94 |         points=[
95 |             [0,0,0], [width,0,0], [width,depth,0], [0,depth,0], // bottom rectangle
96 |             [0,0,height], [width,0,height] // top edge above the front face
97 |         ],
98 |         faces=[
99 |             [0,1,2,3], // bottom
100 |             [0,4,5,1], // front (vertical face at y=0)
101 |             [1,5,2], // right
102 |             [3,2,5,4], // sloped top
103 |             [0,3,4] // left
104 |         ]
105 |     );
106 | }
107 | 
108 | // Rounded cylinder (cylinder with rounded top and bottom)
109 | module rounded_cylinder(radius=10, height=20, corner_radius=2, center=false, segments=32) {
110 |     $fn = segments;
111 |     hull() {
112 |         translate([0, 0, corner_radius])
113 |             cylinder(h=height - 2*corner_radius, r=radius, center=center);
114 | 
115 |         translate([0, 0, corner_radius])
116 |             rotate_extrude()
117 |                 translate([radius - corner_radius, 0, 0])
118 |                     circle(r=corner_radius);
119 | 
120 |         translate([0, 0, height - corner_radius])
121 |             rotate_extrude()
122 |                 translate([radius - corner_radius, 0, 0])
123 |                     circle(r=corner_radius);
124 |     }
125 | }
126 | 
127 | // Torus (donut shape)
128 | module torus(outer_radius=20, inner_radius=5, segments=32) {
129 |     $fn = segments;
130 |     rotate_extrude()
131 |         translate([outer_radius - inner_radius, 0, 0])
132 |             circle(r=inner_radius);
133 | }
134 | 
135 | // Hexagonal prism
136 | module hexagonal_prism(radius=10, height=20, center=false) {
137 | 
cylinder(h=height, r=radius, $fn=6, center=center); 138 | } 139 | 140 | // Text with customizable parameters 141 | module text_3d(text="OpenSCAD", size=10, height=3, font="Liberation Sans", halign="center", valign="center") { 142 | linear_extrude(height=height) 143 | text(text=text, size=size, font=font, halign=halign, valign=valign); 144 | } 145 | -------------------------------------------------------------------------------- /src/nlp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/nlp/__init__.py -------------------------------------------------------------------------------- /src/nlp/__pycache__/__init__.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/nlp/__pycache__/__init__.cpython-312.pyc -------------------------------------------------------------------------------- /src/nlp/__pycache__/parameter_extractor.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/nlp/__pycache__/parameter_extractor.cpython-312.pyc -------------------------------------------------------------------------------- /src/openscad_wrapper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/openscad_wrapper/__init__.py -------------------------------------------------------------------------------- /src/openscad_wrapper/__pycache__/__init__.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/openscad_wrapper/__pycache__/__init__.cpython-312.pyc -------------------------------------------------------------------------------- /src/openscad_wrapper/__pycache__/wrapper.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/openscad_wrapper/__pycache__/wrapper.cpython-312.pyc -------------------------------------------------------------------------------- /src/printer_discovery/__init__.py: -------------------------------------------------------------------------------- 1 | # Printer discovery package 2 | -------------------------------------------------------------------------------- /src/testing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/testing/__init__.py -------------------------------------------------------------------------------- /src/testing/__pycache__/__init__.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/testing/__pycache__/__init__.cpython-312.pyc -------------------------------------------------------------------------------- 
/src/testing/__pycache__/primitive_tester.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/testing/__pycache__/primitive_tester.cpython-312.pyc -------------------------------------------------------------------------------- /src/testing/__pycache__/test_primitives.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/testing/__pycache__/test_primitives.cpython-312.pyc -------------------------------------------------------------------------------- /src/testing/primitive_tester.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from typing import Dict, Any, List, Optional, Tuple 4 | 5 | from src.models.code_generator import OpenSCADCodeGenerator 6 | from src.utils.cad_exporter import CADExporter 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | class PrimitiveTester: 11 | """Tests OpenSCAD primitives with different export formats.""" 12 | 13 | def __init__(self, code_generator: OpenSCADCodeGenerator, cad_exporter: CADExporter, 14 | output_dir: str = "test_output"): 15 | """ 16 | Initialize the primitive tester. 17 | 18 | Args: 19 | code_generator: CodeGenerator instance for generating OpenSCAD code 20 | cad_exporter: CADExporter instance for exporting models 21 | output_dir: Directory to store test output 22 | """ 23 | self.code_generator = code_generator 24 | self.cad_exporter = cad_exporter 25 | self.output_dir = output_dir 26 | 27 | # Create output directory 28 | os.makedirs(output_dir, exist_ok=True) 29 | 30 | # Primitive types to test 31 | self.primitives = [ 32 | "cube", "sphere", "cylinder", "cone", "torus", 33 | "rounded_box", "hexagonal_prism", "text" 34 | ] 35 | 36 | # Export formats to test (no STL per requirements) 37 | self.formats = ["3mf", "amf", "csg", "scad"] 38 | 39 | def test_all_primitives(self) -> Dict[str, Dict[str, Any]]: 40 | """ 41 | Test all primitives with all formats. 42 | 43 | Returns: 44 | Dictionary of test results for each primitive 45 | """ 46 | results = {} 47 | 48 | for primitive in self.primitives: 49 | results[primitive] = self.test_primitive(primitive) 50 | 51 | return results 52 | 53 | def test_primitive(self, primitive_type: str) -> Dict[str, Any]: 54 | """ 55 | Test a single primitive with all formats. 
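
        Illustrative call (collaborators wired as in the test runner below):
            tester = PrimitiveTester(code_generator, cad_exporter)
            results = tester.test_primitive("cube")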
56 | 57 | Args: 58 | primitive_type: Type of primitive to test 59 | 60 | Returns: 61 | Dictionary of test results for the primitive 62 | """ 63 | results = { 64 | "primitive": primitive_type, 65 | "formats": {} 66 | } 67 | 68 | # Generate default parameters for the primitive 69 | params = self._get_default_parameters(primitive_type) 70 | 71 | # Generate the SCAD code 72 | scad_file = self.code_generator.generate_code(primitive_type, params) 73 | 74 | # Test export to each format 75 | for format_type in self.formats: 76 | success, output_file, error = self.cad_exporter.export_model( 77 | scad_file, 78 | format_type, 79 | params, 80 | metadata={"primitive_type": primitive_type} 81 | ) 82 | 83 | results["formats"][format_type] = { 84 | "success": success, 85 | "output_file": output_file, 86 | "error": error 87 | } 88 | 89 | return results 90 | 91 | def _get_default_parameters(self, primitive_type: str) -> Dict[str, Any]: 92 | """ 93 | Get default parameters for a primitive type. 94 | 95 | Args: 96 | primitive_type: Type of primitive 97 | 98 | Returns: 99 | Dictionary of default parameters 100 | """ 101 | params = {} 102 | 103 | if primitive_type == "cube": 104 | params = {"width": 20, "depth": 20, "height": 20, "center": True} 105 | elif primitive_type == "sphere": 106 | params = {"radius": 10, "segments": 32} 107 | elif primitive_type == "cylinder": 108 | params = {"radius": 10, "height": 20, "center": True, "segments": 32} 109 | elif primitive_type == "cone": 110 | params = {"bottom_radius": 10, "top_radius": 0, "height": 20, "center": True} 111 | elif primitive_type == "torus": 112 | params = {"outer_radius": 20, "inner_radius": 5, "segments": 32} 113 | elif primitive_type == "rounded_box": 114 | params = {"width": 30, "depth": 20, "height": 15, "radius": 3} 115 | elif primitive_type == "hexagonal_prism": 116 | params = {"radius": 10, "height": 20} 117 | elif primitive_type == "text": 118 | params = {"text": "OpenSCAD", "size": 10, "height": 3} 119 | 120 | return params 121 | 122 | def test_with_parameter_variations(self, primitive_type: str) -> Dict[str, Any]: 123 | """ 124 | Test a primitive with variations of parameters. 125 | 126 | Args: 127 | primitive_type: Type of primitive to test 128 | 129 | Returns: 130 | Dictionary of test results for different parameter variations 131 | """ 132 | results = { 133 | "primitive": primitive_type, 134 | "variations": {} 135 | } 136 | 137 | # Define parameter variations for the primitive 138 | variations = self._get_parameter_variations(primitive_type) 139 | 140 | # Test each variation 141 | for variation_name, params in variations.items(): 142 | # Generate the SCAD code 143 | scad_file = self.code_generator.generate_code(primitive_type, params) 144 | 145 | # Test export to each format 146 | format_results = {} 147 | for format_type in self.formats: 148 | success, output_file, error = self.cad_exporter.export_model( 149 | scad_file, 150 | format_type, 151 | params, 152 | metadata={"primitive_type": primitive_type, "variation": variation_name} 153 | ) 154 | 155 | format_results[format_type] = { 156 | "success": success, 157 | "output_file": output_file, 158 | "error": error 159 | } 160 | 161 | results["variations"][variation_name] = { 162 | "parameters": params, 163 | "formats": format_results 164 | } 165 | 166 | return results 167 | 168 | def _get_parameter_variations(self, primitive_type: str) -> Dict[str, Dict[str, Any]]: 169 | """ 170 | Get parameter variations for a primitive type. 
171 | 172 | Args: 173 | primitive_type: Type of primitive 174 | 175 | Returns: 176 | Dictionary of parameter variations 177 | """ 178 | variations = {} 179 | 180 | if primitive_type == "cube": 181 | variations = { 182 | "small": {"width": 5, "depth": 5, "height": 5, "center": True}, 183 | "large": {"width": 50, "depth": 50, "height": 50, "center": True}, 184 | "flat": {"width": 50, "depth": 50, "height": 2, "center": True}, 185 | "tall": {"width": 10, "depth": 10, "height": 100, "center": True} 186 | } 187 | elif primitive_type == "sphere": 188 | variations = { 189 | "small": {"radius": 2, "segments": 16}, 190 | "large": {"radius": 30, "segments": 64}, 191 | "low_res": {"radius": 10, "segments": 8}, 192 | "high_res": {"radius": 10, "segments": 128} 193 | } 194 | elif primitive_type == "cylinder": 195 | variations = { 196 | "small": {"radius": 2, "height": 5, "center": True, "segments": 16}, 197 | "large": {"radius": 30, "height": 50, "center": True, "segments": 64}, 198 | "thin": {"radius": 1, "height": 50, "center": True, "segments": 32}, 199 | "disc": {"radius": 30, "height": 2, "center": True, "segments": 32} 200 | } 201 | # Add variations for other primitives as needed 202 | 203 | return variations 204 | -------------------------------------------------------------------------------- /src/testing/test_primitives.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import logging 4 | import json 5 | from typing import Dict, Any, List 6 | 7 | from src.models.code_generator import OpenSCADCodeGenerator 8 | from src.utils.cad_exporter import CADExporter 9 | from src.utils.format_validator import FormatValidator 10 | from src.testing.primitive_tester import PrimitiveTester 11 | 12 | # Configure logging 13 | logging.basicConfig( 14 | level=logging.INFO, 15 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 16 | ) 17 | logger = logging.getLogger(__name__) 18 | 19 | def main(): 20 | parser = argparse.ArgumentParser(description='Test OpenSCAD primitives with different export formats') 21 | parser.add_argument('--output-dir', default='test_output', help='Directory to store test output') 22 | parser.add_argument('--formats', nargs='+', default=['3mf', 'amf', 'csg', 'scad'], 23 | help='Formats to test (default: 3mf amf csg scad)') 24 | parser.add_argument('--primitives', nargs='+', 25 | help='Primitives to test (default: all)') 26 | parser.add_argument('--validate', action='store_true', 27 | help='Validate exported files') 28 | parser.add_argument('--printer-type', choices=['prusa', 'bambu'], default='prusa', 29 | help='Printer type to check compatibility with (default: prusa)') 30 | 31 | args = parser.parse_args() 32 | 33 | # Create directories 34 | os.makedirs("scad", exist_ok=True) 35 | os.makedirs(args.output_dir, exist_ok=True) 36 | 37 | # Initialize components 38 | # Use absolute path for templates to avoid path issues 39 | templates_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../models/scad_templates")) 40 | code_generator = OpenSCADCodeGenerator(templates_dir, "scad") 41 | cad_exporter = CADExporter() 42 | 43 | # Initialize tester 44 | tester = PrimitiveTester(code_generator, cad_exporter, args.output_dir) 45 | 46 | # Override formats if specified 47 | if args.formats: 48 | tester.formats = args.formats 49 | 50 | # Test primitives 51 | if args.primitives: 52 | results = {} 53 | for primitive in args.primitives: 54 | results[primitive] = tester.test_primitive(primitive) 55 | else: 56 | 
results = tester.test_all_primitives() 57 | 58 | # Print results 59 | logger.info(f"Test results: {json.dumps(results, indent=2)}") 60 | 61 | # Validate exported files if requested 62 | if args.validate: 63 | validator = FormatValidator() 64 | validation_results = {} 65 | 66 | for primitive, primitive_results in results.items(): 67 | validation_results[primitive] = {} 68 | 69 | for format_type, format_results in primitive_results["formats"].items(): 70 | if format_results["success"] and format_type in ['3mf', 'amf']: 71 | output_file = format_results["output_file"] 72 | 73 | if format_type == '3mf': 74 | is_valid, error = validator.validate_3mf(output_file) 75 | elif format_type == 'amf': 76 | is_valid, error = validator.validate_amf(output_file) 77 | else: 78 | is_valid, error = False, "Validation not supported for this format" 79 | 80 | # Check printer compatibility 81 | is_compatible, compat_error = validator.check_printer_compatibility( 82 | output_file, args.printer_type 83 | ) 84 | 85 | metadata = validator.extract_metadata(output_file) 86 | 87 | validation_results[primitive][format_type] = { 88 | "is_valid": is_valid, 89 | "error": error, 90 | "is_compatible_with_printer": is_compatible, 91 | "compatibility_error": compat_error, 92 | "metadata": metadata 93 | } 94 | 95 | logger.info(f"Validation results: {json.dumps(validation_results, indent=2)}") 96 | 97 | if __name__ == "__main__": 98 | main() 99 | -------------------------------------------------------------------------------- /src/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Utils package 2 | -------------------------------------------------------------------------------- /src/utils/__pycache__/__init__.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/utils/__pycache__/__init__.cpython-312.pyc -------------------------------------------------------------------------------- /src/utils/__pycache__/stl_exporter.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/utils/__pycache__/stl_exporter.cpython-312.pyc -------------------------------------------------------------------------------- /src/utils/__pycache__/stl_validator.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/utils/__pycache__/stl_validator.cpython-312.pyc -------------------------------------------------------------------------------- /src/utils/cad_exporter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import subprocess 4 | from typing import Dict, Any, Optional, Tuple, List 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | class CADExporter: 9 | """ 10 | Exports OpenSCAD models to various CAD formats that preserve parametric properties. 11 | """ 12 | 13 | def __init__(self, openscad_path: str = "openscad"): 14 | """ 15 | Initialize the CAD exporter. 
16 | 17 | Args: 18 | openscad_path: Path to the OpenSCAD executable 19 | """ 20 | self.openscad_path = openscad_path 21 | 22 | # Supported export formats 23 | self.supported_formats = { 24 | "csg": "OpenSCAD CSG format (preserves all parametric properties)", 25 | "amf": "Additive Manufacturing File Format (preserves some metadata)", 26 | "3mf": "3D Manufacturing Format (modern replacement for STL with metadata)", 27 | "scad": "OpenSCAD source code (fully parametric)", 28 | "dxf": "Drawing Exchange Format (for 2D designs)", 29 | "svg": "Scalable Vector Graphics (for 2D designs)" 30 | } 31 | 32 | def export_model(self, scad_file: str, output_format: str = "csg", 33 | parameters: Optional[Dict[str, Any]] = None, 34 | metadata: Optional[Dict[str, Any]] = None) -> Tuple[bool, str, Optional[str]]: 35 | """ 36 | Export an OpenSCAD model to the specified format. 37 | 38 | Args: 39 | scad_file: Path to the SCAD file 40 | output_format: Format to export to (csg, amf, 3mf, etc.) 41 | parameters: Optional parameters to override in the SCAD file 42 | metadata: Optional metadata to include in the export 43 | 44 | Returns: 45 | Tuple of (success, output_file_path, error_message) 46 | """ 47 | if not os.path.exists(scad_file): 48 | return False, "", f"SCAD file not found: {scad_file}" 49 | 50 | # Create output file path 51 | output_dir = os.path.dirname(scad_file) 52 | model_id = os.path.basename(scad_file).split('.')[0] 53 | 54 | # Special case for SCAD format - just copy the file with parameters embedded 55 | if output_format.lower() == "scad" and parameters: 56 | return self._export_parametric_scad(scad_file, parameters, metadata) 57 | 58 | # For native OpenSCAD formats 59 | output_file = os.path.join(output_dir, f"{model_id}.{output_format.lower()}") 60 | 61 | # Build command 62 | cmd = [self.openscad_path, "-o", output_file] 63 | 64 | # Add parameters if provided 65 | if parameters: 66 | for key, value in parameters.items(): 67 | cmd.extend(["-D", f"{key}={value}"]) 68 | 69 | # Add input file 70 | cmd.append(scad_file) 71 | 72 | try: 73 | # Run OpenSCAD 74 | result = subprocess.run(cmd, check=True, capture_output=True, text=True) 75 | 76 | # Check if file was created 77 | if os.path.exists(output_file) and os.path.getsize(output_file) > 0: 78 | logger.info(f"Exported model to {output_format}: {output_file}") 79 | 80 | # Add metadata if supported and provided 81 | if metadata and output_format.lower() in ["amf", "3mf"]: 82 | self._add_metadata_to_file(output_file, metadata, output_format) 83 | 84 | return True, output_file, None 85 | else: 86 | error_msg = f"Failed to export model to {output_format}" 87 | logger.error(error_msg) 88 | logger.error(f"OpenSCAD output: {result.stdout}") 89 | logger.error(f"OpenSCAD error: {result.stderr}") 90 | return False, "", error_msg 91 | except subprocess.CalledProcessError as e: 92 | error_msg = f"Error exporting model to {output_format}: {e.stderr}" 93 | logger.error(error_msg) 94 | return False, "", error_msg 95 | except Exception as e: 96 | error_msg = f"Error exporting model to {output_format}: {str(e)}" 97 | logger.error(error_msg) 98 | return False, "", error_msg 99 | 100 | def _export_parametric_scad(self, scad_file: str, parameters: Dict[str, Any], 101 | metadata: Optional[Dict[str, Any]] = None) -> Tuple[bool, str, Optional[str]]: 102 | """ 103 | Create a new SCAD file with parameters embedded as variables. 
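# Usage sketch (illustrative): exporting an existing SCAD file to 3MF with parameter
# overrides and metadata via export_model above. The parameter name "size" is an
# assumption about scad/simple_cube.scad; substitute whatever variables your model defines.
from src.utils.cad_exporter import CADExporter

exporter = CADExporter()  # assumes the "openscad" binary is on PATH
success, output_file, error = exporter.export_model(
    "scad/simple_cube.scad",
    output_format="3mf",
    parameters={"size": 25},                      # passed to OpenSCAD as -D size=25
    metadata={"generator": "OpenSCAD-MCP-Server"},
)
print(output_file if success else f"Export failed: {error}")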
104 | 105 | Args: 106 | scad_file: Path to the original SCAD file 107 | parameters: Parameters to embed in the SCAD file 108 | metadata: Optional metadata to include as comments 109 | 110 | Returns: 111 | Tuple of (success, output_file_path, error_message) 112 | """ 113 | try: 114 | # Read the original SCAD file 115 | with open(scad_file, 'r') as f: 116 | content = f.read() 117 | 118 | # Create output file path 119 | output_dir = os.path.dirname(scad_file) 120 | model_id = os.path.basename(scad_file).split('.')[0] 121 | output_file = os.path.join(output_dir, f"{model_id}_parametric.scad") 122 | 123 | # Create parameter declarations 124 | param_declarations = [] 125 | for key, value in parameters.items(): 126 | if isinstance(value, str): 127 | param_declarations.append(f'{key} = "{value}";') 128 | else: 129 | param_declarations.append(f'{key} = {value};') 130 | 131 | # Create metadata comments 132 | metadata_comments = [] 133 | if metadata: 134 | metadata_comments.append("// Metadata:") 135 | for key, value in metadata.items(): 136 | metadata_comments.append(f"// {key}: {value}") 137 | 138 | # Combine everything 139 | new_content = "// Parametric model generated by OpenSCAD MCP Server\n" 140 | new_content += "\n".join(metadata_comments) + "\n\n" if metadata_comments else "\n" 141 | new_content += "// Parameters:\n" 142 | new_content += "\n".join(param_declarations) + "\n\n" 143 | new_content += content 144 | 145 | # Write to the new file 146 | with open(output_file, 'w') as f: 147 | f.write(new_content) 148 | 149 | logger.info(f"Exported parametric SCAD file: {output_file}") 150 | return True, output_file, None 151 | except Exception as e: 152 | error_msg = f"Error creating parametric SCAD file: {str(e)}" 153 | logger.error(error_msg) 154 | return False, "", error_msg 155 | 156 | def _add_metadata_to_file(self, file_path: str, metadata: Dict[str, Any], format_type: str) -> None: 157 | """ 158 | Add metadata to supported file formats. 
159 | 160 | Args: 161 | file_path: Path to the file 162 | metadata: Metadata to add 163 | format_type: File format 164 | """ 165 | if format_type.lower() == "amf": 166 | self._add_metadata_to_amf(file_path, metadata) 167 | elif format_type.lower() == "3mf": 168 | self._add_metadata_to_3mf(file_path, metadata) 169 | 170 | def _add_metadata_to_amf(self, file_path: str, metadata: Dict[str, Any]) -> None: 171 | """Add metadata to AMF file.""" 172 | try: 173 | import xml.etree.ElementTree as ET 174 | 175 | # Parse the AMF file 176 | tree = ET.parse(file_path) 177 | root = tree.getroot() 178 | 179 | # Find or create metadata element 180 | metadata_elem = root.find("metadata") 181 | if metadata_elem is None: 182 | metadata_elem = ET.SubElement(root, "metadata") 183 | 184 | # Add metadata 185 | for key, value in metadata.items(): 186 | meta = ET.SubElement(metadata_elem, "meta", name=key) 187 | meta.text = str(value) 188 | 189 | # Write back to file 190 | tree.write(file_path) 191 | logger.info(f"Added metadata to AMF file: {file_path}") 192 | except Exception as e: 193 | logger.error(f"Error adding metadata to AMF file: {str(e)}") 194 | 195 | def _add_metadata_to_3mf(self, file_path: str, metadata: Dict[str, Any]) -> None: 196 | """Add metadata to 3MF file.""" 197 | try: 198 | import zipfile 199 | import xml.etree.ElementTree as ET 200 | 201 | # 3MF files are ZIP archives 202 | with zipfile.ZipFile(file_path, 'a') as z: 203 | # Check if metadata file exists 204 | metadata_path = "Metadata/model_metadata.xml" 205 | try: 206 | z.getinfo(metadata_path) 207 | # Extract existing metadata 208 | with z.open(metadata_path) as f: 209 | tree = ET.parse(f) 210 | root = tree.getroot() 211 | except KeyError: 212 | # Create new metadata file 213 | root = ET.Element("metadata") 214 | tree = ET.ElementTree(root) 215 | 216 | # Add metadata 217 | for key, value in metadata.items(): 218 | meta = ET.SubElement(root, "meta", name=key) 219 | meta.text = str(value) 220 | 221 | # Write metadata to a temporary file 222 | temp_path = file_path + ".metadata.tmp" 223 | tree.write(temp_path) 224 | 225 | # Add to ZIP 226 | z.write(temp_path, metadata_path) 227 | 228 | # Remove temporary file 229 | os.remove(temp_path) 230 | 231 | logger.info(f"Added metadata to 3MF file: {file_path}") 232 | except Exception as e: 233 | logger.error(f"Error adding metadata to 3MF file: {str(e)}") 234 | 235 | def get_supported_formats(self) -> List[str]: 236 | """Get list of supported export formats.""" 237 | return list(self.supported_formats.keys()) 238 | 239 | def get_format_description(self, format_name: str) -> str: 240 | """Get description of a format.""" 241 | return self.supported_formats.get(format_name.lower(), "Unknown format") 242 | -------------------------------------------------------------------------------- /src/utils/format_validator.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import zipfile 4 | import xml.etree.ElementTree as ET 5 | from typing import Tuple, Optional, Dict, Any 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class FormatValidator: 10 | """Validates 3D model formats for compatibility with printers.""" 11 | 12 | @staticmethod 13 | def validate_3mf(file_path: str) -> Tuple[bool, Optional[str]]: 14 | """ 15 | Validate a 3MF file for compatibility with Prusa and Bambu printers. 
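# Quick sketch (illustrative): enumerating the export formats CADExporter advertises,
# e.g. to surface them as options in an MCP tool response.
from src.utils.cad_exporter import CADExporter

exporter = CADExporter()
for fmt in exporter.get_supported_formats():
    print(f"{fmt}: {exporter.get_format_description(fmt)}")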
16 | 17 | Args: 18 | file_path: Path to the 3MF file 19 | 20 | Returns: 21 | Tuple of (is_valid, error_message) 22 | """ 23 | if not os.path.exists(file_path): 24 | return False, f"File not found: {file_path}" 25 | 26 | try: 27 | # 3MF files are ZIP archives with XML content 28 | with zipfile.ZipFile(file_path, 'r') as zip_ref: 29 | # Check for required files 30 | required_files = ['3D/3dmodel.model', '[Content_Types].xml'] 31 | for req_file in required_files: 32 | try: 33 | zip_ref.getinfo(req_file) 34 | except KeyError: 35 | return False, f"Missing required file in 3MF: {req_file}" 36 | 37 | # Validate 3D model file 38 | with zip_ref.open('3D/3dmodel.model') as model_file: 39 | tree = ET.parse(model_file) 40 | root = tree.getroot() 41 | 42 | # Check for required elements 43 | if root.tag != '{http://schemas.microsoft.com/3dmanufacturing/core/2015/02}model': 44 | return False, "Invalid 3MF: Missing model element" 45 | 46 | # Verify resources section exists 47 | resources = root.find('.//{http://schemas.microsoft.com/3dmanufacturing/core/2015/02}resources') 48 | if resources is None: 49 | return False, "Invalid 3MF: Missing resources element" 50 | 51 | return True, None 52 | except Exception as e: 53 | logger.error(f"Error validating 3MF file: {str(e)}") 54 | return False, f"Error validating 3MF file: {str(e)}" 55 | 56 | @staticmethod 57 | def validate_amf(file_path: str) -> Tuple[bool, Optional[str]]: 58 | """ 59 | Validate an AMF file for compatibility with printers. 60 | 61 | Args: 62 | file_path: Path to the AMF file 63 | 64 | Returns: 65 | Tuple of (is_valid, error_message) 66 | """ 67 | if not os.path.exists(file_path): 68 | return False, f"File not found: {file_path}" 69 | 70 | try: 71 | # Parse the AMF file (XML format) 72 | tree = ET.parse(file_path) 73 | root = tree.getroot() 74 | 75 | # Check for required elements 76 | if root.tag != 'amf': 77 | return False, "Invalid AMF: Missing amf root element" 78 | 79 | # Check for at least one object 80 | objects = root.findall('./object') 81 | if not objects: 82 | return False, "Invalid AMF: No objects found" 83 | 84 | # Check that each object has a mesh 85 | for obj in objects: 86 | mesh = obj.find('./mesh') 87 | if mesh is None: 88 | return False, f"Invalid AMF: Object {obj.get('id', 'unknown')} is missing a mesh" 89 | 90 | # Check for vertices and volumes 91 | vertices = mesh.find('./vertices') 92 | volumes = mesh.findall('./volume') 93 | 94 | if vertices is None: 95 | return False, f"Invalid AMF: Mesh in object {obj.get('id', 'unknown')} is missing vertices" 96 | 97 | if not volumes: 98 | return False, f"Invalid AMF: Mesh in object {obj.get('id', 'unknown')} has no volumes" 99 | 100 | return True, None 101 | except Exception as e: 102 | logger.error(f"Error validating AMF file: {str(e)}") 103 | return False, f"Error validating AMF file: {str(e)}" 104 | 105 | @staticmethod 106 | def extract_metadata(file_path: str) -> Dict[str, Any]: 107 | """ 108 | Extract metadata from a 3MF or AMF file. 
109 | 110 | Args: 111 | file_path: Path to the 3D model file 112 | 113 | Returns: 114 | Dictionary of metadata 115 | """ 116 | metadata = {} 117 | 118 | # Check file extension 119 | ext = os.path.splitext(file_path)[1].lower() 120 | 121 | try: 122 | if ext == '.3mf': 123 | with zipfile.ZipFile(file_path, 'r') as zip_ref: 124 | metadata_path = "Metadata/model_metadata.xml" 125 | try: 126 | with zip_ref.open(metadata_path) as f: 127 | tree = ET.parse(f) 128 | root = tree.getroot() 129 | 130 | for meta in root.findall('./meta'): 131 | name = meta.get('name') 132 | if name: 133 | metadata[name] = meta.text 134 | except KeyError: 135 | # Metadata file doesn't exist 136 | pass 137 | 138 | elif ext == '.amf': 139 | tree = ET.parse(file_path) 140 | root = tree.getroot() 141 | 142 | for meta in root.findall('./metadata'): 143 | name = meta.get('type') 144 | if name: 145 | metadata[name] = meta.text 146 | except Exception as e: 147 | logger.error(f"Error extracting metadata: {str(e)}") 148 | 149 | return metadata 150 | 151 | @staticmethod 152 | def check_printer_compatibility(file_path: str, printer_type: str = "prusa") -> Tuple[bool, Optional[str]]: 153 | """ 154 | Check if a 3D model file is compatible with a specific printer type. 155 | 156 | Args: 157 | file_path: Path to the 3D model file 158 | printer_type: Type of printer ("prusa" or "bambu") 159 | 160 | Returns: 161 | Tuple of (is_compatible, error_message) 162 | """ 163 | # Check file extension 164 | ext = os.path.splitext(file_path)[1].lower() 165 | 166 | # Validate based on file format 167 | if ext == '.3mf': 168 | is_valid, error = FormatValidator.validate_3mf(file_path) 169 | if not is_valid: 170 | return False, error 171 | 172 | # Additional printer-specific checks 173 | if printer_type.lower() == "prusa": 174 | # Prusa-specific checks for 3MF 175 | # For now, just basic validation is sufficient 176 | return True, None 177 | 178 | elif printer_type.lower() == "bambu": 179 | # Bambu-specific checks for 3MF 180 | # For now, just basic validation is sufficient 181 | return True, None 182 | 183 | else: 184 | return False, f"Unknown printer type: {printer_type}" 185 | 186 | elif ext == '.amf': 187 | is_valid, error = FormatValidator.validate_amf(file_path) 188 | if not is_valid: 189 | return False, error 190 | 191 | # Additional printer-specific checks 192 | if printer_type.lower() == "prusa": 193 | # Prusa-specific checks for AMF 194 | # For now, just basic validation is sufficient 195 | return True, None 196 | 197 | elif printer_type.lower() == "bambu": 198 | # Bambu-specific checks for AMF 199 | # For now, just basic validation is sufficient 200 | return True, None 201 | 202 | else: 203 | return False, f"Unknown printer type: {printer_type}" 204 | 205 | else: 206 | return False, f"Unsupported file format for printer compatibility check: {ext}" 207 | -------------------------------------------------------------------------------- /src/utils/stl_exporter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import uuid 4 | import shutil 5 | from typing import Dict, Any, Optional, Tuple 6 | 7 | from src.utils.stl_validator import STLValidator 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | class STLExporter: 12 | """ 13 | Handles STL file export and validation for 3D printing. 14 | """ 15 | 16 | def __init__(self, openscad_wrapper, output_dir: str): 17 | """ 18 | Initialize the STL exporter. 
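# Usage sketch (illustrative): validating an exported 3MF and checking printer
# compatibility with the static helpers above. The file path is a placeholder for a
# file produced by CADExporter.export_model.
from src.utils.format_validator import FormatValidator

model_file = "test_output/cube.3mf"  # hypothetical export path

is_valid, error = FormatValidator.validate_3mf(model_file)
if not is_valid:
    print(f"Invalid 3MF: {error}")
else:
    compatible, compat_error = FormatValidator.check_printer_compatibility(model_file, "prusa")
    metadata = FormatValidator.extract_metadata(model_file)
    print(f"compatible={compatible}, metadata={metadata}")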
19 | 20 | Args: 21 | openscad_wrapper: Instance of OpenSCADWrapper for generating STL files 22 | output_dir: Directory to store output files 23 | """ 24 | self.openscad_wrapper = openscad_wrapper 25 | self.output_dir = output_dir 26 | self.stl_dir = os.path.join(output_dir, "stl") 27 | 28 | # Create directory if it doesn't exist 29 | os.makedirs(self.stl_dir, exist_ok=True) 30 | 31 | def export_stl(self, scad_file: str, parameters: Optional[Dict[str, Any]] = None) -> Tuple[str, bool, Optional[str]]: 32 | """ 33 | Export a SCAD file to STL format. 34 | 35 | Args: 36 | scad_file: Path to the SCAD file 37 | parameters: Optional parameters to override in the SCAD file 38 | 39 | Returns: 40 | Tuple of (stl_file_path, is_valid, error_message) 41 | """ 42 | try: 43 | # Generate STL file 44 | stl_file = self.openscad_wrapper.generate_stl(scad_file, parameters) 45 | 46 | # Validate STL file 47 | is_valid, error = STLValidator.validate_stl(stl_file) 48 | 49 | if not is_valid: 50 | logger.warning(f"STL validation failed: {error}") 51 | 52 | # Attempt to repair if validation fails 53 | repair_success, repair_error = STLValidator.repair_stl(stl_file) 54 | if repair_success: 55 | # Validate again after repair 56 | is_valid, error = STLValidator.validate_stl(stl_file) 57 | else: 58 | logger.error(f"STL repair failed: {repair_error}") 59 | 60 | return stl_file, is_valid, error 61 | except Exception as e: 62 | logger.error(f"Error exporting STL: {str(e)}") 63 | return "", False, str(e) 64 | 65 | def export_stl_with_metadata(self, scad_file: str, parameters: Optional[Dict[str, Any]] = None, 66 | metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 67 | """ 68 | Export a SCAD file to STL format and include metadata. 69 | 70 | Args: 71 | scad_file: Path to the SCAD file 72 | parameters: Optional parameters to override in the SCAD file 73 | metadata: Optional metadata to include with the STL file 74 | 75 | Returns: 76 | Dictionary with STL file information 77 | """ 78 | # Export STL file 79 | stl_file, is_valid, error = self.export_stl(scad_file, parameters) 80 | 81 | # Create metadata file if metadata is provided 82 | metadata_file = None 83 | if metadata and stl_file: 84 | metadata_file = self._create_metadata_file(stl_file, metadata) 85 | 86 | # Extract model ID from filename 87 | model_id = os.path.basename(scad_file).split('.')[0] if scad_file else str(uuid.uuid4()) 88 | 89 | return { 90 | "model_id": model_id, 91 | "stl_file": stl_file, 92 | "is_valid": is_valid, 93 | "error": error, 94 | "metadata_file": metadata_file, 95 | "metadata": metadata 96 | } 97 | 98 | def _create_metadata_file(self, stl_file: str, metadata: Dict[str, Any]) -> str: 99 | """Create a metadata file for an STL file.""" 100 | metadata_file = f"{os.path.splitext(stl_file)[0]}.json" 101 | 102 | try: 103 | import json 104 | with open(metadata_file, 'w') as f: 105 | json.dump(metadata, f, indent=2) 106 | 107 | logger.info(f"Created metadata file: {metadata_file}") 108 | return metadata_file 109 | except Exception as e: 110 | logger.error(f"Error creating metadata file: {str(e)}") 111 | return "" 112 | 113 | def copy_stl_to_location(self, stl_file: str, destination: str) -> str: 114 | """ 115 | Copy an STL file to a specified location. 
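# Usage sketch (illustrative): exporting and validating an STL with the class above.
# The OpenSCADWrapper constructor arguments are assumed here -- see
# src/openscad_wrapper/wrapper.py for the real signature; any object exposing
# generate_stl(scad_file, parameters) would work in its place.
from src.openscad_wrapper.wrapper import OpenSCADWrapper
from src.utils.stl_exporter import STLExporter

wrapper = OpenSCADWrapper("scad", "output")        # assumed arguments
exporter = STLExporter(wrapper, output_dir="output")

result = exporter.export_stl_with_metadata(
    "scad/simple_cube.scad",
    parameters={"size": 25},                       # hypothetical parameter
    metadata={"printer": "prusa"},
)
print(result["stl_file"], result["is_valid"], result["error"])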
116 | 117 | Args: 118 | stl_file: Path to the STL file 119 | destination: Destination path or directory 120 | 121 | Returns: 122 | Path to the copied STL file 123 | """ 124 | try: 125 | if not os.path.exists(stl_file): 126 | raise FileNotFoundError(f"STL file not found: {stl_file}") 127 | 128 | # If destination is a directory, create a filename 129 | if os.path.isdir(destination): 130 | filename = os.path.basename(stl_file) 131 | destination = os.path.join(destination, filename) 132 | 133 | # Copy the file 134 | shutil.copy2(stl_file, destination) 135 | logger.info(f"Copied STL file to: {destination}") 136 | 137 | return destination 138 | except Exception as e: 139 | logger.error(f"Error copying STL file: {str(e)}") 140 | return "" 141 | -------------------------------------------------------------------------------- /src/utils/stl_repair.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from typing import Tuple, Optional 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | class STLRepair: 8 | """Provides methods to repair non-manifold STL files.""" 9 | 10 | @staticmethod 11 | def repair_stl(stl_file: str) -> Tuple[bool, Optional[str]]: 12 | """Repair a non-manifold STL file.""" 13 | if not os.path.exists(stl_file): 14 | return False, f"STL file not found: {stl_file}" 15 | 16 | # Create a backup of the original file 17 | backup_file = f"{stl_file}.bak" 18 | try: 19 | with open(stl_file, 'rb') as src, open(backup_file, 'wb') as dst: 20 | dst.write(src.read()) 21 | except Exception as e: 22 | logger.error(f"Error creating backup file: {str(e)}") 23 | return False, f"Error creating backup file: {str(e)}" 24 | 25 | # Attempt to repair the STL file 26 | try: 27 | # Method 1: Convert to ASCII STL and ensure proper structure 28 | success, error = STLRepair._repair_ascii_stl(stl_file) 29 | if success: 30 | return True, None 31 | 32 | # Method 2: Create a minimal valid STL if all else fails 33 | return STLRepair._create_minimal_valid_stl(stl_file) 34 | except Exception as e: 35 | logger.error(f"Error repairing STL file: {str(e)}") 36 | return False, f"Error repairing STL file: {str(e)}" 37 | 38 | @staticmethod 39 | def _repair_ascii_stl(stl_file: str) -> Tuple[bool, Optional[str]]: 40 | """Repair an ASCII STL file by ensuring proper structure.""" 41 | try: 42 | # Read the file 43 | with open(stl_file, 'r') as f: 44 | content = f.read() 45 | 46 | # Check if it's an ASCII STL 47 | if not content.strip().startswith('solid'): 48 | return False, "Not an ASCII STL file" 49 | 50 | # Ensure it has the correct structure 51 | lines = content.strip().split('\n') 52 | 53 | # Extract the solid name 54 | solid_name = lines[0].replace('solid', '').strip() 55 | if not solid_name: 56 | solid_name = "OpenSCAD_Model" 57 | 58 | # Check if it has the endsolid tag 59 | has_endsolid = any(line.strip().startswith('endsolid') for line in lines) 60 | 61 | # If it doesn't have endsolid, add it 62 | if not has_endsolid: 63 | with open(stl_file, 'w') as f: 64 | f.write(content.strip()) 65 | f.write(f"\nendsolid {solid_name}\n") 66 | 67 | return True, None 68 | except Exception as e: 69 | logger.error(f"Error repairing ASCII STL: {str(e)}") 70 | return False, f"Error repairing ASCII STL: {str(e)}" 71 | 72 | @staticmethod 73 | def _create_minimal_valid_stl(stl_file: str) -> Tuple[bool, Optional[str]]: 74 | """Create a minimal valid STL file as a last resort.""" 75 | try: 76 | # Create a minimal valid STL file 77 | with open(stl_file, 'w') as f: 78 | 
f.write("solid OpenSCAD_Model\n") 79 | f.write(" facet normal 0 0 0\n") 80 | f.write(" outer loop\n") 81 | f.write(" vertex 0 0 0\n") 82 | f.write(" vertex 1 0 0\n") 83 | f.write(" vertex 0 1 0\n") 84 | f.write(" endloop\n") 85 | f.write(" endfacet\n") 86 | f.write("endsolid OpenSCAD_Model\n") 87 | 88 | return True, "Created minimal valid STL file" 89 | except Exception as e: 90 | logger.error(f"Error creating minimal valid STL file: {str(e)}") 91 | return False, f"Error creating minimal valid STL file: {str(e)}" 92 | -------------------------------------------------------------------------------- /src/utils/stl_validator.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import subprocess 4 | import tempfile 5 | from typing import Tuple, Optional 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class STLValidator: 10 | """ 11 | Validates STL files to ensure they are manifold (watertight) and suitable for 3D printing. 12 | """ 13 | 14 | @staticmethod 15 | def validate_stl(stl_file: str) -> Tuple[bool, Optional[str]]: 16 | """ 17 | Validate an STL file to ensure it is manifold and suitable for 3D printing. 18 | 19 | Args: 20 | stl_file: Path to the STL file to validate 21 | 22 | Returns: 23 | Tuple of (is_valid, error_message) 24 | """ 25 | if not os.path.exists(stl_file): 26 | return False, f"STL file not found: {stl_file}" 27 | 28 | # Check file size 29 | file_size = os.path.getsize(stl_file) 30 | if file_size == 0: 31 | return False, "STL file is empty" 32 | 33 | # Basic validation - check if the file starts with "solid" for ASCII STL 34 | # or contains binary header for binary STL 35 | try: 36 | with open(stl_file, 'rb') as f: 37 | header = f.read(5) 38 | if header == b'solid': 39 | # ASCII STL 40 | is_valid, error = STLValidator._validate_ascii_stl(stl_file) 41 | else: 42 | # Binary STL 43 | is_valid, error = STLValidator._validate_binary_stl(stl_file) 44 | 45 | return is_valid, error 46 | except Exception as e: 47 | logger.error(f"Error validating STL file: {str(e)}") 48 | return False, f"Error validating STL file: {str(e)}" 49 | 50 | @staticmethod 51 | def _validate_ascii_stl(stl_file: str) -> Tuple[bool, Optional[str]]: 52 | """Validate an ASCII STL file.""" 53 | try: 54 | with open(stl_file, 'r') as f: 55 | content = f.read() 56 | 57 | # Check if the file has the correct structure 58 | if not content.strip().startswith('solid'): 59 | return False, "Invalid ASCII STL: Missing 'solid' header" 60 | 61 | if not content.strip().endswith('endsolid'): 62 | return False, "Invalid ASCII STL: Missing 'endsolid' footer" 63 | 64 | # Count facets and vertices 65 | facet_count = content.count('facet normal') 66 | vertex_count = content.count('vertex') 67 | 68 | if facet_count == 0: 69 | return False, "Invalid ASCII STL: No facets found" 70 | 71 | if vertex_count != facet_count * 3: 72 | return False, f"Invalid ASCII STL: Expected {facet_count * 3} vertices, found {vertex_count}" 73 | 74 | return True, None 75 | except Exception as e: 76 | logger.error(f"Error validating ASCII STL: {str(e)}") 77 | return False, f"Error validating ASCII STL: {str(e)}" 78 | 79 | @staticmethod 80 | def _validate_binary_stl(stl_file: str) -> Tuple[bool, Optional[str]]: 81 | """Validate a binary STL file.""" 82 | try: 83 | with open(stl_file, 'rb') as f: 84 | # Skip 80-byte header 85 | f.seek(80) 86 | 87 | # Read number of triangles (4-byte unsigned int) 88 | triangle_count_bytes = f.read(4) 89 | if len(triangle_count_bytes) != 4: 90 | return 
False, "Invalid binary STL: File too short" 91 | 92 | # Convert bytes to integer (little-endian) 93 | triangle_count = int.from_bytes(triangle_count_bytes, byteorder='little') 94 | 95 | # Check file size 96 | expected_size = 84 + (triangle_count * 50) # Header + count + triangles 97 | actual_size = os.path.getsize(stl_file) 98 | 99 | if actual_size != expected_size: 100 | return False, f"Invalid binary STL: Expected size {expected_size}, actual size {actual_size}" 101 | 102 | return True, None 103 | except Exception as e: 104 | logger.error(f"Error validating binary STL: {str(e)}") 105 | return False, f"Error validating binary STL: {str(e)}" 106 | 107 | @staticmethod 108 | def repair_stl(stl_file: str) -> Tuple[bool, Optional[str]]: 109 | """ 110 | Attempt to repair a non-manifold STL file. 111 | 112 | Args: 113 | stl_file: Path to the STL file to repair 114 | 115 | Returns: 116 | Tuple of (success, error_message) 117 | """ 118 | # This is a placeholder for STL repair functionality 119 | # In a real implementation, you would use a library like admesh or meshlab 120 | # to repair the STL file 121 | 122 | logger.warning(f"STL repair not implemented: {stl_file}") 123 | return False, "STL repair not implemented" 124 | -------------------------------------------------------------------------------- /src/visualization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/visualization/__init__.py -------------------------------------------------------------------------------- /src/visualization/__pycache__/__init__.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/visualization/__pycache__/__init__.cpython-312.pyc -------------------------------------------------------------------------------- /src/visualization/__pycache__/renderer.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhacksman/OpenSCAD-MCP-Server/6948c2dc5a385adc987bc3ff75ba435d26f48ba3/src/visualization/__pycache__/renderer.cpython-312.pyc -------------------------------------------------------------------------------- /src/visualization/headless_renderer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from PIL import Image, ImageDraw 4 | from typing import Dict, Any, Optional 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | class HeadlessRenderer: 9 | """Provides rendering capabilities for OpenSCAD in headless environments.""" 10 | 11 | def __init__(self, openscad_path: str = "openscad"): 12 | self.openscad_path = openscad_path 13 | self.camera_angles = { 14 | "front": "0,0,0,0,0,0,50", 15 | "top": "0,0,0,90,0,0,50", 16 | "right": "0,0,0,0,0,90,50", 17 | "perspective": "70,0,35,25,0,25,250" 18 | } 19 | 20 | def create_placeholder_image(self, output_path: str, model_id: str, view: str = "perspective") -> str: 21 | """Create a placeholder image with model information.""" 22 | try: 23 | # Create a blank image 24 | width, height = 800, 600 25 | image = Image.new('RGB', (width, height), color=(240, 240, 240)) 26 | draw = ImageDraw.Draw(image) 27 | 28 | # Add text 29 | draw.text((20, 20), f"OpenSCAD Model: {model_id}", fill=(0, 0, 0)) 30 | draw.text((20, 60), f"View: {view}", 
fill=(0, 0, 0)) 31 | draw.text((20, 100), "Headless rendering mode", fill=(0, 0, 0)) 32 | 33 | # Draw a simple 3D shape 34 | draw.polygon([(400, 200), (300, 300), (500, 300)], outline=(0, 0, 0), width=2) 35 | draw.polygon([(400, 200), (500, 300), (500, 400)], outline=(0, 0, 0), width=2) 36 | draw.polygon([(400, 200), (300, 300), (300, 400)], outline=(0, 0, 0), width=2) 37 | draw.rectangle((300, 300, 500, 400), outline=(0, 0, 0), width=2) 38 | 39 | # Add note about headless mode 40 | note = "Note: This is a placeholder image. OpenSCAD preview generation" 41 | note2 = "requires an X server or a headless rendering solution." 42 | draw.text((20, 500), note, fill=(150, 0, 0)) 43 | draw.text((20, 530), note2, fill=(150, 0, 0)) 44 | 45 | # Save the image 46 | image.save(output_path) 47 | logger.info(f"Created placeholder image: {output_path}") 48 | 49 | return output_path 50 | except Exception as e: 51 | logger.error(f"Error creating placeholder image: {str(e)}") 52 | return output_path 53 | -------------------------------------------------------------------------------- /src/visualization/renderer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import logging 4 | from typing import Dict, Any, Optional 5 | from PIL import Image, ImageDraw, ImageFont 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class Renderer: 10 | """ 11 | Handles rendering of OpenSCAD models to preview images. 12 | Implements multi-angle views and fallback rendering when headless mode fails. 13 | """ 14 | 15 | def __init__(self, openscad_wrapper): 16 | """ 17 | Initialize the renderer. 18 | 19 | Args: 20 | openscad_wrapper: Instance of OpenSCADWrapper for generating previews 21 | """ 22 | self.openscad_wrapper = openscad_wrapper 23 | 24 | # Standard camera angles for multi-view rendering 25 | self.camera_angles = { 26 | 'front': "0,0,0,0,0,0,50", 27 | 'top': "0,0,0,90,0,0,50", 28 | 'right': "0,0,0,0,90,0,50", 29 | 'perspective': "20,20,20,55,0,25,100" 30 | } 31 | 32 | def generate_preview(self, scad_file: str, parameters: Optional[Dict[str, Any]] = None) -> str: 33 | """ 34 | Generate a preview image for a SCAD file. 35 | 36 | Args: 37 | scad_file: Path to the SCAD file 38 | parameters: Optional parameters to override in the SCAD file 39 | 40 | Returns: 41 | Path to the generated preview image 42 | """ 43 | try: 44 | # Try to generate a preview using OpenSCAD 45 | preview_file = self.openscad_wrapper.generate_preview( 46 | scad_file, 47 | parameters, 48 | camera_position=self.camera_angles['perspective'], 49 | image_size="800,600" 50 | ) 51 | 52 | # Check if the file exists and has content 53 | if os.path.exists(preview_file) and os.path.getsize(preview_file) > 0: 54 | return preview_file 55 | else: 56 | # If the file doesn't exist or is empty, create a placeholder 57 | return self._create_placeholder_image(preview_file) 58 | except Exception as e: 59 | logger.error(f"Error generating preview: {str(e)}") 60 | # Create a placeholder image 61 | model_id = os.path.basename(scad_file).split('.')[0] 62 | preview_file = os.path.join(self.openscad_wrapper.preview_dir, f"{model_id}.png") 63 | return self._create_placeholder_image(preview_file) 64 | 65 | def generate_multi_angle_previews(self, scad_file: str, parameters: Optional[Dict[str, Any]] = None) -> Dict[str, str]: 66 | """ 67 | Generate preview images from multiple angles. 
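# Usage sketch (illustrative): producing a placeholder preview with HeadlessRenderer
# above when no X server is available (e.g. on a headless CI runner). The output path
# is hypothetical.
import os
from src.visualization.headless_renderer import HeadlessRenderer

os.makedirs("output/preview", exist_ok=True)
renderer = HeadlessRenderer()
image_path = renderer.create_placeholder_image(
    "output/preview/cube_placeholder.png",
    model_id="cube",
    view="perspective",
)
print(f"Placeholder written to {image_path}")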
68 | 69 | Args: 70 | scad_file: Path to the SCAD file 71 | parameters: Optional parameters to override in the SCAD file 72 | 73 | Returns: 74 | Dictionary mapping angle names to preview image paths 75 | """ 76 | previews = {} 77 | model_id = os.path.basename(scad_file).split('.')[0] 78 | 79 | for angle_name, camera_position in self.camera_angles.items(): 80 | preview_file = os.path.join( 81 | self.openscad_wrapper.preview_dir, 82 | f"{model_id}_{angle_name}.png" 83 | ) 84 | 85 | try: 86 | # Try to generate a preview using OpenSCAD 87 | preview_file = self.openscad_wrapper.generate_preview( 88 | scad_file, 89 | parameters, 90 | camera_position=camera_position, 91 | image_size="800,600" 92 | ) 93 | 94 | # Check if the file exists and has content 95 | if os.path.exists(preview_file) and os.path.getsize(preview_file) > 0: 96 | previews[angle_name] = preview_file 97 | else: 98 | # If the file doesn't exist or is empty, create a placeholder 99 | previews[angle_name] = self._create_placeholder_image(preview_file, angle_name) 100 | except Exception as e: 101 | logger.error(f"Error generating {angle_name} preview: {str(e)}") 102 | # Create a placeholder image 103 | previews[angle_name] = self._create_placeholder_image(preview_file, angle_name) 104 | 105 | return previews 106 | 107 | def _create_placeholder_image(self, output_path: str, angle_name: str = "perspective") -> str: 108 | """ 109 | Create a placeholder image when OpenSCAD rendering fails. 110 | 111 | Args: 112 | output_path: Path to save the placeholder image 113 | angle_name: Name of the camera angle for the placeholder 114 | 115 | Returns: 116 | Path to the created placeholder image 117 | """ 118 | try: 119 | # Create a blank image 120 | img = Image.new('RGB', (800, 600), color=(240, 240, 240)) 121 | draw = ImageDraw.Draw(img) 122 | 123 | # Add text 124 | draw.text((400, 280), f"Preview not available", fill=(0, 0, 0)) 125 | draw.text((400, 320), f"View: {angle_name}", fill=(0, 0, 0)) 126 | 127 | # Save the image 128 | img.save(output_path) 129 | logger.info(f"Created placeholder image: {output_path}") 130 | return output_path 131 | except Exception as e: 132 | logger.error(f"Error creating placeholder image: {str(e)}") 133 | # If all else fails, return the path anyway 134 | return output_path 135 | 136 | def create_composite_preview(self, previews: Dict[str, str], output_path: str) -> str: 137 | """ 138 | Create a composite image from multiple angle previews. 
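# Usage sketch (illustrative): rendering the four standard views and stitching them into
# one composite image with the methods above. The OpenSCADWrapper construction is an
# assumption; the wrapper must expose generate_preview(...) and a preview_dir attribute,
# as the Renderer relies on both.
import os
from src.openscad_wrapper.wrapper import OpenSCADWrapper
from src.visualization.renderer import Renderer

os.makedirs("output/preview", exist_ok=True)
wrapper = OpenSCADWrapper("scad", "output")        # assumed arguments
renderer = Renderer(wrapper)

previews = renderer.generate_multi_angle_previews("scad/simple_cube.scad")
composite = renderer.create_composite_preview(previews, "output/preview/cube_composite.png")
print(composite)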
139 | 140 | Args: 141 | previews: Dictionary mapping angle names to preview image paths 142 | output_path: Path to save the composite image 143 | 144 | Returns: 145 | Path to the created composite image 146 | """ 147 | try: 148 | # Create a blank image 149 | img = Image.new('RGB', (1600, 1200), color=(240, 240, 240)) 150 | 151 | # Load and paste each preview 152 | positions = { 153 | 'perspective': (0, 0), 154 | 'front': (800, 0), 155 | 'top': (0, 600), 156 | 'right': (800, 600) 157 | } 158 | 159 | for angle_name, preview_path in previews.items(): 160 | if angle_name in positions and os.path.exists(preview_path): 161 | try: 162 | angle_img = Image.open(preview_path) 163 | # Resize if needed 164 | angle_img = angle_img.resize((800, 600)) 165 | # Paste into composite 166 | img.paste(angle_img, positions[angle_name]) 167 | except Exception as e: 168 | logger.error(f"Error processing {angle_name} preview: {str(e)}") 169 | 170 | # Save the composite image 171 | img.save(output_path) 172 | logger.info(f"Created composite preview: {output_path}") 173 | return output_path 174 | except Exception as e: 175 | logger.error(f"Error creating composite preview: {str(e)}") 176 | # If all else fails, return the path anyway 177 | return output_path 178 | -------------------------------------------------------------------------------- /src/workflow/image_approval.py: -------------------------------------------------------------------------------- 1 | """ 2 | Image approval tool for MCP clients. 3 | """ 4 | 5 | import os 6 | import logging 7 | import shutil 8 | from typing import Dict, Any, List, Optional 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | class ImageApprovalTool: 13 | """ 14 | Tool for image approval/denial in MCP clients. 15 | """ 16 | 17 | def __init__(self, output_dir: str = "output/approved_images"): 18 | """ 19 | Initialize the image approval tool. 20 | 21 | Args: 22 | output_dir: Directory to store approved images 23 | """ 24 | self.output_dir = output_dir 25 | 26 | # Create output directory if it doesn't exist 27 | os.makedirs(output_dir, exist_ok=True) 28 | 29 | def present_image_for_approval(self, image_path: str, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 30 | """ 31 | Present an image to the user for approval. 32 | 33 | Args: 34 | image_path: Path to the image 35 | metadata: Optional metadata about the image 36 | 37 | Returns: 38 | Dictionary with image path and approval request ID 39 | """ 40 | # For MCP server, we just prepare the response 41 | # The actual approval is handled by the client 42 | 43 | approval_id = os.path.basename(image_path).split('.')[0] 44 | 45 | return { 46 | "approval_id": approval_id, 47 | "image_path": image_path, 48 | "image_url": f"/images/{os.path.basename(image_path)}", 49 | "metadata": metadata or {} 50 | } 51 | 52 | def process_approval(self, approval_id: str, approved: bool, image_path: str) -> Dict[str, Any]: 53 | """ 54 | Process user's approval or denial of an image. 
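# Usage sketch (illustrative): presenting a generated view for approval and recording the
# decision with ImageApprovalTool. In the MCP server the approve/deny choice comes from
# the client; the hard-coded True below just stands in for that response, and the image
# path is hypothetical.
from src.workflow.image_approval import ImageApprovalTool

approval_tool = ImageApprovalTool(output_dir="output/approved_images")

request = approval_tool.present_image_for_approval(
    "output/images/view_1.png",
    metadata={"prompt": "A low-poly rabbit", "view_index": 1},
)
result = approval_tool.process_approval(request["approval_id"], True, request["image_path"])
print(result)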
55 | 56 | Args: 57 | approval_id: ID of the approval request 58 | approved: Whether the image was approved 59 | image_path: Path to the image 60 | 61 | Returns: 62 | Dictionary with approval status and image path 63 | """ 64 | if approved: 65 | # Copy approved image to output directory 66 | approved_path = os.path.join(self.output_dir, os.path.basename(image_path)) 67 | os.makedirs(os.path.dirname(approved_path), exist_ok=True) 68 | shutil.copy2(image_path, approved_path) 69 | 70 | return { 71 | "approval_id": approval_id, 72 | "approved": True, 73 | "original_path": image_path, 74 | "approved_path": approved_path 75 | } 76 | else: 77 | return { 78 | "approval_id": approval_id, 79 | "approved": False, 80 | "original_path": image_path 81 | } 82 | 83 | def get_approved_images(self, filter_pattern: Optional[str] = None) -> List[str]: 84 | """ 85 | Get list of approved images. 86 | 87 | Args: 88 | filter_pattern: Optional pattern to filter image names 89 | 90 | Returns: 91 | List of paths to approved images 92 | """ 93 | import glob 94 | 95 | if filter_pattern: 96 | pattern = os.path.join(self.output_dir, filter_pattern) 97 | else: 98 | pattern = os.path.join(self.output_dir, "*") 99 | 100 | return glob.glob(pattern) 101 | 102 | def get_approval_status(self, approval_id: str) -> Dict[str, Any]: 103 | """ 104 | Get the approval status for a specific approval ID. 105 | 106 | Args: 107 | approval_id: ID of the approval request 108 | 109 | Returns: 110 | Dictionary with approval status 111 | """ 112 | # Check if any approved image matches the approval ID 113 | approved_images = self.get_approved_images() 114 | 115 | for image_path in approved_images: 116 | if approval_id in os.path.basename(image_path): 117 | return { 118 | "approval_id": approval_id, 119 | "approved": True, 120 | "approved_path": image_path 121 | } 122 | 123 | return { 124 | "approval_id": approval_id, 125 | "approved": False 126 | } 127 | 128 | def batch_process_approvals(self, approvals: List[Dict[str, Any]]) -> List[Dict[str, Any]]: 129 | """ 130 | Process multiple approvals at once. 131 | 132 | Args: 133 | approvals: List of dictionaries with approval_id, approved, and image_path 134 | 135 | Returns: 136 | List of dictionaries with approval results 137 | """ 138 | results = [] 139 | 140 | for approval in approvals: 141 | result = self.process_approval( 142 | approval_id=approval["approval_id"], 143 | approved=approval["approved"], 144 | image_path=approval["image_path"] 145 | ) 146 | results.append(result) 147 | 148 | return results 149 | -------------------------------------------------------------------------------- /test_cuda_mvs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test script for CUDA Multi-View Stereo integration. 3 | """ 4 | 5 | import os 6 | import sys 7 | import logging 8 | import unittest 9 | from unittest.mock import patch, MagicMock 10 | from pathlib import Path 11 | 12 | # Add the src directory to the path 13 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 14 | 15 | from src.models.cuda_mvs import CUDAMultiViewStereo 16 | 17 | # Configure logging 18 | logging.basicConfig(level=logging.INFO) 19 | logger = logging.getLogger(__name__) 20 | 21 | class TestCUDAMVS(unittest.TestCase): 22 | """ 23 | Test cases for CUDA Multi-View Stereo integration. 24 | """ 25 | 26 | def setUp(self): 27 | """ 28 | Set up test environment. 
29 | """ 30 | # Create a test output directory 31 | self.test_output_dir = "output/test_cuda_mvs" 32 | os.makedirs(self.test_output_dir, exist_ok=True) 33 | 34 | # Create test image directory 35 | self.test_images_dir = "output/test_cuda_mvs/images" 36 | os.makedirs(self.test_images_dir, exist_ok=True) 37 | 38 | # Create mock CUDA MVS path 39 | self.cuda_mvs_path = "mock_cuda_mvs" 40 | os.makedirs(os.path.join(self.cuda_mvs_path, "build"), exist_ok=True) 41 | 42 | # Create mock executable 43 | with open(os.path.join(self.cuda_mvs_path, "build", "app_patch_match_mvs"), "w") as f: 44 | f.write("#!/bin/bash\necho 'Mock CUDA MVS'\n") 45 | os.chmod(os.path.join(self.cuda_mvs_path, "build", "app_patch_match_mvs"), 0o755) 46 | 47 | # Create test images 48 | for i in range(3): 49 | with open(os.path.join(self.test_images_dir, f"view_{i}.png"), "w") as f: 50 | f.write(f"Mock image {i}") 51 | 52 | # Create the CUDA MVS wrapper with the mock path 53 | with patch('os.path.exists', return_value=True): 54 | self.cuda_mvs = CUDAMultiViewStereo( 55 | cuda_mvs_path=self.cuda_mvs_path, 56 | output_dir=self.test_output_dir 57 | ) 58 | 59 | @patch('subprocess.Popen') 60 | def test_generate_model_from_images(self, mock_popen): 61 | """ 62 | Test generating a 3D model from multiple images. 63 | """ 64 | # Mock subprocess 65 | mock_process = MagicMock() 66 | mock_process.returncode = 0 67 | mock_process.communicate.return_value = ("Mock stdout", "") 68 | mock_popen.return_value = mock_process 69 | 70 | # Mock file creation 71 | def mock_exists(path): 72 | if "point_cloud_file" in str(path): 73 | # Create the mock point cloud file 74 | os.makedirs(os.path.dirname(path), exist_ok=True) 75 | with open(path, "w") as f: 76 | f.write("Mock point cloud") 77 | return True 78 | return os.path.exists(path) 79 | 80 | # Test parameters 81 | image_paths = [os.path.join(self.test_images_dir, f"view_{i}.png") for i in range(3)] 82 | output_name = "test_model" 83 | 84 | # Call the method with patched os.path.exists 85 | with patch('os.path.exists', side_effect=mock_exists): 86 | result = self.cuda_mvs.generate_model_from_images(image_paths, output_name=output_name) 87 | 88 | # Verify the result 89 | self.assertIsNotNone(result) 90 | self.assertEqual(result["model_id"], output_name) 91 | self.assertTrue("point_cloud_file" in result) 92 | self.assertTrue("camera_params_file" in result) 93 | self.assertEqual(len(result["input_images"]), 3) 94 | 95 | # Verify the subprocess call 96 | mock_popen.assert_called_once() 97 | args, kwargs = mock_popen.call_args 98 | self.assertTrue("app_patch_match_mvs" in args[0][0]) 99 | 100 | def test_generate_camera_params(self): 101 | """ 102 | Test generating camera parameters from images. 
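# Usage sketch (illustrative, not a test): running the real CUDA MVS pipeline on approved
# view images, as exercised by the mocked test above. This assumes CUDA MVS has been
# cloned and built so that <cuda_mvs_path>/build/app_patch_match_mvs exists, and that the
# listed images are actual renders rather than the mock files used in these tests; the
# checkout location below is an assumption.
from src.models.cuda_mvs import CUDAMultiViewStereo

cuda_mvs = CUDAMultiViewStereo(
    cuda_mvs_path="third_party/cuda-multi-view-stereo",
    output_dir="output/models",
)

image_paths = [f"output/approved_images/view_{i}.png" for i in range(3)]
result = cuda_mvs.generate_model_from_images(image_paths, output_name="rabbit")
obj_file = cuda_mvs.convert_ply_to_obj(result["point_cloud_file"])
print(result["model_id"], obj_file)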
103 | """ 104 | # Test parameters 105 | image_paths = [os.path.join(self.test_images_dir, f"view_{i}.png") for i in range(3)] 106 | model_dir = os.path.join(self.test_output_dir, "camera_params_test") 107 | os.makedirs(model_dir, exist_ok=True) 108 | 109 | # Mock PIL.Image.open 110 | mock_image = MagicMock() 111 | mock_image.size = (800, 600) 112 | 113 | # Call the method with patched PIL.Image.open 114 | with patch('PIL.Image.open', return_value=mock_image): 115 | params_file = self.cuda_mvs._generate_camera_params(image_paths, model_dir) 116 | 117 | # Verify the result 118 | self.assertTrue(os.path.exists(params_file)) 119 | 120 | # Read the params file 121 | import json 122 | with open(params_file, "r") as f: 123 | params = json.load(f) 124 | 125 | # Verify the params 126 | self.assertEqual(len(params), 3) 127 | for i, param in enumerate(params): 128 | self.assertEqual(param["image_id"], i) 129 | self.assertEqual(param["width"], 800) 130 | self.assertEqual(param["height"], 600) 131 | self.assertTrue("camera" in param) 132 | self.assertEqual(param["camera"]["model"], "PINHOLE") 133 | 134 | def test_convert_ply_to_obj(self): 135 | """ 136 | Test converting PLY point cloud to OBJ mesh. 137 | """ 138 | # Create a mock PLY file 139 | ply_file = os.path.join(self.test_output_dir, "test.ply") 140 | with open(ply_file, "w") as f: 141 | f.write("Mock PLY file") 142 | 143 | # Call the method 144 | obj_file = self.cuda_mvs.convert_ply_to_obj(ply_file) 145 | 146 | # Verify the result 147 | self.assertTrue(os.path.exists(obj_file)) 148 | self.assertTrue(obj_file.endswith(".obj")) 149 | 150 | # Read the OBJ file 151 | with open(obj_file, "r") as f: 152 | content = f.read() 153 | 154 | # Verify the content 155 | self.assertTrue("# Converted from test.ply" in content) 156 | self.assertTrue("v " in content) 157 | self.assertTrue("f " in content) 158 | 159 | def test_error_handling(self): 160 | """ 161 | Test error handling in the CUDA MVS wrapper. 162 | """ 163 | # Test parameters 164 | image_paths = [os.path.join(self.test_images_dir, f"view_{i}.png") for i in range(3)] 165 | output_name = "error_test" 166 | 167 | # Mock subprocess with error 168 | mock_process = MagicMock() 169 | mock_process.returncode = 1 170 | mock_process.communicate.return_value = ("", "Mock error") 171 | 172 | # Call the method with patched subprocess.Popen 173 | with patch('subprocess.Popen', return_value=mock_process): 174 | with self.assertRaises(RuntimeError): 175 | self.cuda_mvs.generate_model_from_images(image_paths, output_name=output_name) 176 | 177 | def tearDown(self): 178 | """ 179 | Clean up after tests. 180 | """ 181 | # Clean up test output directory 182 | import shutil 183 | if os.path.exists(self.test_output_dir): 184 | shutil.rmtree(self.test_output_dir) 185 | 186 | # Clean up mock CUDA MVS path 187 | if os.path.exists(self.cuda_mvs_path): 188 | shutil.rmtree(self.cuda_mvs_path) 189 | 190 | if __name__ == "__main__": 191 | unittest.main() 192 | -------------------------------------------------------------------------------- /test_gemini_api.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test script for Google Gemini API integration. 
3 | """ 4 | 5 | import os 6 | import sys 7 | import logging 8 | import unittest 9 | from unittest.mock import patch, MagicMock 10 | from pathlib import Path 11 | 12 | # Add the src directory to the path 13 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 14 | 15 | from src.ai.gemini_api import GeminiImageGenerator 16 | 17 | # Configure logging 18 | logging.basicConfig(level=logging.INFO) 19 | logger = logging.getLogger(__name__) 20 | 21 | class TestGeminiAPI(unittest.TestCase): 22 | """ 23 | Test cases for Google Gemini API integration. 24 | """ 25 | 26 | def setUp(self): 27 | """ 28 | Set up test environment. 29 | """ 30 | # Create a test output directory 31 | self.test_output_dir = "output/test_gemini" 32 | os.makedirs(self.test_output_dir, exist_ok=True) 33 | 34 | # Mock API key 35 | self.api_key = "test_api_key" 36 | 37 | # Create the generator with the mock API key 38 | self.gemini_generator = GeminiImageGenerator( 39 | api_key=self.api_key, 40 | output_dir=self.test_output_dir 41 | ) 42 | 43 | @patch('requests.post') 44 | def test_generate_image(self, mock_post): 45 | """ 46 | Test generating a single image with Gemini API. 47 | """ 48 | # Mock response 49 | mock_response = MagicMock() 50 | mock_response.status_code = 200 51 | mock_response.json.return_value = { 52 | "candidates": [ 53 | { 54 | "content": { 55 | "parts": [ 56 | { 57 | "text": "Generated image description" 58 | }, 59 | { 60 | "inlineData": { 61 | "mimeType": "image/png", 62 | "data": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==" 63 | } 64 | } 65 | ] 66 | } 67 | } 68 | ] 69 | } 70 | mock_post.return_value = mock_response 71 | 72 | # Test parameters 73 | prompt = "A low-poly rabbit with black background" 74 | model = "gemini-2.0-flash-exp-image-generation" 75 | 76 | # Call the method 77 | result = self.gemini_generator.generate_image(prompt, model) 78 | 79 | # Verify the result 80 | self.assertIsNotNone(result) 81 | self.assertEqual(result["prompt"], prompt) 82 | self.assertEqual(result["model"], model) 83 | self.assertTrue("local_path" in result) 84 | self.assertTrue(os.path.exists(result["local_path"])) 85 | 86 | # Verify the API call 87 | mock_post.assert_called_once() 88 | args, kwargs = mock_post.call_args 89 | self.assertTrue("generativelanguage.googleapis.com" in args[0]) 90 | self.assertEqual(kwargs["headers"]["x-goog-api-key"], self.api_key) 91 | self.assertTrue("prompt" in str(kwargs["json"])) 92 | 93 | @patch('requests.post') 94 | def test_generate_multiple_views(self, mock_post): 95 | """ 96 | Test generating multiple views of an object with Gemini API. 
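# Usage sketch (illustrative, not a test): calling GeminiImageGenerator against the real
# API rather than the mocked responses used below. This requires a valid key and makes
# network requests; the GEMINI_API_KEY environment variable name and output directory are
# assumptions.
import os
from src.ai.gemini_api import GeminiImageGenerator

generator = GeminiImageGenerator(
    api_key=os.environ["GEMINI_API_KEY"],
    output_dir="output/gemini",
)

views = generator.generate_multiple_views("A low-poly rabbit", 3)
for view in views:
    print(view["view_index"], view["view_direction"], view["local_path"])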
97 | """ 98 | # Mock response 99 | mock_response = MagicMock() 100 | mock_response.status_code = 200 101 | mock_response.json.return_value = { 102 | "candidates": [ 103 | { 104 | "content": { 105 | "parts": [ 106 | { 107 | "text": "Generated image description" 108 | }, 109 | { 110 | "inlineData": { 111 | "mimeType": "image/png", 112 | "data": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==" 113 | } 114 | } 115 | ] 116 | } 117 | } 118 | ] 119 | } 120 | mock_post.return_value = mock_response 121 | 122 | # Test parameters 123 | prompt = "A low-poly rabbit" 124 | num_views = 3 125 | 126 | # Call the method 127 | results = self.gemini_generator.generate_multiple_views(prompt, num_views) 128 | 129 | # Verify the results 130 | self.assertEqual(len(results), num_views) 131 | for i, result in enumerate(results): 132 | self.assertTrue("view_direction" in result) 133 | self.assertEqual(result["view_index"], i + 1) 134 | self.assertTrue("local_path" in result) 135 | self.assertTrue(os.path.exists(result["local_path"])) 136 | 137 | # Verify the API calls 138 | self.assertEqual(mock_post.call_count, num_views) 139 | 140 | @patch('requests.post') 141 | def test_error_handling(self, mock_post): 142 | """ 143 | Test error handling in the Gemini API client. 144 | """ 145 | # Mock error response 146 | mock_response = MagicMock() 147 | mock_response.status_code = 400 148 | mock_response.raise_for_status.side_effect = Exception("API Error") 149 | mock_post.return_value = mock_response 150 | 151 | # Test parameters 152 | prompt = "A low-poly rabbit" 153 | 154 | # Call the method and expect an exception 155 | with self.assertRaises(Exception): 156 | self.gemini_generator.generate_image(prompt) 157 | 158 | def tearDown(self): 159 | """ 160 | Clean up after tests. 161 | """ 162 | # Clean up test output directory 163 | import shutil 164 | if os.path.exists(self.test_output_dir): 165 | shutil.rmtree(self.test_output_dir) 166 | 167 | if __name__ == "__main__": 168 | unittest.main() 169 | -------------------------------------------------------------------------------- /test_image_approval.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test script for image approval tool. 3 | """ 4 | 5 | import os 6 | import sys 7 | import logging 8 | import unittest 9 | from unittest.mock import patch, MagicMock 10 | from pathlib import Path 11 | 12 | # Add the src directory to the path 13 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 14 | 15 | from src.workflow.image_approval import ImageApprovalTool 16 | 17 | # Configure logging 18 | logging.basicConfig(level=logging.INFO) 19 | logger = logging.getLogger(__name__) 20 | 21 | class TestImageApproval(unittest.TestCase): 22 | """ 23 | Test cases for image approval tool. 24 | """ 25 | 26 | def setUp(self): 27 | """ 28 | Set up test environment. 
29 | """ 30 | # Create a test output directory 31 | self.test_output_dir = "output/test_approval" 32 | os.makedirs(self.test_output_dir, exist_ok=True) 33 | 34 | # Create test images directory 35 | self.test_images_dir = "output/test_approval/images" 36 | os.makedirs(self.test_images_dir, exist_ok=True) 37 | 38 | # Create test images 39 | self.test_images = [] 40 | for i in range(3): 41 | image_path = os.path.join(self.test_images_dir, f"view_{i}.png") 42 | with open(image_path, "w") as f: 43 | f.write(f"Mock image {i}") 44 | self.test_images.append(image_path) 45 | 46 | # Create the approval tool 47 | self.approval_tool = ImageApprovalTool( 48 | output_dir=os.path.join(self.test_output_dir, "approved") 49 | ) 50 | 51 | def test_present_image_for_approval(self): 52 | """ 53 | Test presenting an image for approval. 54 | """ 55 | # Test parameters 56 | image_path = self.test_images[0] 57 | metadata = { 58 | "prompt": "A test image", 59 | "view_direction": "front view", 60 | "view_index": 1 61 | } 62 | 63 | # Call the method 64 | result = self.approval_tool.present_image_for_approval(image_path, metadata) 65 | 66 | # Verify the result 67 | self.assertIsNotNone(result) 68 | self.assertTrue("approval_id" in result) 69 | self.assertEqual(result["image_path"], image_path) 70 | self.assertTrue("image_url" in result) 71 | self.assertEqual(result["metadata"], metadata) 72 | 73 | def test_process_approval_approved(self): 74 | """ 75 | Test processing an approved image. 76 | """ 77 | # Test parameters 78 | image_path = self.test_images[0] 79 | approval_id = "test_approval_1" 80 | 81 | # Call the method 82 | result = self.approval_tool.process_approval(approval_id, True, image_path) 83 | 84 | # Verify the result 85 | self.assertIsNotNone(result) 86 | self.assertEqual(result["approval_id"], approval_id) 87 | self.assertTrue(result["approved"]) 88 | self.assertEqual(result["original_path"], image_path) 89 | self.assertTrue("approved_path" in result) 90 | 91 | # Verify the file was copied 92 | self.assertTrue(os.path.exists(result["approved_path"])) 93 | 94 | def test_process_approval_denied(self): 95 | """ 96 | Test processing a denied image. 97 | """ 98 | # Test parameters 99 | image_path = self.test_images[1] 100 | approval_id = "test_approval_2" 101 | 102 | # Call the method 103 | result = self.approval_tool.process_approval(approval_id, False, image_path) 104 | 105 | # Verify the result 106 | self.assertIsNotNone(result) 107 | self.assertEqual(result["approval_id"], approval_id) 108 | self.assertFalse(result["approved"]) 109 | self.assertEqual(result["original_path"], image_path) 110 | self.assertFalse("approved_path" in result) 111 | 112 | def test_get_approved_images(self): 113 | """ 114 | Test getting approved images. 115 | """ 116 | # Approve some images 117 | for i, image_path in enumerate(self.test_images): 118 | self.approval_tool.process_approval(f"test_approval_{i}", True, image_path) 119 | 120 | # Call the method 121 | approved_images = self.approval_tool.get_approved_images() 122 | 123 | # Verify the result 124 | self.assertEqual(len(approved_images), len(self.test_images)) 125 | 126 | def test_get_approval_status(self): 127 | """ 128 | Test getting approval status. 
129 | """ 130 | # Approve an image 131 | approval_id = "test_approval_status" 132 | self.approval_tool.process_approval(approval_id, True, self.test_images[0]) 133 | 134 | # Call the method 135 | status = self.approval_tool.get_approval_status(approval_id) 136 | 137 | # Verify the result 138 | self.assertIsNotNone(status) 139 | self.assertEqual(status["approval_id"], approval_id) 140 | self.assertTrue(status["approved"]) 141 | self.assertTrue("approved_path" in status) 142 | 143 | # Test with non-existent approval ID 144 | status = self.approval_tool.get_approval_status("non_existent") 145 | self.assertIsNotNone(status) 146 | self.assertEqual(status["approval_id"], "non_existent") 147 | self.assertFalse(status["approved"]) 148 | 149 | def test_batch_process_approvals(self): 150 | """ 151 | Test batch processing of approvals. 152 | """ 153 | # Test parameters 154 | approvals = [ 155 | { 156 | "approval_id": "batch_1", 157 | "approved": True, 158 | "image_path": self.test_images[0] 159 | }, 160 | { 161 | "approval_id": "batch_2", 162 | "approved": False, 163 | "image_path": self.test_images[1] 164 | }, 165 | { 166 | "approval_id": "batch_3", 167 | "approved": True, 168 | "image_path": self.test_images[2] 169 | } 170 | ] 171 | 172 | # Call the method 173 | results = self.approval_tool.batch_process_approvals(approvals) 174 | 175 | # Verify the results 176 | self.assertEqual(len(results), len(approvals)) 177 | 178 | # Check approved images 179 | approved_images = self.approval_tool.get_approved_images() 180 | self.assertEqual(len(approved_images), 2) # Two images were approved 181 | 182 | def tearDown(self): 183 | """ 184 | Clean up after tests. 185 | """ 186 | # Clean up test output directory 187 | import shutil 188 | if os.path.exists(self.test_output_dir): 189 | shutil.rmtree(self.test_output_dir) 190 | 191 | if __name__ == "__main__": 192 | unittest.main() 193 | -------------------------------------------------------------------------------- /test_image_approval_workflow.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test script for the image approval workflow. 3 | """ 4 | 5 | import os 6 | import sys 7 | import json 8 | import logging 9 | import unittest 10 | from unittest.mock import patch, MagicMock 11 | from pathlib import Path 12 | 13 | # Add the src directory to the path 14 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 15 | 16 | from src.workflow.image_approval import ImageApprovalManager 17 | from src.config import IMAGE_APPROVAL 18 | 19 | # Configure logging 20 | logging.basicConfig(level=logging.INFO) 21 | logger = logging.getLogger(__name__) 22 | 23 | class TestImageApprovalWorkflow(unittest.TestCase): 24 | """ 25 | Test cases for the image approval workflow. 26 | """ 27 | 28 | def setUp(self): 29 | """ 30 | Set up test environment. 
31 | """ 32 | # Create test output directories 33 | self.test_output_dir = "output/test_image_approval" 34 | self.test_approved_dir = os.path.join(self.test_output_dir, "approved") 35 | os.makedirs(self.test_output_dir, exist_ok=True) 36 | os.makedirs(self.test_approved_dir, exist_ok=True) 37 | 38 | # Create the approval manager 39 | self.approval_manager = ImageApprovalManager( 40 | output_dir=self.test_output_dir, 41 | approved_dir=self.test_approved_dir, 42 | min_approved_images=3, 43 | auto_approve=False 44 | ) 45 | 46 | # Create test images 47 | self.test_images = [] 48 | for i in range(5): 49 | image_path = os.path.join(self.test_output_dir, f"test_image_{i}.png") 50 | with open(image_path, "w") as f: 51 | f.write(f"test image data {i}") 52 | self.test_images.append({ 53 | "id": f"image_{i}", 54 | "local_path": image_path, 55 | "view_index": i, 56 | "view_direction": f"view_{i}" 57 | }) 58 | 59 | def test_add_images(self): 60 | """ 61 | Test adding images to the approval manager. 62 | """ 63 | # Add images 64 | self.approval_manager.add_images(self.test_images) 65 | 66 | # Verify images were added 67 | self.assertEqual(len(self.approval_manager.images), 5) 68 | self.assertEqual(len(self.approval_manager.pending_images), 5) 69 | self.assertEqual(len(self.approval_manager.approved_images), 0) 70 | self.assertEqual(len(self.approval_manager.rejected_images), 0) 71 | 72 | def test_approve_image(self): 73 | """ 74 | Test approving an image. 75 | """ 76 | # Add images 77 | self.approval_manager.add_images(self.test_images) 78 | 79 | # Approve an image 80 | result = self.approval_manager.approve_image("image_0") 81 | 82 | # Verify the result 83 | self.assertTrue(result["success"]) 84 | self.assertEqual(result["image_id"], "image_0") 85 | self.assertEqual(result["status"], "approved") 86 | 87 | # Verify the image was moved to approved 88 | self.assertEqual(len(self.approval_manager.pending_images), 4) 89 | self.assertEqual(len(self.approval_manager.approved_images), 1) 90 | self.assertEqual(len(self.approval_manager.rejected_images), 0) 91 | 92 | # Verify the image was copied to the approved directory 93 | approved_path = os.path.join(self.test_approved_dir, "image_0.png") 94 | self.assertTrue(os.path.exists(approved_path)) 95 | 96 | def test_reject_image(self): 97 | """ 98 | Test rejecting an image. 99 | """ 100 | # Add images 101 | self.approval_manager.add_images(self.test_images) 102 | 103 | # Reject an image 104 | result = self.approval_manager.reject_image("image_1") 105 | 106 | # Verify the result 107 | self.assertTrue(result["success"]) 108 | self.assertEqual(result["image_id"], "image_1") 109 | self.assertEqual(result["status"], "rejected") 110 | 111 | # Verify the image was moved to rejected 112 | self.assertEqual(len(self.approval_manager.pending_images), 4) 113 | self.assertEqual(len(self.approval_manager.approved_images), 0) 114 | self.assertEqual(len(self.approval_manager.rejected_images), 1) 115 | 116 | def test_get_approval_status(self): 117 | """ 118 | Test getting the approval status. 
119 | """ 120 | # Add images 121 | self.approval_manager.add_images(self.test_images) 122 | 123 | # Approve some images 124 | self.approval_manager.approve_image("image_0") 125 | self.approval_manager.approve_image("image_1") 126 | self.approval_manager.approve_image("image_2") 127 | 128 | # Reject an image 129 | self.approval_manager.reject_image("image_3") 130 | 131 | # Get the status 132 | status = self.approval_manager.get_status() 133 | 134 | # Verify the status 135 | self.assertEqual(status["total_images"], 5) 136 | self.assertEqual(status["pending_count"], 1) 137 | self.assertEqual(status["approved_count"], 3) 138 | self.assertEqual(status["rejected_count"], 1) 139 | self.assertTrue(status["has_minimum_approved"]) 140 | self.assertEqual(len(status["approved_images"]), 3) 141 | self.assertEqual(len(status["pending_images"]), 1) 142 | self.assertEqual(len(status["rejected_images"]), 1) 143 | 144 | def test_get_approved_images(self): 145 | """ 146 | Test getting approved images. 147 | """ 148 | # Add images 149 | self.approval_manager.add_images(self.test_images) 150 | 151 | # Approve some images 152 | self.approval_manager.approve_image("image_0") 153 | self.approval_manager.approve_image("image_2") 154 | self.approval_manager.approve_image("image_4") 155 | 156 | # Get approved images 157 | approved = self.approval_manager.get_approved_images() 158 | 159 | # Verify approved images 160 | self.assertEqual(len(approved), 3) 161 | self.assertEqual(approved[0]["id"], "image_0") 162 | self.assertEqual(approved[1]["id"], "image_2") 163 | self.assertEqual(approved[2]["id"], "image_4") 164 | 165 | def test_auto_approve(self): 166 | """ 167 | Test auto-approval mode. 168 | """ 169 | # Create an auto-approve manager 170 | auto_manager = ImageApprovalManager( 171 | output_dir=self.test_output_dir, 172 | approved_dir=self.test_approved_dir, 173 | min_approved_images=3, 174 | auto_approve=True 175 | ) 176 | 177 | # Add images 178 | auto_manager.add_images(self.test_images) 179 | 180 | # Verify all images were auto-approved 181 | self.assertEqual(len(auto_manager.pending_images), 0) 182 | self.assertEqual(len(auto_manager.approved_images), 5) 183 | self.assertEqual(len(auto_manager.rejected_images), 0) 184 | 185 | def test_has_minimum_approved(self): 186 | """ 187 | Test checking if minimum approved images are met. 188 | """ 189 | # Add images 190 | self.approval_manager.add_images(self.test_images) 191 | 192 | # Initially should not have minimum 193 | self.assertFalse(self.approval_manager.has_minimum_approved()) 194 | 195 | # Approve two images 196 | self.approval_manager.approve_image("image_0") 197 | self.approval_manager.approve_image("image_1") 198 | 199 | # Still should not have minimum 200 | self.assertFalse(self.approval_manager.has_minimum_approved()) 201 | 202 | # Approve one more image 203 | self.approval_manager.approve_image("image_2") 204 | 205 | # Now should have minimum 206 | self.assertTrue(self.approval_manager.has_minimum_approved()) 207 | 208 | def test_save_and_load_state(self): 209 | """ 210 | Test saving and loading the approval state. 
211 | """ 212 | # Add images 213 | self.approval_manager.add_images(self.test_images) 214 | 215 | # Approve and reject some images 216 | self.approval_manager.approve_image("image_0") 217 | self.approval_manager.approve_image("image_2") 218 | self.approval_manager.reject_image("image_3") 219 | 220 | # Save the state 221 | state_file = os.path.join(self.test_output_dir, "approval_state.json") 222 | self.approval_manager.save_state(state_file) 223 | 224 | # Create a new manager 225 | new_manager = ImageApprovalManager( 226 | output_dir=self.test_output_dir, 227 | approved_dir=self.test_approved_dir, 228 | min_approved_images=3, 229 | auto_approve=False 230 | ) 231 | 232 | # Load the state 233 | new_manager.load_state(state_file) 234 | 235 | # Verify the state was loaded correctly 236 | self.assertEqual(len(new_manager.images), 5) 237 | self.assertEqual(len(new_manager.pending_images), 2) 238 | self.assertEqual(len(new_manager.approved_images), 2) 239 | self.assertEqual(len(new_manager.rejected_images), 1) 240 | 241 | def tearDown(self): 242 | """ 243 | Clean up after tests. 244 | """ 245 | # Clean up test output directory 246 | import shutil 247 | if os.path.exists(self.test_output_dir): 248 | shutil.rmtree(self.test_output_dir) 249 | 250 | if __name__ == "__main__": 251 | unittest.main() 252 | -------------------------------------------------------------------------------- /test_image_to_model_pipeline.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import logging 4 | import argparse 5 | from pathlib import Path 6 | from typing import Dict, Any, List, Optional, Tuple 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | logger = logging.getLogger(__name__) 11 | 12 | # Add project root to path 13 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 14 | 15 | # Import components 16 | from src.ai.venice_api import VeniceImageGenerator 17 | from src.ai.sam_segmentation import SAMSegmenter 18 | from src.models.threestudio_generator import ThreeStudioGenerator 19 | from src.openscad_wrapper.wrapper import OpenSCADWrapper 20 | from src.workflow.image_to_model_pipeline import ImageToModelPipeline 21 | from src.config import ( 22 | VENICE_API_KEY, IMAGES_DIR, MASKS_DIR, MODELS_DIR, SCAD_DIR, 23 | SAM2_CHECKPOINT_PATH, SAM2_MODEL_TYPE, SAM2_USE_GPU, THREESTUDIO_PATH 24 | ) 25 | 26 | def test_pipeline(prompt: str, output_dir: Optional[str] = None, 27 | venice_model: str = "fluently-xl", skip_steps: List[str] = None): 28 | """ 29 | Test the full image-to-model pipeline. 
30 | 31 | Args: 32 | prompt: Text prompt for image generation 33 | output_dir: Directory to save pipeline results 34 | venice_model: Venice.ai model to use for image generation 35 | skip_steps: List of steps to skip ('image', 'segment', 'model3d', 'openscad') 36 | """ 37 | # Use default output directory if not provided 38 | if not output_dir: 39 | output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "output", "pipeline_test") 40 | 41 | # Create output directory 42 | os.makedirs(output_dir, exist_ok=True) 43 | 44 | # Initialize skip_steps if None 45 | skip_steps = skip_steps or [] 46 | 47 | logger.info(f"Testing image-to-model pipeline with prompt: {prompt}") 48 | logger.info(f"Output directory: {output_dir}") 49 | logger.info(f"Venice model: {venice_model}") 50 | logger.info(f"Skipping steps: {skip_steps}") 51 | 52 | try: 53 | # Initialize components 54 | logger.info("Initializing pipeline components...") 55 | 56 | # Venice.ai image generator 57 | venice_generator = VeniceImageGenerator( 58 | api_key=VENICE_API_KEY, 59 | output_dir=os.path.join(output_dir, "images") 60 | ) 61 | 62 | # SAM2 segmenter 63 | sam_segmenter = SAMSegmenter( 64 | model_type=SAM2_MODEL_TYPE, 65 | checkpoint_path=SAM2_CHECKPOINT_PATH, 66 | use_gpu=SAM2_USE_GPU, 67 | output_dir=os.path.join(output_dir, "masks") 68 | ) 69 | 70 | # ThreeStudio generator 71 | threestudio_generator = ThreeStudioGenerator( 72 | threestudio_path=THREESTUDIO_PATH, 73 | output_dir=os.path.join(output_dir, "models") 74 | ) 75 | 76 | # OpenSCAD wrapper 77 | openscad_wrapper = OpenSCADWrapper( 78 | output_dir=os.path.join(output_dir, "scad") 79 | ) 80 | 81 | # Initialize pipeline 82 | pipeline = ImageToModelPipeline( 83 | venice_generator=venice_generator, 84 | sam_segmenter=sam_segmenter, 85 | threestudio_generator=threestudio_generator, 86 | openscad_wrapper=openscad_wrapper, 87 | output_dir=output_dir 88 | ) 89 | 90 | # Run pipeline with custom steps 91 | if 'image' in skip_steps: 92 | # Skip image generation, use a test image 93 | logger.info("Skipping image generation, using test image") 94 | image_path = os.path.join(IMAGES_DIR, "test_image.png") 95 | if not os.path.exists(image_path): 96 | logger.error(f"Test image not found: {image_path}") 97 | return 98 | 99 | # TODO: Implement custom pipeline execution with skipped steps 100 | logger.info("Custom pipeline execution not implemented yet") 101 | return 102 | else: 103 | # Run full pipeline 104 | logger.info("Running full pipeline...") 105 | result = pipeline.generate_model_from_text( 106 | prompt=prompt, 107 | venice_params={"model": venice_model}, 108 | sam_params={}, 109 | threestudio_params={} 110 | ) 111 | 112 | # Print results 113 | logger.info("Pipeline completed successfully") 114 | logger.info(f"Pipeline ID: {result.get('pipeline_id')}") 115 | logger.info(f"Image path: {result.get('image', {}).get('local_path')}") 116 | logger.info(f"Mask count: {result.get('segmentation', {}).get('num_masks', 0)}") 117 | logger.info(f"3D model path: {result.get('model_3d', {}).get('exported_files', [])}") 118 | logger.info(f"OpenSCAD file: {result.get('openscad', {}).get('scad_file')}") 119 | 120 | return result 121 | 122 | except Exception as e: 123 | logger.error(f"Error in pipeline: {str(e)}") 124 | import traceback 125 | traceback.print_exc() 126 | return None 127 | 128 | if __name__ == "__main__": 129 | # Parse command line arguments 130 | parser = argparse.ArgumentParser(description="Test image-to-model pipeline") 131 | parser.add_argument("prompt", help="Text prompt for 
image generation") 132 | parser.add_argument("--output-dir", help="Directory to save pipeline results") 133 | parser.add_argument("--venice-model", default="fluently-xl", help="Venice.ai model to use") 134 | parser.add_argument("--skip", nargs="+", choices=["image", "segment", "model3d", "openscad"], 135 | help="Steps to skip in the pipeline") 136 | 137 | args = parser.parse_args() 138 | 139 | # Run test 140 | test_pipeline( 141 | args.prompt, 142 | args.output_dir, 143 | args.venice_model, 144 | args.skip 145 | ) 146 | -------------------------------------------------------------------------------- /test_model_selection.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from src.ai.venice_api import VeniceImageGenerator 4 | 5 | # Configure logging 6 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 7 | logger = logging.getLogger(__name__) 8 | 9 | # Venice.ai API key (replace with your own or use environment variable) 10 | VENICE_API_KEY = os.getenv("VENICE_API_KEY", "B9Y68yQgatQw8wmpmnIMYcGip1phCt-43CS0OktZU6") 11 | OUTPUT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "output", "images") 12 | 13 | # Test natural language model selection 14 | def test_model_selection(): 15 | """Test the natural language model selection functionality.""" 16 | # Initialize the Venice API client 17 | venice_generator = VeniceImageGenerator(VENICE_API_KEY, OUTPUT_DIR) 18 | 19 | # Test cases - natural language preferences to expected model mappings 20 | test_cases = [ 21 | ("default", "fluently-xl"), 22 | ("fastest model please", "fluently-xl"), 23 | ("I need a high quality image", "flux-dev"), 24 | ("create an uncensored image", "flux-dev-uncensored"), 25 | ("make it realistic", "pony-realism"), 26 | ("I want something artistic", "lustify-sdxl"), 27 | ("use stable diffusion", "stable-diffusion-3.5"), 28 | ("invalid model name", "fluently-xl"), # Should default to fluently-xl 29 | ] 30 | 31 | # Run tests 32 | for preference, expected_model in test_cases: 33 | mapped_model = venice_generator.map_model_preference(preference) 34 | logger.info(f"Preference: '{preference}' -> Model: '{mapped_model}'") 35 | assert mapped_model == expected_model, f"Expected {expected_model}, got {mapped_model}" 36 | 37 | logger.info("All model preference mappings tests passed!") 38 | 39 | if __name__ == "__main__": 40 | logger.info("Starting Venice.ai model selection mapping tests") 41 | test_model_selection() 42 | -------------------------------------------------------------------------------- /test_primitives.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Test OpenSCAD primitives with different export formats 3 | 4 | PYTHON="python" 5 | OUTPUT_DIR="test_output" 6 | 7 | # Create output directory 8 | mkdir -p $OUTPUT_DIR 9 | 10 | # Run the tests 11 | $PYTHON -m src.testing.test_primitives --output-dir $OUTPUT_DIR --validate 12 | 13 | echo "Tests completed. 
Results are in $OUTPUT_DIR" 14 | -------------------------------------------------------------------------------- /test_rabbit_direct.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import requests 5 | import base64 6 | import logging 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | logger = logging.getLogger(__name__) 11 | 12 | # Venice.ai API configuration 13 | VENICE_API_KEY = os.getenv("VENICE_API_KEY", "B9Y68yQgatQw8wmpmnIMYcGip1phCt-43CS0OktZU6") 14 | OUTPUT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "output", "images") 15 | os.makedirs(OUTPUT_DIR, exist_ok=True) 16 | 17 | # API configuration 18 | url = "https://api.venice.ai/api/v1/image/generate" 19 | headers = { 20 | "Authorization": f"Bearer {VENICE_API_KEY}", 21 | "Content-Type": "application/json" 22 | } 23 | 24 | # Payload for image generation 25 | payload = { 26 | "height": 1024, 27 | "width": 1024, 28 | "steps": 20, 29 | "return_binary": True, # Request binary data directly 30 | "hide_watermark": False, 31 | "format": "png", 32 | "embed_exif_metadata": False, 33 | "model": "flux-dev", 34 | "prompt": "A low-poly rabbit with black background. 3d file" 35 | } 36 | 37 | def generate_image(): 38 | """Generate image using Venice.ai API with the rabbit prompt.""" 39 | try: 40 | logger.info(f"Sending request to {url} with prompt: '{payload['prompt']}'") 41 | response = requests.post(url, json=payload, headers=headers) 42 | 43 | logger.info(f"Response status: {response.status_code}") 44 | 45 | if response.status_code == 200: 46 | # Save the raw binary response 47 | filename = "rabbit_low_poly_3d.png" 48 | output_path = os.path.join(OUTPUT_DIR, filename) 49 | 50 | with open(output_path, "wb") as f: 51 | f.write(response.content) 52 | 53 | logger.info(f"Image saved to {output_path}") 54 | return output_path 55 | else: 56 | logger.error(f"Error: {response.status_code} - {response.text}") 57 | 58 | return None 59 | except Exception as e: 60 | logger.error(f"Error: {str(e)}") 61 | return None 62 | 63 | if __name__ == "__main__": 64 | logger.info("Starting Venice.ai image generation test with rabbit prompt") 65 | image_path = generate_image() 66 | 67 | if image_path: 68 | logger.info(f"Successfully generated and saved image to {image_path}") 69 | print(f"\nImage saved to: {image_path}") 70 | else: 71 | logger.error("Failed to generate image") 72 | -------------------------------------------------------------------------------- /test_venice_example.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | import json 4 | 5 | # Venice.ai API configuration 6 | VENICE_API_KEY = os.getenv("VENICE_API_KEY", "") # Set via environment variable 7 | OUTPUT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "output", "images") 8 | os.makedirs(OUTPUT_DIR, exist_ok=True) 9 | 10 | # API endpoint 11 | url = "https://api.venice.ai/api/v1/image/generate" 12 | 13 | # Test prompt 14 | prompt = "A coffee mug with geometric patterns" 15 | 16 | # Prepare payload 17 | payload = { 18 | "model": "fluently-xl", # Try default model instead of flux 19 | "prompt": prompt, 20 | "height": 1024, 21 | "width": 1024, 22 | "steps": 20, 23 | "return_binary": False, 24 | "hide_watermark": False, 25 | "format": "png", 26 | "embed_exif_metadata": False 27 | } 28 | 29 | # Set up headers 30 | headers = { 31 | 
"Authorization": f"Bearer {VENICE_API_KEY}", 32 | "Content-Type": "application/json" 33 | } 34 | 35 | print(f"Sending request to {url} with prompt: '{prompt}'") 36 | 37 | # Make API request 38 | try: 39 | response = requests.post(url, json=payload, headers=headers) 40 | 41 | print(f"Response status: {response.status_code}") 42 | 43 | if response.status_code == 200: 44 | result = response.json() 45 | print("\nImage generation result:") 46 | print(json.dumps(result, indent=2)) 47 | 48 | # Print image URL if available 49 | if "images" in result and len(result["images"]) > 0: 50 | image_url = result["images"][0] 51 | print(f"\nImage URL: {image_url}") 52 | 53 | # Download image 54 | image_filename = f"{prompt[:20].replace(' ', '_')}_flux.png" 55 | image_path = os.path.join(OUTPUT_DIR, image_filename) 56 | 57 | print(f"Downloading image to {image_path}...") 58 | img_response = requests.get(image_url, stream=True) 59 | if img_response.status_code == 200: 60 | with open(image_path, 'wb') as f: 61 | for chunk in img_response.iter_content(chunk_size=8192): 62 | f.write(chunk) 63 | print(f"Image saved to {image_path}") 64 | else: 65 | print(f"Failed to download image: {img_response.status_code}") 66 | else: 67 | print(f"Error: {response.text}") 68 | except Exception as e: 69 | print(f"Error: {str(e)}") 70 | --------------------------------------------------------------------------------