├── images ├── ll.gif └── LightStudioPlugin.png ├── TODO ├── .github └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── LICENSE ├── translate.py ├── doku ├── compositor.txt ├── swarm.txt ├── krita-plugin.txt └── ideas.txt ├── __init__.py ├── mist.py ├── materials ├── glass.py ├── lizardskin.py ├── basalt.py ├── brushed.py ├── veiled.py ├── aluminum.py ├── birch.py └── rusted.py ├── snailshell.py ├── spiral.py ├── lightprobe.py ├── rspline.py ├── krita └── kritapal.py ├── pipe.py ├── plugin.py ├── swarmtools.py ├── meshy.py ├── camrig.py ├── CONTRIBUTING.md ├── compositor.py ├── animpart.py ├── .gitignore ├── README.md ├── studiolights.py ├── hdr.py ├── swarm.py └── bpl.py /images/ll.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sq5rix/BlenderPythonLib/HEAD/images/ll.gif -------------------------------------------------------------------------------- /images/LightStudioPlugin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sq5rix/BlenderPythonLib/HEAD/images/LightStudioPlugin.png -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | drones animation - swarm of drones 2 | randomly moving between faces 3 | 4 | classes for modules: models, cameras etc 5 | scripts for setting defaults, like eevee 6 | animation functions 7 | camera rig animation along rail 8 | setting camera 9 | world building - hdr images, lights 10 | simulation, particles support in code 11 | materials creation for typical materials like emission etc 12 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g.
22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Tom 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /translate.py: -------------------------------------------------------------------------------- 1 | import openai 2 | from PIL import Image 3 | import pytesseract 4 | 5 | # Set your OpenAI API key 6 | openai.api_key = 'your-openai-api-key' 7 | 8 | def translate_polish_image(image_path): 9 | # Open the image file 10 | with Image.open(image_path) as img: 11 | # Use Tesseract to do OCR on the image 12 | text = pytesseract.image_to_string(img, lang='pol') 13 | 14 | # Translate the extracted text using OpenAI's GPT-4 model 15 | response = openai.ChatCompletion.create( 16 | model="gpt-4", 17 | messages=[ 18 | {"role": "system", "content": "You are a helpful assistant."}, 19 | {"role": "user", "content": f"Translate the following Polish text to English:\n\n{text}"} 20 | ], 21 | max_tokens=1000, 22 | n=1, 23 | stop=None, 24 | temperature=0.7, 25 | ) 26 | 27 | # Get the translation from the response 28 | translation = response['choices'][0]['message']['content'].strip() 29 | return translation 30 | 31 | # Example usage 32 | image_path = 'path_to_your_image.jpeg' 33 | translation = translate_polish_image(image_path) 34 | print(translation) -------------------------------------------------------------------------------- /doku/compositor.txt: -------------------------------------------------------------------------------- 1 | Creating a simple Blender plugin to automate the setup of a basic compositing node tree can significantly streamline the post-processing workflow for Blender users. The proposed plugin will add common compositing effects such as denoise, glare, color temperature adjustment, RGB curves, and minimal lens distortion to the scene's compositor node tree. 2 | 3 | Here's a breakdown of how to develop this plugin: 4 | 5 | ### Plugin Structure 6 | 1. 
**Create an Operator**: This operator will set up the compositing nodes when executed. 7 | 2. **Create a Panel**: The panel will provide a user interface in the compositor context to easily access and execute the operator. 8 | 9 | ### Explanation 10 | 11 | - **Node Setup**: The operator creates a sequence of compositing nodes (denoise, glare, color balance for temperature, RGB curves, lens distortion) and connects them linearly. 12 | - **UI Panel**: A panel is created in the Node Editor under the Compositor context to easily access the setup operator. 13 | - **Activation**: The plugin can be activated from the Tool tab in the Node Editor when in Compositor mode. 14 | 15 | This basic compositor setup provides a quick starting point for common post-processing tasks, which can be further customized based on specific project needs. -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | # __init__.py for Blender Python Library Wrapper (BPL) 2 | 3 | # The purpose of this file is to initialize the BPL package and make its modules and packages accessible for import. 4 | 5 | # Import Python's future print function for compatibility (optional, depends on your Python version and requirements) 6 | from __future__ import print_function 7 | 8 | # Import necessary submodules and packages here. 9 | # This enables a structured access to your library's functionality. 10 | # Example: from .module_name import ClassName, another_function 11 | 12 | # You can also define any package-wide variables or configurations here. 13 | # Example: DEFAULT_RENDER_ENGINE = 'CYCLES' 14 | 15 | # Initialize any necessary configurations or perform package-level setup here. 16 | # This can include logging setup, configuration file parsing, or other initial setup tasks. 17 | # Example: setup_logging() 18 | 19 | # Finally, you can include any cleanup code or package-wide utilities that should be available when using your package. 20 | 21 | # Remember, the specific contents and structure of this __init__.py file will depend on your project's needs. 22 | # The above examples are provided as a starting point and should be modified to fit your specific requirements. 23 | 24 | # For more complex package initializations, consider splitting the functionality into separate modules or packages within the BPL project. 
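# A possible set of convenience re-exports, shown here only as a sketch. The
# imports below refer to modules that already exist in this repository
# (mist.py, spiral.py, camrig.py); whether and what to re-export is a
# project decision.
try:
    from .mist import create_scene_bounding_cube
    from .spiral import create_spiral
    from .camrig import setup_camera_rig
except ImportError:
    # bpy (and therefore these modules) is only importable inside Blender,
    # so the re-exports stay optional here.
    pass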
-------------------------------------------------------------------------------- /mist.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from mathutils import Vector 3 | 4 | def create_scene_bounding_cube(): 5 | """ 6 | Create a cube that encloses all mesh objects in the scene, scaled up by 1.5x. 7 | """ 8 | min_coord = Vector((float('inf'), float('inf'), float('inf'))) 9 | max_coord = Vector((float('-inf'), float('-inf'), float('-inf'))) 10 | 11 | # Calculate the bounding box of all objects in the scene 12 | for obj in bpy.context.scene.objects: 13 | if obj.type == 'MESH': 14 | for vertex in obj.bound_box: 15 | world_vertex = obj.matrix_world @ Vector(vertex) 16 | min_coord = Vector(map(min, zip(min_coord, world_vertex))) 17 | max_coord = Vector(map(max, zip(max_coord, world_vertex))) 18 | 19 | # Calculate center and dimensions 20 | center = (min_coord + max_coord) / 2 21 | dimensions = max_coord - min_coord 22 | 23 | # Scale dimensions by 1.5 24 | dimensions *= 1.5 25 | 26 | # Create a cube 27 | bpy.ops.mesh.primitive_cube_add(size=1, location=center) 28 | cube = bpy.context.object 29 | cube.name = "SceneBoundingCube" 30 | 31 | # Scale the cube to the calculated dimensions 32 | cube.scale = dimensions / 2 # Cube size is in diameters, but we calculate in radius 33 | 34 | # Hide cube faces in the viewport 35 | for polygon in cube.data.polygons: 36 | polygon.hide = True 37 | 38 | return cube 39 | 40 | def main(): 41 | cube = create_scene_bounding_cube() 42 | 43 | if __name__ == "__main__": 44 | main() 45 | 46 | -------------------------------------------------------------------------------- /doku/swarm.txt: -------------------------------------------------------------------------------- 1 | Let's try to create an ambitious 2 | plugin for Blender, called Swarm. 3 | Swarm will create a swarm of 4 | objects, moving between 5 | a set of faces. 6 | 7 | The animation will follow 8 | the schema: an object will 9 | start from a face along 10 | the normal, an object origin 11 | will be placed on the initial 12 | face, the object moves up to 13 | a random height between 14 | min and max, 15 | and then moves to a randomly 16 | chosen face and lands on it 17 | parallel to the normal. 18 | 19 | The object will spend a random 20 | time on the face and then 21 | repeat the takeoff and landing. 22 | 23 | The plugin should enable a 24 | selection of a set of faces 25 | in edit mode and selection 26 | of the initial object which 27 | will be animated. 28 | 29 | Then the object should be 30 | instantiated in a given 31 | number, which is a plugin 32 | parameter. 33 | 34 | Number of animation keyframes 35 | is another parameter. 36 | 37 | The start of the animation should 38 | be with all the objects placed 39 | on randomly selected faces. 40 | 41 | At the end of the animation, 42 | all objects should be 43 | stationary on the faces. 44 | 45 | The objects should avoid 46 | collision with other objects 47 | both when flying and when 48 | landing. 49 | 50 | The plugin parameters (see the sketch below): 51 | Number of points/objects 52 | Object 53 | Set of faces 54 | Minimum and maximum height of fly by 55 | Minimum and maximum time spent on the face 56 | Total number of keyframes. 57 | Speed of objects.
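A minimal sketch of how these parameters could be declared in the add-on (illustrative only - the plugin itself is still on the TODO list, all names below are placeholders, and the set of faces would come from the edit-mode selection rather than from a property):

import bpy

class SwarmSettings(bpy.types.PropertyGroup):
    # Object to instance for every swarm member.
    source_object: bpy.props.PointerProperty(name="Object", type=bpy.types.Object)
    # Number of instantiated objects.
    count: bpy.props.IntProperty(name="Number of objects", default=10, min=1)
    # Fly-by height range above the faces.
    height_min: bpy.props.FloatProperty(name="Min height", default=1.0, min=0.0)
    height_max: bpy.props.FloatProperty(name="Max height", default=3.0, min=0.0)
    # Time (in frames) an object rests on a face.
    rest_min: bpy.props.IntProperty(name="Min time on face", default=10, min=0)
    rest_max: bpy.props.IntProperty(name="Max time on face", default=50, min=0)
    # Total animation length and movement speed.
    total_frames: bpy.props.IntProperty(name="Total keyframes", default=250, min=1)
    speed: bpy.props.FloatProperty(name="Speed", default=1.0, min=0.01)

def register():
    bpy.utils.register_class(SwarmSettings)
    bpy.types.Scene.swarm_settings = bpy.props.PointerProperty(type=SwarmSettings)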
58 | 59 | todo 60 | 61 | -------------------------------------------------------------------------------- /materials/glass.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def create_glass_material(name, ior, roughness): 4 | # Create a new material 5 | mat = bpy.data.materials.new(name=name) 6 | mat.use_nodes = True 7 | nodes = mat.node_tree.nodes 8 | 9 | # Clear default nodes 10 | nodes.clear() 11 | 12 | # Create a Principled BSDF shader node 13 | shader = nodes.new('ShaderNodeBsdfPrincipled') 14 | shader.location = (0, 0) 15 | 16 | # Set the shader properties for glass 17 | shader.inputs['Transmission'].default_value = 1.0 # Enable full transmission 18 | shader.inputs['Roughness'].default_value = roughness # Control the roughness of the glass 19 | shader.inputs['IOR'].default_value = ior # Index of Refraction 20 | 21 | # Create an Output material node 22 | output = nodes.new('ShaderNodeOutputMaterial') 23 | output.location = (200, 0) 24 | 25 | # Link Principled BSDF to Output 26 | links = mat.node_tree.links 27 | links.new(shader.outputs['BSDF'], output.inputs['Surface']) 28 | 29 | return mat 30 | 31 | def assign_material_to_active_object(material): 32 | obj = bpy.context.active_object 33 | # Ensure the object has a mesh to assign material to 34 | if obj.type == 'MESH': 35 | # Assign it to the object's active material slot or add a new slot 36 | if obj.data.materials: 37 | obj.data.materials[0] = material 38 | else: 39 | obj.data.materials.append(material) 40 | 41 | # Example usage 42 | glass_mat = create_glass_material("CustomGlass", ior=1.45, roughness=0.0) 43 | assign_material_to_active_object(glass_mat) -------------------------------------------------------------------------------- /snailshell.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import math 3 | 4 | def create_snail_shell(steps, initial_radius, growth_factor, angle_step): 5 | # Create a new curve 6 | curve_data = bpy.data.curves.new('SnailShell', type='CURVE') 7 | curve_data.dimensions = '3D' 8 | spline = curve_data.splines.new('BEZIER') 9 | spline.bezier_points.add(steps - 1) 10 | 11 | # Variables for the spiral calculation 12 | radius = initial_radius 13 | angle = 0.0 14 | 15 | # Height increment per step along the z-axis 16 | height_increment = 0.1 17 | z = 0.0 18 | 19 | # Setting points for the spiral 20 | for i, point in enumerate(spline.bezier_points): 21 | x = radius * math.cos(angle) 22 | y = radius * math.sin(angle) 23 | point.co = (x, y, z) # Bezier control points take a 3D coordinate 24 | point.handle_right_type = 'VECTOR' 25 | point.handle_left_type = 'VECTOR' 26 | 27 | radius += growth_factor 28 | angle += math.radians(angle_step) 29 | z += height_increment 30 | 31 | # Create the curve object 32 | curve_obj = bpy.data.objects.new('SnailShell', curve_data) 33 | bpy.context.collection.objects.link(curve_obj) 34 | 35 | # Convert curve to mesh 36 | bpy.context.view_layer.objects.active = curve_obj 37 | curve_obj.select_set(True) 38 | bpy.ops.object.convert(target='MESH') 39 | 40 | # Apply Solidify modifier 41 | solidify = curve_obj.modifiers.new(name='Solidify', type='SOLIDIFY') 42 | solidify.thickness = 0.1 # Set the thickness as desired 43 | 44 | # Apply the Solidify modifier 45 | bpy.ops.object.modifier_apply(modifier='Solidify') 46 | 47 | return curve_obj 48 | 49 | # Example usage 50 | create_snail_shell(100, 0.5, 0.05, 5) -------------------------------------------------------------------------------- /spiral.py:
-------------------------------------------------------------------------------- 1 | import bpy 2 | import math 3 | 4 | def create_spiral(initial_diameter, turns, height, diameter_growth_percent): 5 | # Calculate the initial radius from the diameter 6 | initial_radius = initial_diameter / 2.0 7 | 8 | # Calculate the total number of steps (one step per degree for smoothness) 9 | total_steps = int(turns * 360) 10 | z_step = height / total_steps # Height increment per step 11 | angle_step = 2 * math.pi / 360 # Radians per step 12 | 13 | # Radius increment per step based on growth percent 14 | radius_growth_per_step = initial_radius * (diameter_growth_percent / 100) / 360 15 | 16 | # Create a new curve 17 | curve_data = bpy.data.curves.new('SpiralCurve', type='CURVE') 18 | curve_data.dimensions = '3D' 19 | spline = curve_data.splines.new('POLY') 20 | spline.points.add(total_steps - 1) # total_steps points, one is already there 21 | 22 | # Populate the spline with points 23 | radius = initial_radius 24 | for i in range(total_steps): 25 | x = radius * math.cos(i * angle_step) 26 | y = radius * math.sin(i * angle_step) 27 | z = i * z_step 28 | spline.points[i].co = (x, y, z, 1) # The fourth value (w) must be 1 for POLY type splines 29 | 30 | # Increase the radius for the next point 31 | radius += radius_growth_per_step 32 | 33 | # Create a new object with the curve 34 | curve_obj = bpy.data.objects.new('SpiralObject', curve_data) 35 | bpy.context.collection.objects.link(curve_obj) 36 | bpy.context.view_layer.objects.active = curve_obj 37 | curve_obj.select_set(True) 38 | 39 | return curve_obj 40 | 41 | def main(): 42 | # Example usage 43 | initial_diameter = 1.0 44 | turns = 5 45 | height = 5.0 46 | diameter_growth_percent = 10.0 47 | 48 | create_spiral(initial_diameter, turns, height, diameter_growth_percent) 49 | 50 | if __name__ == "__main__": 51 | main() -------------------------------------------------------------------------------- /lightprobe.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def add_volume_light_probe(cube, resolution_x=4, resolution_y=4, resolution_z=4, falloff=0.75, bleed_bias=0.0): 4 | """ 5 | Adds a Volume Light Probe to the scene based on the dimensions and location of a given cube. 6 | 7 | Parameters: 8 | - cube: The cube object to base the volume light probe dimensions and location on. 9 | - resolution_x, resolution_y, resolution_z: The resolution of the probe in the X, Y, and Z axes. 10 | - falloff: The distance over which light from the probe fades out. 11 | - bleed_bias: The bias (in meters) to reduce light bleeding. 
12 | """ 13 | # Ensure the cube exists 14 | if not cube: 15 | print("Cube object is required.") 16 | return 17 | 18 | # Calculate the location and dimensions for the light probe 19 | location = cube.location 20 | scale = cube.scale 21 | 22 | # Add a volume light probe 23 | bpy.ops.object.lightprobe_add(type='VOLUME', location=location) 24 | light_probe = bpy.context.object 25 | light_probe.name = "VolumeLightProbe" 26 | light_probe.data.grid_resolution_x = resolution_x 27 | light_probe.data.grid_resolution_y = resolution_y 28 | light_probe.data.grid_resolution_z = resolution_z 29 | light_probe.data.falloff = falloff 30 | light_probe.data.bleed_bias = bleed_bias 31 | 32 | # Scale the light probe to match the cube dimensions 33 | light_probe.scale = scale 34 | 35 | return light_probe 36 | 37 | def main(): 38 | # Example usage 39 | # First, create the scene bounding cube (assuming the create_scene_bounding_cube function is defined) 40 | cube = create_scene_bounding_cube() 41 | # Then, add a volume light probe based on this cube 42 | volume_light_probe = add_volume_light_probe(cube, resolution_x=8, resolution_y=8, resolution_z=8, falloff=0.75, bleed_bias=0.1) 43 | print(f"Added {volume_light_probe.name} to the scene.") 44 | 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /rspline.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import random 3 | from mathutils import Vector 4 | 5 | def create_randomized_curve(length, num_segments, max_displacement): 6 | # Calculate the height increment per segment 7 | height_increment = length / num_segments 8 | 9 | # Initialize the starting point 10 | current_point = Vector((0.0, 0.0, 0.0)) 11 | 12 | # Create a new curve data block 13 | curve_data = bpy.data.curves.new('RandomCurve', type='CURVE') 14 | curve_data.dimensions = '3D' 15 | spline = curve_data.splines.new('POLY') 16 | spline.points.add(num_segments) # Add points; one is already there by default 17 | 18 | # Set the first point at the origin 19 | spline.points[0].co = (current_point.x, current_point.y, current_point.z, 1) 20 | 21 | # Generate and set each subsequent point 22 | for i in range(1, num_segments + 1): 23 | # Random displacement in X and Y 24 | displacement_x = random.uniform(-max_displacement, max_displacement) 25 | displacement_y = random.uniform(-max_displacement, max_displacement) 26 | 27 | # Update current point 28 | current_point.x += displacement_x 29 | current_point.y += displacement_y 30 | current_point.z += height_increment 31 | 32 | # Set the point coordinates (the fourth value, w, must be 1 for POLY type splines) 33 | spline.points[i].co = (current_point.x, current_point.y, current_point.z, 1) 34 | 35 | # Create a new object with the curve 36 | curve_obj = bpy.data.objects.new('RandomCurveObject', curve_data) 37 | bpy.context.collection.objects.link(curve_obj) 38 | bpy.context.view_layer.objects.active = curve_obj 39 | curve_obj.select_set(True) 40 | 41 | return curve_obj 42 | 43 | # Example usage 44 | length = 10.0 # Total length of the curve 45 | num_segments = 10 # Number of segments 46 | max_displacement = 0.5 # Maximum displacement for each segment 47 | 48 | create_randomized_curve(length, num_segments, max_displacement) -------------------------------------------------------------------------------- /krita/kritapal.py: -------------------------------------------------------------------------------- 1 | from krita import * 2 | 3 | 
class ColorPaletteExtractor(Extension): 4 | 5 | def __init__(self, parent): 6 | super().__init__(parent) 7 | 8 | def setup(self): 9 | pass 10 | 11 | def createActions(self, window): 12 | action = window.createAction("extractPalette", "Extract Palette from Active Layers", "tools/scripts") 13 | action.triggered.connect(self.extractPalette) 14 | 15 | def extractPalette(self): 16 | doc = Krita.instance().activeDocument() 17 | if not doc: 18 | QMessageBox.warning(None, "Error", "No active document found!") 19 | return 20 | 21 | colors = set() 22 | for layer in doc.topLevelNodes(): 23 | if layer.visible(): 24 | try: 25 | pixel_data = layer.projectionPixelData(0, 0, layer.width(), layer.height()).data() 26 | # Iterate through each pixel to extract colors 27 | for i in range(0, len(pixel_data), 4): # RGBA 28 | r = pixel_data[i] 29 | g = pixel_data[i+1] 30 | b = pixel_data[i+2] 31 | a = pixel_data[i+3] 32 | if a > 0: # Check if pixel is not transparent 33 | colors.add((r, g, b)) 34 | except Exception as e: 35 | print(f"Error processing layer {layer.name()}: {str(e)}") 36 | 37 | # Create palette from collected colors 38 | palette = Palette() 39 | palette.setEntryCount(len(colors)) 40 | for i, color in enumerate(colors): 41 | palette.setEntry(i, color[0], color[1], color[2], 255) # RGB and full alpha 42 | 43 | # Save or use the palette as needed 44 | print("Palette extracted with colors:", colors) 45 | 46 | def main(): 47 | Krita.instance().addExtension(ColorPaletteExtractor(Krita.instance())) 48 | 49 | if __name__ == "__main__": 50 | main() 51 | 52 | -------------------------------------------------------------------------------- /materials/lizardskin.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def create_lizard_skin_material(name): 4 | # Create a new material 5 | mat = bpy.data.materials.new(name=name) 6 | mat.use_nodes = True 7 | nodes = mat.node_tree.nodes 8 | 9 | # Clear default nodes 10 | nodes.clear() 11 | 12 | # Create Principled BSDF shader node 13 | bsdf = nodes.new('ShaderNodeBsdfPrincipled') 14 | bsdf.location = (0, 0) 15 | bsdf.inputs['Base Color'].default_value = (0.2, 0.5, 0.1, 1) # Greenish color 16 | bsdf.inputs['Roughness'].default_value = 0.5 # Semi-rough for a matte finish 17 | bsdf.inputs['Specular'].default_value = 0.5 # Moderate specular reflection 18 | 19 | # Create Voronoi texture for scales pattern 20 | voronoi = nodes.new('ShaderNodeTexVoronoi') 21 | voronoi.location = (-400, 0) 22 | voronoi.feature = 'DISTANCE_TO_EDGE' # Use distance to edge for clear scale patterns 23 | voronoi.inputs['Scale'].default_value = 100.0 # Controls the density of the scales 24 | 25 | # Create a Bump node to give the scales texture 26 | bump = nodes.new('ShaderNodeBump') 27 | bump.location = (-200, -100) 28 | bump.inputs['Strength'].default_value = 0.8 # Adjust strength for more pronounced scales 29 | 30 | # Connect Voronoi to Bump 31 | mat.node_tree.links.new(voronoi.outputs['Distance'], bump.inputs['Height']) 32 | 33 | # Connect Bump to BSDF Normal 34 | mat.node_tree.links.new(bump.outputs['Normal'], bsdf.inputs['Normal']) 35 | 36 | # Output node 37 | output = nodes.new('ShaderNodeOutputMaterial') 38 | output.location = (200, 0) 39 | mat.node_tree.links.new(bsdf.outputs['BSDF'], output.inputs['Surface']) 40 | 41 | return mat 42 | 43 | def assign_material_to_active_object(material): 44 | obj = bpy.context.active_object 45 | if obj.type == 'MESH': 46 | if obj.data.materials: 47 | obj.data.materials[0] = material 48 | else: 49 |
obj.data.materials.append(material) 50 | 51 | # Example usage 52 | lizard_skin_mat = create_lizard_skin_material("LizardSkin") 53 | assign_material_to_active_object(lizard_skin_mat) -------------------------------------------------------------------------------- /materials/basalt.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def create_basalt_material(name): 4 | # Create a new material 5 | mat = bpy.data.materials.new(name=name) 6 | mat.use_nodes = True 7 | nodes = mat.node_tree.nodes 8 | 9 | # Clear default nodes 10 | nodes.clear() 11 | 12 | # Create Principled BSDF shader node 13 | bsdf = nodes.new('ShaderNodeBsdfPrincipled') 14 | bsdf.location = (0, 0) 15 | bsdf.inputs['Base Color'].default_value = (0.1, 0.1, 0.1, 1) # Dark gray, almost black 16 | bsdf.inputs['Roughness'].default_value = 0.9 # Basalt is quite rough 17 | bsdf.inputs['Specular'].default_value = 0.1 # Low specular for rocks 18 | 19 | # Create a Noise Texture for displacement 20 | noise_tex = nodes.new('ShaderNodeTexNoise') 21 | noise_tex.location = (-300, 100) 22 | noise_tex.inputs['Scale'].default_value = 16.0 # Texture scale 23 | noise_tex.inputs['Detail'].default_value = 2.0 # Texture detail 24 | noise_tex.inputs['Distortion'].default_value = 0.5 # Texture distortion 25 | 26 | # Create a Bump node to connect noise to BSDF 27 | bump = nodes.new('ShaderNodeBump') 28 | bump.location = (-300, -100) 29 | bump.inputs['Strength'].default_value = 0.8 # The strength of the bumps 30 | 31 | # Link nodes 32 | links = mat.node_tree.links 33 | links.new(noise_tex.outputs['Fac'], bump.inputs['Height']) 34 | links.new(bump.outputs['Normal'], bsdf.inputs['Normal']) 35 | 36 | # Output node 37 | output = nodes.new('ShaderNodeOutputMaterial') 38 | output.location = (200, 0) 39 | links.new(bsdf.outputs['BSDF'], output.inputs['Surface']) 40 | 41 | return mat 42 | 43 | def assign_material_to_active_object(material): 44 | obj = bpy.context.active_object 45 | # Ensure the object has a mesh to assign material to 46 | if obj.type == 'MESH': 47 | # Assign it to the object's active material slot or add a new slot 48 | if obj.data.materials: 49 | obj.data.materials[0] = material 50 | else: 51 | obj.data.materials.append(material) 52 | 53 | # Example usage 54 | basalt_mat = create_basalt_material("BasaltRock") 55 | assign_material_to_active_object(basalt_mat) -------------------------------------------------------------------------------- /materials/brushed.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def create_brushed_metal_material(name, base_color=(0.8, 0.8, 0.8, 1), roughness=0.2, anisotropic=0.8, rotation=0.5): 4 | # Create a new material 5 | mat = bpy.data.materials.new(name=name) 6 | mat.use_nodes = True 7 | nodes = mat.node_tree.nodes 8 | 9 | # Clear default nodes 10 | nodes.clear() 11 | 12 | # Create Principled BSDF shader node 13 | shader = nodes.new('ShaderNodeBsdfPrincipled') 14 | shader.location = (0, 0) 15 | 16 | # Set properties for brushed metal 17 | shader.inputs['Base Color'].default_value = base_color 18 | shader.inputs['Metallic'].default_value = 1.0 # Fully metallic 19 | shader.inputs['Roughness'].default_value = roughness 20 | shader.inputs['Anisotropic'].default_value = anisotropic 21 | shader.inputs['Anisotropic Rotation'].default_value = rotation 22 | 23 | # Create a Texture Coordinate node 24 | tex_coord = nodes.new('ShaderNodeTexCoord') 25 | tex_coord.location = (-400, 0) 26 | 27 | # Create a Noise
Texture node to vary the anisotropic rotation 28 | noise_tex = nodes.new('ShaderNodeTexNoise') 29 | noise_tex.location = (-200, 0) 30 | noise_tex.inputs['Scale'].default_value = 100 # Texture scale 31 | noise_tex.inputs['Detail'].default_value = 2 # Texture detail 32 | noise_tex.inputs['Distortion'].default_value = 0.1 # Texture distortion 33 | 34 | # Link nodes 35 | links = mat.node_tree.links 36 | links.new(tex_coord.outputs['Object'], noise_tex.inputs['Vector']) 37 | links.new(noise_tex.outputs['Fac'], shader.inputs['Anisotropic Rotation']) 38 | 39 | # Output node 40 | output = nodes.new('ShaderNodeOutputMaterial') 41 | output.location = (200, 0) 42 | links.new(shader.outputs['BSDF'], output.inputs['Surface']) 43 | 44 | return mat 45 | 46 | def assign_material_to_active_object(material): 47 | obj = bpy.context.active_object 48 | if obj.type == 'MESH': 49 | if obj.data.materials: 50 | obj.data.materials[0] = material 51 | else: 52 | obj.data.materials.append(material) 53 | 54 | # Example usage 55 | brushed_metal_mat = create_brushed_metal_material("BrushedMetal", base_color=(0.8, 0.8, 0.8, 1), roughness=0.2, anisotropic=0.8, rotation=0.5) 56 | assign_material_to_active_object(brushed_metal_mat) -------------------------------------------------------------------------------- /materials/veiled.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def create_porcelain_material(name, texture_path=None): 4 | # Create a new material 5 | mat = bpy.data.materials.new(name=name) 6 | mat.use_nodes = True 7 | nodes = mat.node_tree.nodes 8 | 9 | # Clear default nodes 10 | nodes.clear() 11 | 12 | # Create Principled BSDF shader node 13 | bsdf = nodes.new('ShaderNodeBsdfPrincipled') 14 | bsdf.location = (0, 0) 15 | bsdf.inputs['Base Color'].default_value = (0.95, 0.95, 1, 1) # Slightly bluish white 16 | bsdf.inputs['Subsurface'].default_value = 0.5 # Some subsurface scattering 17 | bsdf.inputs['Subsurface Radius'].default_value = (1, 1, 1) 18 | bsdf.inputs['Subsurface Color'].default_value = (0.95, 0.95, 1, 1) 19 | bsdf.inputs['Roughness'].default_value = 0.1 # Porcelain is quite smooth 20 | bsdf.inputs['Sheen'].default_value = 0.3 # Gives a soft sheen typical for porcelain 21 | 22 | # Check if a texture path is provided for blue patterns 23 | if texture_path: 24 | # Image texture node for blue patterns 25 | texture = nodes.new('ShaderNodeTexImage') 26 | texture.location = (-300, 0) 27 | texture.image = bpy.data.images.load(texture_path) 28 | 29 | # Mix the texture with the base color 30 | mix = nodes.new('ShaderNodeMixRGB') 31 | mix.location = (-150, 0) 32 | mix.inputs['Color1'].default_value = (0.95, 0.95, 1, 1) # Base color 33 | mix.inputs['Fac'].default_value = 0.2 # Mix factor 34 | mat.node_tree.links.new(texture.outputs['Color'], mix.inputs['Color2']) 35 | mat.node_tree.links.new(mix.outputs['Color'], bsdf.inputs['Base Color']) 36 | 37 | # Output node 38 | output = nodes.new('ShaderNodeOutputMaterial') 39 | output.location = (200, 0) 40 | mat.node_tree.links.new(bsdf.outputs['BSDF'], output.inputs['Surface']) 41 | 42 | return mat 43 | 44 | def assign_material_to_active_object(material): 45 | obj = bpy.context.active_object 46 | if obj.type == 'MESH': 47 | if obj.data.materials: 48 | obj.data.materials[0] = material 49 | else: 50 | obj.data.materials.append(material) 51 | 52 | # Example usage 53 | porcelain_mat = create_porcelain_material("BlueWhitePorcelain", texture_path="path/to/your/texture.jpg") 54 | assign_material_to_active_object(porcelain_mat)
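# Note (sketch): the Principled BSDF socket names used in these material
# scripts ('Specular', 'Subsurface', 'Sheen', 'Transmission') match Blender
# 2.8x-3.x. Blender 4.0 renamed several of them (e.g. 'Specular' ->
# 'Specular IOR Level', 'Transmission' -> 'Transmission Weight'), so a small
# fallback helper along these lines may be useful; the exact names should be
# checked against the Blender version in use.
def set_principled_input(shader, value, *names):
    """Set the first input socket that exists under any of the given names."""
    for name in names:
        socket = shader.inputs.get(name)
        if socket is not None:
            socket.default_value = value
            return True
    return False

# Example:
# set_principled_input(bsdf, 0.5, 'Specular', 'Specular IOR Level')
# set_principled_input(bsdf, 1.0, 'Transmission', 'Transmission Weight')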
-------------------------------------------------------------------------------- /materials/aluminum.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def create_aluminum_material(name): 4 | # Create a new material 5 | mat = bpy.data.materials.new(name) 6 | mat.use_nodes = True 7 | nodes = mat.node_tree.nodes 8 | 9 | # Clear default nodes 10 | nodes.clear() 11 | 12 | # Create Principled BSDF shader node 13 | shader = nodes.new('ShaderNodeBsdfPrincipled') 14 | shader.location = (0, 0) 15 | 16 | # Configure the shader for aluminum 17 | shader.inputs['Metallic'].default_value = 1.0 # Full metallic 18 | shader.inputs['Roughness'].default_value = 0.4 # Slightly rough 19 | shader.inputs['Base Color'].default_value = (0.8, 0.8, 0.8, 1) # Light gray color of aluminum 20 | 21 | # Add a noise texture for scratches 22 | noise_tex = nodes.new('ShaderNodeTexNoise') 23 | noise_tex.location = (-400, 0) 24 | noise_tex.inputs['Scale'].default_value = 25.0 # Scale of the noise 25 | noise_tex.inputs['Detail'].default_value = 16.0 # Detail of the noise 26 | noise_tex.inputs['Distortion'].default_value = 0.5 # Distortion of the noise texture 27 | 28 | # Add a bump node to apply the scratches 29 | bump = nodes.new('ShaderNodeBump') 30 | bump.location = (-200, 0) 31 | bump.inputs['Strength'].default_value = 0.1 # Strength of the bump effect 32 | bump.inputs['Distance'].default_value = 0.1 # Distance for bump effect (affects the height of the scratches) 33 | 34 | # Link nodes 35 | links = mat.node_tree.links 36 | links.new(noise_tex.outputs['Fac'], bump.inputs['Height']) 37 | links.new(bump.outputs['Normal'], shader.inputs['Normal']) 38 | 39 | # Output node 40 | output = nodes.new('ShaderNodeOutputMaterial') 41 | output.location = (200, 0) 42 | links.new(shader.outputs['BSDF'], output.inputs['Surface']) 43 | 44 | return mat 45 | 46 | def assign_material_to_active_object(material): 47 | obj = bpy.context.active_object 48 | # Ensure the object has a mesh to assign material to 49 | if obj.type == 'MESH': 50 | # Assign it to the object's active material slot or add a new slot 51 | if obj.data.materials: 52 | obj.data.materials[0] = material 53 | else: 54 | obj.data.materials.append(material) 55 | 56 | # Create and assign the material 57 | aluminum_mat = create_aluminum_material("AluminumScratched") 58 | assign_material_to_active_object(aluminum_mat) -------------------------------------------------------------------------------- /materials/birch.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def create_birch_bark_material(name): 4 | # Create a new material 5 | mat = bpy.data.materials.new(name=name) 6 | mat.use_nodes = True 7 | nodes = mat.node_tree.nodes 8 | 9 | # Clear default nodes 10 | nodes.clear() 11 | 12 | # Create Principled BSDF shader node 13 | shader = nodes.new('ShaderNodeBsdfPrincipled') 14 | shader.location = (0, 0) 15 | 16 | # Set base color and roughness 17 | shader.inputs['Base Color'].default_value = (0.9, 0.9, 0.9, 1) # Light grayish color of birch 18 | shader.inputs['Roughness'].default_value = 0.5 # Moderately rough 19 | 20 | # Create Musgrave texture for general bark texture 21 | musgrave = nodes.new('ShaderNodeTexMusgrave') 22 | musgrave.location = (-300, 100) 23 | musgrave.inputs['Scale'].default_value = 150.0 24 | musgrave.inputs['Detail'].default_value = 16.0 25 | musgrave.inputs['Dimension'].default_value = 0.0 26 | musgrave.inputs['Lacunarity'].default_value = 1.0 27 | 28
| # Create Wave texture for stripes 29 | wave = nodes.new('ShaderNodeTexWave') 30 | wave.location = (-300, -100) 31 | wave.inputs['Scale'].default_value = 60.0 32 | wave.inputs['Distortion'].default_value = 5.0 33 | 34 | # Create Color Ramp to control the wave bands 35 | color_ramp = nodes.new('ShaderNodeValToRGB') 36 | color_ramp.location = (-100, -100) 37 | color_ramp.color_ramp.elements[0].color = (0, 0, 0, 1) # Black 38 | color_ramp.color_ramp.elements[1].color = (1, 1, 1, 1) # White 39 | 40 | # Link nodes 41 | links = mat.node_tree.links 42 | links.new(wave.outputs['Color'], color_ramp.inputs['Fac']) 43 | links.new(color_ramp.outputs['Color'], shader.inputs['Base Color']) 44 | links.new(musgrave.outputs['Fac'], shader.inputs['Roughness']) 45 | 46 | # Output node 47 | output = nodes.new('ShaderNodeOutputMaterial') 48 | output.location = (200, 0) 49 | links.new(shader.outputs['BSDF'], output.inputs['Surface']) 50 | 51 | return mat 52 | 53 | def assign_material_to_active_object(material): 54 | obj = bpy.context.active_object 55 | if obj.type == 'MESH': 56 | if obj.data.materials: 57 | obj.data.materials[0] = material 58 | else: 59 | obj.data.materials.append(material) 60 | 61 | # Create and assign the material 62 | birch_bark_mat = create_birch_bark_material("BirchBarkMaterial") 63 | assign_material_to_active_object(birch_bark_mat) -------------------------------------------------------------------------------- /pipe.py: -------------------------------------------------------------------------------- 1 | # Creating an L-shaped pipe programmatically in Blender 2 | # involves several steps, including mesh editing, 3 | # vertex beveling, converting the mesh to a curve, 4 | # and then adjusting the curve's properties to give it 5 | # depth and resolution before finally converting it back 6 | # to a mesh. 7 | # The following function encapsulates this process. 8 | import bpy 9 | def create_l_shaped_pipe(plane_size=2, bevel_segments=8, curve_depth=0.5, curve_resolution=4): 10 | """ 11 | Creates an L-shaped pipe from a plane by deleting two adjacent edges, beveling a vertex, 12 | converting to a curve, and adding depth and resolution to the curve. 13 | 14 | Parameters: 15 | - plane_size: The size of the initial plane. 16 | - bevel_segments: Number of segments in the bevel. 17 | - curve_depth: The depth of the curve to represent the pipe's thickness. 18 | - curve_resolution: The resolution of the curve.
19 | """ 20 | bpy.ops.object.select_all(action='DESELECT') # Deselect all objects 21 | 22 | # Add a plane and enter edit mode 23 | bpy.ops.mesh.primitive_plane_add(size=plane_size, location=(0, 0, 0)) 24 | plane = bpy.context.object 25 | bpy.ops.object.mode_set(mode='EDIT') 26 | 27 | # Delete two adjacent edges to create an L shape 28 | bpy.ops.mesh.select_mode(type='EDGE') 29 | bpy.ops.mesh.select_all(action='SELECT') 30 | bpy.ops.mesh.delete(type='EDGE') 31 | bpy.ops.mesh.select_all(action='SELECT') 32 | 33 | # Bevel the vertex 34 | bpy.ops.mesh.select_mode(type='VERT') 35 | bpy.ops.mesh.bevel(offset=plane_size / 2, segments=bevel_segments, vertex_only=True) 36 | 37 | # Return to object mode and convert to curve 38 | bpy.ops.object.mode_set(mode='OBJECT') 39 | bpy.ops.object.convert(target='CURVE') 40 | 41 | # Set curve properties 42 | curve = bpy.context.object 43 | curve.data.dimensions = '3D' 44 | curve.data.fill_mode = 'FULL' 45 | curve.data.bevel_depth = curve_depth 46 | curve.data.bevel_resolution = curve_resolution 47 | curve.data.resolution_u = curve_resolution 48 | 49 | # Convert the curve to a mesh and rename to "Pipe" 50 | bpy.ops.object.convert(target='MESH') 51 | pipe = bpy.context.object 52 | pipe.name = "Pipe" 53 | 54 | return pipe 55 | 56 | 57 | def main(): 58 | pipe = create_l_shaped_pipe(plane_size=2, bevel_segments=8, curve_depth=0.25, curve_resolution=4) 59 | 60 | if __name__ == "__main__": 61 | main() 62 | -------------------------------------------------------------------------------- /plugin.py: -------------------------------------------------------------------------------- 1 | bl_info = { 2 | "name": "Camera Rig Setup", 3 | "blender": (2, 80, 0), 4 | "category": "Object", 5 | } 6 | 7 | import bpy 8 | 9 | class CamRigSetupOperator(bpy.types.Operator): 10 | """Set up a camera rig along a path with a target""" 11 | bl_idname = "object.camrig_setup" 12 | bl_label = "Set Up Camera Rig" 13 | bl_options = {'REGISTER', 'UNDO'} 14 | 15 | # Properties to be set by the user 16 | camera: bpy.props.PointerProperty(name="Camera", type=bpy.types.Object, poll=lambda self, obj: obj.type == 'CAMERA') 17 | path: bpy.props.PointerProperty(name="Path", type=bpy.types.Object, poll=lambda self, obj: obj.type == 'CURVE') 18 | target: bpy.props.PointerProperty(name="Target", type=bpy.types.Object) 19 | forward_axis: bpy.props.EnumProperty( 20 | name="Forward Axis", 21 | items=[ 22 | ('TRACK_NEGATIVE_X', '-X', ""), 23 | ('TRACK_X', 'X', ""), 24 | ('TRACK_NEGATIVE_Y', '-Y', ""), 25 | ('TRACK_Y', 'Y', ""), 26 | ('TRACK_NEGATIVE_Z', '-Z', ""), 27 | ('TRACK_Z', 'Z', "") 28 | ], 29 | default='TRACK_NEGATIVE_Z' 30 | ) 31 | up_axis: bpy.props.EnumProperty( 32 | name="Up Axis", 33 | items=[ 34 | ('UP_X', 'X', ""), 35 | ('UP_Y', 'Y', ""), 36 | ('UP_Z', 'Z', "") 37 | ], 38 | default='UP_Y' 39 | ) 40 | 41 | def execute(self, context): 42 | # Setup camera constraints here 43 | # This is a placeholder for the actual setup logic you've implemented 44 | 45 | # For example, setting up a "Follow Path" constraint 46 | follow_path_constraint = self.camera.constraints.new(type='FOLLOW_PATH') 47 | follow_path_constraint.target = self.path 48 | 49 | # Assume similar setup for "Track To" constraint and any animation keyframes 50 | # You would use self.forward_axis and self.up_axis where appropriate 51 | 52 | self.report({'INFO'}, "Camera Rig Setup Complete") 53 | return {'FINISHED'} 54 | 55 | def menu_func(self, context): 56 | self.layout.operator(CamRigSetupOperator.bl_idname) 57 | 58 | def register(): 59 | 
bpy.utils.register_class(CamRigSetupOperator) 60 | bpy.types.VIEW3D_MT_object.append(menu_func) 61 | 62 | def unregister(): 63 | bpy.utils.unregister_class(CamRigSetupOperator) 64 | bpy.types.VIEW3D_MT_object.remove(menu_func) 65 | 66 | if __name__ == "__main__": 67 | register() -------------------------------------------------------------------------------- /swarmtools.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from mathutils import Vector 3 | 4 | def create_bezier_between_faces(obj, face_index1, face_index2): 5 | # Ensure the object is a mesh 6 | if obj.type != 'MESH': 7 | print("Selected object is not a mesh") 8 | return 9 | 10 | mesh = obj.data 11 | # Get the world matrix of the object 12 | mw = obj.matrix_world 13 | 14 | # Get the center and normal of the first face 15 | f1 = mesh.polygons[face_index1] 16 | center1 = mw @ sum((obj.data.vertices[vert].co for vert in f1.vertices), Vector()) / len(f1.vertices) 17 | normal1 = mw.to_3x3() @ f1.normal 18 | 19 | # Get the center and normal of the second face 20 | f2 = mesh.polygons[face_index2] 21 | center2 = mw @ sum((obj.data.vertices[vert].co for vert in f2.vertices), Vector()) / len(f2.vertices) 22 | normal2 = mw.to_3x3() @ f2.normal 23 | 24 | # Calculate the distance between the faces and set the height 25 | distance = (center1 - center2).length 26 | height = 4 * distance 27 | 28 | # Calculate control points 29 | control1 = center1 + normal1 * height 30 | control2 = center2 + normal2 * height 31 | 32 | # Create the curve 33 | curve_data = bpy.data.curves.new('BezierCurve', type='CURVE') 34 | curve_data.dimensions = '3D' 35 | spline = curve_data.splines.new(type='BEZIER') 36 | spline.bezier_points.add(1) 37 | 38 | # Assign points 39 | spline.bezier_points[0].co = center1 40 | spline.bezier_points[0].handle_right_type = 'FREE' 41 | spline.bezier_points[0].handle_right = control1 42 | 43 | spline.bezier_points[1].co = center2 44 | spline.bezier_points[1].handle_left_type = 'FREE' 45 | spline.bezier_points[1].handle_left = control2 46 | 47 | # Create curve object 48 | curve_obj = bpy.data.objects.new('BezierCurveObj', curve_data) 49 | bpy.context.scene.collection.objects.link(curve_obj) 50 | 51 | # Example usage 52 | if bpy.context.object and bpy.context.object.type == 'MESH' and bpy.context.object.mode == 'EDIT': 53 | bpy.ops.object.mode_set(mode='OBJECT') # Temporarily switch to Object Mode to access mesh data 54 | selected_faces = [p.index for p in bpy.context.object.data.polygons if p.select] 55 | 56 | if len(selected_faces) == 2: 57 | create_bezier_between_faces(bpy.context.object, selected_faces[0], selected_faces[1]) 58 | else: 59 | print("Please select exactly two faces.") 60 | 61 | bpy.ops.object.mode_set(mode='EDIT') # Switch back to Edit Mode -------------------------------------------------------------------------------- /meshy.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import requests 3 | import os 4 | 5 | class OBJECT_OT_send_to_meshy(bpy.types.Operator): 6 | bl_idname = "object.send_to_meshy" 7 | bl_label = "Convert Image to Mesh" 8 | 9 | @classmethod 10 | def poll(cls, context): 11 | return context.active_object and context.active_object.type == 'IMAGE' 12 | 13 | def execute(self, context): 14 | api_key = 'YOUR_API_KEY_HERE' 15 | headers = {'Authorization': f'Bearer {api_key}'} 16 | url = 'https://api.meshy.ai/v1/image-to-3d' 17 | 18 | # Assuming the image is saved and accessible 19 | image_path = 
bpy.path.abspath(context.active_object.filepath) 20 | files = {'file': open(image_path, 'rb')} 21 | response = requests.post(url, headers=headers, files=files) 22 | 23 | if response.status_code == 200: 24 | json_response = response.json() 25 | model_url = json_response['model_urls']['glb'] # Assuming GLB format is desired 26 | 27 | # Download the GLB file 28 | model_response = requests.get(model_url) 29 | if model_response.status_code == 200: 30 | model_path = os.path.join(bpy.path.abspath('//'), 'downloaded_model.glb') 31 | with open(model_path, 'wb') as f: 32 | f.write(model_response.content) 33 | 34 | # Import the model into Blender 35 | bpy.ops.import_scene.gltf(filepath=model_path) 36 | else: 37 | self.report({'ERROR'}, "Failed to download the model") 38 | return {'CANCELLED'} 39 | else: 40 | self.report({'ERROR'}, "API request failed") 41 | return {'CANCELLED'} 42 | 43 | self.report({'INFO'}, "Model imported successfully") 44 | return {'FINISHED'} 45 | 46 | class OBJECT_PT_meshy_panel(bpy.types.Panel): 47 | bl_label = "Meshy AI Integration" 48 | bl_idname = "OBJECT_PT_meshy" 49 | bl_space_type = 'PROPERTIES' 50 | bl_region_type = 'WINDOW' 51 | bl_context = "object" 52 | 53 | def draw(self, context): 54 | self.layout.operator(OBJECT_OT_send_to_meshy.bl_idname) 55 | 56 | def register(): 57 | bpy.utils.register_class(OBJECT_OT_send_to_meshy) 58 | bpy.utils.register_class(OBJECT_PT_meshy_panel) 59 | 60 | def unregister(): 61 | bpy.utils.unregister_class(OBJECT_OT_send_to_meshy) 62 | bpy.utils.unregister_class(OBJECT_PT_meshy_panel) 63 | 64 | if __name__ == "__main__": 65 | register() -------------------------------------------------------------------------------- /materials/rusted.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | def create_rusted_metal_material(name): 4 | # Create a new material 5 | mat = bpy.data.materials.new(name=name) 6 | mat.use_nodes = True 7 | nodes = mat.node_tree.nodes 8 | 9 | # Clear default nodes 10 | nodes.clear() 11 | 12 | # Create Principled BSDF shader node for the base metal 13 | metal_shader = nodes.new('ShaderNodeBsdfPrincipled') 14 | metal_shader.location = (0, 0) 15 | metal_shader.inputs['Base Color'].default_value = (0.6, 0.5, 0.5, 1) # Dark grey metallic 16 | metal_shader.inputs['Metallic'].default_value = 1.0 # Fully metallic 17 | metal_shader.inputs['Roughness'].default_value = 0.5 # Slightly rough 18 | 19 | # Create Principled BSDF shader for rust 20 | rust_shader = nodes.new('ShaderNodeBsdfPrincipled') 21 | rust_shader.location = (0, -200) 22 | rust_shader.inputs['Base Color'].default_value = (0.8, 0.2, 0.1, 1) # Rust color 23 | rust_shader.inputs['Roughness'].default_value = 1.0 # Very rough 24 | rust_shader.inputs['Metallic'].default_value = 0.0 # Non-metallic 25 | 26 | # Geometry node to detect edges 27 | geometry = nodes.new('ShaderNodeNewGeometry') 28 | geometry.location = (-400, 100) 29 | 30 | # Pointiness to control rust effect 31 | pointiness = nodes.new('ShaderNodeValToRGB') 32 | pointiness.location = (-200, 100) 33 | pointiness.color_ramp.elements[0].position = 0.4 # Adjust these to control the edge detection sensitivity 34 | pointiness.color_ramp.elements[1].position = 0.6 35 | mat.node_tree.links.new(geometry.outputs['Pointiness'], pointiness.inputs['Fac']) 36 | 37 | # Mix shader to combine metal and rust based on edges 38 | mix_shader = nodes.new('ShaderNodeMixShader') 39 | mix_shader.location = (200, 0) 40 | mat.node_tree.links.new(pointiness.outputs['Color'], mix_shader.inputs['Fac']) 41 |
mat.node_tree.links.new(metal_shader.outputs['BSDF'], mix_shader.inputs[1]) 42 | mat.node_tree.links.new(rust_shader.outputs['BSDF'], mix_shader.inputs[2]) 43 | 44 | # Output node 45 | output = nodes.new('ShaderNodeOutputMaterial') 46 | output.location = (400, 0) 47 | mat.node_tree.links.new(mix_shader.outputs['Shader'], output.inputs['Surface']) 48 | 49 | return mat 50 | 51 | def assign_material_to_active_object(material): 52 | obj = bpy.context.active_object 53 | if obj.type == 'MESH': 54 | if obj.data.materials: 55 | obj.data.materials[0] = material 56 | else: 57 | obj.data.materials.append(material) 58 | 59 | # Example usage 60 | rusted_metal_mat = create_rusted_metal_material("RustedMetal") 61 | assign_material_to_active_object(rusted_metal_mat) -------------------------------------------------------------------------------- /camrig.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from mathutils import Vector 3 | 4 | def update_camera_focal_length(camera, target_object, scale_factor=0.7): 5 | """ 6 | Updates the camera's focal length based on the target object's size and distance, 7 | aiming to keep the target object's largest dimension within a specified percentage 8 | of the frame's width or height. 9 | 10 | Args: 11 | - camera: The camera object to update. 12 | - target_object: The target object the camera focuses on. 13 | - scale_factor (float): Determines how much of the frame's width or height 14 | the target object should occupy. Defaults to 0.7 (70%). 15 | """ 16 | camera_loc = camera.matrix_world.translation 17 | target_loc = target_object.matrix_world.translation 18 | distance = (target_loc - camera_loc).length 19 | 20 | # Calculate dimensions of the target object 21 | dimensions = target_object.dimensions 22 | max_dimension = max(dimensions.x, dimensions.y, dimensions.z) 23 | 24 | # Assuming a sensor width of 36mm (default in Blender) and frame aspect ratio 25 | sensor_width = camera.data.sensor_width 26 | aspect_ratio = bpy.context.scene.render.resolution_x / bpy.context.scene.render.resolution_y 27 | frame_dimension = sensor_width if aspect_ratio >= 1 else sensor_width / aspect_ratio 28 | 29 | # Calculate focal length to fit the object within the specified scale factor of the frame 30 | focal_length = (distance * camera.data.lens) / (max_dimension / frame_dimension * scale_factor) 31 | camera.data.lens = focal_length 32 | 33 | def setup_camera_rig(camera_name, curve_name, target_object_name, initial_focal_length=50): 34 | """ 35 | Sets up a camera rig that moves along a Bézier curve focusing on a specified object, 36 | adjusting the camera's focal length dynamically. 37 | 38 | Args: 39 | - curve_name (str): The name of the Bézier curve object. 40 | - target_object_name (str): The name of the object to focus on. 41 | - initial_focal_length (float): The initial focal length of the camera.
42 | """ 43 | curve = bpy.data.objects.get(curve_name) 44 | target_object = bpy.data.objects.get(target_object_name) 45 | if not curve or not target_object: 46 | print("Curve or target object does not exist.") 47 | return 48 | 49 | camera = bpy.data.objects.get(camera_name) 50 | camera.data.sensor_fit = 'HORIZONTAL' 51 | camera.data.lens = initial_focal_length 52 | camera.location = [0,0,0] 53 | camera.location = [0,0,0] 54 | 55 | camera.constraints.clear() 56 | 57 | follow_path_constraint = camera.constraints.new(type='FOLLOW_PATH') 58 | follow_path_constraint.target = curve 59 | follow_path_constraint.use_curve_follow = True 60 | 61 | track_constrain = camera.constraints.new(type='TRACK_TO') 62 | track_constrain.target = target_object 63 | 64 | print("Camera rig setup complete.") 65 | 66 | def main(): 67 | setup_camera_rig("Camera", "CamPath", "Cube", initial_focal_length=75) 68 | 69 | if __name__ == "__main__": 70 | main() 71 | 72 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to BPL 2 | 3 | Thank you for your interest in contributing to BPL, the Blender Python Library Wrapper! We appreciate contributions from everyone, whether it's in the form of code, documentation, bug reports, or feature requests. This document outlines how you can contribute to the BPL project. 4 | 5 | ## Getting Started 6 | 7 | Before you begin, please ensure you have a GitHub account and are familiar with GitHub workflows. Additionally, you should have a basic understanding of Python programming and, ideally, some experience with Blender's Python API (bpy). 8 | 9 | ## How to Contribute 10 | 11 | ### Reporting Bugs 12 | 13 | If you encounter a bug in BPL, please open an issue on our GitHub repository. Include the following information to help us understand and reproduce the issue: 14 | 15 | - A clear and concise description of the bug. 16 | - Steps to reproduce the bug. 17 | - The expected behavior. 18 | - Any error messages or screenshots. 19 | - Your environment information, such as Blender version and operating system. 20 | 21 | ### Suggesting Enhancements 22 | 23 | We welcome suggestions for enhancements or new features. Please open an issue to describe your suggestion, providing as much detail and context as possible. Explain why you believe this enhancement would be beneficial to BPL users. 24 | 25 | ### Your First Code Contribution 26 | 27 | Ready to contribute code? Great! Here's how to get started: 28 | 29 | 1. **Fork the Repository**: Navigate to the BPL GitHub repository and click the "Fork" button to create your own copy of the project. 30 | 2. **Clone Your Fork**: Clone your forked repository to your local machine using `git clone`. 31 | 3. **Create a Branch**: Create a new branch for your contribution using `git checkout -b feature/YourFeatureName`. 32 | 4. **Make Your Changes**: Implement your feature or fix, adhering to the existing coding style as much as possible. 33 | 5. **Test Your Changes**: Ensure your changes do not introduce any new bugs and that all existing tests pass. 34 | 6. **Commit Your Changes**: Commit your changes using a clear and descriptive commit message. 35 | 7. **Push to Your Fork**: Push your changes to your forked repository on GitHub. 36 | 8. **Submit a Pull Request**: Open a pull request against the original BPL repository. Provide a clear description of your changes and any other relevant information. 
37 | 38 | ### Pull Request Guidelines 39 | 40 | - Ensure your code is well-documented and follows PEP 8 style guidelines for Python code. 41 | - Include any necessary tests for your changes. 42 | - Update the README.md or documentation if your changes introduce new features or significant changes to existing functionality. 43 | - Keep your pull requests focused on a single issue or feature to facilitate easier review. 44 | 45 | ## Code of Conduct 46 | 47 | This project adheres to a Code of Conduct. By participating in this project, you agree to abide by its terms. 48 | 49 | ## Questions or Assistance 50 | 51 | If you have any questions or need assistance with contributing to BPL, please feel free to reach out to the project maintainers or open an issue for general questions. 52 | 53 | Thank you for contributing to BPL, and we look forward to your contributions! 54 | -------------------------------------------------------------------------------- /compositor.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | class COMPOSITOR_OT_setup_basic_nodes(bpy.types.Operator): 4 | """ 5 | Set up basic compositing nodes 6 | """ 7 | bl_idname = "compositor.setup_basic_nodes" 8 | bl_label = "Setup Basic Nodes" 9 | bl_options = {'REGISTER', 'UNDO'} 10 | 11 | def execute(self, context): 12 | # Enable use_nodes on the scene 13 | context.scene.use_nodes = True 14 | tree = context.scene.node_tree 15 | 16 | # Clear existing nodes 17 | for node in tree.nodes: 18 | tree.nodes.remove(node) 19 | 20 | # Create nodes 21 | nodes = tree.nodes 22 | links = tree.links 23 | 24 | # Adding denoise node 25 | denoise_node = nodes.new(type='CompositorNodeDenoise') 26 | denoise_node.location = (0, 300) 27 | 28 | # Adding glare node 29 | glare_node = nodes.new(type='CompositorNodeGlare') 30 | glare_node.location = (200, 300) 31 | 32 | # Adding color balance for color temperature adjustment 33 | color_temp_node = nodes.new(type='CompositorNodeColorBalance') 34 | color_temp_node.location = (400, 300) 35 | color_temp_node.correction_method = 'LIFT_GAMMA_GAIN' 36 | 37 | # Adding RGB curves node 38 | rgb_curves_node = nodes.new(type='CompositorNodeRGBCurves') 39 | rgb_curves_node.location = (600, 300) 40 | 41 | # Adding lens distortion node 42 | lens_dist_node = nodes.new(type='CompositorNodeLensDist') 43 | lens_dist_node.location = (800, 300) 44 | lens_dist_node.inputs['Distort'].default_value = 0.01 # Minimal distortion 45 | 46 | # Connect nodes 47 | links.new(denoise_node.outputs['Image'], glare_node.inputs['Image']) 48 | links.new(glare_node.outputs['Image'], color_temp_node.inputs['Image']) 49 | links.new(color_temp_node.outputs['Image'], rgb_curves_node.inputs['Image']) 50 | links.new(rgb_curves_node.outputs['Image'], lens_dist_node.inputs['Image']) 51 | 52 | # Add a composite node to connect the final output 53 | composite_node = nodes.new(type='CompositorNodeComposite') 54 | composite_node.location = (1000, 300) 55 | links.new(lens_dist_node.outputs['Image'], composite_node.inputs['Image']) 56 | 57 | # Render layer node 58 | render_layers = nodes.new('CompositorNodeRLayers') 59 | render_layers.location = (-200, 300) 60 | links.new(render_layers.outputs['Image'], denoise_node.inputs['Image']) 61 | 62 | return {'FINISHED'} 63 | 64 | class COMPOSITOR_PT_custom_panel(bpy.types.Panel): 65 | """ 66 | Creates a Panel in the Compositor 67 | context 68 | """ 69 | bl_label = "Basic Compositor Setup" 70 | bl_idname = "COMPOSITOR_PT_custom_panel" 71 | bl_space_type = 'NODE_EDITOR' 
72 | bl_region_type = 'UI' 73 | bl_category = 'Tool' 74 | bl_context = "compositing" 75 | 76 | def draw(self, context): 77 | layout = self.layout 78 | layout.operator(COMPOSITOR_OT_setup_basic_nodes.bl_idname) 79 | 80 | def register(): 81 | bpy.utils.register_class(COMPOSITOR_OT_setup_basic_nodes) 82 | bpy.utils.register_class(COMPOSITOR_PT_custom_panel) 83 | 84 | def unregister(): 85 | bpy.utils.unregister_class(COMPOSITOR_OT_setup_basic_nodes) 86 | bpy.utils.unregister_class(COMPOSITOR_PT_custom_panel) 87 | 88 | if __name__ == "__main__": 89 | register() -------------------------------------------------------------------------------- /animpart.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import random 3 | from mathutils import Vector 4 | 5 | # Function to create an icosphere 6 | def create_icosphere(size): 7 | bpy.ops.mesh.primitive_ico_sphere_add(radius=size, location=(0, 0, 0)) 8 | icosphere = bpy.context.object 9 | icosphere.name = "EmittingIcosphere" 10 | return icosphere 11 | 12 | # Function to create an emission material 13 | def create_emission_material(intensity): 14 | mat = bpy.data.materials.new(name="EmissionMaterial") 15 | mat.use_nodes = True 16 | nodes = mat.node_tree.nodes 17 | links = mat.node_tree.links 18 | 19 | nodes.clear() 20 | 21 | emission = nodes.new(type='ShaderNodeEmission') 22 | emission.inputs['Strength'].default_value = intensity 23 | emission.inputs['Color'].default_value = (1, 1, 1, 1) # White color 24 | 25 | output = nodes.new(type='ShaderNodeOutputMaterial') 26 | 27 | links.new(emission.outputs['Emission'], output.inputs['Surface']) 28 | 29 | return mat 30 | 31 | # Function to generate a random direction vector 32 | def generate_random_direction(): 33 | direction = Vector((random.choice([1, -1]), random.choice([1, -1]), random.choice([1, -1]))) 34 | return direction 35 | 36 | # Main function to create the animation 37 | def animate_icosphere_in_fog(steps, size, intensity): 38 | # Create the icosphere 39 | icosphere = create_icosphere(size) 40 | 41 | # Create the emission material and assign it to the icosphere 42 | emission_material = create_emission_material(intensity) 43 | if len(icosphere.data.materials): 44 | icosphere.data.materials[0] = emission_material 45 | else: 46 | icosphere.data.materials.append(emission_material) 47 | 48 | # Create the fog cube 49 | bpy.ops.mesh.primitive_cube_add(size=10, location=(0, 0, 0)) 50 | fog_cube = bpy.context.object 51 | fog_cube.name = "FogCube" 52 | fog_cube.display_type = 'WIRE' 53 | fog_cube.hide_render = True # Hide the cube in renders 54 | 55 | # Create a volume scatter material for the fog 56 | fog_material = bpy.data.materials.new(name="FogMaterial") 57 | fog_material.use_nodes = True 58 | nodes = fog_material.node_tree.nodes 59 | links = fog_material.node_tree.links 60 | 61 | nodes.clear() 62 | 63 | volume_scatter = nodes.new(type='ShaderNodeVolumeScatter') 64 | volume_scatter.inputs['Density'].default_value = 0.1 65 | volume_scatter.inputs['Color'].default_value = (0.8, 0.8, 0.8, 1) # Light gray 66 | 67 | volume_output = nodes.new(type='ShaderNodeOutputMaterial') 68 | 69 | links.new(volume_scatter.outputs['Volume'], volume_output.inputs['Volume']) 70 | 71 | fog_cube.data.materials.append(fog_material) 72 | 73 | # Set initial position 74 | initial_position = Vector((-4.5, -4.5, -4.5)) 75 | icosphere.location = initial_position 76 | icosphere.keyframe_insert(data_path="location", frame=0) 77 | 78 | # Animate the icosphere 79 | current_position = 
initial_position 80 | for step in range(1, steps + 1): 81 | direction = generate_random_direction() 82 | current_position += direction * size # Move by the size of the icosphere 83 | icosphere.location = current_position 84 | icosphere.keyframe_insert(data_path="location", frame=step * 10) # 10 frames per step 85 | 86 | def main(): 87 | animate_icosphere_in_fog( 88 | steps=50, # Number of steps in the animation 89 | size=0.1, # Size of the icosphere 90 | intensity=10.0 # Emission intensity 91 | ) 92 | 93 | if __name__ == "__main__": 94 | main() 95 | 96 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | -------------------------------------------------------------------------------- /doku/krita-plugin.txt: -------------------------------------------------------------------------------- 1 | Creating a Krita plugin to extract colors from all active layers and create a palette involves several steps. First, you'll need to have some familiarity with Krita's scripting API, which allows you to interact with the application using Python. 2 | 3 | Below is a high-level outline of how you could create such a plugin: 4 | 5 | 1. **Setup the Environment:** 6 | - Ensure you have Krita installed. 7 | - You need to enable Python plugins in Krita. Go to `Settings` -> `Configure Krita` -> `Python Plugin Manager`. 8 | 9 | 2. **Script Outline:** 10 | - Access the active document. 11 | - Loop through each layer. 12 | - For each layer that is active and visible, extract the colors. 13 | - Generate a color palette based on these colors. 14 | - Optionally, create a new palette in Krita or export the palette as a file. 15 | 16 | 3. **Write the Plugin Script:** 17 | Here's a basic Python script that you can use as a starting point. This script will need to be placed in Krita's plugins directory. 
18 | 19 | ```python 20 | from krita import * 21 | 22 | class ColorPaletteExtractor(Extension): 23 | 24 | def __init__(self, parent): 25 | super().__init__(parent) 26 | 27 | def setup(self): 28 | pass 29 | 30 | def createActions(self, window): 31 | action = window.createAction("extractPalette", "Extract Palette from Active Layers", "tools/scripts") 32 | action.triggered.connect(self.extractPalette) 33 | 34 | def extractPalette(self): 35 | doc = Krita.instance().activeDocument() 36 | if not doc: 37 | QMessageBox.warning(None, "Error", "No active document found!") 38 | return 39 | 40 | colors = set() 41 | for layer in doc.topLevelNodes(): 42 | if layer.visible(): 43 | try: 44 | pixel_data = layer.projectionPixelData(0, 0, layer.width(), layer.height()).data() 45 | # Iterate through each pixel to extract colors 46 | for i in range(0, len(pixel_data), 4): # RGBA 47 | r = pixel_data[i] 48 | g = pixel_data[i+1] 49 | b = pixel_data[i+2] 50 | a = pixel_data[i+3] 51 | if a > 0: # Check if pixel is not transparent 52 | colors.add((r, g, b)) 53 | except Exception as e: 54 | print(f"Error processing layer {layer.name()}: {str(e)}") 55 | 56 | # Create palette from collected colors 57 | palette = Palette() 58 | palette.setEntryCount(len(colors)) 59 | for i, color in enumerate(colors): 60 | palette.setEntry(i, color[0], color[1], color[2], 255) # RGB and full alpha 61 | 62 | # Save or use the palette as needed 63 | print("Palette extracted with colors:", colors) 64 | 65 | # And add the extension to Krita's list of extensions: 66 | Krita.instance().addExtension(ColorPaletteExtractor(Krita.instance())) 67 | ``` 68 | 69 | 4. **Installation and Usage:** 70 | - Save the script as `ColorPaletteExtractor.py`. 71 | - Place this file in your Krita plugins folder, usually found at: 72 | - Windows: `C:\Users\[YourUsername]\AppData\Roaming\krita\pykrita` 73 | - macOS and Linux: `~/.local/share/krita/pykrita/` 74 | - Restart Krita, then enable the plugin through the Python Plugin Manager under `Settings`. 75 | - You should now find the "Extract Palette from Active Layers" option under `Tools -> Scripts`. 76 | 77 | 5. **Testing and Refinement:** 78 | - Test the plugin with different documents and layer configurations to ensure it behaves as expected. 79 | - Refine the color extraction and palette creation logic as needed, especially to handle large sets of data or specific color management requirements. 80 | 81 | This basic plugin will extract colors from all visible layers and print out a set of unique colors found in the document. Adjustments may be needed to match specific project requirements or to enhance functionality. -------------------------------------------------------------------------------- /doku/ideas.txt: -------------------------------------------------------------------------------- 1 | ideas to create plugins: 2 | ----- 3 | 4 | Creating unique and original Blender plugins can help to streamline workflows, introduce new artistic tools, or expand the software’s capabilities in exciting ways. Here are a few ideas for plugins that could serve a variety of needs for Blender users, from beginners to advanced professionals: 5 | 6 | 1. **Auto-Compositor Setup**: 7 | - **Purpose**: Automates the setup of common node configurations in the compositor, such as setting up a node tree for depth of field, color grading, or ambient occlusion. 8 | - **Features**: Users could select from presets or customize templates for specific types of scenes, significantly speeding up post-processing workflows. 9 | 10 | 2. 
**Architectural Toolkit**: 11 | - **Purpose**: Enhances architectural modeling by providing tools for rapid construction of buildings, interiors, and urban planning models. 12 | - **Features**: Includes parametric architectural elements (walls, windows, doors, roofs), auto-generation of floor plans from sketches, and tools for mass editing of architectural elements (like adjusting all windows at once). 13 | 14 | 3. **Procedural Texture Generator**: 15 | - **Purpose**: Allows users to create complex procedural textures from simple inputs using a node-based interface. 16 | - **Features**: Could include a library of base patterns and textures, and advanced options for combining and manipulating these elements to create unique materials. 17 | 18 | 4. **Animation Retargeting and Mixing**: 19 | - **Purpose**: Provides tools to retarget animations from one character to another and to blend animations from different sources seamlessly. 20 | - **Features**: Could support different rigging systems, include a preview tool to see results in real-time, and offer advanced blending controls to adjust the transition between animations. 21 | 22 | 5. **Virtual Reality Scene Editor**: 23 | - **Purpose**: Allows artists to build and edit scenes directly in VR, offering an immersive experience and direct manipulation of objects and environments. 24 | - **Features**: Tools for geometry creation, texture application, and lighting adjustments all within a VR environment, providing a new level of intuitive interaction with 3D content. 25 | 26 | 6. **Dynamic Weather System**: 27 | - **Purpose**: Simulates various weather conditions dynamically affecting the scene, useful for animations and environmental design. 28 | - **Features**: Real-time weather effects like rain, snow, fog, and sunlight changes; could include physical effects on objects (e.g., raindrops causing splashes, wind moving trees). 29 | 30 | 7. **Asset Management and Version Control**: 31 | - **Purpose**: Helps manage large projects with multiple assets and team collaboration by integrating a version control system directly into Blender. 32 | - **Features**: Version tracking for blend files, asset check-in/check-out system, and integration with existing version control systems like Git or SVN. 33 | 34 | 8. **Custom Brush Creator for Sculpting and Painting**: 35 | - **Purpose**: Allows users to create, share, and manage custom brushes for sculpting and texture painting. 36 | - **Features**: Advanced settings for brush dynamics, support for importing various brush shapes, and community features to share and rate brushes. 37 | 38 | 9. **Real-time Collaboration Tool**: 39 | - **Purpose**: Enables multiple artists to work on the same scene file simultaneously from different locations. 40 | - **Features**: Real-time updates of scene changes, chat and annotation tools for communication, and conflict resolution strategies for concurrent changes. 41 | 42 | 10. **Automatic Rigging and Skinning**: 43 | - **Purpose**: Simplifies the rigging and skinning process for characters and creatures by automating these tasks based on pre-defined or custom templates. 44 | - **Features**: Auto-generation of rigs for bipeds, quadrupeds, and other non-standard creatures, with intuitive controls for refining weights and joint parameters. 45 | 46 | These ideas could cater to a range of Blender users, from visual effects artists and game developers to architects and designers, enhancing Blender’s utility and efficiency across diverse creative industries. 
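As a concrete illustration of the preset selection mentioned in idea 1, a preset could simply be an `EnumProperty` on the setup operator that decides which node chain gets built. The sketch below is illustrative only: the class name, `bl_idname`, and preset identifiers are made up for this note, and the node types are the same ones already used in compositor.py in this repository.

```python
import bpy

class COMPOSITOR_OT_setup_preset_nodes(bpy.types.Operator):
    """Hypothetical preset-driven variant of the basic compositor setup."""
    bl_idname = "compositor.setup_preset_nodes"
    bl_label = "Setup Preset Nodes"
    bl_options = {'REGISTER', 'UNDO'}

    preset: bpy.props.EnumProperty(
        name="Preset",
        items=[
            ('GRADE', "Color Grade", "RGB curves between render and output"),
            ('GLOW', "Glow", "Glare node between render and output"),
        ],
    )

    def execute(self, context):
        context.scene.use_nodes = True
        tree = context.scene.node_tree
        tree.nodes.clear()

        # Render Layers feeds the chosen effect node, which feeds the Composite output
        render_layers = tree.nodes.new('CompositorNodeRLayers')
        composite = tree.nodes.new('CompositorNodeComposite')
        if self.preset == 'GRADE':
            effect = tree.nodes.new('CompositorNodeRGBCurves')
        else:
            effect = tree.nodes.new('CompositorNodeGlare')

        tree.links.new(render_layers.outputs['Image'], effect.inputs['Image'])
        tree.links.new(effect.outputs['Image'], composite.inputs['Image'])
        return {'FINISHED'}
```

Registered the same way as the operator in compositor.py, the preset would show up in the operator's redo panel; richer templates would just add more branches or read node lists from a table.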
47 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BPL - Blender Python Library Wrapper 2 | 3 | Welcome to BPL, the Blender Python Library Wrapper, an open-source project designed to simplify the process of creating 3D objects, animations, procedural materials, and composition tools in Blender, the renowned open-source 3D creation suite. By abstracting the comprehensive bpy library, BPL offers an easy-to-use interface for both beginners and experienced Blender developers, streamlining the development of Blender scripts and add-ons. 4 | 5 | ## Features 6 | 7 | - **General Toolset**: A set of tools for various scene operations. 8 | - **Procedural Material Generation**: High-level functions to create and apply procedural materials to objects, enhancing visual aesthetics with minimal effort. 9 | - **Advanced Composition Capabilities**: Tools to facilitate complex composition tasks, improving the efficiency of scene setup and rendering. 10 | - **User-Friendly API**: Designed with usability in mind, providing a clear and concise API that abstracts away the complexities of bpy. 11 | 12 | ## Getting Started 13 | 14 | ### Prerequisites 15 | 16 | - Blender 4.0 or newer installed on your machine. 17 | 18 | ### Installation 19 | 20 | 1. Download the latest version of BPL from the [GitHub repository](https://github.com/sq5rix/BlenderPythonLibrary). 21 | 2. Open Blender. 22 | 3. Copy the code into the Scripting workspace. 23 | 4. Run the code - some functions require an active (selected) object. 24 | 5. There are two plugins - camera and compositing - which can be installed in the normal way. 25 | 6. The animated drone swarm is in progress and not working yet. 26 | 7. Snail shell code. 27 | 8. 3D spiral curve. 28 | 9. Mesh 3D API. 29 | 30 | ### Configuration 31 | 32 | After installation, BPL does not require additional configuration to start using its features. 33 | You can add any of the useful functions to Blender as a plugin in the usual way. 34 | 35 | ## Usage 36 | 37 | ### Creating a 3D Object 38 | 39 | ```python 40 | from bpl import ObjectCreator 41 | 42 | # Create a new cube with default dimensions 43 | cube = ObjectCreator.create_cube(name="MyCube") 44 | 45 | # Move the cube to a specific location 46 | cube.location = (1, 2, 3) 47 | ``` 48 | 49 | ### Animating an Object 50 | 51 | ```python 52 | from bpl import Animator 53 | 54 | # Animate the cube's rotation over 60 frames 55 | Animator.rotate_object(cube, rotation=(90, 0, 0), start_frame=1, end_frame=60) 56 | ``` 57 | 58 | ### Generating a Procedural Material 59 | 60 | ```python 61 | from bpl import MaterialGenerator 62 | 63 | # Create a new procedural material and apply it to the cube 64 | material = MaterialGenerator.create_procedural_material(name="MyMaterial", color=(0.8, 0.2, 0.2)) 65 | cube.materials.append(material) 66 | ``` 67 | 68 | ### Complex Scene Composition 69 | 70 | ```python 71 | from bpl import SceneComposer 72 | 73 | # Automatically set up lighting and camera for a basic scene 74 | SceneComposer.setup_basic_scene(objects=[cube]) 75 | ``` 76 | 77 | ## Documentation 78 | 79 | For a comprehensive guide and API reference, please refer to the [BPL Documentation](https://github.com/sq5rix/BlenderPythonLibrary/wiki). The documentation is planned and will be continuously updated to reflect new features and improvements. 80 | 81 | ## Contributing 82 | 83 | Contributions to BPL are warmly welcomed, whether it's in the form of bug reports, feature requests, or pull requests. 
Please see our [CONTRIBUTING.md](https://github.com/sq5rix/BlenderPythonLibrary/CONTRIBUTING.md) for more information on how to contribute. 84 | 85 | ## License 86 | 87 | BPL is licensed under the MIT License. See the [LICENSE](https://github.com/your-repo/bpl/LICENSE) file for more details. 88 | 89 | ## Acknowledgments 90 | 91 | - Special thanks to Ryan King for fantastic Blender tutorials on [YouTube](https://youtube.com/@RyanKingArt?si=wmYyGDFKGZ-_FPmM) 92 | - Shoutout and thanks to Victor Stepano for opening up scripting in Blender [YouTube](https://youtube.com/@CGPython?si=Y_rGrNoMJbe141b1) 93 | - The Blender Foundation, for developing and maintaining Blender. 94 | - The Blender community, for their invaluable contributions and support. 95 | 96 | We hope BPL will enhance your Blender scripting experience by providing an easier and more intuitive way to create, animate, and compose in Blender. For questions, feedback, or support, please reach out through our GitHub repository. 97 | 98 | 99 | -------------------------------------------------------------------------------- /studiolights.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from mathutils import Vector 3 | 4 | bl_info = { 5 | "name": "Studio Lights", 6 | "blender": (2, 80, 0), 7 | "category": "Scene", 8 | } 9 | 10 | def look_at(obj, target): 11 | """ 12 | Rotates 'obj' to look towards 'target' point. 13 | :param obj: The object to be oriented. 14 | :param target: The location (as a Vector) to be targeted. 15 | """ 16 | # Direction from the object to the target point 17 | direction = target - obj.location 18 | # Point the object's '-Z' and 'Y' towards the target 19 | rot_quat = direction.to_track_quat('-Z', 'Y') 20 | obj.rotation_euler = rot_quat.to_euler() 21 | 22 | 23 | class StudioLightsSetup(): 24 | def __init__(self, collection_name="StudioLights"): 25 | self.collection = self.ensure_collection(collection_name) 26 | self.cursor_location = bpy.context.scene.cursor.location 27 | self.main_object_size = 2 * self.calculate_scene_sphere_radius() 28 | self.light_height = 0.8 * self.main_object_size 29 | print('self.main_object_size : ', self.main_object_size ) 30 | 31 | def point_light_to_cursor(self, light_name): 32 | """ 33 | Points the light object with the given name towards the 3D cursor. 34 | :param light_name: The name of the light object. 
35 | """ 36 | # Get the light object by name 37 | light = bpy.data.objects.get(light_name) 38 | if not light or light.type != 'LIGHT': 39 | print(f"No light found with the name '{light_name}'.") 40 | return 41 | look_at(light, self.cursor_location) 42 | 43 | def ensure_collection(self, collection_name): 44 | if collection_name not in bpy.data.collections: 45 | new_collection = bpy.data.collections.new(collection_name) 46 | bpy.context.scene.collection.children.link(new_collection) 47 | return bpy.data.collections[collection_name] 48 | 49 | def create_light(self, light_type, name, location, size, energy, color, spot_size=None, spot_blend=None): 50 | bpy.ops.object.light_add(type=light_type, location=location) 51 | light = bpy.context.object 52 | light.name = name 53 | light.data.energy = energy 54 | light.data.color = color 55 | self.point_light_to_cursor(light.name) 56 | 57 | if light_type == 'AREA': 58 | light.data.shape = 'DISK' 59 | light.data.size = size 60 | elif light_type == 'SPOT': 61 | light.data.spot_size = spot_size 62 | light.data.spot_blend = spot_blend 63 | light.data.show_cone = True 64 | 65 | # Remove light from all collections it was added to, then link to the specified collection 66 | for col in light.users_collection: 67 | col.objects.unlink(light) 68 | self.collection.objects.link(light) 69 | return light 70 | 71 | def add_key_light(self): 72 | location = (-self.main_object_size, self.main_object_size, 5) 73 | self.create_light("AREA", "Key Light", location, 1.5, 1000, (1, 1, 1)) 74 | 75 | def add_fill_light(self): 76 | location = (2 * self.main_object_size, -self.main_object_size, self.light_height) 77 | self.create_light("AREA", "Fill Light", location, -2, 500, (0.8, 0.8, 1)) 78 | 79 | def add_rim_light(self): 80 | location = (0, -2 * self.main_object_size, 2) 81 | self.create_light("AREA", "Rim Light", location, 1, 750, (1, 0.8, 0.5)) 82 | 83 | def add_front_spotlight(self): 84 | location = (0, self.main_object_size * 2, self.main_object_size / 2) 85 | self.create_light("SPOT", "Front Spotlight", location, 0, 500, (1, 1, 1), spot_size=1.0, spot_blend=0.1) 86 | 87 | def calculate_scene_sphere_radius(self): 88 | """Calculates a sphere radius that contains all the scene objects, excluding cameras and lights.""" 89 | min_coord = Vector((float('inf'), float('inf'), float('inf'))) 90 | max_coord = Vector((float('-inf'), float('-inf'), float('-inf'))) 91 | 92 | # Iterate through all scene objects 93 | for obj in bpy.context.scene.objects: 94 | # Skip cameras and lights 95 | if obj.type in {'CAMERA', 'LIGHT'}: 96 | continue 97 | 98 | # Update the min and max coordinates based on the object's bounding box 99 | for corner in obj.bound_box: 100 | world_corner = obj.matrix_world @ Vector(corner) 101 | min_coord = Vector(map(min, zip(min_coord, world_corner))) 102 | max_coord = Vector(map(max, zip(max_coord, world_corner))) 103 | 104 | # Calculate the center and the bounding box's dimensions 105 | center = (min_coord + max_coord) / 2 106 | dimensions = max_coord - min_coord 107 | 108 | # Calculate the radius as half of the largest dimension 109 | radius = max(dimensions) / 2 110 | 111 | return radius 112 | 113 | class StudioLightsSetupPanel(bpy.types.Panel): 114 | bl_label = "Studio Lights Setup" 115 | bl_idname = "OBJECT_PT_studio_lights" 116 | bl_space_type = 'VIEW_3D' 117 | bl_region_type = 'UI' 118 | bl_category = 'Tool' 119 | 120 | def draw(self, context): 121 | layout = self.layout 122 | layout.operator("object.setup_studio_lights", text="Add Studio Lights") 123 | 124 | 
class OBJECT_OT_SetupStudioLights(bpy.types.Operator): 125 | bl_idname = "object.setup_studio_lights" 126 | bl_label = "Studio Lights" 127 | bl_description = "Setup studio lighting based on the selected object size" 128 | bl_options = {'REGISTER', 'UNDO'} 129 | 130 | def execute(self, context): 131 | studio_lights = StudioLightsSetup() 132 | studio_lights.add_key_light() 133 | studio_lights.add_fill_light() 134 | studio_lights.add_rim_light() 135 | studio_lights.add_front_spotlight() 136 | return {'FINISHED'} 137 | 138 | def register(): 139 | bpy.utils.register_class(OBJECT_OT_SetupStudioLights) 140 | bpy.utils.register_class(StudioLightsSetupPanel) 141 | 142 | def unregister(): 143 | bpy.utils.unregister_class(OBJECT_OT_SetupStudioLights) 144 | bpy.utils.unregister_class(StudioLightsSetupPanel) 145 | 146 | if __name__ == "__main__": 147 | register() 148 | 149 | -------------------------------------------------------------------------------- /hdr.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | bl_info = { 4 | "name": "HDR generator", 5 | "blender": (2, 80, 0), 6 | "category": "Scene", 7 | } 8 | 9 | H_RES = 1024 10 | RES = 400 11 | 12 | import bpy 13 | 14 | class RenderSettingsManager: 15 | def __init__(self): 16 | """ 17 | Initialize the RenderSettingsManager 18 | by reading current render settings. 19 | """ 20 | self.get_render() 21 | 22 | def get_render(self): 23 | """ 24 | Reads the current render settings 25 | from the scene and stores them in 26 | instance variables. 27 | """ 28 | scene = bpy.context.scene 29 | render = scene.render 30 | 31 | self.engine = scene.render.engine 32 | self.resolution_x = render.resolution_x 33 | self.resolution_y = render.resolution_y 34 | self.resolution_percentage = render.resolution_percentage 35 | self.use_denoising = scene.cycles.use_denoising 36 | self.file_format = scene.render.image_settings.file_format 37 | # Add more parameters as needed 38 | 39 | def set_render(self, engine='CYCLES', resolution_x=2048, resolution_y=1024, resolution_percentage=400, 40 | use_denoising=True, file_format='HDR'): 41 | """Sets the render settings for the scene based on the provided parameters.""" 42 | scene = bpy.context.scene 43 | render = scene.render 44 | 45 | scene.render.engine = engine 46 | render.resolution_x = resolution_x 47 | render.resolution_y = resolution_y 48 | render.resolution_percentage = resolution_percentage 49 | scene.cycles.use_denoising = use_denoising 50 | scene.render.image_settings.file_format = file_format 51 | # Apply additional settings as needed 52 | 53 | # Update instance variables to reflect the changes 54 | self.get_render() 55 | 56 | 57 | def add_denoise_node(): 58 | """ 59 | Adds a Denoise node to the compositor 60 | with default values. 
61 | """ 62 | # Enable use of nodes in the compositor 63 | bpy.context.scene.use_nodes = True 64 | tree = bpy.context.scene.node_tree 65 | 66 | # Clear existing nodes 67 | # Comment out the next line if you don't want to remove existing nodes 68 | # tree.nodes.clear() 69 | 70 | # Create Denoise node 71 | denoise_node = tree.nodes.new(type='CompositorNodeDenoise') 72 | denoise_node.location = (0, 0) # Adjust the location as needed 73 | 74 | # Connect Denoise node to the Render Layers node and Composite node if not already connected 75 | render_layers_node = next(node for node in tree.nodes if node.type == 'R_LAYERS') 76 | composite_node = next(node for node in tree.nodes if node.type == 'COMPOSITE') 77 | 78 | tree.links.new(render_layers_node.outputs['Image'], denoise_node.inputs['Image']) 79 | tree.links.new(render_layers_node.outputs['Normal'], denoise_node.inputs['Normal']) 80 | tree.links.new(render_layers_node.outputs['Albedo'], denoise_node.inputs['Albedo']) 81 | tree.links.new(denoise_node.outputs['Image'], composite_node.inputs['Image']) 82 | 83 | def add_glare_node(): 84 | """ 85 | Adds a Glare node to the compositor 86 | with default values. 87 | """ 88 | # Ensure use of nodes is enabled 89 | bpy.context.scene.use_nodes = True 90 | tree = bpy.context.scene.node_tree 91 | 92 | # Clear existing nodes 93 | # Comment out the next line if you don't want to remove existing nodes 94 | # tree.nodes.clear() 95 | 96 | # Create Glare node 97 | glare_node = tree.nodes.new(type='CompositorNodeGlare') 98 | glare_node.location = (200, 0) # Adjust the location as needed 99 | # Default Glare node values are used, adjust as needed 100 | 101 | # Automatically connecting the Glare node assumes you know which node to connect it to 102 | # Here we connect it after the Denoise node if it exists, otherwise directly to Render Layers 103 | render_layers_node = next((node for node in tree.nodes if node.type == 'R_LAYERS'), None) 104 | denoise_node = next((node for node in tree.nodes if node.type == 'DENOISE'), None) 105 | composite_node = next(node for node in tree.nodes if node.type == 'COMPOSITE') 106 | 107 | if denoise_node: 108 | tree.links.new(denoise_node.outputs['Image'], glare_node.inputs['Image']) 109 | else: 110 | tree.links.new(render_layers_node.outputs['Image'], glare_node.inputs['Image']) 111 | 112 | tree.links.new(glare_node.outputs['Image'], composite_node.inputs['Image']) 113 | 114 | 115 | 116 | def set_render_settings(hres=H_RES, res_percent=RES, engine='CYCLES', device='GPU', denoise=False, blur=True): 117 | """ 118 | Configure render settings. 119 | """ 120 | bpy.context.scene.render.engine = engine 121 | bpy.context.scene.render.resolution_x = 2*hres 122 | bpy.context.scene.render.resolution_y = hres 123 | bpy.context.scene.render.resolution_percentage = res_percent 124 | bpy.context.scene.render.engine = engine 125 | bpy.context.scene.cycles.device = device 126 | bpy.context.scene.cycles.use_denoising = denoise 127 | bpy.context.scene.render.use_motion_blur = blur 128 | bpy.context.scene.view_layers[0].cycles.use_denoising = denoise 129 | 130 | def set_camera_to_panoramic(camera_name='Camera'): 131 | """Set the camera to panoramic and panoramic type to equirectangular.""" 132 | camera = bpy.data.cameras[camera_name] # Adjust if your camera is named differently 133 | camera.type = 'PANO' 134 | camera.panorama_type = 'EQUIRECTANGULAR' 135 | 136 | def render_scene(): 137 | """ 138 | Render the current scene. 
139 | """ 140 | bpy.ops.render.render() 141 | 142 | def setup_nodes_for_render_output(file_path, file_format='HDR'): 143 | """Set up compositing nodes to save the render result to a file.""" 144 | scene = bpy.context.scene 145 | scene.use_nodes = True 146 | tree = scene.node_tree 147 | 148 | # Clear existing nodes 149 | tree.nodes.clear() 150 | 151 | # Create Render Layers node 152 | render_layers_node = tree.nodes.new('CompositorNodeRLayers') 153 | 154 | # Create Output node (File Output) 155 | output_node = tree.nodes.new('CompositorNodeOutputFile') 156 | output_node.base_path = '' 157 | output_node.file_slots[0].path = file_path # File name and path 158 | output_node.format.file_format = file_format 159 | 160 | # Link nodes 161 | tree.links.new(render_layers_node.outputs[0], output_node.inputs[0]) 162 | 163 | def render_scene_and_save_image(output_file_name='Blender/pano'): 164 | """ 165 | Render the scene and save the image 166 | using compositor nodes. 167 | """ 168 | # Set output file path (relative to the current .blend file) 169 | output_file_path = f'//{output_file_name}' 170 | 171 | # Configure render settings and camera 172 | set_render_settings() 173 | set_camera_to_panoramic() 174 | 175 | #add_denoise_node() 176 | #add_glare_node() 177 | 178 | # Setup nodes to save render output 179 | setup_nodes_for_render_output(output_file_path) 180 | 181 | # Render the scene 182 | bpy.ops.render.render(write_still=True) # 'write_still' ensures the File Output node writes the file 183 | 184 | def register(): 185 | # CamRigSetupOperator and menu_func are defined in camrig.py, not in this module, 186 | # so registration here only needs to trigger the panoramic HDR render. 187 | render_scene_and_save_image() 188 | 189 | def unregister(): 190 | pass 191 | 192 | 193 | if __name__ == "__main__": 194 | register() 195 | -------------------------------------------------------------------------------- /swarm.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from bpy.props import IntProperty, FloatProperty 3 | from bpy.types import Operator, Panel 4 | from mathutils import Vector 5 | import random 6 | 7 | def distribute_and_animate_objects(obj, curve_list, start_frame, end_frame): 8 | # Ensure the input object is a mesh or another appropriate type 9 | if obj.type not in ['MESH', 'CURVE', 'SURFACE', 'FONT']: 10 | raise TypeError("Unsupported object type. 
Please use a mesh or curve object.") 11 | 12 | # Loop through each curve in the curve list 13 | for curve in curve_list: 14 | if curve.type != 'CURVE': 15 | print(f"Skipping non-curve object: {curve.name}") 16 | continue 17 | 18 | # Duplicate the object 19 | new_obj = obj.copy() 20 | new_obj.data = obj.data.copy() 21 | bpy.context.collection.objects.link(new_obj) 22 | 23 | # Create and configure the Follow Path constraint 24 | follow_path_constraint = new_obj.constraints.new(type='FOLLOW_PATH') 25 | follow_path_constraint.target = curve 26 | follow_path_constraint.use_fixed_location = True # Ensures the object follows the path based on frame 27 | follow_path_constraint.use_curve_follow = True # Make the object's movement tangent to the curve 28 | follow_path_constraint.forward_axis = 'FORWARD_Y' # Assuming the object's forward direction is along Y 29 | follow_path_constraint.up_axis = 'UP_Z' # Assuming Z is up 30 | 31 | # Set the animation 32 | follow_path_constraint.offset_factor = 0.0 # Start at the beginning of the curve 33 | follow_path_constraint.keyframe_insert(data_path="offset_factor", frame=start_frame) 34 | follow_path_constraint.offset_factor = 1.0 # End at the end of the curve 35 | follow_path_constraint.keyframe_insert(data_path="offset_factor", frame=end_frame) 36 | 37 | print(f"Object {new_obj.name} animated on curve {curve.name} from frame {start_frame} to {end_frame}.") 38 | 39 | 40 | def pair_random_elements(input_list, N): 41 | if 2 * N > len(input_list): 42 | raise ValueError("N must be less than half the length of the input list") 43 | 44 | # Randomly select N elements for the first group 45 | first_group = random.sample(input_list, N) 46 | 47 | # Create a list of remaining elements 48 | remaining_elements = [item for item in input_list if item not in first_group] 49 | 50 | # Randomly select N elements from the remaining elements for the second group 51 | second_group = random.sample(remaining_elements, N) 52 | 53 | # Pair elements from the first group with elements from the second group 54 | paired_elements = list(zip(first_group, second_group)) 55 | 56 | return paired_elements 57 | 58 | def animate_object_along_curve(obj, curve, start_frame, end_frame): 59 | # Check if the curve is a valid curve object 60 | if curve.type != 'CURVE': 61 | print("The provided curve object is not a curve.") 62 | return 63 | 64 | # Create or find the Follow Path constraint 65 | follow_path_constraint = None 66 | for constraint in obj.constraints: 67 | if constraint.type == 'FOLLOW_PATH': 68 | follow_path_constraint = constraint 69 | break 70 | else: 71 | follow_path_constraint = obj.constraints.new(type='FOLLOW_PATH') 72 | 73 | # Set the curve target and options 74 | follow_path_constraint.target = curve 75 | follow_path_constraint.use_curve_follow = True 76 | follow_path_constraint.forward_axis = 'FORWARD_Y' 77 | follow_path_constraint.up_axis = 'UP_Z' 78 | 79 | # Set the object at the start of the curve 80 | obj.location = curve.splines[0].bezier_points[0].co 81 | obj.keyframe_insert(data_path="location", frame=start_frame) 82 | 83 | # Insert keyframe for the constraint influence 84 | follow_path_constraint.offset_factor = 0.0 85 | follow_path_constraint.keyframe_insert(data_path="offset_factor", frame=start_frame) 86 | 87 | # Set the object at the end of the curve 88 | obj.location = curve.splines[0].bezier_points[-1].co 89 | obj.keyframe_insert(data_path="location", frame=end_frame) 90 | 91 | # Insert keyframe for the constraint influence 92 | follow_path_constraint.offset_factor = 
1.0 93 | follow_path_constraint.keyframe_insert(data_path="offset_factor", frame=end_frame) 94 | 95 | print("Animation setup completed.") 96 | 97 | 98 | def create_bezier_curves_between_face_pairs(face_pairs): 99 | curve_objects = [] 100 | # To store references to the created curve objects 101 | # todo reat global faces 102 | for face_data_1, face_data_2 in face_pairs: 103 | center1, normal1 = face_data_1 104 | center2, normal2 = face_data_2 105 | 106 | # Calculate handle positions 107 | distance = (center1 - center2).length 108 | height_factor = 4 109 | handle1 = center1 + normal1.normalized() * distance * height_factor 110 | handle2 = center2 + normal2.normalized() * distance * height_factor 111 | 112 | # Create and configure the curve 113 | curve_data = bpy.data.curves.new(name="BezierCurve", type='CURVE') 114 | curve_data.dimensions = '3D' 115 | spline = curve_data.splines.new('BEZIER') 116 | spline.bezier_points.add(1) 117 | 118 | p0, p1 = spline.bezier_points[0], spline.bezier_points[1] 119 | p0.co = center1 120 | p0.handle_right_type = 'FREE' 121 | p0.handle_right = handle1 122 | p1.co = center2 123 | p1.handle_left_type = 'FREE' 124 | p1.handle_left = handle2 125 | 126 | # Create curve object and add to the scene 127 | curve_obj = bpy.data.objects.new("BezierCurveObj", curve_data) 128 | bpy.context.scene.collection.objects.link(curve_obj) 129 | curve_objects.append(curve_obj) 130 | 131 | return curve_objects 132 | 133 | # Global variable to store the data of selected faces 134 | GLOBAL_FACE_DATA = [] 135 | 136 | def store_selected_faces_data(obj): 137 | """Store the center and normal of selected faces.""" 138 | selected_faces_info = [] 139 | if obj.type == 'MESH': 140 | mesh = obj.data 141 | for poly in mesh.polygons: 142 | if poly.select: 143 | center = sum((obj.matrix_world @ obj.data.vertices[vert].co for vert in poly.vertices), Vector()) / len(poly.vertices) 144 | normal = obj.matrix_world.to_3x3() @ poly.normal 145 | selected_faces_info.append((center, normal)) 146 | return selected_faces_info 147 | 148 | class SWARM_OT_CaptureFaces(Operator): 149 | bl_idname = "swarm.capture_faces" 150 | bl_label = "Capture Faces" 151 | bl_description = "Capture selected faces for the swarm animation" 152 | 153 | @classmethod 154 | def poll(cls, context): 155 | return context.active_object is not None and context.active_object.type == 'MESH' 156 | 157 | def execute(self, context): 158 | global GLOBAL_FACE_DATA 159 | GLOBAL_FACE_DATA = store_selected_faces_data(context.active_object) 160 | self.report({'INFO'}, F"Captured {len(GLOBAL_FACE_DATA)} faces") 161 | return {'FINISHED'} 162 | 163 | class SWARM_OT_animate(Operator): 164 | bl_idname = "swarm.animate" 165 | bl_label = "Animate Swarm" 166 | bl_description = "Animate a swarm of objects between selected faces" 167 | bl_options = {'REGISTER', 'UNDO'} 168 | 169 | def execute(self, context): 170 | st = 1 171 | en = 20 172 | # to do - pass object and create curves from faces 173 | obj = bpy.context.scene.objects.get("ObjectName") # Replace "ObjectName" with your object's name 174 | for c in GLOBAL_FACE_DATA: 175 | animate_object_along_curve(obj,c,st,en) 176 | return {'FINISHED'} 177 | 178 | class SWARM_PT_Panel(Panel): 179 | bl_label = "Swarm Animation" 180 | bl_idname = "SWARM_PT_Panel" 181 | bl_space_type = 'VIEW_3D' 182 | bl_region_type = 'UI' 183 | bl_category = 'Swarm' 184 | 185 | def draw(self, context): 186 | layout = self.layout 187 | layout.operator(SWARM_OT_CaptureFaces.bl_idname) 188 | layout.operator(SWARM_OT_animate.bl_idname) 189 
| 190 | def register(): 191 | bpy.utils.register_class(SWARM_OT_CaptureFaces) 192 | bpy.utils.register_class(SWARM_OT_animate) 193 | bpy.utils.register_class(SWARM_PT_Panel) 194 | 195 | def unregister(): 196 | bpy.utils.unregister_class(SWARM_OT_CaptureFaces) 197 | bpy.utils.unregister_class(SWARM_OT_animate) 198 | bpy.utils.unregister_class(SWARM_PT_Panel) 199 | 200 | if __name__ == "__main__": 201 | register() 202 | 203 | def test(): 204 | # Example Usage 205 | my_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] 206 | N = 3 # Example, must be less than half the length of my_list 207 | paired_list = pair_random_elements(my_list, N) 208 | print(paired_list) 209 | 210 | -------------------------------------------------------------------------------- /bpl.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import random 3 | from mathutils import Vector, Euler 4 | import bpy 5 | 6 | def clear_scene(): 7 | """ 8 | Removes all objects from the current Blender scene. 9 | """ 10 | bpy.ops.object.select_all(action='DESELECT') 11 | bpy.ops.object.select_all(action='SELECT') 12 | bpy.ops.object.delete() 13 | 14 | 15 | def look_at(obj, target_point): 16 | """ 17 | Rotates an object to look at a target point in 3D space. 18 | 19 | Args: 20 | - obj: The object to orient. 21 | - target_point: The location (Vector) to look at. 22 | """ 23 | # Direction from the object to the target point 24 | direction = target_point - obj.location 25 | # Point the object's '-Z' and 'Y' towards the target 26 | obj.rotation_euler = direction.to_track_quat('-Z', 'Y').to_euler() 27 | 28 | 29 | def add_camera(x, y, z): 30 | """ 31 | Adds a Camera to the scene at the specified location and makes it face towards the origin (0, 0, 0). 32 | 33 | Args: 34 | - x, y, z: The coordinates where the Camera will be placed. 35 | """ 36 | # Create the Camera 37 | bpy.ops.object.camera_add(location=(x, y, z)) 38 | camera = bpy.context.object # Get the newly created Camera 39 | 40 | # Use the look_at function to orient the camera 41 | look_at(camera, Vector((0, 0, 0))) 42 | 43 | 44 | def add_sun(x, y, z, strength=1.0): 45 | """ 46 | Adds a Sun lamp to the scene at the specified location, makes it face towards the origin (0, 0, 0), 47 | and sets its light strength. 48 | 49 | Args: 50 | - x, y, z: The coordinates where the Sun lamp will be placed. 51 | - strength: The light strength of the Sun lamp. 52 | """ 53 | # Create the Sun lamp 54 | bpy.ops.object.light_add(type='SUN', location=(x, y, z)) 55 | sun = bpy.context.object # Get the newly created Sun lamp 56 | 57 | # Calculate the direction vector from the Sun to the origin 58 | direction = Vector((0, 0, 0)) - sun.location 59 | # Point the Sun towards the origin 60 | sun.rotation_euler = direction.to_track_quat('Z', 'Y').to_euler() 61 | 62 | # Set the light strength 63 | sun.data.energy = strength 64 | 65 | 66 | def create_bsdf_emission_material(name="BSDF_Emission_Material", color=(1.0, 1.0, 1.0, 1.0), metallic=0.0, roughness=0.5, emission_strength=1.0, default_fac=0.0): 67 | """ 68 | Creates a new material with a Principled BSDF and Emission shader mixed together. 69 | 70 | Args: 71 | - name: The name of the new material. 72 | - color: The base color of the Principled BSDF shader. 73 | - metallic: The metallic property of the Principled BSDF shader. 74 | - roughness: The roughness property of the Principled BSDF shader. 75 | - emission_strength: The strength of the Emission shader. 76 | - default_fac: The default factor for the Mix Shader node. 
77 | 78 | Returns: 79 | - The newly created material object. 80 | """ 81 | # Create a new material 82 | material = bpy.data.materials.new(name=name) 83 | material.use_nodes = True 84 | nodes = material.node_tree.nodes 85 | links = material.node_tree.links 86 | 87 | # Clear default nodes 88 | nodes.clear() 89 | 90 | # Create Principled BSDF shader node 91 | principled_bsdf = nodes.new(type='ShaderNodeBsdfPrincipled') 92 | principled_bsdf.location = (-200, 100) 93 | principled_bsdf.inputs['Base Color'].default_value = color 94 | principled_bsdf.inputs['Metallic'].default_value = metallic 95 | principled_bsdf.inputs['Roughness'].default_value = roughness 96 | 97 | # Create Emission shader node 98 | emission = nodes.new(type='ShaderNodeEmission') 99 | emission.location = (-200, -100) 100 | emission.inputs['Strength'].default_value = emission_strength 101 | 102 | # Create Mix Shader node 103 | mix_shader = nodes.new(type='ShaderNodeMixShader') 104 | mix_shader.location = (0, 0) 105 | mix_shader.inputs['Fac'].default_value = default_fac 106 | 107 | # Create Material Output node 108 | material_output = nodes.new(type='ShaderNodeOutputMaterial') 109 | material_output.location = (200, 0) 110 | 111 | # Link nodes 112 | links.new(principled_bsdf.outputs['BSDF'], mix_shader.inputs[1]) 113 | links.new(emission.outputs['Emission'], mix_shader.inputs[2]) 114 | links.new(mix_shader.outputs['Shader'], material_output.inputs['Surface']) 115 | 116 | return material 117 | 118 | 119 | def set_mix_shader_fac(material, fac_value): 120 | """ 121 | Sets the Fac value of the Mix Shader node in the given material. 122 | 123 | Args: 124 | - material: The material object to modify. 125 | - fac_value: The float value to set for the Mix Shader's Fac factor. 126 | """ 127 | if material.use_nodes: 128 | # Try to find the Mix Shader node in the material's node tree 129 | mix_shader = next( 130 | (node for node in material.node_tree.nodes if node.type == 'MIX_SHADER'), None 131 | ) 132 | 133 | if mix_shader: 134 | # Set the Fac value 135 | mix_shader.inputs['Fac'].default_value = fac_value 136 | 137 | 138 | def create_sphere_with_material(material, radius=1.0, location=(0, 0, 0), subdivisions=2, name="SphereWithMaterial"): 139 | """ 140 | Creates a sphere and applies the given material to it. 141 | 142 | Args: 143 | - material: The material to apply to the sphere. 144 | - radius: The radius of the sphere. 145 | - location: The location to place the sphere at. 146 | - subdivisions: The number of subdivisions for the Icosphere. 147 | - name: The name of the new sphere object. 148 | 149 | Returns: 150 | - The newly created sphere object. 151 | """ 152 | # Create an Icosphere 153 | bpy.ops.mesh.primitive_ico_sphere_add(radius=radius, subdivisions=subdivisions, location=location) 154 | 155 | # Get the newly created Icosphere 156 | sphere = bpy.context.object 157 | sphere.name = name 158 | 159 | # Ensure the sphere has a material slot and apply the material 160 | if len(sphere.data.materials) == 0: 161 | sphere.data.materials.append(material) 162 | else: 163 | sphere.data.materials[0] = material 164 | 165 | return sphere 166 | 167 | 168 | def create_icosphere_grid(n, r, d, subs, name): 169 | """ 170 | Creates an n x n x n grid of Icospheres with increased subdivisions and applies an existing material named 'SimpleStar' to them. 171 | 172 | Args: 173 | - n: The number of Icospheres along each axis. 174 | - r: Radius of each Icosphere. 175 | - d: Distance between the centers of adjacent Icospheres. 
176 | - subdivisions: The subdivision level for each Icosphere. 177 | """ 178 | spheres = [] 179 | materials = [] 180 | 181 | # Calculate the start position so that the grid is centered at the origin 182 | start_pos = -(n - 1) * d / 2 183 | 184 | # Loop over each dimension 185 | for i in range(n): 186 | for j in range(n): 187 | for k in range(n): 188 | # Calculate the position for the current Icosphere 189 | x = start_pos + i * d 190 | y = start_pos + j * d 191 | z = start_pos + k * d 192 | 193 | # Example usage: 194 | new_material = create_bsdf_emission_material( 195 | name="CustomMaterial", 196 | color=(0.9, 0.1, 0.1, 1.0), # Reddish color 197 | metallic=0.3, 198 | roughness=0.15, 199 | emission_strength=9.9, 200 | default_fac=0.0 201 | ) 202 | 203 | # Create an sphere at the calculated position 204 | ico_sphere = create_sphere_with_material( 205 | new_material, radius=r, 206 | location=(x, y, z), subdivisions=subs, 207 | name=name+str(x)+str(y)+str(z) 208 | ) 209 | spheres.append(ico_sphere) 210 | materials.append(new_material) 211 | return spheres, materials 212 | 213 | 214 | def create_list_activated_points(original_list, step): 215 | """ 216 | Processes a list of objects based on specified rules and returns a modified copy. 217 | 218 | Args: 219 | - original_list: The list of objects to process. 220 | - step: The base step value used to determine the skip count. 221 | 222 | Returns: 223 | - A modified copy of the original list based on the processing rules. 224 | """ 225 | if not original_list: 226 | return [] 227 | 228 | # Initialize the resulting list with the first object 229 | result_list = [original_list[0]] 230 | i = 0 # Start with the first object 231 | 232 | while i < len(original_list) - 1: 233 | rand_choice = random.randint(0, 2) # Get a random number (0, 1, or 2) 234 | 235 | if rand_choice == 0: 236 | # Copy the next object 237 | i += 1 238 | elif rand_choice == 1: 239 | # Skip step-1 objects 240 | i += step 241 | elif rand_choice == 2: 242 | # Skip step^2-1 objects 243 | i += step**2 244 | 245 | # Check if the index is within the bounds of the list 246 | if i < len(original_list): 247 | result_list.append(original_list[i]) 248 | else: 249 | # If the index goes beyond the list, stop the loop 250 | break 251 | 252 | return result_list 253 | 254 | def animate_fac_for_materials(materials, start_frame, step): 255 | """ 256 | Animate the 'Fac' property of the Mix Shader node in each material. Sets 'Fac' to 0.0 at frame 0, 257 | then to 1.0 for each material starting from 'start_frame', incrementing by 'step'. The interpolation 258 | type for these keyframes is set to 'CONSTANT' for a step-like transition. 259 | 260 | Args: 261 | - materials: A list of Blender material objects. 262 | - start_frame: The starting frame for the animation. 263 | - step: The step between keyframes for successive materials. 
264 | """ 265 | bpy.context.scene.frame_set(0) # Start at frame 0 266 | 267 | for index, material in enumerate(materials): 268 | if not material.use_nodes: 269 | print(f"Material '{material.name}' does not use nodes.") 270 | continue 271 | 272 | mix_shader = next((node for node in material.node_tree.nodes if node.type == 'MIX_SHADER'), None) 273 | if not mix_shader: 274 | print(f"No Mix Shader found in material '{material.name}'.") 275 | continue 276 | 277 | # Set 'Fac' to 0.0 at frame 0 for all materials 278 | mix_shader.inputs['Fac'].default_value = 0.0 279 | mix_shader.inputs['Fac'].keyframe_insert(data_path="default_value", frame=0) 280 | 281 | # Find the F-Curve for the 'Fac' property and set interpolation to 'CONSTANT' 282 | fcurve = material.node_tree.animation_data.action.fcurves.find('nodes["' + mix_shader.name + '"].inputs[0].default_value') 283 | if fcurve: # Check if the F-Curve exists 284 | for kf in fcurve.keyframe_points: 285 | kf.interpolation = 'CONSTANT' 286 | 287 | # Set 'Fac' to 1.0 at the specified frame for each material 288 | frame = start_frame + (index * step) 289 | bpy.context.scene.frame_set(frame) # Go to the specified frame 290 | mix_shader.inputs['Fac'].default_value = 1.0 291 | mix_shader.inputs['Fac'].keyframe_insert(data_path="default_value", frame=frame) 292 | 293 | # Again, set interpolation to 'CONSTANT' for the new keyframe 294 | if fcurve: # Ensure the F-Curve still exists 295 | for kf in fcurve.keyframe_points: 296 | kf.interpolation = 'CONSTANT' 297 | 298 | def main(): 299 | n = 5 # Grid size 300 | r = 0.8 # Icosphere radius 301 | d = 3 # Distance between the centers of adjacent Icospheres 302 | start_frame=2 303 | subdivisions = 4 # Subdivision level to increase mesh density 304 | 305 | # Call the function to create the grid 306 | clear_scene() 307 | add_camera(25, -35, 20) 308 | add_sun(60, 60, 60, 10) 309 | add_sun(-30, -80, -30, 10) 310 | add_sun(30, -80, 30, 10) 311 | _, mats = create_icosphere_grid(n, r, d, subdivisions,"SimpleStar") 312 | modified_materials = create_list_activated_points(mats, n) 313 | animate_fac_for_materials(modified_materials , start_frame=start_frame, step=1) 314 | 315 | --------------------------------------------------------------------------------
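A small follow-up on bpl.py: unlike studiolights.py or swarm.py, it defines main() but never invokes it, so running the file in Blender's Scripting workspace builds nothing by itself. Below is a minimal sketch of the missing entry point, assuming the same `__main__` guard convention used by the other scripts in this repository; the guard is not present in bpl.py itself.

```python
# Hypothetical entry point (not in the original bpl.py): builds the demo scene
# (camera, sun lamps, animated icosphere grid) when the file is run as a script.
if __name__ == "__main__":
    main()
```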