├── .gitignore ├── LICENSE ├── README.md ├── objects ├── expected_output.png ├── face_transformations │ ├── face_transformation_0.png │ ├── face_transformation_1.png │ ├── face_transformation_2.png │ ├── face_transformation_3.png │ ├── face_transformation_4.png │ └── face_transformation_5.png ├── modified.obj ├── original.obj ├── original_texture.png └── transformations │ ├── transformation_0.png │ ├── transformation_1.png │ ├── transformation_10.png │ ├── transformation_11.png │ ├── transformation_2.png │ ├── transformation_3.png │ ├── transformation_4.png │ ├── transformation_5.png │ ├── transformation_6.png │ ├── transformation_7.png │ ├── transformation_8.png │ └── transformation_9.png ├── requirements.txt ├── run.py ├── src ├── __init__.py ├── image_transformer.py ├── load_obj.py ├── load_texture.py ├── matrix_computer.py └── seam_equilizer.py └── test ├── temp └── temp.md ├── test_image_transformer.py ├── test_loaders.py ├── test_matrix_computer.py ├── test_patternfy.py └── test_seam_equillizer.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Patternfy Copyright (C) 2013 Jenny Cheng, Harry Schwartz, Alberty Wang 2 | 3 | Patternfy is free software: you can redistribute it and/or modify 4 | it under the terms of the GNU General Public License as published by 5 | the Free Software Foundation, either version 3 of the License, or 6 | (at your option) any later version. 7 | 8 | Patternfy is distributed in the hope that it will be useful, 9 | but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | GNU General Public License for more details. 12 | 13 | The art assets provided for testing (OBJ files and PNGs) are under CC-BY-SA-3.0. 14 | See http://creativecommons.org/licenses/by-sa/3.0/. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Patternfy 2 | ============ 3 | 4 | Patternfy is a script that transforms the textures from one OBJ to another, 5 | given two OBJs with the same vertices and faces, but different UVs. 6 | 7 | It can be used for pattern drafting (creating printable fabric patterns) if the OBJs represent the final sewn object, 8 | the UVs represent the sewing pattern, and the textures represent the fabric print. 9 | 10 | I created this script to turn 3D models (extracted from video games) into stuffed animals, 11 | with a procedurally generated sewing pattern that preserves the model's original shape and coloration. 12 | 13 | For examples/tutorials see [MAKE magazine Vol 38 - DIY Video Game Plushies from 3D Models](http://makezine.com/projects/make-38-cameras-and-av/video-game-plushies/). 14 | 15 | Credits 16 | ------------- 17 | 18 | Created by Jenny - [CaretDashCaret](http://caretdashcaret.wordpress.com/) 19 | 20 | License 21 | ------------- 22 | 23 | Patternfy's code is under [GPLv3](http://opensource.org/licenses/gpl-3.0.html). 24 | A copy of GPLv3 can be found at [http://opensource.org/licenses/gpl-3.0.html](http://opensource.org/licenses/gpl-3.0.html). 25 | 26 | Patternfy's art assets are under [Creative Commons Attribution-ShareAlike 3.0](http://creativecommons.org/licenses/by-sa/3.0/). 
27 | A copy of the license can be found at [http://creativecommons.org/licenses/by-sa/3.0/](http://creativecommons.org/licenses/by-sa/3.0/). 28 | 29 | Development Environment 30 | ------------- 31 | 32 | It's generally cleaner to set up a development environment. However, you can skip straight to the [Run](https://github.com/caretdashcaret/Patternfy#to-run) section. 33 | Setting up an environment requires [virtualenv](https://pypi.python.org/pypi/virtualenv). Directories may vary depending on operating system. 34 | 35 | ```sh 36 | $ virtualenv ~/.virtualenvs/patternfy 37 | $ . ~/.virtualenvs/patternfy/bin/activate 38 | $ pip install -r requirements.txt 39 | ``` 40 | 41 | The requirements.txt contains `numpy` for solving matrices, `Pillow` for PIL, and `nose` for testing. 42 | 43 | 44 | To Run 45 | ------------- 46 | 47 | Running requires Python 2.7, [PIL](http://www.pythonware.com/products/pil/), and [numpy](http://www.numpy.org/). 48 | 49 | Pass the appropriate arguments into run.py from the command line. 50 | 51 | ```sh 52 | $ python run.py -g "objects/original.obj" -m "objects/modified.obj" -t "objects/original_texture.png" -s "objects/output.png" 53 | 54 | $ Patternfy - 2014-03-30 17:13:40,715 - loading texture 55 | $ Patternfy - 2014-03-30 17:13:40,741 - loading original OBJ 56 | $ Patternfy - 2014-03-30 17:13:40,742 - loading modified OBJ 57 | $ Patternfy - 2014-03-30 17:13:40,742 - seam equilizing 58 | $ Patternfy - 2014-03-30 17:13:40,743 - transforming image 59 | $ Patternfy - 2014-03-30 17:13:41,106 - saving 60 | $ Patternfy - 2014-03-30 17:13:41,408 - success 61 | ``` 62 | 63 | * `-g` or `--original` is the original 3D obj model 64 | * `-m` or `--modified` is the 3D obj model with modified UVs 65 | * `-t` or `--texture` is a png of the original texture of the 3D model 66 | * `-s` or `--save` is the name to save the output image as 67 | 68 | The `objects/output.png` should be the same as the `objects/expected_output.png`. 69 | 70 | Testing 71 | ------------- 72 | 73 | Running the tests requires [nose](https://nose.readthedocs.org/en/latest/). 
74 | 75 | ```sh 76 | $ nosetests 77 | ``` 78 | -------------------------------------------------------------------------------- /objects/expected_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/expected_output.png -------------------------------------------------------------------------------- /objects/face_transformations/face_transformation_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/face_transformations/face_transformation_0.png -------------------------------------------------------------------------------- /objects/face_transformations/face_transformation_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/face_transformations/face_transformation_1.png -------------------------------------------------------------------------------- /objects/face_transformations/face_transformation_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/face_transformations/face_transformation_2.png -------------------------------------------------------------------------------- /objects/face_transformations/face_transformation_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/face_transformations/face_transformation_3.png -------------------------------------------------------------------------------- /objects/face_transformations/face_transformation_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/face_transformations/face_transformation_4.png -------------------------------------------------------------------------------- /objects/face_transformations/face_transformation_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/face_transformations/face_transformation_5.png -------------------------------------------------------------------------------- /objects/modified.obj: -------------------------------------------------------------------------------- 1 | # 3ds Max Wavefront OBJ Exporter v0.97b - (c)2007 guruware 2 | # File Created: 16.06.2013 18:08:42 3 | 4 | # 5 | # object Box001 6 | # 7 | 8 | v -31.2294 -10.8674 30.1223 9 | v -31.2294 -10.8674 -20.2252 10 | v 16.9936 -10.8674 -20.2252 11 | v 16.9936 -10.8674 30.1223 12 | v -31.2294 21.8890 30.1223 13 | v 16.9936 21.8890 30.1223 14 | v 16.9936 21.8890 -20.2252 15 | v -31.2294 21.8890 -20.2252 16 | # 8 vertices 17 | 18 | vn 0.0000 -1.0000 -0.0000 19 | vn 0.0000 1.0000 -0.0000 20 | vn 0.0000 0.0000 1.0000 21 | vn 1.0000 0.0000 -0.0000 22 | vn 0.0000 0.0000 -1.0000 23 | vn -1.0000 0.0000 -0.0000 24 | # 6 vertex normals 25 | 26 | vt 0.7972 0.2545 0.0000 27 | vt 0.8516 0.1524 0.0000 28 | vt 0.9537 0.2068 0.0000 29 | vt 0.8993 0.3089 0.0000 30 | vt 
0.3139 0.4543 0.0000 31 | vt 0.3972 0.3740 0.0000 32 | vt 0.4775 0.4573 0.0000 33 | vt 0.3941 0.5376 0.0000 34 | vt 0.3110 0.2555 0.0000 35 | vt 0.3949 0.1759 0.0000 36 | vt 0.4745 0.2599 0.0000 37 | vt 0.3906 0.3395 0.0000 38 | vt 0.5547 0.4595 0.0000 39 | vt 0.6338 0.3750 0.0000 40 | vt 0.7183 0.4541 0.0000 41 | vt 0.6392 0.5385 0.0000 42 | vt 0.5258 0.1355 0.0000 43 | vt 0.7387 0.1355 0.0000 44 | vt 0.7387 0.3484 0.0000 45 | vt 0.5258 0.3484 0.0000 46 | vt 0.8367 0.5070 0.0000 47 | vt 0.8357 0.3913 0.0000 48 | vt 0.9110 0.3906 0.0000 49 | vt 0.9120 0.5063 0.0000 50 | # 24 texture coords 51 | 52 | g Box001 53 | s 2 54 | f 1/1/1 2/2/1 3/3/1 4/4/1 55 | s 4 56 | f 5/5/2 6/6/2 7/7/2 8/8/2 57 | s 8 58 | f 1/9/3 4/10/3 6/11/3 5/12/3 59 | s 16 60 | f 4/13/4 3/14/4 7/15/4 6/16/4 61 | s 32 62 | f 3/17/5 2/18/5 8/19/5 7/20/5 63 | s 64 64 | f 2/21/6 1/22/6 5/23/6 8/24/6 65 | # 6 polygons 66 | 67 | -------------------------------------------------------------------------------- /objects/original.obj: -------------------------------------------------------------------------------- 1 | # 3ds Max Wavefront OBJ Exporter v0.97b - (c)2007 guruware 2 | # File Created: 16.06.2013 18:07:01 3 | 4 | # 5 | # object Box001 6 | # 7 | 8 | v -31.2294 -10.8674 30.1223 9 | v -31.2294 -10.8674 -20.2252 10 | v 16.9936 -10.8674 -20.2252 11 | v 16.9936 -10.8674 30.1223 12 | v -31.2294 21.8890 30.1223 13 | v 16.9936 21.8890 30.1223 14 | v 16.9936 21.8890 -20.2252 15 | v -31.2294 21.8890 -20.2252 16 | # 8 vertices 17 | 18 | vn 0.0000 -1.0000 -0.0000 19 | vn 0.0000 1.0000 -0.0000 20 | vn 0.0000 0.0000 1.0000 21 | vn 1.0000 0.0000 -0.0000 22 | vn 0.0000 0.0000 -1.0000 23 | vn -1.0000 0.0000 -0.0000 24 | # 6 vertex normals 25 | 26 | vt 0.1890 0.6133 0.0000 27 | vt 0.1890 0.7290 0.0000 28 | vt 0.0733 0.7290 0.0000 29 | vt 0.0733 0.6133 0.0000 30 | vt 0.0730 0.8024 0.0000 31 | vt 0.1887 0.8024 0.0000 32 | vt 0.1887 0.9181 0.0000 33 | vt 0.0730 0.9181 0.0000 34 | vt 0.2540 0.7993 0.0000 35 | vt 0.3697 0.7993 0.0000 36 | vt 0.3697 0.9150 0.0000 37 | vt 0.2540 0.9150 0.0000 38 | vt 0.4426 0.7869 0.0000 39 | vt 0.5583 0.7869 0.0000 40 | vt 0.5583 0.9026 0.0000 41 | vt 0.4426 0.9026 0.0000 42 | vt 0.6388 0.7890 0.0000 43 | vt 0.7545 0.7890 0.0000 44 | vt 0.7545 0.9047 0.0000 45 | vt 0.6388 0.9047 0.0000 46 | vt 0.8142 0.8010 0.0000 47 | vt 0.9299 0.8010 0.0000 48 | vt 0.9299 0.8763 0.0000 49 | vt 0.8142 0.8763 0.0000 50 | # 24 texture coords 51 | 52 | g Box001 53 | s 2 54 | f 1/1/1 2/2/1 3/3/1 4/4/1 55 | s 4 56 | f 5/5/2 6/6/2 7/7/2 8/8/2 57 | s 8 58 | f 1/9/3 4/10/3 6/11/3 5/12/3 59 | s 16 60 | f 4/13/4 3/14/4 7/15/4 6/16/4 61 | s 32 62 | f 3/17/5 2/18/5 8/19/5 7/20/5 63 | s 64 64 | f 2/21/6 1/22/6 5/23/6 8/24/6 65 | # 6 polygons 66 | 67 | -------------------------------------------------------------------------------- /objects/original_texture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/original_texture.png -------------------------------------------------------------------------------- /objects/transformations/transformation_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_0.png -------------------------------------------------------------------------------- /objects/transformations/transformation_1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_1.png -------------------------------------------------------------------------------- /objects/transformations/transformation_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_10.png -------------------------------------------------------------------------------- /objects/transformations/transformation_11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_11.png -------------------------------------------------------------------------------- /objects/transformations/transformation_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_2.png -------------------------------------------------------------------------------- /objects/transformations/transformation_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_3.png -------------------------------------------------------------------------------- /objects/transformations/transformation_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_4.png -------------------------------------------------------------------------------- /objects/transformations/transformation_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_5.png -------------------------------------------------------------------------------- /objects/transformations/transformation_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_6.png -------------------------------------------------------------------------------- /objects/transformations/transformation_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_7.png -------------------------------------------------------------------------------- /objects/transformations/transformation_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_8.png -------------------------------------------------------------------------------- /objects/transformations/transformation_9.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/caretdashcaret/Patternfy/705d8487357e551bf447ad44e425218e495e6709/objects/transformations/transformation_9.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | nose 2 | numpy 3 | Pillow 4 | logging 5 | argparse 6 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | from src import ObjectLoader 2 | from src import TextureLoader 3 | from src import SeamEquilizer 4 | from src import ImageTransformer 5 | import argparse 6 | import logging 7 | 8 | def get_args(): 9 | parser = argparse.ArgumentParser() 10 | 11 | parser.add_argument('-g', '--original', help="Original OBJ file") 12 | parser.add_argument('-m', '--modified', help="Modified OBJ file") 13 | parser.add_argument('-t', '--texture', help="PNG texture of the original OBJ file") 14 | parser.add_argument('-s', '--save', help="Filename to save the output as") 15 | 16 | return parser.parse_args() 17 | 18 | def setup_logger(): 19 | LOG_FORMAT = "Patternfy - %(asctime)s - %(message)s" 20 | logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) 21 | return logging.getLogger(__name__) 22 | 23 | def main(args): 24 | 25 | LOGGER.info("loading texture") 26 | image = TextureLoader(args.texture).load_texture() 27 | 28 | LOGGER.info("loading original OBJ") 29 | original_face_to_vt, original_edges, original_vt = ObjectLoader(args.original).load_obj() 30 | 31 | LOGGER.info("loading modified OBJ") 32 | modified_face_to_vt, modified_edges, modified_vt = ObjectLoader(args.modified).load_obj() 33 | 34 | LOGGER.info("seam equilizing") 35 | SeamEquilizer(modified_edges, modified_vt).equilize() 36 | 37 | LOGGER.info("transforming image") 38 | image_transformer = ImageTransformer(image, original_face_to_vt, original_vt, modified_face_to_vt, modified_vt) 39 | transformed_image = image_transformer.transform() 40 | 41 | LOGGER.info("saving") 42 | transformed_image.save(args.save) 43 | 44 | LOGGER.info("success") 45 | 46 | args = get_args() 47 | LOGGER = setup_logger() 48 | 49 | if __name__ == '__main__': 50 | main(args) 51 | 52 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Jenny' 2 | from load_obj import ObjectLoader 3 | from load_texture import TextureLoader 4 | from seam_equilizer import SeamEquilizer 5 | from image_transformer import ImageTransformer 6 | from matrix_computer import MatrixComputer -------------------------------------------------------------------------------- /src/image_transformer.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from PIL import ImageDraw 3 | import numpy 4 | from matrix_computer import MatrixComputer 5 | 6 | class ImageTransformer(): 7 | 8 | def __init__(self, original_image, original_face_to_vts, original_vts, mod_face_to_vts, mod_vts): 9 | self.original_image = original_image 10 | self.original_face_to_vts = original_face_to_vts 11 | self.original_vts = original_vts 12 | self.mod_face_to_vts = mod_face_to_vts 13 | self.modified_vts = mod_vts 14 | self.width, self.height = original_image.size 15 | self.modified_image = None 16 | 17 | def 
transform(self): 18 | """creates a new image and transforms the triangles from the old image to the new image""" 19 | 20 | self.modified_image = self.create_new_image() 21 | self.transform_faces() 22 | 23 | return self.modified_image 24 | 25 | def create_new_image(self): 26 | factor_w, factor_h = self.find_scaling_factors() 27 | new_width, new_height = self.find_new_image_size(factor_w, factor_h) 28 | new_image = Image.new("RGB", (new_width, new_height), "white") 29 | 30 | return new_image 31 | 32 | def map_within_range(self, number): 33 | return number - numpy.floor(number) 34 | 35 | def find_min_max(self, vts): 36 | y_coords = [vt[1] for vt in vts] 37 | x_coords = [vt[0] for vt in vts] 38 | 39 | return max(x_coords) - min(x_coords), max(y_coords) - min(y_coords) 40 | 41 | def find_scaling_factors(self): 42 | orignal_vts_width, original_vts_height = self.find_min_max(self.original_vts) 43 | modified_vts_width, modified_vts_height = self.find_min_max(self.modified_vts) 44 | 45 | factor_w = numpy.ceil(modified_vts_width / orignal_vts_width) 46 | factor_h = numpy.ceil(modified_vts_height / original_vts_height) 47 | 48 | return factor_w, factor_h 49 | 50 | def find_new_image_size(self, factor_w, factor_h): 51 | new_width = int(self.width * factor_w) 52 | new_height = int(self.height * factor_h) 53 | return new_width, new_height 54 | 55 | def transform_faces(self): 56 | 57 | for original_face_idx, original_face in self.original_face_to_vts.items(): 58 | original_image_points, modified_image_points = self.get_all_image_points(original_face_idx) 59 | self.transform_image_points(original_image_points, modified_image_points) 60 | 61 | #the getting image points from coords is a little tricky because UVs (vts) have 0,0 as lower left corner 62 | #and PIL Image has 0,0 as the upper left corner 63 | #vts on the orignal model obey the UV definition and are mapped back between 0 and 1 to get the proper coordinate on 64 | #the original texture 65 | #that rule is disobeyed in the modified model, and can result in larger, non-square textures, which is good for 66 | #pattern drafting but bad for reconstructing the original UV mapping 67 | def get_img_pt_x(self, point): 68 | return int(point*self.width) 69 | 70 | def get_img_pt_y(self, point): 71 | return int((1-point)*self.height) 72 | 73 | def get_img_pts(self, pts): 74 | return [(self.get_img_pt_x(x), self.get_img_pt_y(y)) for x, y in pts] 75 | 76 | def get_original_img_pts(self, pts): 77 | fx = [(self.map_within_range(x), self.map_within_range(y)) for x, y in pts] 78 | return self.get_img_pts(fx) 79 | 80 | def get_modified_img_pts(self, pts): 81 | return self.get_img_pts(pts) 82 | 83 | def get_all_image_points(self, index): 84 | original_face = self.original_face_to_vts[index] 85 | pts = [self.original_vts[i-1] for i in original_face] 86 | orignal_image_points = self.get_original_img_pts(pts) 87 | 88 | corresponding_modified_face = self.mod_face_to_vts[index] 89 | modified_pts = [self.modified_vts[i-1] for i in corresponding_modified_face] 90 | modified_image_points = self.get_modified_img_pts(modified_pts) 91 | 92 | return orignal_image_points, modified_image_points 93 | 94 | def transform_image_points(self, original_image_points, modified_image_points): 95 | matrix_computer = MatrixComputer(original_image_points, modified_image_points) 96 | sections = matrix_computer.get_transforming_triangles() 97 | matrices = matrix_computer.get_transformations(sections) 98 | 99 | for triangles, matrix in zip(sections, matrices): 100 | source_triangle, 
destination_triangle = triangles 101 | self.apply_transformation_to_image(source_triangle, destination_triangle, matrix) 102 | 103 | def apply_transformation_to_image(self, source_triangle, destination_triangle, transformation): 104 | 105 | source_image_copy = self.original_image.copy() 106 | source_image_draw = ImageDraw.Draw(source_image_copy) 107 | source_image_draw.polygon(source_triangle) 108 | 109 | transformed = self.original_image.transform(self.modified_image.size, Image.AFFINE, transformation) 110 | 111 | #image mask 112 | mask = Image.new('1', self.modified_image.size) 113 | mask_draw = ImageDraw.Draw(mask) 114 | mask_draw.polygon(destination_triangle, fill=255) 115 | 116 | destination_draw = ImageDraw.Draw(self.modified_image) 117 | destination_draw.polygon(destination_triangle, fill=255) 118 | 119 | #paste final transformed image 120 | self.modified_image.paste(transformed, mask=mask) -------------------------------------------------------------------------------- /src/load_obj.py: -------------------------------------------------------------------------------- 1 | class ObjectLoader(): 2 | """handles reading of the OBJ files""" 3 | 4 | def __init__(self, filename): 5 | self.filename = filename 6 | 7 | def load_obj(self): 8 | """vts are the texture coordinates (x,y) 9 | faces are an array of points, indexing into the vts""" 10 | vts, faces = self.read_obj_file() 11 | 12 | f_to_vts = self.map_faces_to_vts(faces) 13 | 14 | edges_to_vts = self.map_edges_to_vts(faces) 15 | 16 | return f_to_vts, edges_to_vts, vts 17 | 18 | def read_obj_file(self): 19 | """extracts the texture coordinates, vts, an array of coordinates 20 | and faces, a array of points that index into the vts array, as well as other arrays""" 21 | 22 | obj_file = open(self.filename) 23 | #texture coords 24 | vts = [] 25 | 26 | faces = [] 27 | 28 | for line in obj_file: 29 | if line.startswith("vt"): 30 | vts.append(self.parse_coordinates(line)) 31 | if line.startswith("f"): 32 | faces.append(self.parse_face(line)) 33 | 34 | return vts, faces 35 | 36 | def map_edges_to_vts(self, faces): 37 | edges_to_vts = {} 38 | for face_idx, face_value in enumerate(faces): 39 | edges_to_vts = self.parse_edges(face_value, edges_to_vts) 40 | return edges_to_vts 41 | 42 | def parse_edges(self, points_array, edges_to_vts): 43 | max_edges = len(points_array) 44 | 45 | this_edge = 0 46 | next_edge = 1 47 | while this_edge < max_edges: 48 | 49 | #handles special case when there's only 2 points to a face 50 | #in reality, can't form a face with only two points, so this shouldn't happen 51 | if (this_edge == 1) and (max_edges == 2): 52 | break 53 | 54 | start = points_array[this_edge] 55 | end = points_array[next_edge] 56 | edge = (start[0], end[0]) 57 | edge2 = (end[0],start[0]) 58 | 59 | if edge in edges_to_vts: 60 | edges_to_vts[edge].append([start[1],end[1]]) 61 | elif edge2 in edges_to_vts: 62 | edges_to_vts[edge2].append([end[1], start[1]]) 63 | else: 64 | edges_to_vts[edge] = [[start[1],end[1]]] 65 | 66 | #increase count 67 | this_edge += 1 68 | if next_edge == max_edges - 1: 69 | next_edge = 0 70 | else: 71 | next_edge += 1 72 | 73 | return edges_to_vts 74 | 75 | def parse_coordinates(self, line): 76 | coords = line.split() 77 | return [float(x) for x in coords[1:3]] 78 | 79 | def parse_face(self, line): 80 | parsed_face = [] 81 | points_in_face = line.split() 82 | for point in points_in_face[1:]: 83 | split_point = point.split("/") 84 | converted_to_int = [int(x) for x in split_point] 85 | parsed_face.append(converted_to_int) 86 | 
return parsed_face 87 | 88 | def extract_vt_index(self, point_array): 89 | extracted_vts = [] 90 | for point in point_array: 91 | extracted_vts.append(point[1]) 92 | return extracted_vts 93 | 94 | def map_faces_to_vts(self, faces): 95 | f_to_vts = {} 96 | for face_idx, face_value in enumerate(faces): 97 | f_to_vts[face_idx] = self.extract_vt_index(face_value) 98 | return f_to_vts -------------------------------------------------------------------------------- /src/load_texture.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | 3 | class TextureLoader(): 4 | """loads the image texture associated with the original OBJ""" 5 | 6 | def __init__(self, image_name): 7 | self.image_name = image_name 8 | 9 | def load_texture(self): 10 | image = Image.open(self.image_name) 11 | return image 12 | -------------------------------------------------------------------------------- /src/matrix_computer.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | class MatrixComputer(): 4 | 5 | def __init__(self, source_pts, destination_pts): 6 | self.source_pts = source_pts 7 | self.destination_pts = destination_pts 8 | 9 | def get_transformations(self, transforming_triangles): 10 | transformation_matrices = [] 11 | 12 | for source, destination in transforming_triangles: 13 | transformation_matrices.append(self.compute_transformation_matrix(source, destination)) 14 | return transformation_matrices 15 | 16 | def get_transforming_triangles(self): 17 | """only handles faces with 3 or 4 points""" 18 | triangles = [] 19 | 20 | source_triangle = self.source_pts[0:3] 21 | destination_triangle = self.destination_pts[0:3] 22 | 23 | triangles.append((source_triangle, destination_triangle)) 24 | 25 | if (len(self.source_pts)>3) and (len(self.destination_pts)>3): 26 | source_triangle = self.source_pts[2:] 27 | source_triangle.append(self.source_pts[0]) 28 | 29 | destination_triangle = self.destination_pts[2:] 30 | destination_triangle.append(self.destination_pts[0]) 31 | 32 | triangles.append((source_triangle, destination_triangle)) 33 | 34 | return triangles 35 | 36 | def compute_transformation_matrix(self, source_triangle, destination_triangle): 37 | ((x11,x12), (x21,x22), (x31,x32)) = source_triangle 38 | ((y11,y12), (y21,y22), (y31,y32)) = destination_triangle 39 | 40 | M = numpy.array([ 41 | [y11, y12, 1, 0, 0, 0], 42 | [y21, y22, 1, 0, 0, 0], 43 | [y31, y32, 1, 0, 0, 0], 44 | [0, 0, 0, y11, y12, 1], 45 | [0, 0, 0, y21, y22, 1], 46 | [0, 0, 0, y31, y32, 1] 47 | ]) 48 | 49 | y = numpy.array([x11, x21, x31, x12, x22, x32]) 50 | 51 | #try-catch is to prevent singular matrices, it's a hack solution to just shift pixels by 1 52 | #singular matrices can occur because the point coordinates are mapped to the nearest integer pixel 53 | try: 54 | A = numpy.linalg.solve(M, y) 55 | except: 56 | M = numpy.array([ 57 | [y11, y12+1, 1, 0, 0, 0], 58 | [y21, y22-1, 1, 0, 0, 0], 59 | [y31, y32, 1, 0, 0, 0], 60 | [0, 0, 0, y11, y12+1, 1], 61 | [0, 0, 0, y21, y22-1, 1], 62 | [0, 0, 0, y31, y32, 1] 63 | ]) 64 | y = numpy.array([x11, x21, x31, x12, x22, x32]) 65 | A = numpy.linalg.solve(M, y) 66 | return A -------------------------------------------------------------------------------- /src/seam_equilizer.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | class SeamEquilizer(): 4 | 5 | def __init__(self, edges, vts): 6 | self.edges = edges 7 | self.vts = vts 8 | 9 | 
#makes sure seams are of equal length for reassembly 10 | def equilize(self): 11 | 12 | for edge_key, edge_variants in self.edges.items(): 13 | 14 | variants_to_lengths = self.find_lengths_of_edge_variants(edge_variants) 15 | 16 | #find max length / scale factor 17 | max_edge_length = max(variants_to_lengths.values()) 18 | 19 | #scale by that length 20 | self.scale_variant_edge(edge_variants, max_edge_length) 21 | 22 | 23 | def find_lengths_of_edge_variants(self, edge_variants): 24 | variants_to_lengths = {} 25 | for variant in edge_variants: 26 | hashable_key = str(variant) 27 | variants_to_lengths[hashable_key] = self.find_length(variant) 28 | 29 | return variants_to_lengths 30 | 31 | def scale_variant_edge(self, edge_variants, scale_factor): 32 | for variant in edge_variants: 33 | pt0, pt1 = self.get_vts_values(variant) 34 | newpt0, newpt1 = self.scale(pt0, pt1, scale_factor) 35 | self.set_new_vts_values(newpt0, newpt1, variant) 36 | 37 | def set_new_vts_values(self, value_0, value_1, edge_variant): 38 | idx_0, idx_1 = self.get_vts_index_for_edge(edge_variant) 39 | self.vts[idx_0] = value_0 40 | self.vts[idx_1] = value_1 41 | 42 | def get_vts_index_for_edge(self, edge_variant): 43 | return edge_variant[0] - 1, edge_variant[1] - 1 44 | 45 | def get_vts_values(self, edge_variant): 46 | idx_0, idx_1 = self.get_vts_index_for_edge(edge_variant) 47 | return self.vts[idx_0], self.vts[idx_1] 48 | 49 | def find_length(self, edge_variant): 50 | pt0, pt1 = self.get_vts_values(edge_variant) 51 | 52 | length = self.euclidean_dist(pt0,pt1) 53 | return length 54 | 55 | def scale(self, pt0, pt1, factor): 56 | #scale from the midpoint of the two points 57 | currentlen = self.euclidean_dist(pt0, pt1) 58 | midpt = self.midpoint(pt0, pt1) 59 | 60 | newpt_x = pt0[0] + (pt1[0]-pt0[0]) / currentlen * factor 61 | newpt_y = pt0[1] + (pt1[1]-pt0[1]) / currentlen * factor 62 | 63 | newpt1 = [newpt_x,newpt_y] 64 | 65 | new_midpt = self.midpoint(pt0, newpt1) 66 | diff_x = new_midpt[0] - midpt[0] 67 | diff_y = new_midpt[1] - midpt[1] 68 | 69 | newpt0 = [pt0[0]-diff_x, pt0[1]-diff_y] 70 | newpt1 = [newpt1[0]-diff_x, newpt1[1]-diff_y] 71 | 72 | return [round(x,4) for x in newpt0], [round(x,4) for x in newpt1] 73 | 74 | def midpoint(self, pt_a, pt_b): 75 | return [(pt_a[0]+pt_b[0])/2.0, (pt_a[1]+pt_b[1])/2.0] 76 | 77 | def euclidean_dist(self, pt_a, pt_b): 78 | diff1 = pt_a[0] - pt_b[0] 79 | diff2 = pt_a[1] - pt_b[1] 80 | return math.sqrt(diff1 * diff1 + diff2 * diff2) 81 | -------------------------------------------------------------------------------- /test/temp/temp.md: -------------------------------------------------------------------------------- 1 | This directory contains files (PNGs) that will be created and deleted by the tests. 
-------------------------------------------------------------------------------- /test/test_image_transformer.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src import ObjectLoader 3 | from src import TextureLoader 4 | from src import SeamEquilizer 5 | from src import ImageTransformer 6 | from src import MatrixComputer 7 | from PIL import ImageChops 8 | from PIL import Image 9 | import random 10 | import os 11 | 12 | class ImageTransformerTest(unittest.TestCase): 13 | 14 | def setUp(self): 15 | path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 16 | texture_filename = "objects/original_texture.png" 17 | texture = os.path.join(path, texture_filename) 18 | 19 | original_filename = "objects/original.obj" 20 | original = os.path.join(path, original_filename) 21 | 22 | modified_filename = "objects/modified.obj" 23 | modified = os.path.join(path, modified_filename) 24 | 25 | image = TextureLoader(texture).load_texture() 26 | original_face_to_vt, original_edges, original_vt = ObjectLoader(original).load_obj() 27 | modified_face_to_vt, modified_edges, modified_vt = ObjectLoader(modified).load_obj() 28 | SeamEquilizer(modified_edges, modified_vt).equilize() 29 | self.image_transformer = ImageTransformer(image, original_face_to_vt, original_vt, modified_face_to_vt, modified_vt) 30 | 31 | self.path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 32 | self.transformations = range(0, 12) 33 | self.face_transformations = range(0, 6) 34 | 35 | def test_map_within_range(self): 36 | mapped = self.image_transformer.map_within_range(250.5) 37 | assert mapped == 0.5 38 | 39 | random_number = random.random() * 10 40 | random_mapped = self.image_transformer.map_within_range(random_number) 41 | 42 | assert random_mapped <= 1.0 43 | 44 | def test_find_min_max(self): 45 | vts = [(1.0, 2.0), (3.0, 10.0)] 46 | result = self.image_transformer.find_min_max(vts) 47 | assert result == (2.0, 8.0) 48 | 49 | def test_find_scaling_factors(self): 50 | #based on loaded data 51 | factorw, factorh = self.image_transformer.find_scaling_factors() 52 | 53 | assert factorw == 1 54 | assert factorh == 2 55 | 56 | def test_find_new_image_size(self): 57 | new_width, new_height = self.image_transformer.find_new_image_size(10, 20) 58 | 59 | assert new_width == 10240 60 | assert new_height == 20480 61 | 62 | def test_get_img_pt_x(self): 63 | point = 0.2 64 | image_point = self.image_transformer.get_img_pt_x(point) 65 | assert image_point == 204 66 | 67 | point = 0.0 68 | image_point = self.image_transformer.get_img_pt_x(point) 69 | assert image_point == 0 70 | 71 | def test_get_img_pt_y(self): 72 | point = 0.2 73 | image_point = self.image_transformer.get_img_pt_y(point) 74 | assert image_point == 819 75 | 76 | point = 0.0 77 | image_point = self.image_transformer.get_img_pt_y(point) 78 | assert image_point == 1024 79 | 80 | def test_get_img_pts(self): 81 | points = [(0.2, 0.0)] 82 | image_points = self.image_transformer.get_img_pts(points) 83 | 84 | assert image_points == [(204, 1024)] 85 | 86 | def test_get_original_img_pts(self): 87 | points = [(2.2, 500.0)] 88 | image_points = self.image_transformer.get_original_img_pts(points) 89 | 90 | assert image_points == [(204, 1024)] 91 | 92 | def test_get_modified_img_pts(self): 93 | points = [(0.2, 0.0)] 94 | image_points = self.image_transformer.get_modified_img_pts(points) 95 | 96 | assert image_points == [(204, 1024)] 97 | 98 | def test_get_all_image_points(self): 99 | #this is from loaded 
data 100 | originial, modified = self.image_transformer.get_all_image_points(2) 101 | expected_original = [(260, 205), (378, 205), (378, 87), (260, 87)] 102 | expected_modified = [(318, 762), (404, 843), (485, 757), (399, 676)] 103 | 104 | assert originial == expected_original 105 | assert modified == expected_modified 106 | 107 | def test_apply_transformation_to_image(self): 108 | 109 | counter = 0 110 | self.image_transformer.modified_image = self.image_transformer.create_new_image() 111 | 112 | for original_face_idx, original_face in self.image_transformer.original_face_to_vts.items(): 113 | original_image_points, modified_image_points = self.image_transformer.get_all_image_points(original_face_idx) 114 | matrix_computer = MatrixComputer(original_image_points, modified_image_points) 115 | sections = matrix_computer.get_transforming_triangles() 116 | matrices = matrix_computer.get_transformations(sections) 117 | 118 | for triangles, matrix in zip(sections, matrices): 119 | source_triangle, destination_triangle = triangles 120 | self.image_transformer.apply_transformation_to_image(source_triangle, destination_triangle, matrix) 121 | filename = self.get_apply_transformation_filename(counter) 122 | self.image_transformer.modified_image.save(filename) 123 | counter += 1 124 | 125 | for step_number in self.transformations: 126 | expected = self.get_expected_applied_filename(step_number) 127 | actual = self.get_apply_transformation_filename(step_number) 128 | self.assertTrue(self.image_equal(expected, actual), "transformation_" + str(step_number) + ".pngs are not equal ") 129 | 130 | self.teardown_apply_transformation_to_image() 131 | 132 | 133 | def get_apply_transformation_filename(self, step_num): 134 | transf_filename = "test/temp/transformation_" + str(step_num) + ".png" 135 | transf_file = os.path.join(self.path, transf_filename) 136 | return transf_file 137 | 138 | def get_expected_applied_filename(self, step_num): 139 | transf_filename = "objects/transformations/transformation_" + str(step_num) + ".png" 140 | transf_file = os.path.join(self.path, transf_filename) 141 | return transf_file 142 | 143 | def image_equal(self, im1_name, im2_name): 144 | im1 = Image.open(im1_name) 145 | im2 = Image.open(im2_name) 146 | return ImageChops.difference(im1, im2).getbbox() is None 147 | 148 | def teardown_apply_transformation_to_image(self): 149 | for transf in self.transformations: 150 | filename = self.get_apply_transformation_filename(transf) 151 | try: #need a better way to check for if file exists 152 | os.remove(filename) 153 | except: 154 | pass 155 | 156 | def test_transform_image_points(self): 157 | counter = 0 158 | self.image_transformer.modified_image = self.image_transformer.create_new_image() 159 | 160 | for original_face_idx, original_face in self.image_transformer.original_face_to_vts.items(): 161 | original_image_points, modified_image_points = self.image_transformer.get_all_image_points(original_face_idx) 162 | self.image_transformer.transform_image_points(original_image_points, modified_image_points) 163 | filename = self.get_transform_imgpts_filename(counter) 164 | self.image_transformer.modified_image.save(filename) 165 | counter += 1 166 | 167 | for transformation in self.face_transformations: 168 | expected = self.get_expected_imgpts_filename(transformation) 169 | actual = self.get_transform_imgpts_filename(transformation) 170 | self.assertTrue(self.image_equal(expected, actual), "face_transformation_" + str(transformation) + ".pngs are not equal") 171 | 172 | 
self.teardown_transform_imgpts() 173 | 174 | def get_transform_imgpts_filename(self, step_num): 175 | transf_filename = "test/temp/face_transformation_" + str(step_num) + ".png" 176 | transf_file = os.path.join(self.path, transf_filename) 177 | return transf_file 178 | 179 | def get_expected_imgpts_filename(self, step_num): 180 | transf_filename = "objects/face_transformations/face_transformation_" + str(step_num) + ".png" 181 | transf_file = os.path.join(self.path, transf_filename) 182 | return transf_file 183 | 184 | def teardown_transform_imgpts(self): 185 | for transf in self.face_transformations: 186 | filename = self.get_transform_imgpts_filename(transf) 187 | try: #need a better way to check for if file exists 188 | os.remove(filename) 189 | except: 190 | pass 191 | -------------------------------------------------------------------------------- /test/test_loaders.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src import TextureLoader 3 | from src import ObjectLoader 4 | import os 5 | import PIL 6 | 7 | class TextureLoaderTest(unittest.TestCase): 8 | 9 | def test_load_texture(self): 10 | 11 | path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 12 | filename = "objects/original_texture.png" 13 | file_location = os.path.join(path, filename) 14 | 15 | new_texture = TextureLoader(file_location) 16 | image = new_texture.load_texture() 17 | 18 | assert isinstance(image, PIL.PngImagePlugin.PngImageFile) 19 | 20 | class ObjectLoaderTest(unittest.TestCase): 21 | 22 | def setUp(self): 23 | path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 24 | filename = "objects/original.obj" 25 | file_location = os.path.join(path, filename) 26 | 27 | self.loader = ObjectLoader(file_location) 28 | 29 | def test_load_object(self): 30 | f_to_vts, edges_to_vts, vts = self.loader.load_obj() 31 | 32 | expected_vts = [[0.189, 0.6133], [0.189, 0.729], [0.0733, 0.729], [0.0733, 0.6133], [0.073, 0.8024], [0.1887, 0.8024], [0.1887, 0.9181], [0.073, 0.9181], [0.254, 0.7993], [0.3697, 0.7993], [0.3697, 0.915], [0.254, 0.915], [0.4426, 0.7869], [0.5583, 0.7869], [0.5583, 0.9026], [0.4426, 0.9026], [0.6388, 0.789], [0.7545, 0.789], [0.7545, 0.9047], [0.6388, 0.9047], [0.8142, 0.801], [0.9299, 0.801], [0.9299, 0.8763], [0.8142, 0.8763]] 33 | expected_edges_to_vts = {(1, 2): [[1, 2], [22, 21]], (7, 8): [[7, 8], [20, 19]], (6, 7): [[6, 7], [16, 15]], (4, 6): [[10, 11], [13, 16]], (5, 6): [[5, 6], [12, 11]], (4, 1): [[4, 1], [10, 9]], (2, 8): [[18, 19], [21, 24]], (8, 5): [[8, 5], [24, 23]], (2, 3): [[2, 3], [18, 17]], (3, 7): [[14, 15], [17, 20]], (5, 1): [[12, 9], [23, 22]], (3, 4): [[3, 4], [14, 13]]} 34 | expected_f_to_vts = {0: [1, 2, 3, 4], 1: [5, 6, 7, 8], 2: [9, 10, 11, 12], 3: [13, 14, 15, 16], 4: [17, 18, 19, 20], 5: [21, 22, 23, 24]} 35 | 36 | assert f_to_vts == expected_f_to_vts 37 | assert edges_to_vts == expected_edges_to_vts 38 | assert vts == expected_vts 39 | 40 | def test_read_obj_file(self): 41 | vts, faces = self.loader.read_obj_file() 42 | 43 | expected_vts = [[0.189, 0.6133], [0.189, 0.729], [0.0733, 0.729], [0.0733, 0.6133], [0.073, 0.8024], [0.1887, 0.8024], [0.1887, 0.9181], [0.073, 0.9181], [0.254, 0.7993], [0.3697, 0.7993], [0.3697, 0.915], [0.254, 0.915], [0.4426, 0.7869], [0.5583, 0.7869], [0.5583, 0.9026], [0.4426, 0.9026], [0.6388, 0.789], [0.7545, 0.789], [0.7545, 0.9047], [0.6388, 0.9047], [0.8142, 0.801], [0.9299, 0.801], [0.9299, 0.8763], [0.8142, 0.8763]] 44 | expected_faces = [[[1, 1, 1], [2, 
2, 1], [3, 3, 1], [4, 4, 1]], [[5, 5, 2], [6, 6, 2], [7, 7, 2], [8, 8, 2]], [[1, 9, 3], [4, 10, 3], [6, 11, 3], [5, 12, 3]], [[4, 13, 4], [3, 14, 4], [7, 15, 4], [6, 16, 4]], [[3, 17, 5], [2, 18, 5], [8, 19, 5], [7, 20, 5]], [[2, 21, 6], [1, 22, 6], [5, 23, 6], [8, 24, 6]]] 45 | 46 | assert vts == expected_vts 47 | assert faces == expected_faces 48 | 49 | def test_parse_coordinates(self): 50 | test_string = "vt 0.1887 0.8024 0.0000" 51 | parsed_result = self.loader.parse_coordinates(test_string) 52 | 53 | assert parsed_result == [0.1887, 0.8024] 54 | 55 | def test_parse_face(self): 56 | test_string = "f 1/9/3 4/10/3 6/11/3 5/12/3" 57 | parsed_result = self.loader.parse_face(test_string) 58 | 59 | assert parsed_result == [[1, 9, 3], [4, 10, 3], [6, 11, 3], [5, 12, 3]] 60 | 61 | def test_extract_vt_index(self): 62 | test_face = [[1, 9, 3], [4, 10, 3], [6, 11, 3], [5, 12, 3]] 63 | extracted_vt_index = self.loader.extract_vt_index(test_face) 64 | 65 | assert extracted_vt_index == [9, 10, 11, 12] 66 | 67 | def test_map_faces_to_vts(self): 68 | test_faces = [[[1, 9, 3], [4, 10, 3], [6, 11, 3], [5, 12, 3]],[[1, 1, 3], [4, 2, 3], [6, 3, 3], [5, 4, 3]]] 69 | f_to_vts = self.loader.map_faces_to_vts(test_faces) 70 | expected_f_to_vts = {0:[9, 10, 11, 12], 1:[1, 2, 3, 4]} 71 | 72 | assert f_to_vts == expected_f_to_vts 73 | 74 | def test_parse_edges(self): 75 | test_face = [[1, 9, 3], [4, 10, 3], [6, 11, 3], [5, 12, 3]] 76 | edges_to_vts = {} 77 | edges_to_vts = self.loader.parse_edges(test_face, edges_to_vts) 78 | 79 | expected_edges_to_vts = {(5, 1): [[12, 9]], (6, 5): [[11, 12]], (4, 6): [[10, 11]], (1, 4): [[9, 10]]} 80 | assert edges_to_vts == expected_edges_to_vts 81 | 82 | test_additional_face = [[5, 90, 3], [1, 120, 3]] 83 | edges_to_vts = self.loader.parse_edges(test_additional_face, edges_to_vts) 84 | 85 | expected_edges_to_vts = {(5, 1): [[12, 9], [90, 120]], (6, 5): [[11, 12]], (4, 6): [[10, 11]], (1, 4): [[9, 10]]} 86 | assert edges_to_vts == expected_edges_to_vts 87 | 88 | def test_map_edges_to_vts(self): 89 | test_faces = [[[1, 9, 3], [4, 10, 3], [6, 11, 3], [5, 12, 3]], [[1, 90, 3], [5, 120, 3]]] 90 | edges_to_vts = self.loader.map_edges_to_vts(test_faces) 91 | expected_edges_to_vts = {(5, 1): [[12, 9], [120, 90]], (6, 5): [[11, 12]], (4, 6): [[10, 11]], (1, 4): [[9, 10]]} 92 | 93 | assert edges_to_vts == expected_edges_to_vts -------------------------------------------------------------------------------- /test/test_matrix_computer.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src import MatrixComputer 3 | 4 | class MatrixComputerTest(unittest.TestCase): 5 | 6 | def setUp(self): 7 | #the points are picked because they're a simple translation 8 | source_pts = [(0.0,0.0), (1.0,0.0), (1.0,1.0), (0.0, 1.0)] 9 | destination_pts = [(1.0,0.0), (2.0,0.0), (2.0,1.0), (1.0, 1.0)] 10 | 11 | self.computer = MatrixComputer(source_pts, destination_pts) 12 | 13 | def test_get_transforming_triangles(self): 14 | results = self.computer.get_transforming_triangles() 15 | expected_results = [([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)], [(1.0, 0.0), (2.0, 0.0), (2.0, 1.0)]), ([(1.0, 1.0), (0.0, 1.0), (0.0, 0.0)], [(2.0, 1.0), (1.0, 1.0), (1.0, 0.0)])] 16 | assert results == expected_results 17 | assert len(results) == 2 18 | 19 | def test_compute_transformation_matrix(self): 20 | source_point = ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0)) 21 | destination_point = ((1.0, 0.0), (2.0, 0.0), (2.0, 1.0)) 22 | result = 
self.computer.compute_transformation_matrix(source_point, destination_point) 23 | 24 | #the transformation maxtrix should be a column matrix: 25 | # [1.0 26 | # 0 27 | # -1.0 28 | # 0 29 | # 1.0 30 | # 0] 31 | 32 | assert result[0] == 1.0 33 | assert result[1] == 0.0 34 | assert result[2] == -1.0 35 | assert result[3] == 0 36 | assert result[4] == 1.0 37 | assert result[5] == 0 38 | 39 | def test_get_transformations(self): 40 | transforming_triangles = [([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)], [(1.0, 0.0), (2.0, 0.0), (2.0, 1.0)]), ([(1.0, 1.0), (0.0, 1.0), (0.0, 0.0)], [(2.0, 1.0), (1.0, 1.0), (1.0, 0.0)])] 41 | transformations = self.computer.get_transformations(transforming_triangles) 42 | 43 | assert len(transforming_triangles) == len(transformations) -------------------------------------------------------------------------------- /test/test_patternfy.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src import ObjectLoader 3 | from src import TextureLoader 4 | from src import SeamEquilizer 5 | from src import ImageTransformer 6 | from PIL import ImageChops, Image 7 | import os 8 | 9 | class PatternfyTest(unittest.TestCase): 10 | 11 | def test_patternfy(self): 12 | path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 13 | texture_filename = "objects/original_texture.png" 14 | texture = os.path.join(path, texture_filename) 15 | 16 | original_filename = "objects/original.obj" 17 | original = os.path.join(path, original_filename) 18 | 19 | modified_filename = "objects/modified.obj" 20 | modified = os.path.join(path, modified_filename) 21 | 22 | save_filename = "test/temp/output.png" 23 | save = os.path.join(path, save_filename) 24 | 25 | image = TextureLoader(texture).load_texture() 26 | original_face_to_vt, original_edges, original_vt = ObjectLoader(original).load_obj() 27 | modified_face_to_vt, modified_edges, modified_vt = ObjectLoader(modified).load_obj() 28 | SeamEquilizer(modified_edges, modified_vt).equilize() 29 | image_transformer = ImageTransformer(image, original_face_to_vt, original_vt, modified_face_to_vt, modified_vt) 30 | transformed_image = image_transformer.transform() 31 | transformed_image.save(save) 32 | 33 | expected_filename = "objects/expected_output.png" 34 | expected = os.path.join(path, expected_filename) 35 | 36 | self.assertTrue(self.image_equal(save, expected), expected_filename + " and " + save_filename + " are not equal") 37 | 38 | def image_equal(self, im1_name, im2_name): 39 | im1 = Image.open(im1_name) 40 | im2 = Image.open(im2_name) 41 | return ImageChops.difference(im1, im2).getbbox() is None 42 | 43 | def tearDown(self): 44 | path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 45 | save_filename = "test/temp/output.png" 46 | save = os.path.join(path, save_filename) 47 | 48 | os.remove(save) 49 | 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /test/test_seam_equillizer.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src import ObjectLoader 3 | from src import SeamEquilizer 4 | import os 5 | import random 6 | 7 | class SeamEquilizerTest(unittest.TestCase): 8 | 9 | def setUp(self): 10 | path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 11 | filename = "objects/modified.obj" 12 | file_location = os.path.join(path, filename) 13 | new_object = ObjectLoader(file_location) 14 | f_to_vts, edges_to_vts, vts = new_object.load_obj() 15 
| 16 | self.equilizer = SeamEquilizer(edges_to_vts, vts) 17 | 18 | def test_equilize(self): 19 | self.equilizer.equilize() 20 | expected_vts = [[0.7972, 0.2545], [0.8087, 0.1295], [0.9966, 0.2297], [0.8993, 0.3089], [0.3122, 0.45], [0.3972, 0.374], [0.5125, 0.4236], [0.3608, 0.5756], [0.311, 0.2555], [0.3949, 0.1759], [0.4745, 0.2599], [0.3906, 0.3395], [0.5544, 0.4604], [0.5973, 0.3425], [0.7585, 0.4821], [0.6358, 0.5421], [0.5258, 0.1355], [0.7387, 0.1355], [0.7387, 0.3484], [0.5258, 0.3484], [0.7679, 0.5076], [0.8155, 0.3915], [0.9312, 0.3904], [0.9808, 0.5057]] 21 | assert self.equilizer.vts == expected_vts 22 | 23 | def test_scale_variant_edge(self): 24 | edge_variants ={(5, 1): [[12, 9], [10, 11]]} 25 | self.equilizer.scale_variant_edge(edge_variants, 10) 26 | 27 | expected_vts = [[5.1763, -1.5558], [0.8516, 0.1524], [0.9537, 0.2068], [0.8993, 0.3089], [-4.0652, 2.2646], [0.3972, 0.374], [0.4775, 0.4573], [0.3941, 0.5376], [0.311, 0.2555], [0.3949, 0.1759], [0.4745, 0.2599], [0.3906, 0.3395], [0.5547, 0.4595], [0.6338, 0.375], [0.7183, 0.4541], [0.6392, 0.5385], [0.5258, 0.1355], [0.7387, 0.1355], [0.7387, 0.3484], [0.5258, 0.3484], [0.8367, 0.507], [0.8357, 0.3913], [0.911, 0.3906], [0.912, 0.5063]] 28 | 29 | assert self.equilizer.vts == expected_vts 30 | 31 | def test_find_lengths_of_edge_variants(self): 32 | edges = {(5, 1): [[12, 9], [120, 90]], (6, 5): [[11, 12]], (4, 6): [[10, 11]], (1, 4): [[9, 10]]} 33 | result = self.equilizer.find_lengths_of_edge_variants(edges) 34 | expected = {'(5, 1)': 0.5229712515999326, '(6, 5)': 0.11570211752599861, '(4, 6)': 0.5063026960228436, '(1, 4)': 0.11568824486524114} 35 | 36 | assert result == expected 37 | 38 | def test_set_new_vts_values(self): 39 | value_a = 100.0 40 | value_b = 200.0 41 | edge = [1, 2] #not a real edge, but valid for lookups 42 | 43 | self.equilizer.set_new_vts_values(value_a, value_b, edge) 44 | 45 | assert self.equilizer.vts[0] == value_a 46 | assert self.equilizer.vts[1] == value_b 47 | 48 | def test_get_vts_values(self): 49 | test_edge_variant = [5, 4] #not a real edge but will do a valid lookup in self.equilizer's vts 50 | result = self.equilizer.get_vts_values(test_edge_variant) 51 | 52 | assert result == ([0.3139, 0.4543], [0.8993, 0.3089]) 53 | 54 | def test_get_vts_index_for_edge(self): 55 | test_edge_variant = [5, 4] 56 | result = self.equilizer.get_vts_index_for_edge(test_edge_variant) 57 | 58 | assert result == (4, 3) 59 | 60 | def test_scale(self): 61 | pt_a = (4.0, 2.0) 62 | pt_b = (-6.0, -6.0) 63 | factor = 10.0 64 | 65 | result = self.equilizer.scale(pt_a, pt_b, factor) 66 | expected = ([2.9043, 1.1235], [-4.9043, -5.1235]) 67 | 68 | assert result == expected 69 | 70 | def test_midpoint(self): 71 | pt_a = (5, 2) 72 | pt_b = (-5, 2) 73 | 74 | assert self.equilizer.midpoint(pt_a, pt_b) == [0.0, 2.0] 75 | 76 | def test_euclidean_dist(self): 77 | pt_a = (5, 2) 78 | pt_b = (random.random() * -10, random.random() * -2) 79 | pt_c = (-5, 2) 80 | 81 | random_result = self.equilizer.euclidean_dist(pt_a, pt_b) 82 | assert random_result == self.equilizer.euclidean_dist(pt_b, pt_a) 83 | assert random_result > 0 84 | 85 | result = self.equilizer.euclidean_dist(pt_a, pt_c) 86 | assert result == 10 87 | 88 | --------------------------------------------------------------------------------