├── .gitignore
├── docs
│   └── teaser.jpg
├── data
│   ├── print_examples
│   │   ├── 1.jpg
│   │   ├── 2.jpg
│   │   ├── 3.jpg
│   │   ├── 4.jpg
│   │   └── 5.jpg
│   └── texture_examples
│       ├── 1.jpg
│       ├── 2.jpg
│       ├── 3.jpg
│       ├── 4.jpg
│       └── 5.jpg
├── environment.yml
├── inference_print.py
├── inference_texture.py
├── README.md
└── pipeline.py

/.gitignore:
--------------------------------------------------------------------------------
outputs/
models/
__pycache__/
upload_model.py
--------------------------------------------------------------------------------
/docs/teaser.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/docs/teaser.jpg
--------------------------------------------------------------------------------
/data/print_examples/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/print_examples/1.jpg
--------------------------------------------------------------------------------
/data/print_examples/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/print_examples/2.jpg
--------------------------------------------------------------------------------
/data/print_examples/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/print_examples/3.jpg
--------------------------------------------------------------------------------
/data/print_examples/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/print_examples/4.jpg
--------------------------------------------------------------------------------
/data/print_examples/5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/print_examples/5.jpg
--------------------------------------------------------------------------------
/data/texture_examples/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/texture_examples/1.jpg
--------------------------------------------------------------------------------
/data/texture_examples/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/texture_examples/2.jpg
--------------------------------------------------------------------------------
/data/texture_examples/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/texture_examples/3.jpg
--------------------------------------------------------------------------------
/data/texture_examples/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/texture_examples/4.jpg
--------------------------------------------------------------------------------
/data/texture_examples/5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/humansensinglab/fabric-diffusion/HEAD/data/texture_examples/5.jpg
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
name: fabric-diff
channels:
  - defaults
dependencies:
  - pip:
      - diffusers==0.32.1
      - torch==2.5.1
      - transformers==4.47.1
--------------------------------------------------------------------------------
/inference_print.py:
--------------------------------------------------------------------------------
import argparse
import os

from pipeline import FabricDiffusionPipeline


def run_flatten_print(pipeline, warp_dataset_path, output_path=None, n_samples=3):
    """Generate flattened print images for every patch image in `warp_dataset_path`."""
    os.makedirs(output_path, exist_ok=True)
    all_image_names = os.listdir(warp_dataset_path)
    for image_name in all_image_names:
        texture_name = image_name.split('.')[0]
        # Load the captured (warped) patch and sample n_samples flattened prints from it.
        texture_patch = pipeline.load_patch_data(os.path.join(warp_dataset_path, image_name))
        gen_imgs = pipeline.flatten_print(texture_patch, n_samples=n_samples)
        for i, gen_img in enumerate(gen_imgs):
            gen_img.save(os.path.join(output_path, f'{texture_name}_gen_{i}.png'))


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--device", type=str, default="cuda:0", help="Device to run the model"
    )
    parser.add_argument(
        "--texture_checkpoint", default=None, type=str, help="Path to the texture model checkpoint"
    )
    parser.add_argument(
        "--print_checkpoint", default=None, type=str, help="Path to the print (logo) model checkpoint"
    )
    parser.add_argument(
        "--src_dir", default='./data/print_examples', type=str, help="Path to the input image directory"
    )
    parser.add_argument(
        "--save_dir", type=str, default='./outputs/print', help="Directory to save the output"
    )
    parser.add_argument(
        "--n_samples", type=int, default=3, help="Number of generated images per input"
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    device = args.device
    texture_checkpoint = args.texture_checkpoint
    print_checkpoint = args.print_checkpoint
    src_dir = args.src_dir
    save_dir = args.save_dir

    pipeline = FabricDiffusionPipeline(device, texture_checkpoint, print_checkpoint=print_checkpoint)

    os.makedirs(save_dir, exist_ok=True)
    run_flatten_print(pipeline, src_dir, output_path=save_dir, n_samples=args.n_samples)
--------------------------------------------------------------------------------
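inference_print.py above batch-processes an input directory. The snippet below is a minimal single-image sketch, not a file from the repository: it only reuses the FabricDiffusionPipeline calls visible in that script, and the checkpoint paths are hypothetical placeholders for wherever the downloaded weights live.

```python
# Minimal single-image sketch (not part of the repository); it mirrors the
# calls made in inference_print.py. The checkpoint paths are placeholders.
import os

from pipeline import FabricDiffusionPipeline

pipe = FabricDiffusionPipeline(
    "cuda:0",
    "./models/texture_checkpoint",                 # hypothetical path
    print_checkpoint="./models/print_checkpoint",  # hypothetical path
)

os.makedirs("./outputs/print", exist_ok=True)
patch = pipe.load_patch_data("./data/print_examples/1.jpg")
for i, img in enumerate(pipe.flatten_print(patch, n_samples=3)):
    img.save(f"./outputs/print/1_gen_{i}.png")
```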
/inference_texture.py:
--------------------------------------------------------------------------------
import argparse
import os

from pipeline import FabricDiffusionPipeline


def run_flatten_texture(pipeline, warp_dataset_path, output_path=None, n_samples=3):
    """Generate flattened texture images for every patch image in `warp_dataset_path`."""
    os.makedirs(output_path, exist_ok=True)
    all_image_names = os.listdir(warp_dataset_path)
    for image_name in all_image_names:
        texture_name = image_name.split('.')[0]
        # Load the captured (warped) patch and sample n_samples flattened textures from it.
        texture_patch = pipeline.load_patch_data(os.path.join(warp_dataset_path, image_name))
        gen_imgs = pipeline.flatten_texture(texture_patch, n_samples=n_samples)
        for i, gen_img in enumerate(gen_imgs):
            gen_img.save(os.path.join(output_path, f'{texture_name}_gen_{i}.png'))


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--device", type=str, default="cuda:0", help="Device to run the model"
    )
    parser.add_argument(
        "--texture_checkpoint", default=None, type=str, help="Path to the texture model checkpoint"
    )
    parser.add_argument(
        "--print_checkpoint", default=None, type=str, help="Path to the print (logo) model checkpoint"
    )
    parser.add_argument(
        "--src_dir", default='./data/texture_examples', type=str, help="Path to the input image directory"
    )
    parser.add_argument(
        "--save_dir", type=str, default='./outputs/texture', help="Directory to save the output"
    )
    parser.add_argument(
        "--n_samples", type=int, default=3, help="Number of generated images per input"
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    device = args.device
    texture_checkpoint = args.texture_checkpoint
    print_checkpoint = args.print_checkpoint
    src_dir = args.src_dir
    save_dir = args.save_dir

    pipeline = FabricDiffusionPipeline(device, texture_checkpoint, print_checkpoint=print_checkpoint)

    os.makedirs(save_dir, exist_ok=True)
    run_flatten_texture(pipeline, src_dir, output_path=save_dir, n_samples=args.n_samples)
--------------------------------------------------------------------------------
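Both inference scripts depend on pipeline.py, whose contents are not reproduced in this section. The stub below is only an inferred sketch of the interface they rely on, reconstructed from their call sites; it is not the repository's implementation, and the type hints, default values, and docstrings are assumptions.

```python
# Inferred interface sketch (not the repository's pipeline.py). Method names
# and call signatures come from inference_print.py / inference_texture.py;
# bodies, __init__ parameter names, and return types are assumptions. The
# returned images are assumed to expose .save(), since the scripts call
# gen_img.save(...).
from typing import Any, List, Optional


class FabricDiffusionPipeline:
    def __init__(self, device: str, texture_checkpoint: Optional[str],
                 print_checkpoint: Optional[str] = None) -> None:
        """Load the texture model and, optionally, the print/logo model onto `device`."""
        ...

    def load_patch_data(self, image_path: str) -> Any:
        """Read one captured fabric patch image and preprocess it for the model."""
        ...

    def flatten_texture(self, texture_patch: Any, n_samples: int = 3) -> List[Any]:
        """Return `n_samples` generated flat texture images for the input patch."""
        ...

    def flatten_print(self, texture_patch: Any, n_samples: int = 3) -> List[Any]:
        """Return `n_samples` generated flat print/logo images for the input patch."""
        ...
```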
/README.md:
--------------------------------------------------------------------------------
# FabricDiffusion

[Paper (ACM DL)](https://dl.acm.org/doi/10.1145/3680528.3687637)
[arXiv](https://arxiv.org/abs/2410.01801)
[Project page](https://humansensinglab.github.io/fabric-diffusion/)
[Video](https://youtu.be/xYiyjwldtWc)


## Overview