├── .gitignore
├── .idea
│   ├── .gitignore
│   ├── aws.xml
│   ├── houseganpp.iml
│   ├── inspectionProfiles
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── .ipynb_checkpoints
│   ├── test-checkpoint.py
│   └── train-checkpoint.py
├── LICENSE
├── README.md
├── checkpoints
│   └── pretrained.pth
├── data
│   ├── json
│   │   ├── 18477.json
│   │   ├── 19307.json
│   │   ├── 36233.json
│   │   ├── 45012.json
│   │   ├── 45161.json
│   │   └── 7513.json
│   └── sample_list.txt
├── dataset
│   ├── __init__.py
│   └── floorplan_dataset_maps_functional_high_res.py
├── fix_script.py
├── misc
│   ├── .ipynb_checkpoints
│   │   └── utils-checkpoint.py
│   ├── __init__.py
│   ├── arial.ttf
│   ├── clustering_tsne.py
│   ├── compatibility_figure.py
│   ├── compute_FID.py
│   ├── convert_to_onnx.py
│   ├── debug.py
│   ├── intersections.py
│   ├── old
│   │   ├── autoencoder_dataset.py
│   │   ├── coordconv.py
│   │   ├── data_stats.py
│   │   ├── dump_to_html.sh
│   │   ├── experiment_replace_feature_volumes.py
│   │   ├── experiment_replace_feature_volumes_feats.py
│   │   ├── finetune_generator.py
│   │   ├── finetune_generator_feats.py
│   │   ├── generate_features.py
│   │   ├── generate_features_reconstruction.py
│   │   └── generate_floorplans_for_vectorization.py
│   ├── read_data.py
│   ├── read_floorplan.py
│   ├── teaser
│   │   ├── 0_for_figure_output.svg
│   │   ├── 0_for_figure_output_new.svg
│   │   ├── 1.svg
│   │   ├── 1_for_figure_output.svg
│   │   ├── 1_for_figure_output_new.svg
│   │   ├── 2_for_figure_output.svg
│   │   ├── 3.svg
│   │   ├── 3_for_figure_output.svg
│   │   ├── 3_for_figure_output_new.svg
│   │   ├── 4_for_figure_output.svg
│   │   ├── 5.svg
│   │   ├── 5_for_figure_output.svg
│   │   ├── 5_for_figure_output_new (3rd copy).svg
│   │   ├── 5_for_figure_output_new (another copy).svg
│   │   ├── 5_for_figure_output_new (copy).svg
│   │   ├── 5_for_figure_output_new.svg
│   │   ├── 6_for_figure_output.svg
│   │   ├── 6_for_figure_output_new.svg
│   │   ├── 7_for_figure_output.svg
│   │   ├── 7_for_figure_output_new.svg
│   │   ├── 8_for_figure_output.svg
│   │   ├── 8_for_figure_output_new.svg
│   │   ├── 9_for_figure_output.svg
│   │   ├── 9_for_figure_output_new.svg
│   │   └── edit_svg.py
│   ├── train_autoencoder.py
│   ├── train_exp_3.py
│   ├── train_exp_high_res.py
│   ├── train_exp_high_res_per_node_type.py
│   └── utils.py
├── models
│   ├── .ipynb_checkpoints
│   │   ├── models-checkpoint.py
│   │   └── models_improved-checkpoint.py
│   ├── __init__.py
│   ├── model_resnet.py
│   ├── models.py
│   └── models_improved.py
├── refs
│   └── sample.png
├── requirements.txt
├── scripts
│   ├── bayesian_opt.py
│   ├── evaluation_FID.py
│   ├── evaluation_GED_v3.py
│   ├── evaluation_high_res_init.py
│   ├── optimization_valid_houses_v3.py
│   ├── optimizing_compatibility.py
│   ├── optimizing_compatibility_valid_houses.py
│   ├── train_exp_3.py
│   ├── train_exp_high_res.py
│   ├── train_exp_high_res_per_node_type.py
│   └── viz.py
├── test.py
├── testing
│   ├── evaluate_parallel.py
│   ├── evaluation_high_res_max_subgraph.py
│   ├── evaluation_high_res_random.py
│   ├── pytorch_visualize.py
│   ├── reconstruct.py
│   ├── reconstruct_heuristic.py
│   ├── reconstruction_dataset.py
│   ├── run_exps.py
│   ├── test.py
│   ├── test_experiment_given_masks_gt.py
│   ├── variation_bbs.py
│   ├── variation_bbs_with_target_graph.py
│   ├── variation_bbs_with_target_graph_segments.py
│   ├── variation_bbs_with_target_graph_segments_suppl.py
│   ├── variation_bbs_with_target_graph_segments_suppl_conditioned.py
│   ├── variation_test.py
│   ├── vectorize_OLD.py
│   ├── vectorize_floorplans.py
│   └── visualize.py
└── train.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pth
2 | *.png
3 | *.npy
4 | *.pyc
5 | *cache*
6 | *.txt
7 | *.jpg
8 | venv
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 | # CodeStream ignored files
10 | /codestream.xml
11 |
--------------------------------------------------------------------------------
/.idea/aws.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/houseganpp.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.ipynb_checkpoints/test-checkpoint.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from dataset.floorplan_dataset_maps_functional_high_res import FloorplanGraphDataset, floorplan_collate_fn
12 |
13 | from torch.utils.data import DataLoader
14 | from torchvision import datasets
15 | from torch.autograd import Variable
16 |
17 | import torch.nn as nn
18 | import torch.nn.functional as F
19 | import torch.autograd as autograd
20 | import torch
21 | from PIL import Image, ImageDraw, ImageFont
22 | import svgwrite
23 | from models.models import Generator
24 | # from models.models_improved import Generator
25 |
26 | from misc.utils import _init_input, ID_COLOR, draw_masks, draw_graph, estimate_graph
27 | from collections import defaultdict
28 | import matplotlib.pyplot as plt
29 | import networkx as nx
30 | import glob
31 | import cv2
32 | import webcolors
33 | import time
34 |
35 | parser = argparse.ArgumentParser()
36 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
37 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
38 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
39 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
40 | parser.add_argument("--num_variations", type=int, default=1, help="number of variations")
41 | parser.add_argument("--exp_folder", type=str, default='exps', help="destination folder")
42 |
43 | opt = parser.parse_args()
44 | print(opt)
45 |
46 | # PARAMS
47 | target_set = 8
48 | phase='eval'
49 | checkpoint = './checkpoints/exp_debug_E_165000.pth'
50 | PREFIX = "./"
51 | IM_SIZE = 64
52 | output_dir = "./dump"
53 |
54 | # Create folder
55 | os.makedirs(opt.exp_folder, exist_ok=True)
56 |
57 | # Initialize generator and discriminator
58 | model = Generator()
59 | model.load_state_dict(torch.load(checkpoint), strict=False)
60 | model = model.eval()
61 |
62 | # Initialize variables
63 | cuda = True if torch.cuda.is_available() else False
64 | device = 'cuda' if cuda else 'cpu'
65 | model = model.to(device)
66 | rooms_path = '../'
67 |
68 | # initialize dataset iterator
69 | fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split=phase)
70 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
71 | batch_size=opt.batch_size,
72 | shuffle=False, collate_fn=floorplan_collate_fn)
73 | # optimizers
74 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
75 |
76 |
77 | # run inference
78 | def _infer(graph, model, prev_state=None):
79 |
80 | # configure input to the network
81 | z, given_masks_in, given_nds, given_eds = _init_input(graph, prev_state)
82 | # run inference model
83 | with torch.no_grad():
84 | masks = model(z.to(device), given_masks_in.to(device), given_nds.to(device), given_eds.to(device))
85 | masks = masks.detach().cpu().numpy()
86 | return masks
87 |
88 | def main():
89 | globalIndex = 0
90 | for i, sample in enumerate(fp_loader):
91 |
92 | # draw real graph and groundtruth
93 | mks, nds, eds, _, _ = sample
94 | real_nodes = np.where(nds.detach().cpu()==1)[-1]
95 | graph = [nds, eds]
96 | true_graph_obj, graph_im = draw_graph([real_nodes, eds.detach().cpu().numpy()])
97 | os.makedirs('./{}/'.format(output_dir), exist_ok=True) # create output dir before saving
98 | graph_im.save('./{}/graph_{}.png'.format(output_dir, i)) # save graph
99 |
100 | # add room types incrementally
101 | _types = sorted(list(set(real_nodes)))
102 | selected_types = [_types[:k+1] for k in range(10)]
103 | _round = 0
104 |
105 | # initialize layout
106 | state = {'masks': None, 'fixed_nodes': []}
107 | masks = _infer(graph, model, state)
108 | im0 = draw_masks(masks.copy(), real_nodes)
109 | im0 = torch.tensor(np.array(im0).transpose((2, 0, 1)))/255.0
110 | save_image(im0, './{}/fp_init_{}.png'.format(output_dir, i), nrow=1, normalize=False)
111 |
112 | # generate per room type
113 | for _iter, _types in enumerate(selected_types):
114 | _fixed_nds = np.concatenate([np.where(real_nodes == _t)[0] for _t in _types]) \
115 | if len(_types) > 0 else np.array([])
116 | state = {'masks': masks, 'fixed_nodes': _fixed_nds}
117 | masks = _infer(graph, model, state)
118 |
119 | # save final floorplans
120 | imk = draw_masks(masks.copy(), real_nodes)
121 | imk = torch.tensor(np.array(imk).transpose((2, 0, 1)))/255.0
122 | save_image(imk, './{}/fp_final_{}.png'.format(output_dir, i), nrow=1, normalize=False)
123 |
124 |
125 | # miss, gim = estimate_graph(masks, real_nodes, true_graph_obj)
126 | # if miss >= 0:
127 | # gim = torch.tensor(np.array(gim).transpose((2, 0, 1)))/255.0
128 | # save_image(imk, './{}/fp_graph_{}_iter_{}_final.png'.format(output_dir, _iter, _round), nrow=1, normalize=False)
129 | # save_image(gim, './{}/gp_graph_{}_iter_{}_final.png'.format(output_dir, _iter, _round), nrow=1, normalize=False)
130 | # _round += 1
131 | # end = time.time()
132 |
133 | if __name__ == '__main__':
134 | main()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | House-GAN++
2 | ======
3 |
4 | Code and instructions for our paper:
5 | [House-GAN++: Generative Adversarial Layout Refinement Network towards Intelligent Computational Agent for Professional Architects](https://arxiv.org/abs/2103.02574), CVPR 2021. Project [website](https://ennauata.github.io/houseganpp/page.html).
6 |
7 | Data
8 | ------
9 | 
10 | We have used the [RPLAN dataset](http://staff.ustc.edu.cn/~fuxm/projects/DeepLayout/index.html), which offers 60k vector-graphics floorplans designed by professional architects. Qualitative and quantitative evaluations based on the three standard metrics in the literature (realism, diversity, and compatibility) demonstrate that the proposed system outperforms the current state of the art by a large margin.
11 |
12 |
13 | Demo
14 | ------
15 | 
16 | Please check out our live [demo](http://www.houseganpp.com).
17 |
18 | Running pretrained models
19 | ------
20 | ***See requirements.txt to check the dependencies before running the code.***
21 |
22 | To run a pretrained model, follow these steps:
23 | - Run ***python test.py***.
24 | - Check the results in the output folder (a minimal inference sketch follows this README).
25 |
26 | Training models
27 | ------
28 | - Download the raw [RPLAN dataset](http://staff.ustc.edu.cn/~fuxm/projects/DeepLayout/index.html).
29 | - Run [this script](https://github.com/sepidsh/Housegan-data-reader) to process the dataset and extract JSON files.
30 | - The extracted JSON files serve directly as input to our dataloader (a schema sketch follows the first sample under /data/json/).
31 |
32 | Citation
33 | ------
34 | Please consider citing our work.
35 | ```
36 | @inproceedings{nauata2021house,
37 | title={House-GAN++: Generative Adversarial Layout Refinement Network towards Intelligent Computational Agent for Professional Architects},
38 | author={Nauata, Nelson and Hosseini, Sepidehsadat and Chang, Kai-Hung and Chu, Hang and Cheng, Chin-Yi and Furukawa, Yasutaka},
39 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
40 | pages={13632--13641},
41 | year={2021}
42 | }
43 | ```
44 |
45 | Contact
46 | ------
47 | If you have any questions, feel free to contact me at nnauata@sfu.ca.
48 |
49 |
50 | Acknowledgement
51 | ------
52 | This research is partially supported by NSERC Discovery Grants, NSERC Discovery Grants Accelerator Supplements, DND/NSERC Discovery Grant Supplement, and Autodesk. We would like to thank architects and students for participating in our user study.
53 |
--------------------------------------------------------------------------------
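For reference, here is a minimal sketch of the inference path that test.py follows, distilled from /.ipynb_checkpoints/test-checkpoint.py above; the checkpoint path, `target_set=8`, and `rooms_path='../'` are the script's defaults and may need adjusting for your setup:

```python
import numpy as np
import torch
import torchvision.transforms as transforms

from dataset.floorplan_dataset_maps_functional_high_res import (
    FloorplanGraphDataset, floorplan_collate_fn)
from models.models import Generator
from misc.utils import _init_input, draw_masks

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load the pretrained generator (strict=False mirrors test.py)
model = Generator()
model.load_state_dict(torch.load('./checkpoints/pretrained.pth',
                                 map_location=device), strict=False)
model = model.to(device).eval()

# Dataset of bubble-diagram graphs; rooms_path='../' follows the script
fp_dataset = FloorplanGraphDataset(
    '../', transforms.Normalize(mean=[0.5], std=[0.5]),
    target_set=8, split='eval')
fp_loader = torch.utils.data.DataLoader(
    fp_dataset, batch_size=1, shuffle=False,
    collate_fn=floorplan_collate_fn)

# One refinement pass: no fixed rooms, all masks generated from noise
mks, nds, eds, _, _ = next(iter(fp_loader))
z, given_masks_in, given_nds, given_eds = _init_input(
    [nds, eds], {'masks': None, 'fixed_nodes': []})
with torch.no_grad():
    masks = model(z.to(device), given_masks_in.to(device),
                  given_nds.to(device), given_eds.to(device))

# Render the generated room masks to an image
real_nodes = np.where(nds.detach().cpu() == 1)[-1]
im = draw_masks(masks.detach().cpu().numpy(), real_nodes)
```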
/checkpoints/pretrained.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sakmalh/houseganpp/8e6b0b9eaa8f32fdef2fd31a4d3ea4e621728237/checkpoints/pretrained.pth
--------------------------------------------------------------------------------
/data/json/18477.json:
--------------------------------------------------------------------------------
1 | {"room_type": [3, 2, 5, 3, 4, 1, 17, 17, 17, 17, 17, 15], "boxes": [[84.0, 44.0, 123.0, 93.0], [160.0, 53.0, 194.0, 93.0], [62.0, 154.0, 80.0, 171.0], [84.0, 154.0, 133.0, 212.0], [147.0, 117.0, 171.0, 150.0], [62.0, 44.0, 171.0, 150.0], [81.0, 159.0, 83.0, 171.0], [161.0, 94.0, 173.0, 96.0], [124.0, 82.0, 126.0, 94.0], [122.0, 151.0, 134.0, 153.0], [146.0, 114.0, 158.0, 116.0], [172.0, 99.0, 174.0, 111.0]], "edges": [[84.0, 44.0, 84.0, 93.0, 3, 0], [84.0, 93.0, 123.0, 93.0, 3, 0], [123.0, 93.0, 123.0, 44.0, 3, 1], [123.0, 44.0, 84.0, 44.0, 3, 0], [160.0, 53.0, 160.0, 93.0, 2, 0], [160.0, 93.0, 171.0, 93.0, 2, 1], [171.0, 93.0, 171.0, 77.0, 2, 0], [171.0, 77.0, 194.0, 77.0, 2, 0], [194.0, 77.0, 194.0, 53.0, 2, 0], [194.0, 53.0, 160.0, 53.0, 2, 0], [62.0, 154.0, 62.0, 171.0, 5, 0], [62.0, 171.0, 80.0, 171.0, 5, 0], [80.0, 171.0, 80.0, 154.0, 5, 3], [80.0, 154.0, 62.0, 154.0, 5, 0], [84.0, 154.0, 84.0, 212.0, 3, 5], [84.0, 212.0, 133.0, 212.0, 3, 0], [133.0, 212.0, 133.0, 154.0, 3, 0], [133.0, 154.0, 84.0, 154.0, 3, 1], [147.0, 117.0, 147.0, 150.0, 4, 0], [147.0, 150.0, 171.0, 150.0, 4, 0], [171.0, 150.0, 171.0, 117.0, 4, 0], [171.0, 117.0, 147.0, 117.0, 4, 1], [127.0, 44.0, 127.0, 97.0, 1, 3], [127.0, 97.0, 62.0, 97.0, 1, 0], [62.0, 97.0, 62.0, 150.0, 1, 0], [62.0, 150.0, 143.0, 150.0, 1, 3], [143.0, 150.0, 143.0, 113.0, 1, 0], [143.0, 113.0, 171.0, 113.0, 1, 4], [171.0, 113.0, 171.0, 97.0, 1, 0], [171.0, 97.0, 156.0, 97.0, 1, 2], [156.0, 97.0, 156.0, 44.0, 1, 0], [156.0, 44.0, 127.0, 44.0, 1, 0], [81.0, 159.0, 81.0, 171.0, 17, 5], [81.0, 171.0, 83.0, 171.0, 17, 0], [83.0, 171.0, 83.0, 159.0, 17, 3], [83.0, 159.0, 81.0, 159.0, 17, 0], [161.0, 94.0, 161.0, 96.0, 17, 2], [161.0, 96.0, 173.0, 96.0, 17, 1], [173.0, 96.0, 173.0, 94.0, 17, 0], [173.0, 94.0, 161.0, 94.0, 17, 0], [124.0, 82.0, 124.0, 94.0, 17, 3], [124.0, 94.0, 126.0, 94.0, 17, 0], [126.0, 94.0, 126.0, 82.0, 17, 1], [126.0, 82.0, 124.0, 82.0, 17, 0], [122.0, 151.0, 122.0, 153.0, 17, 0], [122.0, 153.0, 134.0, 153.0, 17, 3], [134.0, 153.0, 134.0, 151.0, 17, 0], [134.0, 151.0, 122.0, 151.0, 17, 1], [146.0, 114.0, 146.0, 116.0, 17, 4], [146.0, 116.0, 158.0, 116.0, 17, 0], [158.0, 116.0, 158.0, 114.0, 17, 0], [158.0, 114.0, 146.0, 114.0, 17, 1], [172.0, 99.0, 172.0, 111.0, 15, 1], [172.0, 111.0, 174.0, 111.0, 15, 0], [174.0, 111.0, 174.0, 99.0, 15, 0], [174.0, 99.0, 172.0, 99.0, 15, 0]], "ed_rm": [[0], [0], [0, 5], [0], [1], [1, 5], [1], [1], [1], [1], [2], [2], [2, 3], [2], [3, 2], [3], [3], [3, 5], [4], [4], [4], [4, 5], [5, 0], [5], [5], [5, 3], [5], [5, 4], [5], [5, 1], [5], [5], [6, 2], [6], [6, 3], [6], [7, 1], [7, 5], [7], [7], [8, 0], [8], [8, 5], [8], [9], [9, 3], [9], [9, 5], [10, 4], [10], [10], [10, 5], [11, 5], [11], [11], [11]]}
--------------------------------------------------------------------------------
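The extracted JSON files all share the schema visible above. A minimal sketch of loading and sanity-checking one sample (the meaning of the two trailing integers on each edge is inferred from these samples, not from documentation):

```python
import json

# Schema, as seen in the sample above:
#   room_type : one integer label per room
#   boxes     : one [x0, y0, x1, y1] rectangle per room
#   edges     : wall segments [x0, y0, x1, y1] plus two integer labels
#   ed_rm     : indices of the room(s) each edge belongs to
with open('data/json/18477.json') as f:
    fp = json.load(f)

assert len(fp['room_type']) == len(fp['boxes'])  # one box per room
assert len(fp['edges']) == len(fp['ed_rm'])      # one room list per edge
print('room labels:', fp['room_type'])
print('first box :', fp['boxes'][0])
print('first edge:', fp['edges'][0])
```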
/data/json/19307.json:
--------------------------------------------------------------------------------
1 | {"room_type": [5, 3, 3, 4, 2, 1, 17, 17, 17, 17, 17, 15], "boxes": [[75.0, 33.0, 128.0, 45.0], [134.0, 51.0, 181.0, 106.0], [134.0, 145.0, 171.0, 202.0], [152.0, 112.0, 181.0, 139.0], [93.0, 177.0, 128.0, 223.0], [75.0, 51.0, 146.0, 171.0], [133.0, 107.0, 144.0, 111.0], [81.0, 46.0, 121.0, 50.0], [133.0, 140.0, 144.0, 144.0], [147.0, 115.0, 151.0, 125.0], [116.0, 172.0, 126.0, 176.0], [74.0, 140.0, 86.0, 144.0]], "edges": [[75.0, 33.0, 75.0, 45.0, 5, 0], [75.0, 45.0, 128.0, 45.0, 5, 1], [128.0, 45.0, 128.0, 33.0, 5, 0], [128.0, 33.0, 75.0, 33.0, 5, 0], [134.0, 51.0, 134.0, 106.0, 3, 0], [134.0, 106.0, 181.0, 106.0, 3, 1], [181.0, 106.0, 181.0, 51.0, 3, 0], [181.0, 51.0, 134.0, 51.0, 3, 0], [134.0, 145.0, 134.0, 202.0, 3, 0], [134.0, 202.0, 171.0, 202.0, 3, 0], [171.0, 202.0, 171.0, 145.0, 3, 0], [171.0, 145.0, 134.0, 145.0, 3, 1], [152.0, 112.0, 152.0, 139.0, 4, 1], [152.0, 139.0, 181.0, 139.0, 4, 0], [181.0, 139.0, 181.0, 112.0, 4, 0], [181.0, 112.0, 152.0, 112.0, 4, 0], [93.0, 177.0, 93.0, 223.0, 2, 0], [93.0, 223.0, 128.0, 223.0, 2, 0], [128.0, 223.0, 128.0, 177.0, 2, 0], [128.0, 177.0, 93.0, 177.0, 2, 1], [75.0, 51.0, 75.0, 139.0, 1, 0], [75.0, 139.0, 93.0, 139.0, 1, 0], [93.0, 139.0, 93.0, 171.0, 1, 0], [93.0, 171.0, 128.0, 171.0, 1, 2], [128.0, 171.0, 128.0, 139.0, 1, 0], [128.0, 139.0, 146.0, 139.0, 1, 3], [146.0, 139.0, 146.0, 112.0, 1, 4], [146.0, 112.0, 128.0, 112.0, 1, 3], [128.0, 112.0, 128.0, 51.0, 1, 0], [128.0, 51.0, 75.0, 51.0, 1, 5], [133.0, 107.0, 133.0, 111.0, 17, 0], [133.0, 111.0, 144.0, 111.0, 17, 1], [144.0, 111.0, 144.0, 107.0, 17, 0], [144.0, 107.0, 133.0, 107.0, 17, 3], [81.0, 46.0, 81.0, 50.0, 17, 0], [81.0, 50.0, 121.0, 50.0, 17, 1], [121.0, 50.0, 121.0, 46.0, 17, 0], [121.0, 46.0, 81.0, 46.0, 17, 5], [133.0, 140.0, 133.0, 144.0, 17, 0], [133.0, 144.0, 144.0, 144.0, 17, 3], [144.0, 144.0, 144.0, 140.0, 17, 0], [144.0, 140.0, 133.0, 140.0, 17, 1], [147.0, 115.0, 147.0, 125.0, 17, 1], [147.0, 125.0, 151.0, 125.0, 17, 0], [151.0, 125.0, 151.0, 115.0, 17, 4], [151.0, 115.0, 147.0, 115.0, 17, 0], [116.0, 172.0, 116.0, 176.0, 17, 0], [116.0, 176.0, 126.0, 176.0, 17, 2], [126.0, 176.0, 126.0, 172.0, 17, 0], [126.0, 172.0, 116.0, 172.0, 17, 1], [74.0, 140.0, 74.0, 144.0, 15, 0], [74.0, 144.0, 86.0, 144.0, 15, 0], [86.0, 144.0, 86.0, 140.0, 15, 0], [86.0, 140.0, 74.0, 140.0, 15, 1]], "ed_rm": [[0], [0, 5], [0], [0], [1], [1, 5], [1], [1], [2], [2], [2], [2, 5], [3, 5], [3], [3], [3], [4], [4], [4], [4, 5], [5], [5], [5], [5, 4], [5], [5, 2], [5, 3], [5, 1], [5], [5, 0], [6], [6, 5], [6], [6, 1], [7], [7, 5], [7], [7, 0], [8], [8, 2], [8], [8, 5], [9, 5], [9], [9, 3], [9], [10], [10, 4], [10], [10, 5], [11], [11], [11], [11, 5]]}
--------------------------------------------------------------------------------
/data/json/36233.json:
--------------------------------------------------------------------------------
1 | {"room_type": [3, 1, 3, 4, 2, 17, 17, 17, 17, 15], "boxes": [[85.0, 128.0, 121.0, 174.0], [64.0, 68.0, 145.0, 137.0], [125.0, 128.0, 168.0, 196.0], [149.0, 91.0, 192.0, 124.0], [149.0, 60.0, 192.0, 95.0], [107.0, 125.0, 119.0, 127.0], [128.0, 125.0, 139.0, 127.0], [146.0, 109.0, 148.0, 122.0], [146.0, 68.0, 148.0, 82.0], [123.0, 65.0, 147.0, 67.0]], "edges": [[85.0, 128.0, 85.0, 174.0, 3, 0], [85.0, 174.0, 121.0, 174.0, 3, 0], [121.0, 174.0, 121.0, 128.0, 3, 0], [121.0, 128.0, 85.0, 128.0, 3, 1], [64.0, 68.0, 64.0, 137.0, 1, 0], [64.0, 137.0, 81.0, 137.0, 1, 0], [81.0, 137.0, 81.0, 124.0, 1, 0], [81.0, 124.0, 145.0, 124.0, 1, 3], [145.0, 124.0, 145.0, 68.0, 1, 2], [145.0, 68.0, 64.0, 68.0, 1, 0], [125.0, 128.0, 125.0, 196.0, 3, 0], [125.0, 196.0, 168.0, 196.0, 3, 0], [168.0, 196.0, 168.0, 128.0, 3, 0], [168.0, 128.0, 125.0, 128.0, 3, 1], [149.0, 91.0, 149.0, 124.0, 4, 1], [149.0, 124.0, 192.0, 124.0, 4, 0], [192.0, 124.0, 192.0, 99.0, 4, 0], [192.0, 99.0, 168.0, 99.0, 4, 0], [168.0, 99.0, 168.0, 91.0, 4, 0], [168.0, 91.0, 149.0, 91.0, 4, 0], [172.0, 60.0, 172.0, 68.0, 2, 0], [172.0, 68.0, 149.0, 68.0, 2, 0], [149.0, 68.0, 149.0, 87.0, 2, 1], [149.0, 87.0, 172.0, 87.0, 2, 0], [172.0, 87.0, 172.0, 95.0, 2, 0], [172.0, 95.0, 192.0, 95.0, 2, 0], [192.0, 95.0, 192.0, 60.0, 2, 0], [192.0, 60.0, 172.0, 60.0, 2, 0], [107.0, 125.0, 107.0, 127.0, 17, 0], [107.0, 127.0, 119.0, 127.0, 17, 3], [119.0, 127.0, 119.0, 125.0, 17, 0], [119.0, 125.0, 107.0, 125.0, 17, 1], [128.0, 125.0, 128.0, 127.0, 17, 0], [128.0, 127.0, 139.0, 127.0, 17, 3], [139.0, 127.0, 139.0, 125.0, 17, 0], [139.0, 125.0, 128.0, 125.0, 17, 1], [146.0, 109.0, 146.0, 122.0, 17, 1], [146.0, 122.0, 148.0, 122.0, 17, 0], [148.0, 122.0, 148.0, 109.0, 17, 4], [148.0, 109.0, 146.0, 109.0, 17, 0], [146.0, 68.0, 146.0, 82.0, 17, 1], [146.0, 82.0, 148.0, 82.0, 17, 0], [148.0, 82.0, 148.0, 68.0, 17, 2], [148.0, 68.0, 146.0, 68.0, 17, 0], [123.0, 65.0, 123.0, 67.0, 15, 0], [123.0, 67.0, 147.0, 67.0, 15, 1], [147.0, 67.0, 147.0, 65.0, 15, 0], [147.0, 65.0, 123.0, 65.0, 15, 0]], "ed_rm": [[0], [0], [0], [0, 1], [1], [1], [1], [1, 2], [1, 4], [1], [2], [2], [2], [2, 1], [3, 1], [3], [3], [3], [3], [3], [4], [4], [4, 1], [4], [4], [4], [4], [4], [5], [5, 0], [5], [5, 1], [6], [6, 2], [6], [6, 1], [7, 1], [7], [7, 3], [7], [8, 1], [8], [8, 4], [8], [9], [9, 1], [9], [9]]}
--------------------------------------------------------------------------------
/data/json/45012.json:
--------------------------------------------------------------------------------
1 | {"room_type": [5, 3, 3, 4, 2, 1, 17, 17, 17, 17, 17, 15], "boxes": [[70.0, 52.0, 127.0, 67.0], [131.0, 71.0, 175.0, 109.0], [143.0, 113.0, 186.0, 150.0], [131.0, 154.0, 175.0, 175.0], [131.0, 179.0, 175.0, 204.0], [70.0, 71.0, 139.0, 204.0], [129.0, 110.0, 137.0, 112.0], [80.0, 68.0, 114.0, 70.0], [140.0, 116.0, 142.0, 123.0], [129.0, 151.0, 137.0, 153.0], [128.0, 182.0, 130.0, 190.0], [111.0, 205.0, 125.0, 207.0]], "edges": [[70.0, 52.0, 70.0, 67.0, 5, 0], [70.0, 67.0, 127.0, 67.0, 5, 1], [127.0, 67.0, 127.0, 52.0, 5, 0], [127.0, 52.0, 70.0, 52.0, 5, 0], [131.0, 71.0, 131.0, 109.0, 3, 0], [131.0, 109.0, 175.0, 109.0, 3, 1], [175.0, 109.0, 175.0, 71.0, 3, 0], [175.0, 71.0, 131.0, 71.0, 3, 0], [143.0, 113.0, 143.0, 150.0, 3, 1], [143.0, 150.0, 186.0, 150.0, 3, 0], [186.0, 150.0, 186.0, 113.0, 3, 0], [186.0, 113.0, 143.0, 113.0, 3, 0], [131.0, 154.0, 131.0, 175.0, 4, 0], [131.0, 175.0, 175.0, 175.0, 4, 0], [175.0, 175.0, 175.0, 154.0, 4, 0], [175.0, 154.0, 131.0, 154.0, 4, 1], [131.0, 179.0, 131.0, 204.0, 2, 1], [131.0, 204.0, 175.0, 204.0, 2, 0], [175.0, 204.0, 175.0, 179.0, 2, 0], [175.0, 179.0, 131.0, 179.0, 2, 0], [70.0, 71.0, 70.0, 162.0, 1, 0], [70.0, 162.0, 107.0, 162.0, 1, 0], [107.0, 162.0, 107.0, 204.0, 1, 0], [107.0, 204.0, 127.0, 204.0, 1, 0], [127.0, 204.0, 127.0, 150.0, 1, 2], [127.0, 150.0, 139.0, 150.0, 1, 4], [139.0, 150.0, 139.0, 113.0, 1, 3], [139.0, 113.0, 127.0, 113.0, 1, 3], [127.0, 113.0, 127.0, 71.0, 1, 0], [127.0, 71.0, 70.0, 71.0, 1, 5], [129.0, 110.0, 129.0, 112.0, 17, 0], [129.0, 112.0, 137.0, 112.0, 17, 1], [137.0, 112.0, 137.0, 110.0, 17, 0], [137.0, 110.0, 129.0, 110.0, 17, 3], [80.0, 68.0, 80.0, 70.0, 17, 0], [80.0, 70.0, 114.0, 70.0, 17, 1], [114.0, 70.0, 114.0, 68.0, 17, 0], [114.0, 68.0, 80.0, 68.0, 17, 5], [140.0, 116.0, 140.0, 123.0, 17, 1], [140.0, 123.0, 142.0, 123.0, 17, 0], [142.0, 123.0, 142.0, 116.0, 17, 3], [142.0, 116.0, 140.0, 116.0, 17, 0], [129.0, 151.0, 129.0, 153.0, 17, 0], [129.0, 153.0, 137.0, 153.0, 17, 4], [137.0, 153.0, 137.0, 151.0, 17, 0], [137.0, 151.0, 129.0, 151.0, 17, 1], [128.0, 182.0, 128.0, 190.0, 17, 1], [128.0, 190.0, 130.0, 190.0, 17, 0], [130.0, 190.0, 130.0, 182.0, 17, 2], [130.0, 182.0, 128.0, 182.0, 17, 0], [111.0, 205.0, 111.0, 207.0, 15, 0], [111.0, 207.0, 125.0, 207.0, 15, 0], [125.0, 207.0, 125.0, 205.0, 15, 0], [125.0, 205.0, 111.0, 205.0, 15, 1]], "ed_rm": [[0], [0, 5], [0], [0], [1], [1, 5], [1], [1], [2, 5], [2], [2], [2], [3], [3], [3], [3, 5], [4, 5], [4], [4], [4], [5], [5], [5], [5], [5, 4], [5, 3], [5, 2], [5, 1], [5], [5, 0], [6], [6, 5], [6], [6, 1], [7], [7, 5], [7], [7, 0], [8, 5], [8], [8, 2], [8], [9], [9, 3], [9], [9, 5], [10, 5], [10], [10, 4], [10], [11], [11], [11], [11, 5]]}
--------------------------------------------------------------------------------
/data/json/45161.json:
--------------------------------------------------------------------------------
1 | {"room_type": [3, 5, 3, 3, 4, 2, 1, 17, 17, 17, 17, 17, 17, 15], "boxes": [[36.0, 124.0, 89.0, 170.0], [93.0, 174.0, 150.0, 196.0], [154.0, 117.0, 220.0, 170.0], [172.0, 60.0, 220.0, 113.0], [132.0, 60.0, 168.0, 96.0], [88.0, 63.0, 128.0, 96.0], [88.0, 100.0, 185.0, 170.0], [101.0, 171.0, 143.0, 173.0], [90.0, 123.0, 92.0, 137.0], [153.0, 114.0, 167.0, 116.0], [186.0, 100.0, 188.0, 111.0], [136.0, 97.0, 164.0, 99.0], [95.0, 97.0, 123.0, 99.0], [85.0, 104.0, 87.0, 118.0]], "edges": [[36.0, 124.0, 36.0, 170.0, 3, 0], [36.0, 170.0, 89.0, 170.0, 3, 0], [89.0, 170.0, 89.0, 124.0, 3, 1], [89.0, 124.0, 36.0, 124.0, 3, 0], [93.0, 174.0, 93.0, 196.0, 5, 0], [93.0, 196.0, 150.0, 196.0, 5, 0], [150.0, 196.0, 150.0, 174.0, 5, 0], [150.0, 174.0, 93.0, 174.0, 5, 1], [154.0, 117.0, 154.0, 170.0, 3, 0], [154.0, 170.0, 220.0, 170.0, 3, 0], [220.0, 170.0, 220.0, 117.0, 3, 0], [220.0, 117.0, 154.0, 117.0, 3, 1], [172.0, 60.0, 172.0, 96.0, 3, 0], [172.0, 96.0, 189.0, 96.0, 3, 0], [189.0, 96.0, 189.0, 113.0, 3, 1], [189.0, 113.0, 220.0, 113.0, 3, 0], [220.0, 113.0, 220.0, 60.0, 3, 0], [220.0, 60.0, 172.0, 60.0, 3, 0], [132.0, 60.0, 132.0, 96.0, 4, 0], [132.0, 96.0, 168.0, 96.0, 4, 1], [168.0, 96.0, 168.0, 60.0, 4, 0], [168.0, 60.0, 132.0, 60.0, 4, 0], [88.0, 63.0, 88.0, 96.0, 2, 0], [88.0, 96.0, 128.0, 96.0, 2, 1], [128.0, 96.0, 128.0, 63.0, 2, 0], [128.0, 63.0, 88.0, 63.0, 2, 0], [88.0, 100.0, 88.0, 120.0, 1, 0], [88.0, 120.0, 94.0, 120.0, 1, 0], [94.0, 120.0, 94.0, 124.0, 1, 0], [94.0, 124.0, 93.0, 124.0, 1, 0], [93.0, 124.0, 93.0, 170.0, 1, 3], [93.0, 170.0, 150.0, 170.0, 1, 5], [150.0, 170.0, 150.0, 113.0, 1, 0], [150.0, 113.0, 185.0, 113.0, 1, 3], [185.0, 113.0, 185.0, 100.0, 1, 3], [185.0, 100.0, 88.0, 100.0, 1, 2], [101.0, 171.0, 101.0, 173.0, 17, 0], [101.0, 173.0, 143.0, 173.0, 17, 5], [143.0, 173.0, 143.0, 171.0, 17, 0], [143.0, 171.0, 101.0, 171.0, 17, 1], [90.0, 123.0, 90.0, 137.0, 17, 3], [90.0, 137.0, 92.0, 137.0, 17, 0], [92.0, 137.0, 92.0, 123.0, 17, 1], [92.0, 123.0, 90.0, 123.0, 17, 0], [153.0, 114.0, 153.0, 116.0, 17, 3], [153.0, 116.0, 167.0, 116.0, 17, 0], [167.0, 116.0, 167.0, 114.0, 17, 0], [167.0, 114.0, 153.0, 114.0, 17, 1], [186.0, 100.0, 186.0, 111.0, 17, 1], [186.0, 111.0, 188.0, 111.0, 17, 0], [188.0, 111.0, 188.0, 100.0, 17, 3], [188.0, 100.0, 186.0, 100.0, 17, 0], [136.0, 97.0, 136.0, 99.0, 17, 0], [136.0, 99.0, 164.0, 99.0, 17, 1], [164.0, 99.0, 164.0, 97.0, 17, 0], [164.0, 97.0, 136.0, 97.0, 17, 4], [95.0, 97.0, 95.0, 99.0, 17, 0], [95.0, 99.0, 123.0, 99.0, 17, 1], [123.0, 99.0, 123.0, 97.0, 17, 0], [123.0, 97.0, 95.0, 97.0, 17, 2], [85.0, 104.0, 85.0, 118.0, 15, 0], [85.0, 118.0, 87.0, 118.0, 15, 0], [87.0, 118.0, 87.0, 104.0, 15, 1], [87.0, 104.0, 85.0, 104.0, 15, 0]], "ed_rm": [[0], [0], [0, 6], [0], [1], [1], [1], [1, 6], [2], [2], [2], [2, 6], [3], [3], [3, 6], [3], [3], [3], [4], [4, 6], [4], [4], [5], [5, 6], [5], [5], [6], [6], [6], [6], [6, 0], [6, 1], [6], [6, 2], [6, 3], [6, 5], [7], [7, 1], [7], [7, 6], [8, 0], [8], [8, 6], [8], [9, 2], [9], [9], [9, 6], [10, 6], [10], [10, 3], [10], [11], [11, 6], [11], [11, 4], [12], [12, 6], [12], [12, 5], [13], [13], [13, 6], [13]]}
--------------------------------------------------------------------------------
/data/json/7513.json:
--------------------------------------------------------------------------------
1 | {"room_type": [3, 5, 3, 5, 4, 1, 3, 2, 17, 17, 17, 17, 17, 17, 17, 15], "boxes": [[109.0, 58.0, 137.0, 119.0], [143.0, 35.0, 191.0, 52.0], [65.0, 144.0, 109.0, 197.0], [65.0, 203.0, 109.0, 216.0], [65.0, 110.0, 87.0, 138.0], [93.0, 58.0, 191.0, 204.0], [65.0, 58.0, 103.0, 119.0], [140.0, 203.0, 170.0, 221.0], [68.0, 198.0, 102.0, 202.0], [70.0, 139.0, 82.0, 143.0], [146.0, 53.0, 192.0, 57.0], [112.0, 120.0, 123.0, 124.0], [96.0, 139.0, 108.0, 143.0], [96.0, 120.0, 101.0, 124.0], [159.0, 198.0, 171.0, 202.0], [177.0, 205.0, 189.0, 209.0]], "edges": [[109.0, 58.0, 109.0, 119.0, 3, 0], [109.0, 119.0, 137.0, 119.0, 3, 1], [137.0, 119.0, 137.0, 58.0, 3, 0], [137.0, 58.0, 109.0, 58.0, 3, 0], [143.0, 35.0, 143.0, 52.0, 5, 0], [143.0, 52.0, 191.0, 52.0, 5, 1], [191.0, 52.0, 191.0, 35.0, 5, 0], [191.0, 35.0, 143.0, 35.0, 5, 0], [65.0, 144.0, 65.0, 197.0, 3, 0], [65.0, 197.0, 109.0, 197.0, 3, 5], [109.0, 197.0, 109.0, 144.0, 3, 0], [109.0, 144.0, 65.0, 144.0, 3, 1], [65.0, 203.0, 65.0, 216.0, 5, 0], [65.0, 216.0, 109.0, 216.0, 5, 0], [109.0, 216.0, 109.0, 203.0, 5, 0], [109.0, 203.0, 65.0, 203.0, 5, 3], [65.0, 110.0, 65.0, 138.0, 4, 0], [65.0, 138.0, 87.0, 138.0, 4, 3], [87.0, 138.0, 87.0, 110.0, 4, 0], [87.0, 110.0, 65.0, 110.0, 4, 0], [143.0, 58.0, 143.0, 125.0, 1, 0], [143.0, 125.0, 93.0, 125.0, 1, 3], [93.0, 125.0, 93.0, 138.0, 1, 0], [93.0, 138.0, 115.0, 138.0, 1, 3], [115.0, 138.0, 115.0, 197.0, 1, 0], [115.0, 197.0, 176.0, 197.0, 1, 2], [176.0, 197.0, 176.0, 204.0, 1, 0], [176.0, 204.0, 191.0, 204.0, 1, 0], [191.0, 204.0, 191.0, 58.0, 1, 0], [191.0, 58.0, 143.0, 58.0, 1, 5], [65.0, 58.0, 65.0, 104.0, 3, 0], [65.0, 104.0, 93.0, 104.0, 3, 0], [93.0, 104.0, 93.0, 119.0, 3, 0], [93.0, 119.0, 103.0, 119.0, 3, 1], [103.0, 119.0, 103.0, 58.0, 3, 0], [103.0, 58.0, 65.0, 58.0, 3, 0], [140.0, 203.0, 140.0, 221.0, 2, 0], [140.0, 221.0, 170.0, 221.0, 2, 0], [170.0, 221.0, 170.0, 203.0, 2, 0], [170.0, 203.0, 140.0, 203.0, 2, 1], [68.0, 198.0, 68.0, 202.0, 17, 0], [68.0, 202.0, 102.0, 202.0, 17, 5], [102.0, 202.0, 102.0, 198.0, 17, 0], [102.0, 198.0, 68.0, 198.0, 17, 3], [70.0, 139.0, 70.0, 143.0, 17, 0], [70.0, 143.0, 82.0, 143.0, 17, 3], [82.0, 143.0, 82.0, 139.0, 17, 0], [82.0, 139.0, 70.0, 139.0, 17, 4], [146.0, 53.0, 146.0, 57.0, 17, 0], [146.0, 57.0, 192.0, 57.0, 17, 1], [192.0, 57.0, 192.0, 53.0, 17, 0], [192.0, 53.0, 146.0, 53.0, 17, 5], [112.0, 120.0, 112.0, 124.0, 17, 0], [112.0, 124.0, 123.0, 124.0, 17, 1], [123.0, 124.0, 123.0, 120.0, 17, 0], [123.0, 120.0, 112.0, 120.0, 17, 3], [96.0, 139.0, 96.0, 143.0, 17, 0], [96.0, 143.0, 108.0, 143.0, 17, 3], [108.0, 143.0, 108.0, 139.0, 17, 0], [108.0, 139.0, 96.0, 139.0, 17, 1], [96.0, 120.0, 96.0, 124.0, 17, 0], [96.0, 124.0, 101.0, 124.0, 17, 1], [101.0, 124.0, 101.0, 120.0, 17, 0], [101.0, 120.0, 96.0, 120.0, 17, 3], [159.0, 198.0, 159.0, 202.0, 17, 0], [159.0, 202.0, 171.0, 202.0, 17, 2], [171.0, 202.0, 171.0, 198.0, 17, 0], [171.0, 198.0, 159.0, 198.0, 17, 1], [177.0, 205.0, 177.0, 209.0, 15, 0], [177.0, 209.0, 189.0, 209.0, 15, 0], [189.0, 209.0, 189.0, 205.0, 15, 0], [189.0, 205.0, 177.0, 205.0, 15, 1]], "ed_rm": [[0], [0, 5], [0], [0], [1], [1, 5], [1], [1], [2], [2, 3], [2], [2, 5], [3], [3], [3], [3, 2], [4], [4, 2], [4], [4], [5], [5, 6], [5], [5, 2], [5], [5, 7], [5], [5], [5], [5, 1], [6], [6], [6], [6, 5], [6], [6], [7], [7], [7], [7, 5], [8], [8, 3], [8], [8, 2], [9], [9, 2], [9], [9, 4], [10], [10, 5], [10], [10, 1], [11], [11, 5], [11], [11, 0], [12], [12, 2], [12], [12, 5], [13], [13, 5], [13], [13, 6], [14], [14, 7], [14], [14, 
5], [15], [15], [15], [15, 5]]}
--------------------------------------------------------------------------------
/data/sample_list.txt:
--------------------------------------------------------------------------------
1 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/21532.json
2 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/56608.json
3 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/41375.json
4 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/69225.json
5 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/31360.json
6 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/22963.json
7 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/32249.json
8 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/74865.json
9 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/72962.json
10 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/31706.json
11 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/60634.json
12 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/65743.json
13 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/37662.json
14 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/64408.json
15 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/72878.json
16 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/46783.json
17 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/64256.json
18 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/59230.json
19 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/59950.json
20 | /home/akmal/APIIT/FYP Code/Housegan-data-reader/sample_output/37627.json
--------------------------------------------------------------------------------
/dataset/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sakmalh/houseganpp/8e6b0b9eaa8f32fdef2fd31a4d3ea4e621728237/dataset/__init__.py
--------------------------------------------------------------------------------
/fix_script.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sakmalh/houseganpp/8e6b0b9eaa8f32fdef2fd31a4d3ea4e621728237/fix_script.py
--------------------------------------------------------------------------------
/misc/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sakmalh/houseganpp/8e6b0b9eaa8f32fdef2fd31a4d3ea4e621728237/misc/__init__.py
--------------------------------------------------------------------------------
/misc/arial.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sakmalh/houseganpp/8e6b0b9eaa8f32fdef2fd31a4d3ea4e621728237/misc/arial.ttf
--------------------------------------------------------------------------------
/misc/clustering_tsne.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import glob
3 | import matplotlib.pyplot as plt
4 | from collections import defaultdict
5 | import operator
6 | from sklearn.cluster import KMeans
7 | from models.models_exp_high_res import Autoencoder
8 | import torch
9 | import torch.nn.functional as F
10 | from torchvision.utils import save_image
11 | from sklearn.manifold import TSNE
12 | from sklearn.decomposition import PCA
13 |
14 | autoencoderTest = Autoencoder()
15 | autoencoderTest.load_state_dict(torch.load('./checkpoints/exp_autoencoder_A_72900_ae.pth'))
16 | autoencoderTest = autoencoderTest.eval()
17 | autoencoderTest.cuda()
18 |
19 | # function returns WSS score for k values from 1 to kmax
20 | def calculate_WSS(points, kmax):
21 | sse = []
22 | for k in range(1, kmax+1):
23 | kmeans = KMeans(n_clusters = k).fit(points)
24 | centroids = kmeans.cluster_centers_
25 | pred_clusters = kmeans.predict(points)
26 | curr_sse = 0
27 |
28 | # calculate square of Euclidean distance of each point from its cluster center and add to current WSS
29 | for i in range(len(points)):
30 | curr_center = centroids[pred_clusters[i]]
31 | curr_sse += (points[i, 0] - curr_center[0]) ** 2 + (points[i, 1] - curr_center[1]) ** 2
32 |
33 | sse.append(curr_sse)
34 | return sse
35 |
36 | def extract_features(mks, _types):
37 |
38 | mks_tensor = torch.tensor(mks).float().cuda().unsqueeze(1)
39 | mks_tensor[mks_tensor<=0] = 0.0
40 | mks_tensor[mks_tensor>0] = 1.0
41 | gen_rec, feat = autoencoderTest(mks_tensor)
42 | gen_rec = torch.sigmoid(gen_rec)
43 |
44 | inds = np.where(_types <= 10)[0]
45 | feat = feat[inds, :]
46 | feat = feat.view(-1)
47 |
48 | # ### DEBUG
49 | # all_images = []
50 | # for m1, m2 in zip(mks_tensor, gen_rec):
51 | # all_images.append(m1)
52 | # all_images.append(m2)
53 | # save_image(all_images, "./test.png", padding=2, pad_value=255, nrow=32, scale_each=True, normalize=True)
54 |
55 | return feat
56 |
57 | def compute_cm(mks):
58 | cms = []
59 | for m in mks:
60 | yc, xc = np.mean(np.where(m>0), -1)
61 | cms.append((yc, xc))
62 | return cms
63 |
64 | # list files
65 | fp_files = glob.glob('./clustering_exp/floorplans_output/*.npy')
66 |
67 | # get node with most connections
68 | data = np.load(fp_files[0], allow_pickle=True).item()
69 | nds, eds = data['nodes'], data['edges']
70 | counts = defaultdict(int)
71 | _types = np.where(nds==1)[1]
72 | for e in eds:
73 | if e[1] > 0:
74 | n1, n2 = int(e[0]), int(e[2])
75 | if _types[n1] <= 10 and _types[n2] <= 10:
76 | counts[n1] += 1
77 | counts[n2] += 1
78 | sorted_x = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
79 | n_max = sorted_x[0]
80 |
81 | # get centers
82 | fp_info = []
83 | for fname in fp_files:
84 | l = fname.split('/')[-1].split('_')[0]
85 | data = np.load(fname, allow_pickle=True).item()
86 | mks = data['masks']
87 | feat = extract_features(mks, _types)
88 | cms = compute_cm(mks)
89 | fp_info.append([int(l), cms, mks, feat])
90 |
91 | X = np.array([feat.detach().cpu().numpy() for _, _, _, feat in fp_info])
92 | labels = np.array([l for l, _, _, _ in fp_info])
93 | X_embedded = TSNE(n_components=2, n_iter=5000).fit_transform(X)
94 | # X_embedded = PCA(n_components=2).fit_transform(X)
95 |
96 | # zip joins x and y coordinates in pairs
97 | plt.figure()
98 | colors = []
99 | for x, y, l in zip(X_embedded[:, 0], X_embedded[:, 1] , labels):
100 | label = "{}".format(l)
101 | if l == 999:
102 | plt.annotate(label, # this is the text
103 | (x, y), # this is the point to label
104 | textcoords="offset points", # how to position the text
105 | xytext=(0,10), # distance from text to points (x,y)
106 | ha='center',
107 | color='red') # horizontal alignment can be left, right or center
108 | colors.append('red')
109 |
110 | elif l == 1000:
111 | plt.annotate(label, # this is the text
112 | (x, y), # this is the point to label
113 | textcoords="offset points", # how to position the text
114 | xytext=(0,10), # distance from text to points (x,y)
115 | ha='center',
116 | color='green') # horizontal alignment can be left, right or center
117 | colors.append('green')
118 |
119 | else:
120 | plt.annotate(label, # this is the text
121 | (x, y), # this is the point to label
122 | textcoords="offset points", # how to position the text
123 | xytext=(0,10), # distance from text to points (x,y)
124 | ha='center',
125 | color='blue') # horizontal alignment can be left, right or center
126 | colors.append('blue')
127 |
128 | plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=colors)
129 | plt.savefig('./tsne.png')
130 |
131 | plt.figure()
132 | sse = calculate_WSS(X_embedded, kmax=100)
133 | plt.plot(sse)
134 | plt.savefig('./sse.png')
135 |
136 | plt.figure()
137 | y_pred = KMeans(n_clusters=20).fit_predict(X_embedded)
138 | plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y_pred)
139 | plt.savefig('./clusters.png')
--------------------------------------------------------------------------------
/misc/compatibility_figure.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from PIL import Image, ImageDraw
21 | from reconstruct import reconstructFloorplan
22 | import svgwrite
23 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb, remove_junctions, ID_COLOR, bb_to_im_fid
24 | from models import Generator
25 | from collections import defaultdict
26 | import matplotlib.pyplot as plt
27 | import networkx as nx
28 |
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
31 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
32 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
33 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
34 | parser.add_argument("--num_variations", type=int, default=10, help="number of variations")
35 | parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
36 |
37 | opt = parser.parse_args()
38 | print(opt)
39 |
40 | numb_iters = 200000
41 | exp_name = 'exp_with_graph_global_new'
42 | target_set = 'D'
43 | phase='eval'
44 | checkpoint = './checkpoints/{}_{}_{}.pth'.format(exp_name, target_set, numb_iters)
45 |
46 | def make_sequence(given_nds, given_eds, noise):
47 | n_nodes = given_nds.shape[0]
48 | seq = []
49 | for k in range(n_nodes):
50 | curr_nds = given_nds[:k+1]
51 | curr_noise = noise[:k+1]
52 | curr_eds = []
53 | for i in range(k+1):
54 | for j in range(k+1):
55 | if j > i:
56 | for e in given_eds:
57 | if (e[0] == i and e[2] == j) or (e[2] == i and e[0] == j):
58 | curr_eds.append([i, e[1], j])
59 | curr_eds = torch.tensor(curr_eds)
60 | seq.append([curr_nds, curr_noise, curr_eds])
61 | return seq
62 |
63 | def pad_im(cr_im, final_size=299, bkg_color='white'):
64 | new_size = int(np.max([np.max(list(cr_im.size)), final_size]))
65 | padded_im = Image.new('RGB', (new_size, new_size), 'white')
66 | padded_im.paste(cr_im, ((new_size-cr_im.size[0])//2, (new_size-cr_im.size[1])//2))
67 | padded_im = padded_im.resize((final_size, final_size), Image.ANTIALIAS)
68 | return padded_im
69 |
70 | def draw_graph(g_true):
71 | # build true graph
72 | G_true = nx.Graph()
73 | colors_H = []
74 | for k, label in enumerate(g_true[0]):
75 | _type = label+1
76 | if _type >= 0:
77 | G_true.add_nodes_from([(k, {'label':_type})])
78 | colors_H.append(ID_COLOR[_type])
79 | for k, m, l in g_true[1]:
80 | if m > 0:
81 | G_true.add_edges_from([(k, l)], color='b',weight=4)
82 | plt.figure()
83 | pos = nx.nx_agraph.graphviz_layout(G_true, prog='dot')
84 |
85 | edges = G_true.edges()
86 | colors = ['black' for u,v in edges]
87 | weights = [4 for u,v in edges]
88 |
89 | nx.draw(G_true, pos, node_size=1000, node_color=colors_H, font_size=0, font_weight='bold', edges=edges, edge_color=colors, width=weights)
90 | plt.tight_layout()
91 | plt.savefig('./dump/_true_graph.jpg', format="jpg")
92 | rgb_im = Image.open('./dump/_true_graph.jpg')
93 | rgb_arr = pad_im(rgb_im)
94 | return rgb_arr
95 |
96 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
97 |
98 | # draw edges
99 | for k, l in lines_on:
100 | x1, y1 = np.array(junctions[k])
101 | x2, y2 = np.array(junctions[l])
102 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
103 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=1.0))
104 |
105 | # draw corners
106 | for j in juncs_on:
107 | x, y = np.array(junctions[j])
108 | dwg.add(dwg.circle(center=(float(x), float(y)), r=3, stroke='red', fill='white', stroke_width=2, opacity=1.0))
109 | return
110 |
111 | # Create folder
112 | os.makedirs(opt.exp_folder, exist_ok=True)
113 |
114 | # Initialize generator and discriminator
115 | generator = Generator()
116 | generator.load_state_dict(torch.load(checkpoint))
117 |
118 | # Initialize variables
119 | cuda = True if torch.cuda.is_available() else False
120 | if cuda:
121 | generator.cuda()
122 | rooms_path = '/local-scratch4/nnauata/autodesk/FloorplanDataset/'
123 |
124 | # Initialize dataset iterator
125 | fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split=phase)
126 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
127 | batch_size=opt.batch_size,
128 | shuffle=False, collate_fn=floorplan_collate_fn)
129 | # Optimizers
130 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
131 |
132 | # ------------
133 | # Vectorize
134 | # ------------
135 | globalIndex = 0
136 | final_images = []
137 | target_graph = [47]
138 | for i, batch in enumerate(fp_loader):
139 | if i not in target_graph:
140 | continue
141 |
142 | # Unpack batch
143 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
144 |
145 | # Configure input
146 | real_mks = Variable(mks.type(Tensor))
147 | given_nds = Variable(nds.type(Tensor))
148 | given_eds = eds
149 | noise = Variable(Tensor(np.random.normal(0, 1, (real_mks.shape[0], opt.latent_dim))))
150 | samples = make_sequence(given_nds, given_eds, noise)
151 |
152 |
153 | for k, el in enumerate(samples):
154 |
155 | print('var num {}'.format(k))
156 | given_nds, z, given_eds = el
157 | # plot images
158 | with torch.no_grad():
159 | gen_mks = generator(z, given_nds, given_eds)
160 | gen_bbs = np.array([np.array(mask_to_bb(mk)) for mk in gen_mks.detach().cpu()])
161 | real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
162 | print(real_nodes)
163 | gen_bbs = gen_bbs[np.newaxis, :, :]/32.0
164 | graph = [real_nodes, None]
165 | graph_arr = draw_graph([real_nodes, given_eds.detach().cpu().numpy()])
166 | final_images.append(graph_arr)
167 |
168 | # reconstruct
169 | fake_im = bb_to_im_fid(gen_bbs, real_nodes)
170 | final_images.append(fake_im)
171 |
172 | row = 0
173 | for k, im in enumerate(final_images):
174 | path = './figure_seq/var_{}/'.format(row)
175 | os.makedirs(path, exist_ok=True)
176 | im.save('{}/{}.jpg'.format(path, k))
177 | if (k+1) % 20 == 0:
178 | row+=1
179 | # final_images = torch.stack(final_images).transpose(1, 3)
180 | # save_image(final_images, "./output/rendered_{}.png".format(target_set), nrow=opt.num_variations+1)
181 |
--------------------------------------------------------------------------------
/misc/compute_FID.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from PIL import Image, ImageDraw
21 | from reconstruct import reconstructFloorplan
22 | import svgwrite
23 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb, remove_junctions, ID_COLOR, bb_to_im_fid
24 | from models import Generator
25 | from collections import defaultdict
26 | import matplotlib.pyplot as plt
27 | import networkx as nx
28 |
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
31 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
32 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
33 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
34 | parser.add_argument("--num_variations", type=int, default=10, help="number of variations")
35 | parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
36 |
37 | opt = parser.parse_args()
38 | print(opt)
39 |
40 | numb_iters = 200000
41 | exp_name = 'exp_with_graph_global_new'
42 | target_set = 'E'
43 | phase='eval'
44 | checkpoint = './checkpoints/{}_{}_{}.pth'.format(exp_name, target_set, numb_iters)
45 |
46 | # Create folder
47 | path_real = './FID/{}_{}/real'.format(exp_name, target_set)
48 | path_fake = './FID/{}_{}/fake'.format(exp_name, target_set)
49 | os.makedirs(path_real, exist_ok=True)
50 | os.makedirs(path_fake, exist_ok=True)
51 |
52 | # Initialize generator and discriminator
53 | generator = Generator()
54 | generator.load_state_dict(torch.load(checkpoint))
55 |
56 | # Initialize variables
57 | cuda = True if torch.cuda.is_available() else False
58 | if cuda:
59 | generator.cuda()
60 | rooms_path = '/home/nelson/Workspace/autodesk/autodesk/FloorplanDataset/'
61 |
62 | # Initialize dataset iterator
63 | fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split=phase)
64 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
65 | batch_size=opt.batch_size,
66 | shuffle=True, collate_fn=floorplan_collate_fn)
67 | # Optimizers
68 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
69 |
70 | # ------------
71 | # Vectorize
72 | # ------------
73 | globalIndexReal = 0
74 | globalIndexFake = 0
75 | final_images = []
76 | for i, batch in enumerate(fp_loader):
77 | print(i)
78 | # if i >= 100:
79 | # break
80 |
81 | # Unpack batch
82 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
83 |
84 | # Configure input
85 | real_mks = Variable(mks.type(Tensor))
86 | given_nds = Variable(nds.type(Tensor))
87 | given_eds = eds
88 | for k in range(opt.num_variations):
89 | # plot images
90 | z = Variable(Tensor(np.random.normal(0, 1, (real_mks.shape[0], opt.latent_dim))))
91 | with torch.no_grad():
92 | gen_mks = generator(z, given_nds, given_eds.cuda())
93 | gen_bbs = np.array([np.array(mask_to_bb(mk)) for mk in gen_mks.detach().cpu()])
94 | real_bbs = np.array([np.array(mask_to_bb(mk)) for mk in real_mks.detach().cpu()])
95 | real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
96 |
97 | if k == 0:
98 | real_bbs = real_bbs[np.newaxis, :, :]/32.0
99 | real_im = bb_to_im_fid(real_bbs, real_nodes)
100 | real_im.save('{}/{}.jpg'.format(path_real, globalIndexReal))
101 | globalIndexReal += 1
102 |
103 | # draw vector
104 | gen_bbs = gen_bbs[np.newaxis, :, :]/32.0
105 | fake_im = bb_to_im_fid(gen_bbs, real_nodes)
106 | fake_im.save('{}/{}.jpg'.format(path_fake, globalIndexFake))
107 | globalIndexFake += 1
--------------------------------------------------------------------------------
/misc/convert_to_onnx.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.init as init
4 | from models.models_exp_high_res import Generator
5 | import onnx
6 | import onnxruntime
7 | import numpy as np
8 | from viz import draw_graph, draw_masks
9 | import matplotlib.pyplot as plt
10 |
11 | # Initialize model
12 | checkpoint = '../exp_random_types_attempt_3_A_500000_G.pth'
13 | generator = Generator()
14 | generator.load_state_dict(torch.load(checkpoint), strict=False)
15 | generator = generator.eval()
16 |
17 | # Convert by tracing input
18 | z = torch.randn(16, 128, requires_grad=True)
19 | given_masks = torch.full((16, 1, 64, 64), -1.0, requires_grad=True)
20 | inds_masks = torch.zeros_like(given_masks)
21 | given_masks_in = torch.cat([given_masks, inds_masks], 1)
22 |
23 | given_nds = torch.tensor([[0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
24 | [0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
25 | [0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
26 | [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
27 | [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
28 | [0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
29 | [0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
30 | [1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
31 | [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
32 | [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
33 | [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
34 | [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
35 | [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
36 | [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
37 | [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
38 | [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]],
39 | device='cpu')
40 |
41 | given_eds = torch.tensor([[ 0, -1, 1],
42 | [ 0, -1, 2],
43 | [ 0, -1, 3],
44 | [ 0, -1, 4],
45 | [ 0, -1, 5],
46 | [ 0, -1, 6],
47 | [ 0, 1, 7],
48 | [ 0, -1, 8],
49 | [ 0, -1, 9],
50 | [ 0, -1, 10],
51 | [ 0, 1, 11],
52 | [ 0, -1, 12],
53 | [ 0, -1, 13],
54 | [ 0, -1, 14],
55 | [ 0, -1, 15],
56 | [ 1, -1, 2],
57 | [ 1, -1, 3],
58 | [ 1, -1, 4],
59 | [ 1, -1, 5],
60 | [ 1, -1, 6],
61 | [ 1, 1, 7],
62 | [ 1, -1, 8],
63 | [ 1, -1, 9],
64 | [ 1, 1, 10],
65 | [ 1, -1, 11],
66 | [ 1, -1, 12],
67 | [ 1, -1, 13],
68 | [ 1, -1, 14],
69 | [ 1, -1, 15],
70 | [ 2, -1, 3],
71 | [ 2, -1, 4],
72 | [ 2, 1, 5],
73 | [ 2, -1, 6],
74 | [ 2, -1, 7],
75 | [ 2, 1, 8],
76 | [ 2, -1, 9],
77 | [ 2, -1, 10],
78 | [ 2, -1, 11],
79 | [ 2, -1, 12],
80 | [ 2, -1, 13],
81 | [ 2, -1, 14],
82 | [ 2, -1, 15],
83 | [ 3, -1, 4],
84 | [ 3, -1, 5],
85 | [ 3, -1, 6],
86 | [ 3, 1, 7],
87 | [ 3, -1, 8],
88 | [ 3, -1, 9],
89 | [ 3, -1, 10],
90 | [ 3, -1, 11],
91 | [ 3, 1, 12],
92 | [ 3, -1, 13],
93 | [ 3, -1, 14],
94 | [ 3, -1, 15],
95 | [ 4, -1, 5],
96 | [ 4, 1, 6],
97 | [ 4, -1, 7],
98 | [ 4, -1, 8],
99 | [ 4, 1, 9],
100 | [ 4, -1, 10],
101 | [ 4, -1, 11],
102 | [ 4, -1, 12],
103 | [ 4, -1, 13],
104 | [ 4, -1, 14],
105 | [ 4, -1, 15],
106 | [ 5, -1, 6],
107 | [ 5, 1, 7],
108 | [ 5, 1, 8],
109 | [ 5, -1, 9],
110 | [ 5, -1, 10],
111 | [ 5, -1, 11],
112 | [ 5, -1, 12],
113 | [ 5, 1, 13],
114 | [ 5, -1, 14],
115 | [ 5, -1, 15],
116 | [ 6, 1, 7],
117 | [ 6, -1, 8],
118 | [ 6, 1, 9],
119 | [ 6, -1, 10],
120 | [ 6, -1, 11],
121 | [ 6, -1, 12],
122 | [ 6, -1, 13],
123 | [ 6, 1, 14],
124 | [ 6, -1, 15],
125 | [ 7, -1, 8],
126 | [ 7, -1, 9],
127 | [ 7, 1, 10],
128 | [ 7, 1, 11],
129 | [ 7, 1, 12],
130 | [ 7, 1, 13],
131 | [ 7, 1, 14],
132 | [ 7, 1, 15],
133 | [ 8, -1, 9],
134 | [ 8, -1, 10],
135 | [ 8, -1, 11],
136 | [ 8, -1, 12],
137 | [ 8, -1, 13],
138 | [ 8, -1, 14],
139 | [ 8, -1, 15],
140 | [ 9, -1, 10],
141 | [ 9, -1, 11],
142 | [ 9, -1, 12],
143 | [ 9, -1, 13],
144 | [ 9, -1, 14],
145 | [ 9, -1, 15],
146 | [10, -1, 11],
147 | [10, -1, 12],
148 | [10, -1, 13],
149 | [10, -1, 14],
150 | [10, -1, 15],
151 | [11, -1, 12],
152 | [11, -1, 13],
153 | [11, -1, 14],
154 | [11, -1, 15],
155 | [12, -1, 13],
156 | [12, -1, 14],
157 | [12, -1, 15],
158 | [13, -1, 14],
159 | [13, -1, 15],
160 | [14, -1, 15]])
161 |
162 |
163 |
164 | # Export the model
165 | torch.onnx.export(generator.float(), # model being run
166 | (z.float(), given_masks_in.float(), given_nds.float(), given_eds.long()), # model input (or a tuple for multiple inputs)
167 | "houseganpp.onnx", # where to save the model (can be a file or file-like object)
168 | export_params=True, # store the trained parameter weights inside the model file
169 | opset_version=11, # the ONNX version to export the model to
170 | do_constant_folding=True, # whether to execute constant folding for optimization
171 | input_names = ['z', 'given_masks_in', 'given_nds', 'given_eds'], # the model's input names
172 | output_names = ['output'], # the model's output names
173 | dynamic_axes={'z' : {0 : 'n'}, # variable length axes
174 | 'given_masks_in' : {0 : 'n'},
175 | 'given_nds' : {0 : 'n'},
176 | 'given_eds' : {0 : 'm'},
177 | 'output' : {0 : 'n'}})
178 |
179 | # # Checking onnx
180 | # onnx_model = onnx.load("houseganpp.onnx")
181 | # onnx.checker.check_model(onnx_model)
182 |
183 | # ort_session = onnxruntime.InferenceSession("houseganpp.onnx")
184 |
185 | # def to_numpy(tensor):
186 | # return tensor.detach().cpu().numpy().astype('float32') if tensor.requires_grad else tensor.cpu().numpy().astype('float32')
187 |
188 | # # Run pytorch
189 | # torch_out = generator(z, given_masks_in, given_nds, given_eds)
190 |
191 | # # compute ONNX Runtime output prediction
192 | # ort_inputs = {'z': to_numpy(z), 'given_masks_in': to_numpy(given_masks_in), 'given_nds': to_numpy(given_nds), 'given_eds': to_numpy(given_eds).astype('int64')}  # given_eds must be int64 for the exported graph
193 | # ort_outs = ort_session.run(None, ort_inputs)
194 |
195 | # real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
196 | # real_edges = given_eds.detach().cpu().numpy()
197 |
198 | # _, rgb_im = draw_graph((real_nodes, real_edges))
199 | # fp_img = draw_masks(ort_outs[0], real_nodes, im_size=256)
200 |
201 | # plt.imshow(rgb_im)
202 | # plt.imshow(fp_img)
203 | # plt.show()
204 |
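205 | # The dynamic axes above ('n' = graph nodes, 'm' = edges) let the exported
206 | # model accept floor plans of any size. A minimal onnxruntime run might look
207 | # like this (z_np etc. are hypothetical numpy arrays shaped like the tensors
208 | # above, float32 except eds_np, which must be int64 to match the export):
209 | #   sess = onnxruntime.InferenceSession("houseganpp.onnx")
210 | #   masks = sess.run(None, {'z': z_np, 'given_masks_in': masks_np,
211 | #                           'given_nds': nds_np, 'given_eds': eds_np})[0]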
--------------------------------------------------------------------------------
/misc/debug.py:
--------------------------------------------------------------------------------
1 | import os
2 | os.environ['OPENBLAS_CORETYPE'] = 'Haswell'  # pin OpenBLAS kernels before importing numpy-based libs
3 |
4 | from pytorch_fid.fid_score import calculate_fid_given_paths
5 | fid_value = calculate_fid_given_paths(['./FID/gt/', './FID/test/debug/'], 2, 'cpu', 2048)
6 | print(fid_value)
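7 |
8 | # For reference, pytorch-fid also ships a CLI that should give the same number
9 | # (flag names per its README; verify against your installed version):
10 | #   python -m pytorch_fid ./FID/gt/ ./FID/test/debug/ --batch-size 2 --dims 2048 --device cpu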
--------------------------------------------------------------------------------
/misc/intersections.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | # Given three collinear points p, q, r, the function checks if
4 | # point q lies on line segment 'pr'
5 | def onSegment(p, q, r):
6 | if (q[0] <= max(p[0], r[0]) and q[0] >= min(p[0], r[0]) and
7 | q[1] <= max(p[1], r[1]) and q[1] >= min(p[1], r[1])):
8 | return True
9 | return False
10 |
11 | # To find orientation of ordered triplet (p, q, r).
12 | # The function returns the following values:
13 | # 0 --> p, q and r are collinear
14 | # 1 --> Clockwise
15 | # 2 --> Counterclockwise
16 | def orientation(p, q, r):
17 |
18 | # See https://www.geeksforgeeks.org/orientation-3-ordered-points/
19 | # for details of below formula.
20 | val = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])
21 |
22 | if (val == 0): return 0  # collinear
23 | if val > 0:
24 | return 1
25 | else:
26 | return 2
27 |
28 | # The main function that returns true if line segment 'p1q1'
29 | # and 'p2q2' intersect.
30 | def helperDoIntersect(p1, q1, p2, q2):
31 |
32 | # Find the four orientations needed for general and
33 | # special cases
34 | o1 = orientation(p1, q1, p2)
35 | o2 = orientation(p1, q1, q2)
36 | o3 = orientation(p2, q2, p1)
37 | o4 = orientation(p2, q2, q1)
38 |
39 | # General case
40 | if (o1 != o2 and o3 != o4):
41 | return True
42 |
43 | # Special Cases
44 | # p1, q1 and p2 are collinear and p2 lies on segment p1q1
45 | if (o1 == 0 and onSegment(p1, p2, q1)): return True
46 |
47 | # p1, q1 and q2 are collinear and q2 lies on segment p1q1
48 | if (o2 == 0 and onSegment(p1, q2, q1)): return True
49 |
50 | # p2, q2 and p1 are collinear and p1 lies on segment p2q2
51 | if (o3 == 0 and onSegment(p2, p1, q2)): return True
52 |
53 | # p2, q2 and q1 are collinear and q1 lies on segment p2q2
54 | if (o4 == 0 and onSegment(p2, q1, q2)): return True
55 |
56 | return False # Doesn't fall in any of the above cases
57 |
58 | def scale_dimension(pt1, pt2, factor):
59 | base_length = pt2 - pt1
60 | ret1 = pt1 - (base_length * (factor-1) / 2)
61 | return ret1
62 |
63 | def doIntersect(p1, q1, p2, q2):
64 |
65 | p1 = np.array(p1)
66 | q1 = np.array(q1)
67 | p2 = np.array(p2)
68 | q2 = np.array(q2)
69 |
70 | # same line segment - intersect
71 | if ((np.array_equal(p1, p2)) and (np.array_equal(q1, q2))) or\
72 | ((np.array_equal(p1, q2)) and (np.array_equal(q1, p2))):
73 | return True
74 |
75 | # line segment shares only one of the endpoints
76 | if (np.array_equal(p1, p2)):
77 | np1 = scale_dimension(p1, q1, .999)
78 | np2 = scale_dimension(p2, q2, .999)
79 | return helperDoIntersect(np1, q1, np2, q2)
80 | if (np.array_equal(p1, q2)):
81 | np1 = scale_dimension(p1, q1, .999)
82 | nq2 = scale_dimension(q2, p2, .999)
83 | return helperDoIntersect(np1, q1, p2, nq2)
84 | if (np.array_equal(q1, p2)):
85 | nq1 = scale_dimension(q1, p1, .999)
86 | np2 = scale_dimension(p2, q2, .999)
87 | return helperDoIntersect(p1, nq1, np2, q2)
88 | if (np.array_equal(q1, q2)):
89 | nq1 = scale_dimension(q1, p1, .999)
90 | nq2 = scale_dimension(q2, p2, .999)
91 | return helperDoIntersect(p1, nq1, p2, nq2)
92 |
93 | # do not share any endpoint
94 | return helperDoIntersect(p1, q1, p2, q2)
95 |
96 | if __name__ == '__main__':
97 |
98 | p1 = (-10, -10)
99 | q1 = (10, 10)
100 | p2 = (-10, 10)
101 | q2 = (10, -10)
102 |
103 | print(doIntersect(p1, q1, p2, q2))
104 |
105 | p1 = (0, 0)
106 | q1 = (-10, -10)
107 | p2 = (-10, -10)
108 | q2 = (-10, -15)
109 |
110 | print(doIntersect(p1, q1, p2, q2))
111 |
112 | # p1 = (10, 0)
113 | # q1 = (0, 10)
114 | # p2 = (0, 0)
115 | # q2 = (10, 10)
116 |
117 | # print(doIntersect(p1, q1, p2, q2))
118 |
119 | # p1 = (-5, -5)
120 | # q1 = (0, 0)
121 | # p1 = np.array(p1)
122 | # q1 = np.array(q1)
123 | # p2 = (-5, -5)
124 | # q2 = (10, 10)
125 | # p2 = np.array(p2)
126 | # q2 = np.array(q2)
127 | # print(doIntersect(p1, q1, p2, q2))
128 | # print(scale_dimension(p1, q1, .999))
129 | # print(scale_dimension(p2, q2, 1.001))
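130 |
131 | # Expected output of the two active tests above: True (the diagonals cross),
132 | # then False: when segments share exactly one endpoint, doIntersect shrinks
133 | # both away from the shared point (scale_dimension with factor .999) before
134 | # testing, so touching at a single endpoint alone does not count.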
--------------------------------------------------------------------------------
/misc/old/autoencoder_dataset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # Copyright 2018 Google LLC
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import json, os, random, math
18 | from collections import defaultdict
19 | import torch.nn as nn
20 | import torch.nn.functional as F
21 | import torch
22 | from torch.utils.data import Dataset
23 | import torchvision.transforms as T
24 | import math
25 | import numpy as np
26 | import PIL
27 | from skimage.transform import resize as imresize
28 | import pycocotools.mask as mask_utils
29 | import glob
30 | from PIL import Image, ImageDraw, ImageOps, ImageFilter
31 | import matplotlib.pyplot as plt
32 | from torch.nn.utils import spectral_norm  # used by conv_block when spec_norm=True
33 | from utils import mask_to_bb, ROOM_CLASS, ID_COLOR
34 |
35 | def conv_block(in_channels, out_channels, k, s, p, act="None", upsample=False, spec_norm=False):
36 | block = []
37 |
38 | if upsample:
39 | if spec_norm:
40 | block.append(spectral_norm(torch.nn.ConvTranspose2d(in_channels, out_channels, \
41 | kernel_size=k, stride=s, \
42 | padding=p, bias=True)))
43 | else:
44 | block.append(torch.nn.ConvTranspose2d(in_channels, out_channels, \
45 | kernel_size=k, stride=s, \
46 | padding=p, bias=True))
47 | else:
48 | if spec_norm:
49 | block.append(spectral_norm(torch.nn.Conv2d(in_channels, out_channels, \
50 | kernel_size=k, stride=s, \
51 | padding=p, bias=True)))
52 | else:
53 | block.append(torch.nn.Conv2d(in_channels, out_channels, \
54 | kernel_size=k, stride=s, \
55 | padding=p, bias=True))
56 | if "leaky" in act:
57 | block.append(torch.nn.LeakyReLU(0.1, inplace=True))
58 | elif "relu" in act:
59 | block.append(torch.nn.ReLU(True))
60 | elif "tanh":
61 | block.append(torch.nn.Tanh())
62 | elif "None":
63 | continue
64 |
65 | return block
66 |
67 | class Autoencoder(nn.Module):
68 | def __init__(self):
69 | super(Autoencoder, self).__init__()
70 | self.enc = nn.Sequential(
71 | *conv_block(1, 256, 3, 2, 1, act="relu"),
72 | *conv_block(256, 256, 3, 2, 1, act="relu"),
73 | *conv_block(256, 128, 3, 1, 1, act="relu"),
74 | *conv_block(128, 128, 3, 1, 1, act="relu"),
75 | *conv_block(128, 16, 3, 1, 1, act="None"))
76 |
77 |
78 | def forward(self, x):
79 | x = x.cuda()
80 | x = self.enc(x.unsqueeze(1))
81 | return x
82 |
83 |
84 |
85 | class AutoencoderDataset(Dataset):
86 | def __init__(self, transform=None):
87 | super(AutoencoderDataset, self).__init__()
88 | self.data = np.load('./autoencoder_data.npy', allow_pickle=True)
89 | self.data = dict(self.data[()])
90 | self.feats = self.data['feats_tensor']
91 | self.masks = self.data['masks_tensor']
92 | self.transform = transform
93 | def __len__(self):
94 | return len(self.feats)
95 |
96 | def __getitem__(self, index):
97 | feat = self.feats[index]
98 | mask = self.masks[index]
99 | mask = self.transform(mask.unsqueeze(0)).squeeze(0)
100 | return feat, mask
101 |
102 |
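103 | # Smoke test with illustrative shapes (requires a CUDA device, since
104 | # Autoencoder.forward calls .cuda() on its input):
105 | #   model = Autoencoder().cuda()
106 | #   out = model(torch.randn(4, 32, 32))  # masks are unsqueezed to (4, 1, 32, 32)
107 | #   print(out.shape)                     # (4, 16, 8, 8): two stride-2 convs halve 32 twice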
--------------------------------------------------------------------------------
/misc/old/coordconv.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.modules.conv as conv
4 |
5 |
6 | class AddCoords(nn.Module):
7 | def __init__(self, rank, with_r=False, use_cuda=True):
8 | super(AddCoords, self).__init__()
9 | self.rank = rank
10 | self.with_r = with_r
11 | self.use_cuda = use_cuda
12 |
13 | def forward(self, input_tensor):
14 | """
15 | :param input_tensor: shape (N, C_in, H, W)
16 | :return:
17 | """
18 | if self.rank == 1:
19 | batch_size_shape, channel_in_shape, dim_x = input_tensor.shape
20 | xx_range = torch.arange(dim_x, dtype=torch.int32)
21 | xx_channel = xx_range[None, None, :]
22 |
23 | xx_channel = xx_channel.float() / (dim_x - 1)
24 | xx_channel = xx_channel * 2 - 1
25 | xx_channel = xx_channel.repeat(batch_size_shape, 1, 1)
26 |
27 | if torch.cuda.is_available() and self.use_cuda:
28 | input_tensor = input_tensor.cuda()
29 | xx_channel = xx_channel.cuda()
30 | out = torch.cat([input_tensor, xx_channel], dim=1)
31 |
32 | if self.with_r:
33 | rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2))
34 | out = torch.cat([out, rr], dim=1)
35 |
36 | elif self.rank == 2:
37 | batch_size_shape, channel_in_shape, dim_y, dim_x = input_tensor.shape
38 | xx_ones = torch.ones([1, 1, 1, dim_x], dtype=torch.int32)
39 | yy_ones = torch.ones([1, 1, 1, dim_y], dtype=torch.int32)
40 |
41 | xx_range = torch.arange(dim_y, dtype=torch.int32)
42 | yy_range = torch.arange(dim_x, dtype=torch.int32)
43 | xx_range = xx_range[None, None, :, None]
44 | yy_range = yy_range[None, None, :, None]
45 |
46 | xx_channel = torch.matmul(xx_range, xx_ones)
47 | yy_channel = torch.matmul(yy_range, yy_ones)
48 |
49 | # transpose y
50 | yy_channel = yy_channel.permute(0, 1, 3, 2)
51 |
52 | xx_channel = xx_channel.float() / (dim_y - 1)
53 | yy_channel = yy_channel.float() / (dim_x - 1)
54 |
55 | xx_channel = xx_channel * 2 - 1
56 | yy_channel = yy_channel * 2 - 1
57 |
58 | xx_channel = xx_channel.repeat(batch_size_shape, 1, 1, 1)
59 | yy_channel = yy_channel.repeat(batch_size_shape, 1, 1, 1)
60 |
61 | if torch.cuda.is_available() and self.use_cuda:
62 | input_tensor = input_tensor.cuda()
63 | xx_channel = xx_channel.cuda()
64 | yy_channel = yy_channel.cuda()
65 | out = torch.cat([input_tensor, xx_channel, yy_channel], dim=1)
66 |
67 | if self.with_r:
68 | rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) + torch.pow(yy_channel - 0.5, 2))
69 | out = torch.cat([out, rr], dim=1)
70 |
71 | elif self.rank == 3:
72 | batch_size_shape, channel_in_shape, dim_z, dim_y, dim_x = input_tensor.shape
73 | xx_ones = torch.ones([1, 1, 1, 1, dim_x], dtype=torch.int32)
74 | yy_ones = torch.ones([1, 1, 1, 1, dim_y], dtype=torch.int32)
75 | zz_ones = torch.ones([1, 1, 1, 1, dim_z], dtype=torch.int32)
76 |
77 | xy_range = torch.arange(dim_y, dtype=torch.int32)
78 | xy_range = xy_range[None, None, None, :, None]
79 |
80 | yz_range = torch.arange(dim_z, dtype=torch.int32)
81 | yz_range = yz_range[None, None, None, :, None]
82 |
83 | zx_range = torch.arange(dim_x, dtype=torch.int32)
84 | zx_range = zx_range[None, None, None, :, None]
85 |
86 | xy_channel = torch.matmul(xy_range, xx_ones)
87 | xx_channel = torch.cat([xy_channel + i for i in range(dim_z)], dim=2)
88 |
89 | yz_channel = torch.matmul(yz_range, yy_ones)
90 | yz_channel = yz_channel.permute(0, 1, 3, 4, 2)
91 | yy_channel = torch.cat([yz_channel + i for i in range(dim_x)], dim=4)
92 |
93 | zx_channel = torch.matmul(zx_range, zz_ones)
94 | zx_channel = zx_channel.permute(0, 1, 4, 2, 3)
95 | zz_channel = torch.cat([zx_channel + i for i in range(dim_y)], dim=3)
96 |
97 | if torch.cuda.is_available() and self.use_cuda:
98 | input_tensor = input_tensor.cuda()
99 | xx_channel = xx_channel.cuda()
100 | yy_channel = yy_channel.cuda()
101 | zz_channel = zz_channel.cuda()
102 | out = torch.cat([input_tensor, xx_channel, yy_channel, zz_channel], dim=1)
103 |
104 | if self.with_r:
105 | rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) +
106 | torch.pow(yy_channel - 0.5, 2) +
107 | torch.pow(zz_channel - 0.5, 2))
108 | out = torch.cat([out, rr], dim=1)
109 | else:
110 | raise NotImplementedError
111 |
112 | return out
113 |
114 |
115 | class CoordConv2d(conv.Conv2d):
116 | def __init__(self, in_channels, out_channels, kernel_size, stride=1,
117 | padding=0, dilation=1, groups=1, bias=True, with_r=False, use_cuda=True):
118 | super(CoordConv2d, self).__init__(in_channels, out_channels, kernel_size,
119 | stride, padding, dilation, groups, bias)
120 | self.rank = 2
121 | self.addcoords = AddCoords(self.rank, with_r, use_cuda=use_cuda)
122 | self.conv = nn.Conv2d(in_channels + self.rank + int(with_r), out_channels,
123 | kernel_size, stride, padding, dilation, groups, bias)
124 |
125 | def forward(self, input_tensor):
126 | """
127 | input_tensor_shape: (N, C_in, H, W)
128 | output_tensor_shape: (N, C_out, H_out, W_out)
129 | :return: CoordConv2d Result
130 | """
131 | out = self.addcoords(input_tensor)
132 | out = self.conv(out)
133 |
134 | return out
135 |
136 | class CoordConvTranspose2d(nn.Module):
137 | """CoordConvTranspose layer for segmentation tasks."""
138 | def __init__(self, in_channels, out_channels, kernel_size, stride=1,
139 | padding=0, dilation=1, groups=1, bias=True, with_r=False, use_cuda=True):
140 | super(CoordConvTranspose2d, self).__init__()
141 | self.rank = 2
142 | self.addcoord = AddCoords(self.rank, with_r, use_cuda=use_cuda)
143 | self.convT = nn.ConvTranspose2d(in_channels + self.rank + int(with_r), out_channels,
144 | kernel_size, stride, padding, dilation, groups, bias)
145 |
146 | def forward(self, in_tensor):
147 | out = self.addcoord(in_tensor)
148 | out = self.convT(out)
149 | return out
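150 |
151 | # Usage sketch (illustrative shapes; pass use_cuda=False on a CPU-only machine):
152 | #   layer = CoordConv2d(3, 8, kernel_size=3, padding=1, use_cuda=False)
153 | #   y = layer(torch.randn(2, 3, 16, 16))  # AddCoords appends x/y maps, so the conv sees 5 channels
154 | #   print(y.shape)                        # torch.Size([2, 8, 16, 16])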
--------------------------------------------------------------------------------
/misc/old/data_stats.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn, is_adjacent
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb
21 | from PIL import Image, ImageDraw
22 | from reconstruct import reconstructFloorplan
23 | import svgwrite
24 |
25 | from models import Generator
26 | import networkx as nx
27 | import matplotlib.pyplot as plt
28 | from utils import ID_COLOR
29 | from tqdm import tqdm
30 | from collections import defaultdict
31 |
32 | parser = argparse.ArgumentParser()
33 | parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
34 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
35 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches - does not support larger batchs")
36 | parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
37 | parser.add_argument("--with_boundary", action='store_true', default=True, help="include floorplan footprint")
38 | parser.add_argument("--num_variations", type=int, default=10, help="number of variations")
39 | parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
40 |
41 |
42 | opt = parser.parse_args()
43 | print(opt)
44 |
45 | def return_eq(node1, node2):
46 | return node1['label']==node2['label']
47 |
48 | def compute_dist(bb1, bb2):
49 |
50 | x0, y0, x1, y1 = bb1
51 | x2, y2, x3, y3 = bb2
52 |
53 | h1, h2 = x1-x0, x3-x2
54 | w1, w2 = y1-y0, y3-y2
55 |
56 | xc1, xc2 = (x0+x1)/2.0, (x2+x3)/2.0
57 | yc1, yc2 = (y0+y1)/2.0, (y2+y3)/2.0
58 |
59 | delta_x = abs(xc2-xc1) - (h1 + h2)/2.0
60 | delta_y = abs(yc2-yc1) - (w1 + w2)/2.0
61 |
62 | return delta_x, delta_y
63 |
64 |
65 | def retrieve_connections(nodes, room_bb):
66 | edges = []
67 | nodes = [x for x in nodes if x >= 0]
68 | room_bb = room_bb.reshape((-1, 4))
69 | for k, bb1 in enumerate(room_bb):
70 | for l, bb2 in enumerate(room_bb):
71 | if k > l:
72 | if is_adjacent(bb1, bb2):
73 | edges.append((k, l))
74 | return nodes, edges
75 |
76 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
77 |
78 | # draw edges
79 | for k, l in lines_on:
80 | x1, y1 = np.array(junctions[k])/2.0
81 | x2, y2 = np.array(junctions[l])/2.0
82 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
83 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=0.5))
84 |
85 | # draw corners
86 | for j in juncs_on:
87 | x, y = np.array(junctions[j])/2.0
88 | dwg.add(dwg.circle(center=(x, y), r=2, stroke='red', fill='white', stroke_width=1, opacity=0.75))
89 | return
90 |
91 |
92 | # Initialize variables
93 | rooms_path = '/local-scratch/nnauata/autodesk/FloorplanDataset/'
94 |
95 | # Configure data loader
96 |
97 | fp_dataset = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), split='eval')
98 | fp_loader = torch.utils.data.DataLoader(fp_dataset,
99 | batch_size=opt.batch_size,
100 | shuffle=False,
101 | num_workers=0,
102 | collate_fn=floorplan_collate_fn)
103 | fp_iter = tqdm(fp_loader, total=len(fp_dataset) // opt.batch_size + 1)
104 |
105 | # Generate samples
106 | cuda = False #True if torch.cuda.is_available() else False
107 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
108 | graphs = []
109 | for i, batch in enumerate(fp_iter):
110 |
111 | # Unpack batch
112 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
113 | real_nodes = np.where(nds.detach().cpu()==1)[-1]
114 | graphs.append(len(real_nodes))
115 |
116 | samples_per_len = defaultdict(int)
117 | for g_len in graphs:
118 | samples_per_len[g_len] += 1
119 |
120 | print(samples_per_len)
121 |
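122 | # samples_per_len maps graph size (room count) to how many eval floor plans
123 | # have that many rooms, e.g. {5: 120, 8: 340, ...} (counts illustrative).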
--------------------------------------------------------------------------------
/misc/old/dump_to_html.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ###########################################################
3 | # Usage: imgDetails.sh [prefix]
4 | #
5 | # Script saves the details of the images it finds in the
6 | # current directory (and optional prefix) to ./all.html.
7 | # When complete, the script opens that file in
8 | # TextEdit.
9 | ############################################################
10 | PREFIX=$1
11 |
12 | declare -r OUT=./all.html
13 | declare -r CMD="sips -g pixelWidth -g pixelHeight"
14 | declare -a PROPS=()
15 | declare -ar ALLOWED=(
16 | $PREFIX/*.jpg *.JPG $PREFIX/*.svg
17 | *.GIF *.gif
18 | *.png *.PNG
19 | )
20 |
21 | let COUNT=0
22 |
23 | for ITEM in ${ALLOWED[@]}; do
24 | if [ -f $ITEM ]; then
25 | pos=0
26 | for PROP in $($CMD "$ITEM"|tail -2|sed 's/ //g'|awk -F':' '{print $2}')
27 | do
28 | echo $PROP | egrep '[0-9]+'>/dev/null 2>&1
29 | if [ $? == 0 ]; then
30 | PROPS[$pos]=$PROP
31 | pos=$((pos+1))
32 | fi
33 | done
34 | if [ -n ${PROPS[0]} -a -n ${PROPS[1]} ]; then
35 | echo "" | tee -a $OUT
36 | COUNT=$((COUNT+1))
37 | fi
38 | if [ $COUNT -ge 900 ]; then
39 | break
40 | fi
41 | fi
42 | done
43 |
44 | echo -e "\nAttempted to process (${COUNT}) files."
45 |
46 | [ -f $OUT ] && open -e $OUT
47 |
48 | exit 0
49 |
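50 | # Usage sketch (run from the directory holding the images; the argument is an
51 | # optional subdirectory prefix used in the globs above):
52 | #   ./dump_to_html.sh teaser
53 | # One entry per matched image is appended to ./all.html, up to 900 files.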
--------------------------------------------------------------------------------
/misc/old/finetune_generator.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 | import torch
8 | from reconstruction_dataset import AutoencoderDataset, Autoencoder
9 | import torchvision.transforms as transforms
10 | from torchvision.utils import save_image
11 | import torch.optim as optim
12 | import torch.nn as nn
13 | from torch.autograd import Variable
14 | from PIL import Image
15 | from models import Discriminator, Generator, compute_gradient_penalty, weights_init_normal
16 | import matplotlib.pyplot as plt
17 |
18 |
19 |
20 |
21 | # Initialize dataset iterator
22 | checkpoint = '/home/nelson/Workspace/autodesk/housegan2/checkpoints/gen_housegan_E_1000000.pth'
23 | ae_dataset = AutoencoderDataset(transform=transforms.Normalize(mean=[0.5], std=[0.5]))
24 | batch_size = 1
25 | ae_loader = torch.utils.data.DataLoader(ae_dataset,
26 | batch_size=batch_size,
27 | shuffle=True)
28 | generator = Generator().cuda()
29 | update_freq = 256
30 | pretrained_dict = torch.load(checkpoint)
31 | model_dict = generator.state_dict()
32 |
33 | # 1. filter out unnecessary keys
34 | pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
35 | # 2. overwrite entries in the existing state dict
36 | model_dict.update(pretrained_dict)
37 | # 3. load the new state dict
38 | generator.load_state_dict(model_dict)
39 |
40 | def selectRandomNodes(nd_to_sample, batch_size):
41 | fixed_rooms_num = []
42 | fixed_nodes = []
43 | shift = 0
44 | for k in range(batch_size):
45 | rooms = np.where(nd_to_sample == k)
46 | rooms_num = np.array(rooms).shape[-1]
47 | N = np.random.randint(rooms_num, size=1)
48 | fixed_nodes_state = torch.tensor(np.random.choice(list(range(rooms_num)), size=N, replace=False)).cuda()
49 | fixed_nodes_state += shift
50 | fixed_nodes.append(fixed_nodes_state)
51 | shift += rooms_num
52 | fixed_nodes = torch.cat(fixed_nodes)
53 | ind_fixed_nodes = torch.zeros((nd_to_sample.shape[0], 1))
54 | ind_fixed_nodes[fixed_nodes] = 1.0
55 | ind_fixed_nodes = ind_fixed_nodes.cuda()
56 | return ind_fixed_nodes
57 |
58 | optimizer = optim.Adam(generator.parameters(), lr=0.0001)
59 | optimizer.zero_grad()
60 | running_loss = 0.0
61 | for k in range(1000):
62 | for i, batch in enumerate(ae_loader):
63 |
64 | # retrieve data
65 | prev_feats, mask, nodes, edges = batch
66 | prev_feats, mask = Variable(prev_feats).squeeze(0).cuda(), Variable(mask).squeeze(0).cuda()
67 | given_nds, given_eds = Variable(nodes).squeeze(0).cuda(), Variable(edges).squeeze(0).cuda()
68 |
69 | # generate random state
70 | N = np.random.randint(given_nds.shape[1], size=1)
71 | fixed_nodes_state = torch.tensor(np.random.choice(list(range(given_nds.shape[1])), size=N, replace=False)).cuda()
72 | # Build the fixed-node indicator and the mask input before the generator call.
73 | # With batch_size == 1 every node belongs to sample 0, so nd_to_sample is all
74 | # zeros (an assumption; this dataset does not provide it directly).
75 | nd_to_sample = np.zeros(mask.shape[0])
76 | ind_fixed_nodes = selectRandomNodes(nd_to_sample, batch_size)
77 | given_masks = mask.clone()  # ground-truth masks serve as the partial input
78 | z = Variable(torch.Tensor(np.random.normal(0, 1, (mask.shape[0], 128)))).cuda()
79 | gen_mks, curr_feats = generator(z, ind_fixed_nodes, given_masks, given_nds, given_eds, given_v=mask, state=fixed_nodes_state)
80 | # reconstruction loss
81 | target_mask = mask[fixed_nodes_state]
82 | recon_mask = gen_mks[fixed_nodes_state]
83 |
84 | if (k % 10 == 0):
85 | # debug masks
86 | for m_t, m_r, f in zip(target_mask, recon_mask, range(len(fixed_nodes_state))):
87 | if i > 10:
88 | continue
89 | m_t = m_t.detach().cpu().numpy()*255.0
90 | m_r = m_r.detach().cpu().numpy()*255.0
91 | fig = plt.figure(figsize=(12, 6))
92 | fig.add_subplot(1, 2, 1)
93 | plt.imshow(np.rot90(m_r, 2))
94 | fig.add_subplot(1, 2, 2)
95 | plt.imshow(np.rot90(m_t, 2))
96 | plt.savefig('./debug/debug_{}_{}.png'.format(k*len(ae_dataset)+i, f))
97 | plt.close('all')
98 |
99 | loss = nn.MSELoss(reduction='none')
100 | out = loss(recon_mask.view(batch_size, -1), target_mask.view(batch_size, -1)).sum()
101 | out.backward()
102 | running_loss += out
103 |
104 | step = k*len(ae_dataset)+i
105 | if (step + 1)%update_freq == 0:
106 | print(step)
107 | optimizer.step()
108 | optimizer.zero_grad()
109 | print('[%d] loss: %.3f' %
110 | (k + 1, running_loss / len(ae_dataset)))
111 | running_loss = 0
112 |
113 | if (k + 1) % 50 == 0:
114 | torch.save(generator.state_dict(), './finetuned_generator_{}.pth'.format(k))
115 |
116 |
117 |
118 |
119 |
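120 | # Note: with batch_size == 1, gradients accumulate over update_freq (= 256)
121 | # samples before each optimizer.step(), emulating a larger effective batch.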
--------------------------------------------------------------------------------
/misc/old/finetune_generator_feats.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 | import torch
8 | from reconstruction_dataset import AutoencoderDataset, Autoencoder
9 | import torchvision.transforms as transforms
10 | from torchvision.utils import save_image
11 | import torch.optim as optim
12 | import torch.nn as nn
13 | from torch.autograd import Variable
14 | from PIL import Image
15 | from models import Discriminator, Generator, compute_gradient_penalty, weights_init_normal
16 | import matplotlib.pyplot as plt
17 |
18 |
19 |
20 |
21 | # Initialize dataset iterator
22 | checkpoint = '/home/nelson/Workspace/autodesk/housegan2/checkpoints/gen_housegan_E_1000000.pth'
23 | ae_dataset = AutoencoderDataset(transform=transforms.Normalize(mean=[0.5], std=[0.5]))
24 | batch_size = 1
25 | ae_loader = torch.utils.data.DataLoader(ae_dataset,
26 | batch_size=batch_size,
27 | shuffle=True)
28 | generator = Generator().cuda()
29 | update_freq = 256
30 | pretrained_dict = torch.load(checkpoint)
31 | model_dict = generator.state_dict()
32 |
33 | # 1. filter out unnecessary keys
34 | pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
35 | # 2. overwrite entries in the existing state dict
36 | model_dict.update(pretrained_dict)
37 | # 3. load the new state dict
38 | generator.load_state_dict(model_dict)
39 |
40 |
41 | optimizer = optim.Adam(generator.parameters(), lr=0.0001)
42 | optimizer.zero_grad()
43 | running_loss = 0.0
44 | for k in range(1000):
45 | for i, batch in enumerate(ae_loader):
46 |
47 | # retrieve data
48 | prev_feats, mask, nodes, edges = batch
49 | prev_feats, mask = Variable(prev_feats).squeeze(0).cuda(), Variable(mask).squeeze(0).cuda()
50 | given_nds, given_eds = Variable(nodes).squeeze(0).cuda(), Variable(edges).squeeze(0).cuda()
51 |
52 | # generate random state
53 | N = np.random.randint(given_nds.shape[1], size=1)
54 | fixed_nodes_state = torch.tensor(np.random.choice(list(range(given_nds.shape[1])), size=N, replace=False)).cuda()
55 | # print('running state: {}'.format(str(fixed_nodes_state)))
56 | z = Variable(torch.Tensor(np.random.normal(0, 1, (mask.shape[0], 128)))).cuda()
57 | gen_mks, curr_feats = generator(z, given_nds, given_eds, given_v=prev_feats, state=fixed_nodes_state)
58 |
59 | # reconstruction loss
60 | target_mask = mask[fixed_nodes_state]
61 | recon_mask = gen_mks[fixed_nodes_state]
62 |
63 | if (k % 10 == 0):
64 | # debug masks
65 | for m_t, m_r, f in zip(target_mask, recon_mask, range(len(fixed_nodes_state))):
66 | if i > 10:
67 | continue
68 | m_t = m_t.detach().cpu().numpy()*255.0
69 | m_r = m_r.detach().cpu().numpy()*255.0
70 | fig = plt.figure(figsize=(12, 6))
71 | fig.add_subplot(1, 2, 1)
72 | plt.imshow(np.rot90(m_r, 2))
73 | fig.add_subplot(1, 2, 2)
74 | plt.imshow(np.rot90(m_t, 2))
75 | plt.savefig('./debug/debug_{}_{}.png'.format(k*len(ae_dataset)+i, f))
76 | plt.close('all')
77 |
78 | loss = nn.MSELoss(reduction='none')
79 | out = loss(recon_mask.view(batch_size, -1), target_mask.view(batch_size, -1)).sum()
80 | out.backward()
81 | running_loss += out
82 |
83 | step = k*len(ae_dataset)+i
84 | if (step + 1)%update_freq == 0:
85 | print(step)
86 | optimizer.step()
87 | optimizer.zero_grad()
88 | print('[%d] loss: %.3f' %
89 | (k + 1, running_loss / len(ae_dataset)))
90 | running_loss = 0
91 |
92 | if (k + 1) % 50 == 0:
93 | torch.save(generator.state_dict(), './finetuned_generator_{}.pth'.format(k))
94 |
95 |
96 |
97 |
98 |
--------------------------------------------------------------------------------
/misc/old/generate_features.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from PIL import Image, ImageDraw
21 | from reconstruct import reconstructFloorplan
22 | import svgwrite
23 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb, remove_junctions, ID_COLOR, extract_rooms
24 | from models import Generator
25 | from collections import defaultdict
26 | import matplotlib.pyplot as plt
27 | import networkx as nx
28 |
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
31 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
32 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
33 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
34 | parser.add_argument("--num_variations", type=int, default=8, help="number of variations")
35 | parser.add_argument("--exp_folder", type=str, default='exps', help="destination folder")
36 |
37 | opt = parser.parse_args()
38 | print(opt)
39 |
40 | target_set = 'E'
41 | phase='eval'
42 | checkpoint = '/home/nelson/Workspace/autodesk/housegan2/checkpoints/gen_housegan_E_1000000.pth'
43 |
44 | def pad_im(cr_im, final_size=256, bkg_color='white'):
45 | new_size = int(np.max([np.max(list(cr_im.size)), final_size]))
46 | padded_im = Image.new('RGB', (new_size, new_size), 'white')
47 | padded_im.paste(cr_im, ((new_size-cr_im.size[0])//2, (new_size-cr_im.size[1])//2))
48 | padded_im = padded_im.resize((final_size, final_size), Image.ANTIALIAS)
49 | return padded_im
50 |
51 | def draw_graph(g_true):
52 | # build true graph
53 | G_true = nx.Graph()
54 | colors_H = []
55 | for k, label in enumerate(g_true[0]):
56 | _type = label+1
57 | if _type >= 0:
58 | G_true.add_nodes_from([(k, {'label':_type})])
59 | colors_H.append(ID_COLOR[_type])
60 | for k, m, l in g_true[1]:
61 | if m > 0:
62 | G_true.add_edges_from([(k, l)], color='b',weight=4)
63 | plt.figure()
64 | pos = nx.nx_agraph.graphviz_layout(G_true, prog='neato')
65 |
66 | edges = G_true.edges()
67 | colors = ['black' for u,v in edges]
68 | weights = [4 for u,v in edges]
69 |
70 | nx.draw(G_true, pos, node_size=1000, node_color=colors_H, font_size=0, font_weight='bold', edgelist=edges, edge_color=colors, width=weights)
71 | plt.tight_layout()
72 | plt.savefig('./dump/_true_graph.jpg', format="jpg")
73 | rgb_im = Image.open('./dump/_true_graph.jpg')
74 | rgb_arr = pad_im(rgb_im).convert('RGBA')
75 | return rgb_arr
76 |
77 |
78 | import cv2
79 | import webcolors
80 | def draw_masks(masks, real_nodes):
81 |
82 | # transp = Image.new('RGBA', img.size, (0,0,0,0)) # Temp drawing image.
83 | # draw = ImageDraw.Draw(transp, "RGBA")
84 | # draw.ellipse(xy, **kwargs)
85 | # # Alpha composite two images together and replace first with result.
86 | # img.paste(Image.alpha_composite(img, transp))
87 |
88 | bg_img = Image.new("RGBA", (256, 256), (255, 255, 255, 0)) # Semitransparent background.
89 | for m, nd in zip(masks, real_nodes):
90 | reg = Image.new('RGBA', (32, 32), (0,0,0,0))
91 | dr_reg = ImageDraw.Draw(reg)
92 | m[m>0] = 255
93 | m[m<0] = 0
94 | m = m.detach().cpu().numpy()
95 | m = Image.fromarray(m)
96 | color = ID_COLOR[nd+1]
97 | r, g, b = webcolors.name_to_rgb(color)
98 | dr_reg.bitmap((0, 0), m.convert('L'), fill=(r, g, b, 32))
99 | reg = reg.resize((256, 256))
100 |
101 | bg_img.paste(Image.alpha_composite(bg_img, reg))
102 |
103 |
104 | for m, nd in zip(masks, real_nodes):
105 | cnt = Image.new('RGBA', (256, 256), (0,0,0,0))
106 | dr_cnt = ImageDraw.Draw(cnt)
107 | mask = np.zeros((256,256,3)).astype('uint8')
108 | m[m>0] = 255
109 | m[m<0] = 0
110 | m = m.detach().cpu().numpy()[:, :, np.newaxis].astype('uint8')
111 | m = cv2.resize(m, (256, 256), interpolation = cv2.INTER_AREA)
112 | ret,thresh = cv2.threshold(m,127,255,0)
113 | contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
114 | if len(contours) > 0:
115 | contours = [c for c in contours]
116 | color = ID_COLOR[nd+1]
117 | r, g, b = webcolors.name_to_rgb(color)
118 | cv2.drawContours(mask, contours, -1, (255, 255, 255), 2)
119 | mask = Image.fromarray(mask)
120 | dr_cnt.bitmap((0, 0), mask.convert('L'), fill=(r, g, b, 256))
121 | bg_img.paste(Image.alpha_composite(bg_img, cnt))
122 |
123 |
124 | return bg_img
125 |
126 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
127 |
128 | # draw edges
129 | for k, l in lines_on:
130 | x1, y1 = np.array(junctions[k])
131 | x2, y2 = np.array(junctions[l])
132 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
133 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=1.0))
134 |
135 | # draw corners
136 | for j in juncs_on:
137 | x, y = np.array(junctions[j])
138 | dwg.add(dwg.circle(center=(float(x), float(y)), r=3, stroke='red', fill='white', stroke_width=2, opacity=1.0))
139 | return
140 |
141 | # Create folder
142 | os.makedirs(opt.exp_folder, exist_ok=True)
143 |
144 | # Initialize generator and discriminator
145 | generator = Generator()
146 | generator.load_state_dict(torch.load(checkpoint))
147 |
148 | # Initialize variables
149 | cuda = True if torch.cuda.is_available() else False
150 | if cuda:
151 | generator.cuda()
152 | rooms_path = '../'
153 |
154 | # Initialize dataset iterator
155 | fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split=phase)
156 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
157 | batch_size=opt.batch_size,
158 | shuffle=False, collate_fn=floorplan_collate_fn)
159 | # Optimizers
160 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
161 |
162 | # Vectorize
163 | # ------------
164 | globalIndex = 0
165 | final_images = []
166 | target_graph = list(range(500))
167 | page_count = 0
168 | n_rows = 0
169 | feats_tensor = []
170 | masks_tensor = []
171 |
172 | for i, batch in enumerate(fp_loader):
173 | if i not in target_graph:
174 | continue
175 |
176 | # Unpack batch
177 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
178 |
179 | # Configure input
180 | real_mks = Variable(mks.type(Tensor))
181 | given_nds = Variable(nds.type(Tensor))
182 | given_eds = eds
183 | for k in range(opt.num_variations):
184 | z = Variable(Tensor(np.random.normal(0, 1, (real_mks.shape[0], opt.latent_dim))))
185 | with torch.no_grad():
186 | gen_mks, feats = generator(z, given_nds, given_eds)
187 | gen_bbs = np.array([np.array(mask_to_bb(mk)) for mk in gen_mks.detach().cpu()])
188 | real_bbs = np.array([np.array(mask_to_bb(mk)) for mk in real_mks.detach().cpu()])
189 | real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
190 | feats_tensor.append(feats)
191 | masks_tensor.append(gen_mks)
192 |
193 | # save data
194 | feats_tensor = torch.cat(feats_tensor, 0)
195 | masks_tensor = torch.cat(masks_tensor, 0)
196 | data = {'feats_tensor':feats_tensor, 'masks_tensor':masks_tensor}
197 | np.save('./autoencoder_data.npy', data)
198 |
199 | print(feats_tensor.shape, masks_tensor.shape)
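200 |
201 | # The dump can be restored the same way AutoencoderDataset reads it:
202 | #   data = np.load('./autoencoder_data.npy', allow_pickle=True)[()]
203 | #   feats, masks = data['feats_tensor'], data['masks_tensor']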
--------------------------------------------------------------------------------
/misc/old/generate_features_reconstruction.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from PIL import Image, ImageDraw
21 | from reconstruct import reconstructFloorplan
22 | import svgwrite
23 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb, remove_junctions, ID_COLOR, extract_rooms
24 | from models import Generator
25 | from collections import defaultdict
26 | import matplotlib.pyplot as plt
27 | import networkx as nx
28 |
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
31 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
32 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
33 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
34 | parser.add_argument("--num_variations", type=int, default=8, help="number of variations")
35 | parser.add_argument("--exp_folder", type=str, default='exps', help="destination folder")
36 |
37 | opt = parser.parse_args()
38 | print(opt)
39 |
40 | target_set = 'E'
41 | phase='eval'
42 | checkpoint = '/home/nelson/Workspace/autodesk/housegan2/checkpoints/gen_housegan_E_1000000.pth'
43 |
44 | def pad_im(cr_im, final_size=256, bkg_color='white'):
45 | new_size = int(np.max([np.max(list(cr_im.size)), final_size]))
46 | padded_im = Image.new('RGB', (new_size, new_size), 'white')
47 | padded_im.paste(cr_im, ((new_size-cr_im.size[0])//2, (new_size-cr_im.size[1])//2))
48 | padded_im = padded_im.resize((final_size, final_size), Image.ANTIALIAS)
49 | return padded_im
50 |
51 | def draw_graph(g_true):
52 | # build true graph
53 | G_true = nx.Graph()
54 | colors_H = []
55 | for k, label in enumerate(g_true[0]):
56 | _type = label+1
57 | if _type >= 0:
58 | G_true.add_nodes_from([(k, {'label':_type})])
59 | colors_H.append(ID_COLOR[_type])
60 | for k, m, l in g_true[1]:
61 | if m > 0:
62 | G_true.add_edges_from([(k, l)], color='b',weight=4)
63 | plt.figure()
64 | pos = nx.nx_agraph.graphviz_layout(G_true, prog='neato')
65 |
66 | edges = G_true.edges()
67 | colors = ['black' for u,v in edges]
68 | weights = [4 for u,v in edges]
69 |
70 | nx.draw(G_true, pos, node_size=1000, node_color=colors_H, font_size=0, font_weight='bold', edgelist=edges, edge_color=colors, width=weights)
71 | plt.tight_layout()
72 | plt.savefig('./dump/_true_graph.jpg', format="jpg")
73 | rgb_im = Image.open('./dump/_true_graph.jpg')
74 | rgb_arr = pad_im(rgb_im).convert('RGBA')
75 | return rgb_arr
76 |
77 |
78 | import cv2
79 | import webcolors
80 | def draw_masks(masks, real_nodes):
81 |
82 | # transp = Image.new('RGBA', img.size, (0,0,0,0)) # Temp drawing image.
83 | # draw = ImageDraw.Draw(transp, "RGBA")
84 | # draw.ellipse(xy, **kwargs)
85 | # # Alpha composite two images together and replace first with result.
86 | # img.paste(Image.alpha_composite(img, transp))
87 |
88 | bg_img = Image.new("RGBA", (256, 256), (255, 255, 255, 0)) # Semitransparent background.
89 | for m, nd in zip(masks, real_nodes):
90 | reg = Image.new('RGBA', (32, 32), (0,0,0,0))
91 | dr_reg = ImageDraw.Draw(reg)
92 | m[m>0] = 255
93 | m[m<0] = 0
94 | m = m.detach().cpu().numpy()
95 | m = Image.fromarray(m)
96 | color = ID_COLOR[nd+1]
97 | r, g, b = webcolors.name_to_rgb(color)
98 | dr_reg.bitmap((0, 0), m.convert('L'), fill=(r, g, b, 32))
99 | reg = reg.resize((256, 256))
100 |
101 | bg_img.paste(Image.alpha_composite(bg_img, reg))
102 |
103 |
104 | for m, nd in zip(masks, real_nodes):
105 | cnt = Image.new('RGBA', (256, 256), (0,0,0,0))
106 | dr_cnt = ImageDraw.Draw(cnt)
107 | mask = np.zeros((256,256,3)).astype('uint8')
108 | m[m>0] = 255
109 | m[m<0] = 0
110 | m = m.detach().cpu().numpy()[:, :, np.newaxis].astype('uint8')
111 | m = cv2.resize(m, (256, 256), interpolation = cv2.INTER_AREA)
112 | ret,thresh = cv2.threshold(m,127,255,0)
113 | contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
114 | if len(contours) > 0:
115 | contours = [c for c in contours]
116 | color = ID_COLOR[nd+1]
117 | r, g, b = webcolors.name_to_rgb(color)
118 | cv2.drawContours(mask, contours, -1, (255, 255, 255), 2)
119 | mask = Image.fromarray(mask)
120 | dr_cnt.bitmap((0, 0), mask.convert('L'), fill=(r, g, b, 256))
121 | bg_img.paste(Image.alpha_composite(bg_img, cnt))
122 |
123 |
124 | return bg_img
125 |
126 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
127 |
128 | # draw edges
129 | for k, l in lines_on:
130 | x1, y1 = np.array(junctions[k])
131 | x2, y2 = np.array(junctions[l])
132 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
133 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=1.0))
134 |
135 | # draw corners
136 | for j in juncs_on:
137 | x, y = np.array(junctions[j])
138 | dwg.add(dwg.circle(center=(float(x), float(y)), r=3, stroke='red', fill='white', stroke_width=2, opacity=1.0))
139 | return
140 |
141 | # Create folder
142 | os.makedirs(opt.exp_folder, exist_ok=True)
143 |
144 | # Initialize generator and discriminator
145 | generator = Generator()
146 | generator.load_state_dict(torch.load(checkpoint))
147 |
148 | # Initialize variables
149 | cuda = True if torch.cuda.is_available() else False
150 | if cuda:
151 | generator.cuda()
152 | rooms_path = '../'
153 |
154 | # Initialize dataset iterator
155 | fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split=phase)
156 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
157 | batch_size=opt.batch_size,
158 | shuffle=False, collate_fn=floorplan_collate_fn)
159 | # Optimizers
160 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
161 |
162 | # Vectorize
163 | # ------------
164 | globalIndex = 0
165 | final_images = []
166 | target_graph = list(range(500))
167 | page_count = 0
168 | n_rows = 0
169 |
170 | feats_list = []
171 | gen_masks_list = []
172 | nodes_list = []
173 | edges_list = []
174 |
175 | for i, batch in enumerate(fp_loader):
176 | if i not in target_graph:
177 | continue
178 |
179 | # Unpack batch
180 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
181 |
182 | # Configure input
183 | real_mks = Variable(mks.type(Tensor))
184 | given_nds = Variable(nds.type(Tensor))
185 | given_eds = eds
186 | for k in range(opt.num_variations):
187 | z = Variable(Tensor(np.random.normal(0, 1, (real_mks.shape[0], opt.latent_dim))))
188 | with torch.no_grad():
189 | gen_mks, feats = generator(z, given_nds, given_eds)
190 | gen_bbs = np.array([np.array(mask_to_bb(mk)) for mk in gen_mks.detach().cpu()])
191 | real_bbs = np.array([np.array(mask_to_bb(mk)) for mk in real_mks.detach().cpu()])
192 | real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
193 |
194 | # store data
195 | feats_list.append(feats)
196 | gen_masks_list.append(gen_mks)
197 | nodes_list.append(nds)
198 | edges_list.append(eds)
199 |
200 | # save data
201 | # feats_tensor = torch.cat(feats_tensor, 0)
202 | # masks_tensor = torch.cat(masks_tensor, 0)
203 | data = {'feats_list':feats_list, 'gen_masks_list':gen_masks_list, 'nodes_list':nodes_list, 'edges_list':edges_list}
204 | np.save('./reconstruction_data.npy', data)
205 |
206 | # print(feats_tensor.shape, masks_tensor.shape)
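207 | # Unlike autoencoder_data.npy, this dump keeps per-graph lists; restore with:
208 | #   data = np.load('./reconstruction_data.npy', allow_pickle=True)[()]
209 | #   feats_list, gen_masks_list = data['feats_list'], data['gen_masks_list']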
--------------------------------------------------------------------------------
/misc/read_data.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | from PIL import Image
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | def read_data(line):
8 | poly=[]
9 | img = np.asarray(Image.open(line))
10 | img_room_type=img[:,:,1]
11 | img_room_number=img[:,:,2]
12 | room_no=img_room_number.max()
13 | room_imgs=[]
14 | rm_types=[]
15 | for i in range(room_no):
16 |
17 | room_img=np.zeros((256, 256))
18 |
19 | for k in range(256):
20 | for h in range(256):
21 | if(img_room_number[k][h]==i+1):
22 | room_img[k][h]=1
23 | k_=k
24 | h_=h
25 | rm_t=img_room_type[k_][h_]
26 | if(rm_t==0):
27 | rm_types.append(1)
28 | elif(rm_t==1):
29 | rm_types.append(3)
30 | elif(rm_t==2):
31 | rm_types.append(2)
32 | elif(rm_t==3):
33 | rm_types.append(4)
34 | elif(rm_t==4):
35 | rm_types.append(7)
36 | elif(rm_t==5):
37 | rm_types.append(3)
38 | elif(rm_t==6):
39 | rm_types.append(8)
40 | elif(rm_t==7):
41 | rm_types.append(3)
42 | elif(rm_t==8):
43 | rm_types.append(3)
44 | elif(rm_t==9):
45 | rm_types.append(5)
46 | elif(rm_t==10):
47 | rm_types.append(6)
48 | elif(rm_t==11):
49 | rm_types.append(10)
50 | else:
51 | rm_types.append(16)
52 | room_imgs.append(room_img)
53 | walls=[]
54 | rm_type=rm_types
55 | for t in range(len(room_imgs)):
56 | tmp=room_imgs[t]
57 | for k in range(254):
58 | for h in range(254):
59 | if(tmp[k][h]==1) & (tmp[k+1][h]==0) & (tmp[k+2][h]==1):
60 | tmp[k+1][h] =1
61 |
62 | for k in range(254):
63 | for h in range(254):
64 | if(tmp[h][k]==1) & (tmp[h][k+1]==0) & (tmp[h][k+2]==1):
65 | tmp[h][k+1] =1
66 |
67 | for k in range(254):
68 | for h in range(254):
69 | if(tmp[k][h]==0) & (tmp[k+1][h]==1) & (tmp[k+2][h]==0):
70 | tmp[k+1][h] =0
71 |
72 | for k in range(254):
73 | for h in range(254):
74 | if(tmp[h][k]==0) & (tmp[h][k+1]==1) & (tmp[h][k+2]==0):
75 | tmp[h][k+1] =0
76 | room_imgs[t]=tmp
77 | coords=[]
78 | for k in range(2,254):
79 | for h in range(2,254):
80 | if(tmp[k][h]==1):
81 | if((tmp[k-2][h]==0) & (tmp[k-2][h-2]==0)&(tmp[k][h-2]==0) &(tmp[k-1][h]==0) & (tmp[k-1][h-1]==0)&(tmp[k][h-1]==0)):
82 | coords.append([h,k,0,0,t,rm_type[t]])
83 | elif(tmp[k+2][h]==0)&(tmp[k+2][h-2]==0)&(tmp[k][h-2]==0)& (tmp[k+1][h]==0)&(tmp[k+1][h-1]==0)&(tmp[k][h-1]==0):
84 | coords.append([h,k,0,0,t,rm_type[t]])
85 | elif(tmp[k+2][h]==0)&(tmp[k+2][h+2]==0)&(tmp[k][h+2]==0)& (tmp[k+1][h]==0)&(tmp[k+1][h+1]==0)&(tmp[k][h+1]==0):
86 | coords.append([h,k,0,0,t,rm_type[t]])
87 | elif(tmp[k-2][h]==0)&(tmp[k-2][h+2]==0)&(tmp[k][h+2]==0)& (tmp[k-1][h]==0)&(tmp[k-1][h+1]==0)&(tmp[k][h+1]==0):
88 | coords.append([h,k,0,0,t,rm_type[t]])
89 | elif(tmp[k+1][h]==1)&(tmp[k+2][h+2]==0)&(tmp[k][h+1]==1)& (tmp[k+1][h+1]==0):
90 | coords.append([h,k,0,0,t,rm_type[t]])
91 | elif(tmp[k-1][h]==1)&(tmp[k-2][h+2]==0)&(tmp[k][h+1]==1)& (tmp[k-1][h+1]==0):
92 | coords.append([h,k,0,0,t,rm_type[t]])
93 | elif(tmp[k+1][h]==1)&(tmp[k+2][h-2]==0)&(tmp[k][h-1]==1)&(tmp[k+1][h-1]==0) :
94 | coords.append([h,k,0,0,t,rm_type[t]])
95 | elif(tmp[k-1][h]==1) & (tmp[k-2][h-2]==0)&(tmp[k][h-1]==1) & (tmp[k-1][h-1]==0):
96 | coords.append([h,k,0,0,t,rm_type[t]])
97 | p=0
98 | for c in range(len(coords)):
99 | for c2 in range(len(coords)):
100 |
101 | if(c2==c):
102 | continue
103 | if(coords[c][0]==coords[c2][0])&(coords[c][2]!=1) &(coords[c2][2]!=1):
104 | walls.append([coords[c][0],coords[c][1],coords[c2][0],coords[c2][1],-1,coords[c][5],coords[c][4],-1,0])
105 | p=p+1
106 | coords[c][2]=1
107 | coords[c2][2]=1
108 |
109 | if(coords[c][1]==coords[c2][1])&(coords[c][3]!=1) &(coords[c2][3]!=1) :
110 | walls.append([coords[c][0],coords[c][1],coords[c2][0],coords[c2][1],-1,coords[c][5],coords[c][4],-1,0])
111 | coords[c][3]=1
112 | p=p+1
113 | coords[c2][3]=1
114 | poly.append(p)
115 |
116 | tmp=img[:,:,1]
117 | door_img=np.zeros((256, 256))
118 |
119 | for k in range(256):
120 | for h in range(256):
121 | if(tmp[k][h]==17)| (tmp[k][h]==15):
122 | door_img[k][h]=1
123 |
124 |
125 | rms_type=rm_type
126 | # tmp and door_img refer to the same array from here on
127 | tmp=door_img
128 |
129 | coords=[]
130 | for k in range(2,254):
131 | for h in range(2,254):
132 | if(tmp[k][h]==1):
133 | if((tmp[k-1][h]==0) & (tmp[k-1][h-1]==0)&(tmp[k][h-1]==0)):
134 | coords.append([h,k,0,0])
135 | elif (tmp[k+1][h]==0)&(tmp[k+1][h-1]==0)&(tmp[k][h-1]==0):
136 | coords.append([h,k,0,0])
137 | elif (tmp[k+1][h]==0)&(tmp[k+1][h+1]==0)&(tmp[k][h+1]==0):
138 | coords.append([h,k,0,0])
139 | elif (tmp[k-1][h]==0)&(tmp[k-1][h+1]==0)&(tmp[k][h+1]==0):
140 | coords.append([h,k,0,0])
141 | elif(tmp[k+1][h]==1)&(tmp[k][h+1]==1)& (tmp[k+1][h+1]==0):
142 | coords.append([h,k,0,0])
143 | elif(tmp[k-1][h]==1)&(tmp[k][h+1]==1)& (tmp[k-1][h+1]==0):
144 | coords.append([h,k,0,0])
145 | elif(tmp[k+1][h]==1)&(tmp[k][h-1]==1)&(tmp[k+1][h-1]==0) :
146 | coords.append([h,k,0,0])
147 | elif(tmp[k-1][h]==1) & (tmp[k][h-1]==1) & (tmp[k-1][h-1]==0):
148 | coords.append([h,k,0,0])
149 | for k in range(len(coords)):
150 | for p in range(-2,3):
151 | for t in range(-2,3):
152 | tmp[coords[k][1]+p][coords[k][0]+t]=0
153 | tmp=door_img
154 |
155 |
156 | for k in range(1,253):
157 | for h in range(1,253):
158 | if(tmp[k-1][h]==0) & (tmp[k+1][h]==1) & (tmp[k+2][h]==0):
159 | tmp[k+1][h] =0
160 | tmp[k][h]=0
161 |
162 | for k in range(1,253):
163 | for h in range(1,253):
164 | if(tmp[h][k-1]==0) & (tmp[h][k+1]==1) & (tmp[h][k+2]==0):
165 | tmp[h][k+1] =0
166 | tmp[h][k]=0
167 |
168 | for k in range(254):
169 | for h in range(254):
170 | if(tmp[k][h]==0) & (tmp[k+1][h]==1) & (tmp[k+2][h]==0):
171 | tmp[k][h] =1
172 | tmp[k+2][h]=1  # assignment, mirroring the horizontal pass below
173 |
174 |
175 | for k in range(254):
176 | for h in range(254):
177 | if(tmp[h][k]==0) & (tmp[h][k+1]==1) & (tmp[h][k+2]==0):
178 | tmp[h][k] =1
179 | tmp[h][k+2]=1
180 |
181 | coords=[]
182 | door_img=tmp
183 | for k in range(2,254):
184 | for h in range(2,254):
185 | if(tmp[k][h]==1):
186 | if((tmp[k-1][h]==0) & (tmp[k-1][h-1]==0)&(tmp[k][h-1]==0)):
187 | coords.append([h,k,0,0,-1])
188 | elif (tmp[k+1][h]==0)&(tmp[k+1][h-1]==0)&(tmp[k][h-1]==0):
189 | coords.append([h,k,0,0,-1])
190 | elif (tmp[k+1][h]==0)&(tmp[k+1][h+1]==0)&(tmp[k][h+1]==0):
191 | coords.append([h,k,0,0,-1])
192 | elif (tmp[k-1][h]==0)&(tmp[k-1][h+1]==0)&(tmp[k][h+1]==0):
193 | coords.append([h,k,0,0,-1])
194 | elif(tmp[k+1][h]==1)&(tmp[k][h+1]==1)& (tmp[k+1][h+1]==0):
195 | coords.append([h,k,0,0,-1])
196 | elif(tmp[k-1][h]==1)&(tmp[k][h+1]==1)& (tmp[k-1][h+1]==0):
197 | coords.append([h,k,0,0,-1])
198 | elif(tmp[k+1][h]==1)&(tmp[k][h-1]==1)&(tmp[k+1][h-1]==0) :
199 | coords.append([h,k,0,0,-1])
200 | elif(tmp[k-1][h]==1) & (tmp[k][h-1]==1) & (tmp[k-1][h-1]==0):
201 | coords.append([h,k,0,0,-1])
202 | doors=[]
203 | h=0
204 | no_doors=int(len(coords)/4)
205 |
206 | for c in range(len(coords)):
207 |
208 | for c2 in range(len(coords)):
209 | if(c2==c):
210 | continue
211 | if(coords[c][0]==coords[c2][0])&(coords[c][2]!=1) &(coords[c2][2]!=1):
212 | coords[c][2]=1
213 | coords[c2][2]=1
214 | if(coords[c][4]==-1) &(coords[c2][4]==-1):
215 | coords[c2][4]=h
216 | coords[c][4]=h
217 | h=h+1
218 | elif(coords[c][4]!=-1):
219 | coords[c2][4]=coords[c][4]
220 | coords[c][4]=coords[c][4]
221 |
222 | elif (coords[c2][4]!=-1):
223 | coords[c2][4]=coords[c2][4]
224 | coords[c][4]=coords[c2][4]
225 | walls.append([coords[c][0],coords[c][1],coords[c2][0],coords[c2][1],-1,17,len(rms_type)+coords[c][4],-1,0])
226 | doors.append([coords[c][0],coords[c][1],coords[c2][0],coords[c2][1]])
227 | if(coords[c][1]==coords[c2][1])&(coords[c][3]!=1) &(coords[c2][3]!=1):
228 | coords[c][3]=1
229 | coords[c2][3]=1
230 | if(coords[c][4]==-1) &(coords[c2][4]==-1):
231 | coords[c2][4]=h
232 | coords[c][4]=h
233 | h=h+1
234 | elif(coords[c][4]!=-1):
235 | coords[c2][4]=coords[c][4]
236 | coords[c][4]=coords[c][4]
237 |
238 | elif (coords[c2][4]!=-1):
239 | coords[c2][4]=coords[c2][4]
240 | coords[c][4]=coords[c2][4]
241 | walls.append([coords[c][0],coords[c][1],coords[c2][0],coords[c2][1],-1,17,len(rms_type)+coords[c][4],-1,0])
242 | doors.append([coords[c][0],coords[c][1],coords[c2][0],coords[c2][1]])
243 |
244 | for i in range(no_doors):
245 | rms_type.append(17)
246 | poly.append(4)
247 | out=1
248 | for i in range(len(poly)):
249 | if(poly[i]<4):
250 | out=-1
251 | if (len(doors)%4!=0):
252 | out=-3
253 | return rms_type,poly,doors,walls,out
254 |
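255 | # Usage sketch (the path is hypothetical). Each pixel of the input PNG encodes
256 | # the room type in channel 1 and a per-room index in channel 2:
257 | #   rms_type, poly, doors, walls, out = read_data('floorplan.png')
258 | #   # out == 1 on success, -1 if a room polygon has < 4 corners, -3 on a bad door count
259 | #   print(len(rms_type), len(walls), out)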
--------------------------------------------------------------------------------
/misc/teaser/0_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/0_for_figure_output_new.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/1.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/1_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/1_for_figure_output_new.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/2_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/3.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/3_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/3_for_figure_output_new.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/4_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/5.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/5_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/5_for_figure_output_new (3rd copy).svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/5_for_figure_output_new (another copy).svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/5_for_figure_output_new (copy).svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/5_for_figure_output_new.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/6_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/6_for_figure_output_new.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/7_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/7_for_figure_output_new.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/8_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/8_for_figure_output_new.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/9_for_figure_output.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/misc/teaser/9_for_figure_output_new.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/misc/teaser/edit_svg.py:
--------------------------------------------------------------------------------
1 | import random
2 | import os
3 | from shutil import copy
4 | import glob
5 | from xml.dom import minidom
6 |
7 |
8 | def edit_svg(svg_path, dst_path):
9 | xmldoc = minidom.parse(svg_path)
10 | polys = xmldoc.getElementsByTagName("polygon")
11 | for p in polys:
12 | if p.getAttribute('fill') not in ["#D3A2C7", "#727171"]:
13 | p.setAttribute("stroke-width", "4")
14 | else:
15 | p.setAttribute("stroke-width", "0")
16 | svg_str = xmldoc.toxml()
17 | with open(dst_path, 'w') as f:
18 | f.write(svg_str)
19 | return
20 |
21 |
22 | files_svg = glob.glob('./*.svg')  # match only .svg files; './*' would also rewrite non-SVG files in place
23 | for _file in files_svg:
24 |     edit_svg(_file, _file.replace('.svg', '_new.svg'))
--------------------------------------------------------------------------------
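A single-file invocation of the helper above might look like this; the two fill colors it skips are the interior-door and front-door colors from ID_COLOR in scripts/viz.py:

edit_svg('1.svg', '1_new.svg')  # thickens room outlines, leaves door polygons untouched

--------------------------------------------------------------------------------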
/misc/train_autoencoder.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 | import torch
8 | from autoencoder_dataset import AutoencoderDataset, Autoencoder
9 | import torchvision.transforms as transforms
10 | from torchvision.utils import save_image
11 | import torch.optim as optim
12 | import torch.nn as nn
13 | from torch.autograd import Variable
14 |
15 | # Initialize model and dataset iterator
16 | model = Autoencoder().cuda()
17 | ae_dataset = AutoencoderDataset(transform=transforms.Normalize(mean=[0.5], std=[0.5]))
18 | batch_size = 1024
19 | ae_loader = torch.utils.data.DataLoader(ae_dataset,
20 | batch_size=batch_size,
21 | shuffle=True)
22 |
23 | optimizer = optim.Adam(model.parameters(), lr=0.0001)
24 |
25 |
26 | for k in range(1000):
27 | running_loss = 0.0
28 | for i, batch in enumerate(ae_loader):
29 | y, x = batch
30 | y, x = Variable(y), Variable(x)
31 | y_hat = model(x)
32 | # print(y_hat[:3, :3, :3, :3])
33 | # print(y[:3, :3, :3, :3])
34 |         optimizer.zero_grad()  # clear gradients accumulated by the previous step
35 |         loss = nn.L1Loss(reduction='none')
36 |         out = loss(y_hat.view(y_hat.size(0), -1), y.view(y.size(0), -1)).sum()
37 |         out.backward()
38 |         optimizer.step()
39 |         running_loss += out.item()
40 |
41 |     print('[%d] loss: %.3f' %
42 |           (k + 1, running_loss / len(ae_dataset)))
43 |
44 |
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sakmalh/houseganpp/8e6b0b9eaa8f32fdef2fd31a4d3ea4e621728237/models/__init__.py
--------------------------------------------------------------------------------
/refs/sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sakmalh/houseganpp/8e6b0b9eaa8f32fdef2fd31a4d3ea4e621728237/refs/sample.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib>=3.4.1
2 | networkx>=2.5.1
3 | numpy>=1.20.2
4 | opencv_python>=4.7.0.68
5 | Pillow>=8.2.0
6 | pytz>=2021.1
7 | svgwrite>=1.4.1
8 | torch>=1.11.0
9 | torchaudio>=0.11.0
10 | torchvision>=0.12.0
11 | webcolors>=1.11.1
--------------------------------------------------------------------------------
/scripts/viz.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 | from PIL import Image, ImageDraw, ImageFont
8 | import svgwrite
9 | from collections import defaultdict
10 | import matplotlib.pyplot as plt
11 | import networkx as nx
12 | import glob
13 | import cv2
14 | import webcolors
15 | import time
16 |
17 |
18 | ROOM_CLASS = {"living_room": 1, "kitchen": 2, "bedroom": 3, "bathroom": 4, "balcony": 5, "entrance": 6, "dining room": 7, "study room": 8,
19 | "storage": 10 , "front door": 15, "unknown": 16, "interior_door": 17}
20 |
21 | CLASS_ROM = {}
22 | for x, y in ROOM_CLASS.items():
23 | CLASS_ROM[y] = x
24 | ID_COLOR = {1: '#EE4D4D', 2: '#C67C7B', 3: '#FFD274', 4: '#BEBEBE', 5: '#BFE3E8', 6: '#7BA779', 7: '#E87A90', 8: '#FF8C69', 10: '#1F849B', 15: '#727171', 16: '#785A67', 17: '#D3A2C7'}
25 |
26 |
27 | def pad_im(cr_im, final_size=256, bkg_color='white'):
28 | new_size = int(np.max([np.max(list(cr_im.size)), final_size]))
29 | padded_im = Image.new('RGB', (new_size, new_size), 'white')
30 | padded_im.paste(cr_im, ((new_size-cr_im.size[0])//2, (new_size-cr_im.size[1])//2))
31 |     padded_im = padded_im.resize((final_size, final_size), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
32 | return padded_im
33 |
34 | def draw_graph(g_true):
35 | # build true graph
36 | G_true = nx.Graph()
37 | colors_H = []
38 | node_size = []
39 | edge_color = []
40 | linewidths = []
41 | edgecolors = []
42 |
43 | # add nodes
44 | for k, label in enumerate(g_true[0]):
45 | _type = label+1
46 | if _type >= 0 and _type not in [15, 17]:
47 | G_true.add_nodes_from([(k, {'label':k})])
48 | colors_H.append(ID_COLOR[_type])
49 | node_size.append(1000)
50 | edgecolors.append('blue')
51 | linewidths.append(0.0)
52 |
53 | # add outside node
54 | G_true.add_nodes_from([(-1, {'label':-1})])
55 | colors_H.append("white")
56 | node_size.append(750)
57 | edgecolors.append('black')
58 | linewidths.append(3.0)
59 |
60 | # add edges
61 | for k, m, l in g_true[1]:
62 | _type_k = g_true[0][k]+1
63 | _type_l = g_true[0][l]+1
64 | if m > 0 and (_type_k not in [15, 17] and _type_l not in [15, 17]):
65 | G_true.add_edges_from([(k, l)])
66 | edge_color.append('#D3A2C7')
67 | elif m > 0 and (_type_k==15 or _type_l==15) and (_type_l!=17 and _type_k!=17):
68 | if _type_k==15:
69 | G_true.add_edges_from([(l, -1)])
70 | elif _type_l==15:
71 | G_true.add_edges_from([(k, -1)])
72 | edge_color.append('#727171')
73 |
74 | # # # visualization - debug
75 | # print(len(node_size))
76 | # print(len(colors_H))
77 | # print(len(linewidths))
78 | # print(G_true.nodes())
79 | # print(g_true[0])
80 | # print(len(edgecolors))
81 |
82 |
83 | plt.figure()
84 | pos = nx.nx_agraph.graphviz_layout(G_true, prog='neato')
85 | nx.draw(G_true, pos, node_size=node_size, linewidths=linewidths, node_color=colors_H, font_size=14, font_color='white',\
86 | font_weight='bold', edgecolors=edgecolors, edge_color=edge_color, width=4.0, with_labels=False)
87 | plt.tight_layout()
88 | plt.savefig('./dump/_true_graph.jpg', format="jpg")
89 | plt.close('all')
90 | rgb_im = Image.open('./dump/_true_graph.jpg')
91 | rgb_arr = pad_im(rgb_im).convert('RGBA')
92 | return G_true, rgb_im
93 |
94 | def draw_masks(masks, real_nodes, im_size=256):
95 |
96 |     bg_img = Image.new("RGBA", (im_size, im_size), (255, 255, 255, 255)) # Opaque white background.
97 | for m, nd in zip(masks, real_nodes):
98 |
99 | # resize map
100 | m[m>0] = 255
101 | m[m<0] = 0
102 | m_lg = cv2.resize(m, (im_size, im_size), interpolation = cv2.INTER_AREA)
103 |
104 | # pick color
105 | color = ID_COLOR[nd+1]
106 | r, g, b = webcolors.hex_to_rgb(color)
107 |
108 | # set drawer
109 | dr_bkg = ImageDraw.Draw(bg_img)
110 |
111 | # draw region
112 | m_pil = Image.fromarray(m_lg)
113 |         dr_bkg.bitmap((0, 0), m_pil.convert('L'), fill=(r, g, b, 255))
114 |
115 | # draw contour
116 | m_cv = m_lg[:, :, np.newaxis].astype('uint8')
117 | ret,thresh = cv2.threshold(m_cv, 127, 255, 0)
118 | contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
119 |         contours = [c for c in contours if len(c) > 0]
120 | cnt = np.zeros((256, 256, 3)).astype('uint8')
121 | cv2.drawContours(cnt, contours, -1, (255, 255, 255, 255), 1)
122 | cnt = Image.fromarray(cnt)
123 | dr_bkg.bitmap((0, 0), cnt.convert('L'), fill=(0, 0, 0, 255))
124 |
125 | return bg_img.resize((im_size, im_size))
--------------------------------------------------------------------------------
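A minimal driver for the two helpers above, assuming scripts/ is importable and pygraphviz is installed (graphviz_layout needs it); the encoding follows draw_graph's loops: node labels are room type minus one, and an edge triplet (k, m, l) connects rooms k and l when m > 0.

import os
import numpy as np
from viz import draw_graph, draw_masks  # scripts/viz.py, assumed on sys.path

os.makedirs('./dump', exist_ok=True)  # draw_graph writes an intermediate jpg here

# Three rooms: living_room (1), kitchen (2), bedroom (3); labels are type-1.
nodes = np.array([0, 1, 2])
edges = np.array([[0, 1, 1], [0, 1, 2], [1, -1, 2]])  # (k, m, l); m > 0 = adjacent
_, graph_im = draw_graph([nodes, edges])
graph_im.save('graph.png')

# One low-res mask per room: positive inside the room, negative outside.
masks = -np.ones((3, 32, 32), dtype='float32')
masks[0, 2:20, 2:30] = 1.0
masks[1, 20:30, 2:16] = 1.0
masks[2, 20:30, 16:30] = 1.0
draw_masks(masks.copy(), nodes).save('floorplan.png')

--------------------------------------------------------------------------------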
/test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from dataset.floorplan_dataset_maps_functional_high_res import FloorplanGraphDataset, floorplan_collate_fn
12 |
13 | from torch.utils.data import DataLoader
14 | from torchvision import datasets
15 | from torch.autograd import Variable
16 |
17 | import torch.nn as nn
18 | import torch.nn.functional as F
19 | import torch.autograd as autograd
20 | import torch
21 | from PIL import Image, ImageDraw, ImageFont
22 | import svgwrite
23 | from models.models import Generator
24 | # from models.models_improved import Generator
25 |
26 | from misc.utils import _init_input, ID_COLOR, draw_masks, draw_graph, estimate_graph
27 | from collections import defaultdict
28 | import matplotlib.pyplot as plt
29 | import networkx as nx
30 | import glob
31 | import cv2
32 | import webcolors
33 | import time
34 |
35 | parser = argparse.ArgumentParser()
36 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
37 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
38 | parser.add_argument("--checkpoint", type=str, default='./checkpoints/exp_8_10.pth', help="checkpoint path")
39 | parser.add_argument("--data_path", type=str, default='./data/sample_list.txt', help="path to dataset list file")
40 | parser.add_argument("--out", type=str, default='./dump', help="output folder")
41 | opt = parser.parse_args()
42 | print(opt)
43 |
44 | # Create output dir
45 | os.makedirs(opt.out, exist_ok=True)
46 |
47 | # Initialize generator and discriminator
48 | model = Generator()
49 | model.load_state_dict(torch.load(opt.checkpoint, map_location='cpu'), strict=True)
50 | model = model.eval()
51 |
52 | # Move the model to GPU when one is available
53 | device = 'cuda' if torch.cuda.is_available() else 'cpu'
54 | model = model.to(device)
55 |
56 | # initialize dataset iterator
57 | fp_dataset_test = FloorplanGraphDataset(opt.data_path, transforms.Normalize(mean=[0.5], std=[0.5]), split='test')
58 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
59 | batch_size=opt.batch_size,
60 | shuffle=False, collate_fn=floorplan_collate_fn)
61 | # tensor type
62 | Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
63 |
64 |
65 | # run inference
66 | def _infer(graph, model, prev_state=None):
67 | # configure input to the network
68 | z, given_masks_in, given_nds, given_eds = _init_input(graph, prev_state)
69 | # run inference model
70 | with torch.no_grad():
71 |         masks = model(z.to(device), given_masks_in.to(device), given_nds.to(device), given_eds.to(device))
72 | masks = masks.detach().cpu().numpy()
73 | return masks
74 |
75 |
76 | def main():
77 | globalIndex = 0
78 | for i, sample in enumerate(fp_loader):
79 |
80 | # draw real graph and groundtruth
81 | mks, nds, eds, _, _ = sample
82 | real_nodes = np.where(nds.detach().cpu() == 1)[-1]
83 | graph = [nds, eds]
84 | true_graph_obj, graph_im = draw_graph([real_nodes, eds.detach().cpu().numpy()])
85 | graph_im.save('./{}/graph_{}.png'.format(opt.out, i)) # save graph
86 |
87 | # add room types incrementally
88 | _types = sorted(list(set(real_nodes)))
89 | selected_types = [_types[:k + 1] for k in range(10)]
90 | os.makedirs('./{}/'.format(opt.out), exist_ok=True)
91 | _round = 0
92 |
93 | # initialize layout
94 | state = {'masks': None, 'fixed_nodes': []}
95 | masks = _infer(graph, model, state)
96 | im0 = draw_masks(masks.copy(), real_nodes)
97 | im0 = torch.tensor(np.array(im0).transpose((2, 0, 1))) / 255.0
98 | # save_image(im0, './{}/fp_init_{}.png'.format(opt.out, i), nrow=1, normalize=False) # visualize init image
99 |
100 | # generate per room type
101 | for _iter, _types in enumerate(selected_types):
102 | _fixed_nds = np.concatenate([np.where(real_nodes == _t)[0] for _t in _types]) \
103 | if len(_types) > 0 else np.array([])
104 | state = {'masks': masks, 'fixed_nodes': _fixed_nds}
105 | masks = _infer(graph, model, state)
106 |
107 | # save final floorplans
108 | imk = draw_masks(masks.copy(), real_nodes)
109 | imk = torch.tensor(np.array(imk).transpose((2, 0, 1))) / 255.0
110 | save_image(imk, './{}/fp_final_{}.png'.format(opt.out, i), nrow=1, normalize=False)
111 |
112 |
113 | if __name__ == '__main__':
114 | main()
115 |
--------------------------------------------------------------------------------
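A typical invocation of the script above uses the argparse flags it defines; the checkpoint path below is an assumption, substitute your own:

import subprocess

subprocess.run(
    ['python', 'test.py',
     '--checkpoint', './checkpoints/pretrained.pth',  # hypothetical checkpoint path
     '--data_path', './data/sample_list.txt',
     '--out', './dump'],
    check=True)

--------------------------------------------------------------------------------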
/testing/reconstruct_heuristic.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from utils import extract_edges, visualize_sample, preprocess, check_polygon_connectivity, check_polygon_intersection, iou_polygon_intersection, split_edge, slide_wall, remove_colinear_edges, valid_layout
3 | from utils import vectorize_heuristic, visualize_vector, draw_graph_with_types
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 | from torchvision.utils import save_image
7 | import torch
8 |
9 | im_list = []
10 |
11 | # initial polygon
12 | for k in range(150, 160):
13 | for l in range(4):
14 | types, polys, conns = np.load('/home/nelson/Workspace/autodesk/housegan2/raster/{}_{}.npy'.format(k, l), allow_pickle=True)
15 | polys = extract_edges(polys)
16 |
17 | # add graph
18 | if l == 0:
19 | graph_arr = draw_graph_with_types(types, conns)
20 | im_list.append(torch.tensor(graph_arr.transpose((2, 0, 1)))/255.0)
21 |
22 | # add images
23 | raw_arr = visualize_sample(types, polys)
24 | vec_arr = vectorize_heuristic(types, polys)
25 |
26 | im_list.append(torch.tensor(raw_arr.transpose((2, 0, 1)))/255.0)
27 | im_list.append(torch.tensor(vec_arr.transpose((2, 0, 1)))/255.0)
28 |
29 | im_tensor = torch.stack(im_list)
30 | save_image(im_tensor, 'out_img.png', nrow=9, padding=2)
31 |
32 |
--------------------------------------------------------------------------------
/testing/reconstruction_dataset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # Copyright 2018 Google LLC
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import json, os, random, math
18 | from collections import defaultdict
19 | import torch.nn as nn
20 | import torch.nn.functional as F
21 | import torch
22 | from torch.utils.data import Dataset
23 | import torchvision.transforms as T
24 | import math
25 | import numpy as np
26 | import PIL
27 | from skimage.transform import resize as imresize
28 | import pycocotools.mask as mask_utils
29 | import glob
30 | from PIL import Image, ImageDraw, ImageOps, ImageFilter
31 | import matplotlib.pyplot as plt
32 | from torch.nn.utils import spectral_norm  # used by conv_block when spec_norm=True
33 | from utils import mask_to_bb, ROOM_CLASS, ID_COLOR
34 |
35 | def conv_block(in_channels, out_channels, k, s, p, act=None, upsample=False, spec_norm=False):
36 | block = []
37 |
38 | if upsample:
39 | if spec_norm:
40 | block.append(spectral_norm(torch.nn.ConvTranspose2d(in_channels, out_channels, \
41 | kernel_size=k, stride=s, \
42 | padding=p, bias=True)))
43 | else:
44 | block.append(torch.nn.ConvTranspose2d(in_channels, out_channels, \
45 | kernel_size=k, stride=s, \
46 | padding=p, bias=True))
47 | else:
48 | if spec_norm:
49 | block.append(spectral_norm(torch.nn.Conv2d(in_channels, out_channels, \
50 | kernel_size=k, stride=s, \
51 | padding=p, bias=True)))
52 | else:
53 | block.append(torch.nn.Conv2d(in_channels, out_channels, \
54 | kernel_size=k, stride=s, \
55 | padding=p, bias=True))
56 |     if act is None or act == "None":
57 |         pass  # no activation
58 |     elif "leaky" in act:
59 |         block.append(torch.nn.LeakyReLU(0.1, inplace=True))
60 |     elif "relu" in act:
61 |         block.append(torch.nn.ReLU(True))
62 |     elif "tanh" in act:
63 |         block.append(torch.nn.Tanh())
64 |
65 | return block
66 |
67 | class Autoencoder(nn.Module):
68 | def __init__(self):
69 | super(Autoencoder, self).__init__()
70 | self.enc = nn.Sequential(
71 | *conv_block(1, 256, 3, 2, 1, act="relu"),
72 | *conv_block(256, 256, 3, 2, 1, act="relu"),
73 | *conv_block(256, 128, 3, 1, 1, act="relu"),
74 | *conv_block(128, 128, 3, 1, 1, act="relu"),
75 | *conv_block(128, 16, 3, 1, 1, act="None"))
76 |
77 |
78 | def forward(self, x):
79 | x = x.cuda()
80 | x = self.enc(x.unsqueeze(1))
81 | return x
82 |
83 |
84 | class AutoencoderDataset(Dataset):
85 | def __init__(self, transform=None):
86 |         super(AutoencoderDataset, self).__init__()
87 | self.data = np.load('./reconstruction_data.npy', allow_pickle=True)
88 | self.data = dict(self.data[()])
89 | self.feats = self.data['feats_list']
90 | self.masks = self.data['gen_masks_list']
91 | self.nodes = self.data['nodes_list']
92 | self.edges = self.data['edges_list']
93 |
94 | self.transform = transform
95 | def __len__(self):
96 | return len(self.feats)
97 |
98 | def __getitem__(self, index):
99 | feat = self.feats[index]
100 | mask = self.masks[index]
101 | nodes = self.nodes[index]
102 | edges = self.edges[index]
103 |
104 | # mask[mask>0] = 1.0
105 | # mask[mask<=0] = -1.0
106 |
107 | # mask = self.transform(mask.unsqueeze(0)).squeeze(0)
108 | # print(mask.shape, feat.shape)
109 |
110 | return feat, mask, nodes, edges
111 |
112 |
--------------------------------------------------------------------------------
/testing/run_exps.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 |
4 | sets = ['A', 'B', 'C', 'D', 'E']
5 | exp_name = 'exp_with_number_and_types_new'
6 | numb_iters = 200000
7 | for s in sets:
8 |
9 | # # train
10 | # os.system('python main.py --exp_folder={} --target_set={} --sample_interval={}'.format(exp_name, s, numb_iters))
11 |
12 | # test
13 | output = subprocess.run('python evaluate_parallel.py --checkpoint=./checkpoints/{}_{}_{}.pth --target_set={}'.format(exp_name, s, numb_iters, s), shell=True, stdout=subprocess.PIPE)
14 |
15 | # save results
16 |     with open('./logs/{}_{}_{}.txt'.format(exp_name, numb_iters, s), "w") as text_file:
17 |         print(output.stdout)
18 |         text_file.write(str(output.stdout))
19 |
20 |
--------------------------------------------------------------------------------
/testing/test.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import print_function
3 | import math
4 | import random
5 | from simanneal import Annealer
6 |
7 |
8 | def distance(a, b):
9 | """Calculates distance between two latitude-longitude coordinates."""
10 | R = 3963 # radius of Earth (miles)
11 | lat1, lon1 = math.radians(a[0]), math.radians(a[1])
12 | lat2, lon2 = math.radians(b[0]), math.radians(b[1])
13 | return math.acos(math.sin(lat1) * math.sin(lat2) +
14 | math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R
15 |
16 |
17 | class TravellingSalesmanProblem(Annealer):
18 |
19 | """Test annealer with a travelling salesman problem.
20 | """
21 |
22 | # pass extra data (the distance matrix) into the constructor
23 | def __init__(self, state, distance_matrix):
24 | self.distance_matrix = distance_matrix
25 | super(TravellingSalesmanProblem, self).__init__(state) # important!
26 |
27 | def move(self):
28 | """Swaps two cities in the route."""
29 | # no efficiency gain, just proof of concept
30 | # demonstrates returning the delta energy (optional)
31 | initial_energy = self.energy()
32 |
33 | a = random.randint(0, len(self.state) - 1)
34 | b = random.randint(0, len(self.state) - 1)
35 | self.state[a], self.state[b] = self.state[b], self.state[a]
36 |
37 | return self.energy() - initial_energy
38 |
39 | def energy(self):
40 | """Calculates the length of the route."""
41 | e = 0
42 | for i in range(len(self.state)):
43 | e += self.distance_matrix[self.state[i-1]][self.state[i]]
44 | return e
45 |
46 |
47 | if __name__ == '__main__':
48 |
49 | # latitude and longitude for the twenty largest U.S. cities
50 | cities = {
51 | 'New York City': (40.72, 74.00),
52 | 'Los Angeles': (34.05, 118.25),
53 | 'Chicago': (41.88, 87.63),
54 | 'Houston': (29.77, 95.38),
55 | 'Phoenix': (33.45, 112.07),
56 | 'Philadelphia': (39.95, 75.17),
57 | 'San Antonio': (29.53, 98.47),
58 | 'Dallas': (32.78, 96.80),
59 | 'San Diego': (32.78, 117.15),
60 | 'San Jose': (37.30, 121.87),
61 | 'Detroit': (42.33, 83.05),
62 | 'San Francisco': (37.78, 122.42),
63 | 'Jacksonville': (30.32, 81.70),
64 | 'Indianapolis': (39.78, 86.15),
65 | 'Austin': (30.27, 97.77),
66 | 'Columbus': (39.98, 82.98),
67 | 'Fort Worth': (32.75, 97.33),
68 | 'Charlotte': (35.23, 80.85),
69 | 'Memphis': (35.12, 89.97),
70 | 'Baltimore': (39.28, 76.62)
71 | }
72 |
73 | # initial state, a randomly-ordered itinerary
74 | init_state = list(cities.keys())
75 | random.shuffle(init_state)
76 |
77 | # create a distance matrix
78 | distance_matrix = {}
79 | for ka, va in cities.items():
80 | distance_matrix[ka] = {}
81 | for kb, vb in cities.items():
82 | if kb == ka:
83 | distance_matrix[ka][kb] = 0.0
84 | else:
85 | distance_matrix[ka][kb] = distance(va, vb)
86 |
87 | tsp = TravellingSalesmanProblem(init_state, distance_matrix)
88 | tsp.set_schedule(tsp.auto(minutes=0.2))
89 | # since our state is just a list, slice is the fastest way to copy
90 | tsp.copy_strategy = "slice"
91 | state, e = tsp.anneal()
92 |
93 | while state[0] != 'New York City':
94 | state = state[1:] + state[:1] # rotate NYC to start
95 |
96 | print()
97 | print("%i mile route:" % e)
98 | print(" ➞ ".join(state))
--------------------------------------------------------------------------------
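A quick sanity check of distance() above (run with the definitions in scope): the NYC-to-LA great-circle leg should come out near 2,450 miles.

nyc, la = (40.72, 74.00), (34.05, 118.25)
print('%.0f miles' % distance(nyc, la))  # roughly 2450

--------------------------------------------------------------------------------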
/testing/variation_bbs.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from PIL import Image, ImageDraw
21 | from reconstruct import reconstructFloorplan
22 | import svgwrite
23 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb, remove_junctions, ID_COLOR, bb_to_im_fid
24 | from models import Generator
25 | from collections import defaultdict
26 | import matplotlib.pyplot as plt
27 | import networkx as nx
28 |
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
31 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
32 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
33 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
34 | parser.add_argument("--num_variations", type=int, default=100, help="number of variations")
35 | parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
36 |
37 | opt = parser.parse_args()
38 | print(opt)
39 |
40 | numb_iters = 200000
41 | exp_name = 'exp_with_graph_global_new'
42 | target_set = 'E'
43 | phase='eval'
44 | checkpoint = './checkpoints/{}_{}_{}.pth'.format(exp_name, target_set, numb_iters)
45 |
46 | def pad_im(cr_im, final_size=299, bkg_color='white'):
47 | new_size = int(np.max([np.max(list(cr_im.size)), final_size]))
48 | padded_im = Image.new('RGB', (new_size, new_size), 'white')
49 | padded_im.paste(cr_im, ((new_size-cr_im.size[0])//2, (new_size-cr_im.size[1])//2))
50 |     padded_im = padded_im.resize((final_size, final_size), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
51 | return padded_im
52 |
53 | def draw_graph(g_true):
54 | # build true graph
55 | G_true = nx.Graph()
56 | colors_H = []
57 | for k, label in enumerate(g_true[0]):
58 | _type = label+1
59 | if _type >= 0:
60 | G_true.add_nodes_from([(k, {'label':_type})])
61 | colors_H.append(ID_COLOR[_type])
62 | for k, m, l in g_true[1]:
63 | if m > 0:
64 | G_true.add_edges_from([(k, l)], color='b',weight=4)
65 | plt.figure()
66 | # pos = nx.spring_layout(G_true, scale=2)
67 | pos = nx.nx_agraph.graphviz_layout(G_true, prog='dot')
68 | nx.draw(G_true, pos, node_size=1000, node_color=colors_H, font_size=0, font_weight='bold')
69 | plt.tight_layout()
70 | plt.savefig('./dump/_true_graph.jpg', format="jpg")
71 | rgb_im = Image.open('./dump/_true_graph.jpg')
72 | rgb_arr = np.array(pad_im(rgb_im))/255.0
73 | return rgb_arr
74 |
75 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
76 |
77 | # draw edges
78 | for k, l in lines_on:
79 | x1, y1 = np.array(junctions[k])
80 | x2, y2 = np.array(junctions[l])
81 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
82 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=1.0))
83 |
84 | # draw corners
85 | for j in juncs_on:
86 | x, y = np.array(junctions[j])
87 | dwg.add(dwg.circle(center=(float(x), float(y)), r=3, stroke='red', fill='white', stroke_width=2, opacity=1.0))
88 | return
89 |
90 | # Create folder
91 | os.makedirs(opt.exp_folder, exist_ok=True)
92 |
93 | # Initialize generator and discriminator
94 | generator = Generator()
95 | generator.load_state_dict(torch.load(checkpoint))
96 |
97 | # Initialize variables
98 | cuda = True if torch.cuda.is_available() else False
99 | if cuda:
100 | generator.cuda()
101 | rooms_path = '/local-scratch4/nnauata/autodesk/FloorplanDataset/'
102 |
103 | # Initialize dataset iterator
104 | fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split=phase)
105 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
106 | batch_size=opt.batch_size,
107 | shuffle=True, collate_fn=floorplan_collate_fn)
108 | # Optimizers
109 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
110 |
111 | # ------------
112 | # Vectorize
113 | # ------------
114 | globalIndex = 0
115 | final_images = []
116 | for i, batch in enumerate(fp_loader):
117 | print(i)
118 | if i > 10:
119 | break
120 |
121 | # Unpack batch
122 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
123 |
124 | # Configure input
125 | real_mks = Variable(mks.type(Tensor))
126 | given_nds = Variable(nds.type(Tensor))
127 | given_eds = eds
128 | for k in range(opt.num_variations):
129 | print('var num {}'.format(k))
130 | # plot images
131 | z = Variable(Tensor(np.random.normal(0, 1, (real_mks.shape[0], opt.latent_dim))))
132 | with torch.no_grad():
133 | gen_mks = generator(z, given_nds, given_eds)
134 | gen_bbs = np.array([np.array(mask_to_bb(mk)) for mk in gen_mks.detach().cpu()])
135 | real_bbs = np.array([np.array(mask_to_bb(mk)) for mk in real_mks.detach().cpu()])
136 | real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
137 | gen_bbs = gen_bbs[np.newaxis, :, :]/32.0
138 | junctions = np.array(bb_to_vec(gen_bbs))[0, :, :]
139 | regions = np.array(bb_to_seg(gen_bbs))[0, :, :, :].transpose((1, 2, 0))
140 | graph = [real_nodes, None]
141 |
142 | if k == 0:
143 | graph_arr = draw_graph([real_nodes, eds.detach().cpu().numpy()])
144 | final_images.append(torch.tensor(graph_arr))
145 |
146 | # # place real
147 | # real_bbs = real_bbs[np.newaxis, :, :]/32.0
148 | # real_im = bb_to_im_fid(real_bbs, real_nodes)
149 | # rgb_arr = np.array(real_im)
150 | # final_images.append(torch.tensor(rgb_arr/255.0))
151 |
152 |
153 | # reconstruct
154 | fake_im = bb_to_im_fid(gen_bbs, real_nodes)
155 | rgb_arr = np.array(fake_im)
156 | final_images.append(torch.tensor(rgb_arr/255.0))
157 |
158 | final_images = torch.stack(final_images).transpose(1, 3)
159 | save_image(final_images, "./output/rendered_{}.png".format(target_set), nrow=opt.num_variations+1)
160 |
--------------------------------------------------------------------------------
/testing/variation_bbs_with_target_graph.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from PIL import Image, ImageDraw
21 | from reconstruct import reconstructFloorplan
22 | import svgwrite
23 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb, remove_junctions, ID_COLOR, bb_to_im_fid
24 | from models import Generator
25 | from collections import defaultdict
26 | import matplotlib.pyplot as plt
27 | import networkx as nx
28 |
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
31 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
32 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
33 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
34 | parser.add_argument("--num_variations", type=int, default=1, help="number of variations")
35 | parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
36 |
37 | opt = parser.parse_args()
38 | print(opt)
39 |
40 | numb_iters = 200000
41 | exp_name = 'exp_with_graph_global_new'
42 | target_set = 'E'
43 | phase='eval'
44 | checkpoint = './checkpoints/{}_{}_{}.pth'.format(exp_name, target_set, numb_iters)
45 |
46 | def pad_im(cr_im, final_size=299, bkg_color='white'):
47 | new_size = int(np.max([np.max(list(cr_im.size)), final_size]))
48 | padded_im = Image.new('RGB', (new_size, new_size), 'white')
49 | padded_im.paste(cr_im, ((new_size-cr_im.size[0])//2, (new_size-cr_im.size[1])//2))
50 |     padded_im = padded_im.resize((final_size, final_size), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
51 | return padded_im
52 |
53 | def draw_graph(g_true):
54 | # build true graph
55 | G_true = nx.Graph()
56 | colors_H = []
57 | for k, label in enumerate(g_true[0]):
58 | _type = label+1
59 | if _type >= 0:
60 | G_true.add_nodes_from([(k, {'label':k})])
61 | colors_H.append(ID_COLOR[_type])
62 | for k, m, l in g_true[1]:
63 | if m > 0:
64 | G_true.add_edges_from([(k, l)], color='b',weight=4)
65 | plt.figure()
66 | pos = nx.nx_agraph.graphviz_layout(G_true, prog='neato')
67 |
68 | edges = G_true.edges()
69 | colors = ['black' for u,v in edges]
70 | weights = [4 for u,v in edges]
71 |
72 |     nx.draw(G_true, pos, node_size=1000, node_color=colors_H, font_size=0, font_weight='bold', edgelist=edges, edge_color=colors, width=weights, with_labels=True)
73 | plt.tight_layout()
74 | plt.savefig('./dump/_true_graph.jpg', format="jpg")
75 | rgb_im = Image.open('./dump/_true_graph.jpg')
76 | rgb_arr = pad_im(rgb_im)
77 | return rgb_arr
78 |
79 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
80 |
81 | # draw edges
82 | for k, l in lines_on:
83 | x1, y1 = np.array(junctions[k])
84 | x2, y2 = np.array(junctions[l])
85 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
86 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=1.0))
87 |
88 | # draw corners
89 | for j in juncs_on:
90 | x, y = np.array(junctions[j])
91 | dwg.add(dwg.circle(center=(float(x), float(y)), r=3, stroke='red', fill='white', stroke_width=2, opacity=1.0))
92 | return
93 |
94 | # Create folder
95 | os.makedirs(opt.exp_folder, exist_ok=True)
96 |
97 | # Initialize generator and discriminator
98 | generator = Generator()
99 | generator.load_state_dict(torch.load(checkpoint))
100 |
101 | # Initialize variables
102 | cuda = True if torch.cuda.is_available() else False
103 | if cuda:
104 | generator.cuda()
105 | rooms_path = '/local-scratch4/nnauata/autodesk/FloorplanDataset/'
106 |
107 | # Initialize dataset iterator
108 | fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split=phase)
109 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
110 | batch_size=opt.batch_size,
111 | shuffle=False, collate_fn=floorplan_collate_fn)
112 | # Optimizers
113 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
114 |
115 | # ------------
116 | # Vectorize
117 | # ------------
118 | globalIndex = 0
119 | final_images = []
120 | target_graph = list(range(100))
121 | for i, batch in enumerate(fp_loader):
122 | print(i)
123 | if i not in target_graph:
124 | continue
125 |
126 | # Unpack batch
127 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
128 |
129 | # Configure input
130 | real_mks = Variable(mks.type(Tensor))
131 | given_nds = Variable(nds.type(Tensor))
132 | given_eds = eds
133 | for k in range(opt.num_variations):
134 | print('var num {}'.format(k))
135 | # plot images
136 | z = Variable(Tensor(np.random.normal(0, 1, (real_mks.shape[0], opt.latent_dim))))
137 | with torch.no_grad():
138 | gen_mks = generator(z, given_nds, given_eds)
139 | gen_bbs = np.array([np.array(mask_to_bb(mk)) for mk in gen_mks.detach().cpu()])
140 | real_bbs = np.array([np.array(mask_to_bb(mk)) for mk in real_mks.detach().cpu()])
141 | real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
142 | print(real_nodes)
143 | gen_bbs = gen_bbs[np.newaxis, :, :]/32.0
144 | junctions = np.array(bb_to_vec(gen_bbs))[0, :, :]
145 | regions = np.array(bb_to_seg(gen_bbs))[0, :, :, :].transpose((1, 2, 0))
146 | graph = [real_nodes, None]
147 |
148 | if k == 0:
149 | graph_arr = draw_graph([real_nodes, eds.detach().cpu().numpy()])
150 | final_images.append(graph_arr)
151 |
152 | # place real
153 | real_bbs = real_bbs[np.newaxis, :, :]/32.0
154 | real_im = bb_to_im_fid(real_bbs, real_nodes)
155 | final_images.append(real_im)
156 |
157 |
158 | # reconstruct
159 | fake_im = bb_to_im_fid(gen_bbs, real_nodes)
160 | final_images.append(fake_im)
161 |
162 |
163 | row = 0
164 | for k, im in enumerate(final_images):
165 | path = './out/var_{}/'.format(row)
166 | os.makedirs(path, exist_ok=True)
167 | im.save('{}/{}.jpg'.format(path, k))
168 | if (k+1) % 12 == 0:
169 | row+=1
170 | # final_images = torch.stack(final_images).transpose(1, 3)
171 | # save_image(final_images, "./output/rendered_{}.png".format(target_set), nrow=opt.num_variations+1)
172 |
--------------------------------------------------------------------------------
/testing/variation_test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn, is_adjacent
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb, draw_graph
21 | from PIL import Image, ImageDraw
22 | from reconstruct import reconstructFloorplan
23 | import svgwrite
24 |
25 | from models import Generator
26 | import networkx as nx
27 | import matplotlib.pyplot as plt
28 | from utils import ID_COLOR
29 | from tqdm import tqdm
30 | from collections import defaultdict
31 |
32 | parser = argparse.ArgumentParser()
33 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
34 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
35 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches - does not support larger batchs")
36 | parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
37 | parser.add_argument("--with_boundary", action='store_true', default=True, help="include floorplan footprint")
38 | parser.add_argument("--num_variations", type=int, default=10, help="number of variations")
39 | parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
40 |
41 | opt = parser.parse_args()
42 | print(opt)
43 |
44 | numb_iters = 200000
45 | exp_name = 'exp_with_graph_global_new'
46 | target_set = 'D'
47 | checkpoint = './checkpoints/{}_{}_{}.pth'.format(exp_name, target_set, numb_iters)
48 |
49 | def return_eq(node1, node2):
50 | return node1['label']==node2['label']
51 |
52 | def compute_dist(bb1, bb2):
53 |
54 | x0, y0, x1, y1 = bb1
55 | x2, y2, x3, y3 = bb2
56 |
57 | h1, h2 = x1-x0, x3-x2
58 | w1, w2 = y1-y0, y3-y2
59 |
60 | xc1, xc2 = (x0+x1)/2.0, (x2+x3)/2.0
61 | yc1, yc2 = (y0+y1)/2.0, (y2+y3)/2.0
62 |
63 | delta_x = abs(xc2-xc1) - (h1 + h2)/2.0
64 | delta_y = abs(yc2-yc1) - (w1 + w2)/2.0
65 |
66 | return delta_x, delta_y
67 |
68 |
69 | def retrieve_connections(nodes, room_bb):
70 | edges = []
71 | nodes = [x for x in nodes.detach().cpu().numpy() if x >= 0]
72 | room_bb = room_bb.view(-1, 4).detach().cpu().numpy()
73 | for k, bb1 in enumerate(room_bb):
74 | for l, bb2 in enumerate(room_bb):
75 | if k > l:
76 | if is_adjacent(bb1, bb2):
77 | edges.append((k, l))
78 | return nodes, edges
79 |
80 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
81 | # draw edges
82 | for k, l in lines_on:
83 | x1, y1 = np.array(junctions[k])/2.0
84 | x2, y2 = np.array(junctions[l])/2.0
85 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
86 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=0.5))
87 |
88 | # draw corners
89 | for j in juncs_on:
90 | x, y = np.array(junctions[j])/2.0
91 | dwg.add(dwg.circle(center=(x, y), r=2, stroke='red', fill='white', stroke_width=1, opacity=0.75))
92 | return
93 |
94 | # Initialize generator and discriminator
95 | generator = Generator()
96 | generator.load_state_dict(torch.load(checkpoint))
97 | generator.eval()
98 |
99 | # Initialize variables
100 | cuda = True if torch.cuda.is_available() else False
101 | if cuda:
102 | generator.cuda()
103 | rooms_path = '/local-scratch2/nnauata/autodesk/FloorplanDataset/'
104 |
105 | # Configure data loader
106 | rooms_path = '/local-scratch2/nnauata/autodesk/FloorplanDataset/'
107 | fp_dataset = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split='eval')
108 | fp_loader = torch.utils.data.DataLoader(fp_dataset,
109 | batch_size=opt.batch_size,
110 | shuffle=True,
111 | num_workers=opt.n_cpu,
112 | collate_fn=floorplan_collate_fn)
113 | fp_iter = tqdm(fp_loader, total=len(fp_dataset) // opt.batch_size + 1)
114 |
115 | # Optimizers
116 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
117 |
118 | # Generate samples
119 | all_imgs = []
120 | for i, batch in enumerate(fp_iter):
121 | if i > 64:
122 | break
123 |
124 | # Unpack batch
125 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
126 |
127 | # Configure input
128 | real_mks = Variable(mks.type(Tensor))
129 | given_nds = Variable(nds.type(Tensor))
130 | given_eds = eds
131 |
132 | # Sample noise as generator input
133 | layouts_imgs_tensor = []
134 |
135 | # draw graph
136 | graph_img = draw_graph(nds.detach().cpu().numpy(), eds.detach().cpu().numpy(), 0, im_size=256)
137 | all_imgs.append(graph_img)
138 |
139 | # reconstruct
140 | for j in range(opt.num_variations):
141 | z_shape = [real_mks.shape[0], opt.latent_dim]
142 | z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape))))
143 |
144 | with torch.no_grad():
145 | gen_mks = generator(z, given_nds, given_eds)
146 | gen_bbs = np.array([np.array(mask_to_bb(mk)) for mk in gen_mks.detach().cpu()])
147 | real_bbs = np.array([np.array(mask_to_bb(mk)) for mk in real_mks.detach().cpu()])
148 | real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
149 |
150 | # draw boxes - filling
151 | comb_img = np.ones((256, 256, 3)) * 255
152 | comb_img = Image.fromarray(comb_img.astype('uint8'))
153 | dr = ImageDraw.Draw(comb_img)
154 | for bb in gen_bbs:
155 | dr.rectangle(tuple(bb*8.0), fill='beige')
156 |
157 | # draw boxes - outline
158 | for nd, bb in zip(real_nodes, gen_bbs):
159 | color = ID_COLOR[nd + 1]
160 | dr.rectangle(tuple(bb*8.0), outline=color, width=4)
161 |
162 | im_arr = torch.tensor(np.array(comb_img).transpose(2, 0, 1)/255.0).float()
163 | all_imgs.append(im_arr)
164 | all_imgs = torch.stack(all_imgs)
165 | save_image(all_imgs, "layout_variations.png", nrow=11, normalize=False)
166 |
167 |
--------------------------------------------------------------------------------
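compute_dist (defined above) returns the axis-wise gap between two boxes: center distance minus the sum of half-extents, so a negative value means overlap along that axis. A worked check with hypothetical boxes (the script does work at import time, so paste the function into a REPL rather than importing it):

bb1 = (0, 0, 2, 2)  # 2x2 box at the origin
bb2 = (3, 0, 5, 2)  # same size, shifted 3 units along x
print(compute_dist(bb1, bb2))  # (1.0, -2.0): 1-unit gap in x, overlap in y

--------------------------------------------------------------------------------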
/testing/vectorize_OLD.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 |
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.autograd as autograd
19 | import torch
20 | from PIL import Image, ImageDraw
21 | from reconstruct import reconstructFloorplan
22 | import svgwrite
23 | from utils import bb_to_img, bb_to_vec, bb_to_seg, mask_to_bb, remove_junctions, ID_COLOR
24 | from models import Generator
25 | from collections import defaultdict
26 | import matplotlib.pyplot as plt
27 | import networkx as nx
28 |
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
31 | parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
32 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
33 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
34 | parser.add_argument("--num_variations", type=int, default=8, help="number of variations")
35 | parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
36 |
37 | opt = parser.parse_args()
38 | IM_SIZE = 256
39 | print(opt)
40 |
41 | numb_iters = 200000
42 | exp_name = 'exp_with_graph_global_new'
43 | target_set = 'E'
44 | phase='eval'
45 | checkpoint = './checkpoints/{}_{}_{}.pth'.format(exp_name, target_set, numb_iters)
46 |
47 | def pad_im(cr_im, final_size=256, bkg_color='white'):
48 | new_size = int(np.max([np.max(list(cr_im.size)), final_size]))
49 | padded_im = Image.new('RGB', (new_size, new_size), 'white')
50 | padded_im.paste(cr_im, ((new_size-cr_im.size[0])//2, (new_size-cr_im.size[1])//2))
51 |     padded_im = padded_im.resize((final_size, final_size), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
52 | return padded_im
53 |
54 | def draw_graph(g_true):
55 | # build true graph
56 | G_true = nx.Graph()
57 | colors_H = []
58 | for k, label in enumerate(g_true[0]):
59 | _type = label+1
60 | if _type >= 0:
61 | G_true.add_nodes_from([(k, {'label':_type})])
62 | colors_H.append(ID_COLOR[_type])
63 | for k, m, l in g_true[1]:
64 | if m > 0:
65 | G_true.add_edges_from([(k, l)], color='b',weight=4)
66 | plt.figure()
67 | # pos = nx.spring_layout(G_true, scale=2)
68 | pos = nx.nx_agraph.graphviz_layout(G_true, prog='dot')
69 | nx.draw(G_true, pos, node_size=1000, node_color=colors_H, font_size=0, font_weight='bold')
70 | plt.tight_layout()
71 | plt.savefig('./dump/_true_graph.jpg', format="jpg")
72 | rgb_im = Image.open('./dump/_true_graph.jpg')
73 | rgb_arr = np.array(pad_im(rgb_im))/255.0
74 | return rgb_arr
75 |
76 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
77 |
78 | # draw edges
79 | for k, l in lines_on:
80 | x1, y1 = np.array(junctions[k])
81 | x2, y2 = np.array(junctions[l])
82 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
83 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=1.0))
84 |
85 | # draw corners
86 | for j in juncs_on:
87 | x, y = np.array(junctions[j])
88 | dwg.add(dwg.circle(center=(float(x), float(y)), r=3, stroke='red', fill='white', stroke_width=2, opacity=1.0))
89 | return
90 |
91 | # Create folder
92 | os.makedirs(opt.exp_folder, exist_ok=True)
93 |
94 | # Initialize generator and discriminator
95 | generator = Generator()
96 | generator.load_state_dict(torch.load(checkpoint))
97 |
98 | # Initialize variables
99 | cuda = True if torch.cuda.is_available() else False
100 | if cuda:
101 | generator.cuda()
102 | rooms_path = '/local-scratch2/nnauata/autodesk/FloorplanDataset/'
103 |
104 | # Initialize dataset iterator
105 | fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=target_set, split=phase)
106 | fp_loader = torch.utils.data.DataLoader(fp_dataset_test,
107 | batch_size=opt.batch_size,
108 | shuffle=True, collate_fn=floorplan_collate_fn)
109 | # Optimizers
110 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
111 |
112 | # ------------
113 | # Vectorize
114 | # ------------
115 | globalIndex = 0
116 | final_images = []
117 | for i, batch in enumerate(fp_loader):
118 | print(i)
119 | if i > 16:
120 | break
121 |
122 | # Unpack batch
123 | mks, nds, eds, nd_to_sample, ed_to_sample = batch
124 |
125 | # Configure input
126 | real_mks = Variable(mks.type(Tensor))
127 | given_nds = Variable(nds.type(Tensor))
128 | given_eds = eds
129 | for k in range(opt.num_variations):
130 | print('var num {}'.format(k))
131 | # plot images
132 | z = Variable(Tensor(np.random.normal(0, 1, (real_mks.shape[0], opt.latent_dim))))
133 | with torch.no_grad():
134 | gen_mks = generator(z, given_nds, given_eds)
135 | gen_bbs = np.array([np.array(mask_to_bb(mk)) for mk in gen_mks.detach().cpu()])
136 | real_bbs = np.array([np.array(mask_to_bb(mk)) for mk in real_mks.detach().cpu()])
137 | real_nodes = np.where(given_nds.detach().cpu()==1)[-1]
138 | gen_bbs = gen_bbs[np.newaxis, :, :]/32.0
139 | junctions = np.array(bb_to_vec(gen_bbs))[0, :, :]
140 | regions = np.array(bb_to_seg(gen_bbs))[0, :, :, :].transpose((1, 2, 0))
141 | fake_imgs_tensor = bb_to_img(gen_bbs, [given_nds, given_eds], nd_to_sample, ed_to_sample, im_size=IM_SIZE)
142 | graph = [real_nodes, None]
143 |
144 | if k == 0:
145 | graph_arr = draw_graph([real_nodes, eds.detach().cpu().numpy()])
146 | print(graph_arr.shape)
147 | final_images.append(torch.tensor(graph_arr))
148 |
149 | # place real
150 | real_bbs = real_bbs[np.newaxis, :, :]/32.0
151 | real_junctions = np.array(bb_to_vec(real_bbs))[0, :, :]
152 | real_regions = np.array(bb_to_seg(real_bbs))[0, :, :, :].transpose((1, 2, 0))
153 | real_imgs_tensor = bb_to_img(real_bbs, [given_nds, given_eds], nd_to_sample, ed_to_sample, im_size=IM_SIZE)
154 | real_junctions, real_juncs_on, real_lines_on = reconstructFloorplan(real_regions, graph, globalIndex)
155 |
156 | # draw vector
157 | dwg = svgwrite.Drawing('./svg/floorplan_vec_{}.svg'.format(globalIndex), (256, 256))
158 | dwg.add(svgwrite.image.Image(os.path.abspath('./rooms/{}_rooms_updated.png'.format(globalIndex)), size=(256, 256)))
159 | draw_floorplan(dwg, real_junctions, real_juncs_on, real_lines_on)
160 | dwg.save()
161 |
162 | print('running inkscape ...')
163 | os.system('inkscape ./svg/floorplan_vec_{}.svg --export-png=_temp.png -w {}'.format(globalIndex, IM_SIZE))
164 | png_im = Image.open("_temp.png")
165 |
166 | rgb_img = Image.new('RGB', (256, 256), 'white')
167 | rgb_img.paste(png_im, (0, 0), mask=png_im)
168 | rgb_arr = np.array(rgb_img)
169 | final_images.append(torch.tensor(rgb_arr/255.0))
170 |
171 |
172 | # reconstruct
173 | junctions, juncs_on, lines_on = reconstructFloorplan(regions, graph, globalIndex)
174 |
175 | # draw vector
176 | dwg = svgwrite.Drawing('./svg/floorplan_vec_{}.svg'.format(globalIndex), (256, 256))
177 | dwg.add(svgwrite.image.Image(os.path.abspath('./rooms/{}_rooms_updated.png'.format(globalIndex)), size=(256, 256)))
178 | draw_floorplan(dwg, junctions, juncs_on, lines_on)
179 | dwg.save()
180 |
181 | print('running inkscape ...')
182 | os.system('inkscape ./svg/floorplan_vec_{}.svg --export-png=_temp.png -w {}'.format(globalIndex, IM_SIZE))
183 | png_im = Image.open("_temp.png")
184 |
185 | rgb_img = Image.new('RGB', (256, 256), 'white')
186 | rgb_img.paste(png_im, (0, 0), mask=png_im)
187 |
188 | rgb_arr = np.array(rgb_img)
189 | final_images.append(torch.tensor(rgb_arr/255.0))
190 | globalIndex += 1
191 |
192 | final_images = torch.stack(final_images).transpose(1, 3)
193 | print(final_images.shape)
194 | save_image(final_images, "./output/rendered_all_images.png", nrow=opt.num_variations+2)
195 |
--------------------------------------------------------------------------------
/testing/vectorize_floorplans.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from utils import extract_edges, visualize_sample, preprocess, check_polygon_connectivity, check_polygon_intersection, iou_polygon_intersection, split_edge, slide_wall, remove_colinear_edges, valid_layout
3 | from simanneal import Annealer
4 | from intersections import doIntersect
5 | import math
6 | from PIL import Image, ImageDraw
7 | import matplotlib.pyplot as plt
8 | import copy
9 | from scipy.spatial import ConvexHull, convex_hull_plot_2d
10 |
11 | class FloorplanVectorization(Annealer):
12 |
13 | def __init__(self, state, conns):
14 |         super(FloorplanVectorization, self).__init__(state)  # Annealer.__init__ takes only the initial state
15 | self.conns = conns
16 |
17 | def move(self):
18 | # initial_energy = self.energy()
19 | new_polys = copy.deepcopy(self.state)
20 |
21 | # pick a polygon
22 | p_ind = np.random.choice(range(len(new_polys)))
23 |
24 | # move only if polygon is not empty
25 | _, es = new_polys[p_ind]
26 | if len(es) == 0:
27 | return
28 |
29 | # perform step
30 | new_polys = split_edge(new_polys, p_ind)
31 | new_polys = slide_wall(new_polys, p_ind)
32 | new_polys = remove_colinear_edges(new_polys, p_ind)
33 |
34 | # check if modified layout is valid
35 | if valid_layout(new_polys, p_ind) == False:
36 | return
37 |
38 | self.state = copy.deepcopy(new_polys)
39 | return
40 |
41 | def energy(self):
42 |
43 | # number of self intersections
44 | polys = copy.deepcopy(self.state)
45 | n_intersec = 0
46 | all_edges = []
47 | # for p in polys:
48 | # cs, es = p
49 | # for e in es:
50 | # x0, y0 = cs[e[0]]
51 | # x1, y1 = cs[e[1]]
52 | # all_edges.append(np.array((x0, y0, x1, y1))/255.0)
53 |
54 | # for k, e1 in enumerate(all_edges):
55 | # for l, e2 in enumerate(all_edges):
56 | # if k > l:
57 | # x0, y0, x1, y1 = e1
58 | # x2, y2, x3, y3 = e2
59 | # if doIntersect(np.array([x0, y0]), np.array([x1, y1]), np.array([x2, y2]), np.array([x3, y3])):
60 | # n_intersec += 1
61 |
62 | # compute connectivity mistakes
63 | conn_mistakes = 0
64 | for c in self.conns:
65 | # check connectivity
66 | n1, val, n2 = c
67 | p1 = polys[n1]
68 | p2 = polys[n2]
69 | if (check_polygon_connectivity(p1, p2) == False) and (val == 1):
70 | conn_mistakes += 10
71 | elif (check_polygon_connectivity(p1, p2) == True) and (val == -1):
72 | conn_mistakes += 10
73 |
74 | # compute overlapping regions
75 | overlaps = 0
76 | for k, p1 in enumerate(polys):
77 | for l, p2 in enumerate(polys):
78 | if k > l:
79 | overlaps += 100*iou_polygon_intersection(p1, p2)
80 |
81 | # compute area hull
82 | all_corners = [np.array(cs) for cs, _ in polys if len(cs) > 0]
83 | corner_penalty = 0
84 | for cs in all_corners:
85 | corner_penalty += len(cs)/10.0
86 |
87 | points = np.concatenate(all_corners, 0)
88 | hull = ConvexHull(points)
89 | points_hull = [(x, y) for x, y in zip(points[hull.vertices, 0], points[hull.vertices, 1])]
90 | hull_im = Image.new('L', (256, 256))
91 | dr = ImageDraw.Draw(hull_im)
92 | dr.polygon(points_hull, fill='white')
93 | hull_arr = np.array(hull_im)/255.0
94 |
95 | reg_im = Image.new('L', (256, 256))
96 | dr = ImageDraw.Draw(reg_im)
97 | for p in polys:
98 | if len(p[0]) > 2:
99 | points = [(x, y) for x, y in p[0]]
100 | dr.polygon(points, fill='white')
101 | reg_arr = np.array(reg_im)/255.0
102 | shape = 100*(hull_arr-reg_arr).sum()/reg_arr.sum()
103 | # plt.imshow(reg_im)
104 | # plt.show()
105 |
106 | # # dr.polygon(points_hull, fill='white')
107 | # # plt.imshow(hull_im)
108 | # # plt.show()
109 |
110 |
111 | print('e inter: {}, conn miss: {}, overlaps: {}, shape: {}, corners: {}'.format(n_intersec, conn_mistakes, overlaps, shape, corner_penalty))
112 | return float(n_intersec) + float(conn_mistakes) + float(overlaps) + float(shape) + float(corner_penalty)
113 |
114 | if __name__ == '__main__':
115 |
116 | # load initial state
117 | types, polys, conns = np.load('/home/nelson/Workspace/autodesk/housegan2/raster/0_0.npy', allow_pickle=True)
118 | polys_raw = extract_edges(polys)
119 | polys = preprocess(polys_raw)
120 |
121 | # set params
122 | fp2vec = FloorplanVectorization(polys, conns)
123 | fp2vec.copy_strategy = "deepcopy"
124 | fp2vec.Tmax = 0.001
125 | fp2vec.Tmin = 0.0001
126 | fp2vec.steps = 200000
127 | # fp2vec.updates = 10
128 | new_polys, e = fp2vec.anneal()
129 |
130 | # display output
131 | import matplotlib.pyplot as plt
132 | print('final energy: {}'.format(e))
133 |
134 | print('rectified')
135 | for cs, es in polys:
136 | print(cs)
137 |
138 | print('final')
139 | for cs, es in new_polys:
140 | print(cs)
141 |
142 | raw_im = visualize_sample(types, polys_raw, conns)
143 | before_im = visualize_sample(types, polys, conns)
144 | after_im = visualize_sample(types, new_polys, conns)
145 |
146 | plt.figure()
147 | plt.imshow(raw_im)
148 |
149 | plt.figure()
150 | plt.imshow(before_im)
151 |
152 | plt.figure()
153 | plt.imshow(after_im)
154 |
155 | plt.show()
--------------------------------------------------------------------------------
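A hypothetical toy input for FloorplanVectorization above, matching the structures its energy() iterates over: each polygon is a (corners, edges) pair with corners in 256x256 pixel coordinates and edges as index pairs into the corner list, and each constraint is a (n1, val, n2) triplet (val == 1: rooms must touch, val == -1: rooms must not touch):

square = ([(10, 10), (110, 10), (110, 110), (10, 110)],
          [(0, 1), (1, 2), (2, 3), (3, 0)])
neighbor = ([(110, 10), (200, 10), (200, 110), (110, 110)],
            [(0, 1), (1, 2), (2, 3), (3, 0)])
polys = [square, neighbor]
conns = [(0, 1, 1)]  # polygon 0 must stay connected to polygon 1

fp2vec = FloorplanVectorization(polys, conns)
fp2vec.copy_strategy = "deepcopy"

--------------------------------------------------------------------------------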
/testing/visualize.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import numpy as np
4 | import math
5 | import sys
6 | import random
7 |
8 | import torchvision.transforms as transforms
9 | from torchvision.utils import save_image
10 |
11 | from floorplan_dataset_no_masks import FloorplanGraphDataset, floorplan_collate_fn
12 | from torch.utils.data import DataLoader
13 | from torchvision import datasets
14 | from torch.autograd import Variable
15 | from graph import GraphTripleConv, GraphTripleConvNet
16 |
17 | import torch.nn as nn
18 | import torch.nn.functional as F
19 | import torch.autograd as autograd
20 | import torch
21 | from utils import bb_to_img, bb_to_vec, bb_to_seg
22 | from PIL import Image, ImageDraw
23 | from MyIP import reconstructFloorplan
24 | import svgwrite
25 | from input_graphs import *
26 | from models import Generator
27 |
28 | parser = argparse.ArgumentParser()
29 | parser.add_argument("--n_cpu", type=int, default=16, help="number of cpu threads to use during batch generation")
30 | parser.add_argument("--latent_dim", type=int, default=20, help="dimensionality of the latent space")
31 | parser.add_argument("--batch_size", type=int, default=2, help="size of the batches")
32 | parser.add_argument("--img_size", type=int, default=4, help="size of each image dimension")
33 | parser.add_argument("--channels", type=int, default=1, help="number of image channels")
34 | parser.add_argument("--with_boundary", action='store_true', default=True, help="include floorplan footprint")
35 | parser.add_argument("--num_variations", type=int, default=10, help="number of variations")
36 | parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
37 | parser.add_argument("--checkpoint", type=str, default='checkpoints/gen_neighbour_exp_10_nodes_train_split_1000000.pth', help="destination folder")
38 | opt = parser.parse_args()
39 | print(opt)
40 |
41 | def draw_floorplan(dwg, junctions, juncs_on, lines_on):
42 |
43 | # draw edges
44 | for k, l in lines_on:
45 | x1, y1 = np.array(junctions[k])/2.0
46 | x2, y2 = np.array(junctions[l])/2.0
47 | #fill='rgb({},{},{})'.format(*(np.random.rand(3)*255).astype('int'))
48 | dwg.add(dwg.line((float(x1), float(y1)), (float(x2), float(y2)), stroke='black', stroke_width=4, opacity=0.5))
49 |
50 | # draw corners
51 | for j in juncs_on:
52 | x, y = np.array(junctions[j])/2.0
53 | dwg.add(dwg.circle(center=(x, y), r=2, stroke='red', fill='white', stroke_width=1, opacity=0.75))
54 | return
55 |
56 | # Create folder
57 | os.makedirs(opt.exp_folder, exist_ok=True)
58 |
59 | # Initialize generator and discriminator
60 | generator = Generator(opt.with_boundary)
61 | generator.load_state_dict(torch.load(opt.checkpoint, map_location='cpu'))
62 | generator.eval()
63 |
64 | # Initialize variables
65 | img_shape = (opt.channels, opt.img_size, opt.img_size)
66 | cuda = True if torch.cuda.is_available() else False
67 | if cuda:
68 | generator.cuda()
69 | rooms_path = '/local-scratch2/nnauata/autodesk/CubiCasa5k/'
70 |
71 | # Initialize dataset iterator
72 | fp_dataset = FloorplanGraphDataset(rooms_path, split='test')
73 | fp_loader = torch.utils.data.DataLoader(fp_dataset,
74 | batch_size=opt.batch_size,
75 | shuffle=True, collate_fn=floorplan_collate_fn)
76 | # Tensor type (CUDA if available)
77 | Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
78 |
79 | # ------------
80 | # Generate
81 | # ------------
82 | globalIndex = 0
83 | # np.random.seed(100)
84 |
85 | # Sample a single latent vector and reuse it across all six input graphs
86 | z = Variable(Tensor(np.random.normal(0, 1, (1, opt.latent_dim))))
87 | print(z)
88 |
89 | graph_inputs = [(in_boundary_1, in_nodes_1, in_triples_1),
90 |                 (in_boundary_2, in_nodes_2, in_triples_2),
91 |                 (in_boundary_3, in_nodes_3, in_triples_3),
92 |                 (in_boundary_4, in_nodes_4, in_triples_4),
93 |                 (in_boundary_5, in_nodes_5, in_triples_5),
94 |                 (in_boundary_6, in_nodes_6, in_triples_6)]
95 |
96 | # Generate one layout per input graph
97 | gen_imgs_tensors = []
98 | for in_boundary, in_nodes, in_triples in graph_inputs:
99 |     boundary_bb = Variable(torch.tensor(in_boundary).type(Tensor))
100 |     nodes = torch.tensor(in_nodes)
101 |     triples = torch.tensor(in_triples)
102 |     room_to_sample = torch.tensor(np.zeros(nodes.shape[0]))
103 |     triple_to_sample = torch.tensor(np.zeros(triples.shape[0]))
104 |     gen_room_bb = generator(z, [nodes, triples], room_to_sample, boundary=boundary_bb)
105 |     gen_imgs_tensors.append(bb_to_img(gen_room_bb.data, [nodes, triples], room_to_sample, triple_to_sample))
106 |
107 | # Save all images in a single grid (create the output folder if needed)
108 | os.makedirs('./exp_mod', exist_ok=True)
109 | all_imgs = torch.cat(gen_imgs_tensors)
110 | save_image(all_imgs, "./exp_mod/test_1.png", nrow=4)
--------------------------------------------------------------------------------
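`draw_floorplan` only adds line and circle primitives to a caller-supplied `svgwrite` drawing and halves all coordinates, so junctions in a 256-pixel frame land on a 128-pixel canvas. A minimal usage sketch follows; the junction coordinates and the active-corner/segment lists here are invented for illustration and are not from the repo.

import numpy as np
import svgwrite

# Hypothetical inputs: 4 junctions forming a square room,
# all corners active, 4 wall segments between them.
junctions = np.array([[50, 50], [200, 50], [200, 200], [50, 200]])
juncs_on = [0, 1, 2, 3]
lines_on = [(0, 1), (1, 2), (2, 3), (3, 0)]

dwg = svgwrite.Drawing('floorplan.svg', size=(128, 128))
draw_floorplan(dwg, junctions, juncs_on, lines_on)
dwg.save()

The function returns nothing; the caller owns the `Drawing` and decides when to `save()` it, so several calls can layer walls and corners into one SVG before writing it out.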