├── data
│   ├── __init__.py
│   ├── human_data.py
│   └── animal_data.py
├── util
│   ├── __init__.py
│   ├── iter_counter.py
│   └── util.py
├── options
│   ├── __init__.py
│   ├── test_options.py
│   ├── train_options.py
│   └── base_options.py
├── files
│   └── 3dpt.gif
├── LICENSE
├── models
│   ├── __init__.py
│   ├── networks
│   │   ├── architecture.py
│   │   ├── normalization.py
│   │   ├── __init__.py
│   │   ├── base_network.py
│   │   ├── generator.py
│   │   └── correspondence.py
│   └── ver2ver_model.py
├── .gitignore
├── ver2ver_trainer.py
├── test.py
├── train.py
├── README.md
├── human_test_list
└── animal_test_list
/data/__init__.py:
--------------------------------------------------------------------------------
1 | # data init
--------------------------------------------------------------------------------
/util/__init__.py:
--------------------------------------------------------------------------------
1 | # util init
--------------------------------------------------------------------------------
/options/__init__.py:
--------------------------------------------------------------------------------
1 | # option init
--------------------------------------------------------------------------------
/files/3dpt.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChaoyueSong/3D-CoreNet/HEAD/files/3dpt.gif
--------------------------------------------------------------------------------
/options/test_options.py:
--------------------------------------------------------------------------------
1 | from .base_options import BaseOptions
2 |
3 |
4 | class TestOptions(BaseOptions):
5 | def initialize(self, parser):
6 | BaseOptions.initialize(self, parser)
7 | parser.add_argument('--results_dir', type=str, default='./test_results/', help='saves results here.')
8 | parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
9 |
10 | parser.set_defaults(phase='test')
11 | self.isTrain = False
12 | return parser
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 ChaoyueSong
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import torch
3 |
4 |
5 | def find_model_using_name(model_name):
6 | # Given the option --model [modelname],
7 | # the file "models/modelname_model.py"
8 | # will be imported.
9 | model_filename = "models." + model_name + "_model"
10 | modellib = importlib.import_module(model_filename)
11 |
12 | # In the file, the class called ModelNameModel() will
13 | # be instantiated. It has to be a subclass of torch.nn.Module,
14 | # and it is case-insensitive.
15 | model = None
16 | target_model_name = model_name.replace('_', '') + 'model'
17 | for name, cls in modellib.__dict__.items():
18 | if name.lower() == target_model_name.lower() \
19 | and issubclass(cls, torch.nn.Module):
20 | model = cls
21 |
22 | if model is None:
23 | print("In %s.py, there should be a subclass of torch.nn.Module with class name that matches %s in lowercase." % (model_filename, target_model_name))
24 | exit(0)
25 |
26 | return model
27 |
28 |
29 | def get_option_setter(model_name):
30 | model_class = find_model_using_name(model_name)
31 | return model_class.modify_commandline_options
32 |
33 |
34 | def create_model(opt):
35 | model = find_model_using_name(opt.model)
36 | instance = model(opt)
37 | print("model [%s] was created" % (type(instance).__name__))
38 |
39 | return instance
40 |
--------------------------------------------------------------------------------
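
A minimal usage sketch of the dynamic lookup above, assuming the repo layout shown in the tree (models/ver2ver_model.py defines Ver2VerModel):

```python
# With --model ver2ver, find_model_using_name imports models.ver2ver_model
# and picks the class whose lowercased name equals 'ver2ver' + 'model'.
from models import find_model_using_name

model_cls = find_model_using_name('ver2ver')
print(model_cls.__name__)  # Ver2VerModel
```
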
/models/networks/architecture.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import torch.nn.utils.spectral_norm as spectral_norm
5 | from models.networks.normalization import ElaIN
6 |
7 | class ElaINResnetBlock(nn.Module):
8 | def __init__(self, fin, fout, ic):
9 | super().__init__()
10 | # Attributes
11 | self.learned_shortcut = (fin != fout)
12 | fmiddle = min(fin, fout)
13 |
14 | # create conv layers
15 | self.conv_0 = nn.Conv1d(fin, fmiddle, kernel_size=1)
16 | self.conv_1 = nn.Conv1d(fmiddle, fout, kernel_size=1)
17 | self.conv_s = nn.Conv1d(fin, fout, kernel_size=1, bias=False)
18 |
19 | # apply spectral norm
20 | self.conv_0 = spectral_norm(self.conv_0)
21 | self.conv_1 = spectral_norm(self.conv_1)
22 | self.conv_s = spectral_norm(self.conv_s)
23 |
24 | # define normalization layers
25 | self.norm_0 = ElaIN(fin, ic)
26 | self.norm_1 = ElaIN(fmiddle, ic)
27 | self.norm_s = ElaIN(fin, ic)
28 |
29 | def forward(self, x, addition):
30 | x_s = self.conv_s(self.actvn(self.norm_s(x, addition)))
31 | dx = self.conv_0(self.actvn(self.norm_0(x, addition)))
32 | dx = self.conv_1(self.actvn(self.norm_1(dx, addition)))
33 | out = x_s + dx
34 |
35 | return out
36 |
37 | def actvn(self, x):
38 | return F.leaky_relu(x, 2e-1)
39 |
40 |
--------------------------------------------------------------------------------
/models/networks/normalization.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from util import util
4 |
5 | class ElaIN(nn.Module):
6 | def __init__(self, norm_nc, addition_nc):
7 | super().__init__()
8 |
9 | self.mlp_same = nn.Conv1d(addition_nc, norm_nc, 1)
10 | self.mlp_gamma = nn.Conv1d(norm_nc, norm_nc, 1)
11 | self.mlp_beta = nn.Conv1d(norm_nc, norm_nc, 1)
12 |
13 | self.mlp_weight = nn.Conv1d(2*norm_nc, norm_nc, 1)
14 |
15 | def forward(self, x, addition):
16 |
17 | # feature dim align
18 | addition = self.mlp_same(addition)
19 |
20 | # get gamma and beta
21 | addition_gamma = self.mlp_gamma(addition)
22 | addition_beta = self.mlp_beta(addition)
23 |
24 | # calculate the mean of identity features and warped features in dim=2
25 |         id_avg = torch.mean(addition, 2, keepdim=True)
26 | x_avg = torch.mean(x, 2, keepdim=True)
27 |
28 | # get the adaptive weight
29 | weight_cat = torch.cat((id_avg, x_avg), 1)
30 | weight = self.mlp_weight(weight_cat)
31 |
32 | # calculate the final modulation parameters
33 | x_mean, x_std = util.calc_mean_std(x)
34 | gamma = addition_gamma * weight + x_std * (1-weight)
35 | beta = addition_beta * weight + x_mean * (1-weight)
36 |
37 | # normalization and denormalization
38 | x = (x - x_mean) / x_std
39 | out = x * (1 + gamma) + beta
40 |
41 | return out
--------------------------------------------------------------------------------
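
A shape-level sketch of ElaIN in isolation (the channel and point counts here are illustrative, not taken from the training configuration):

```python
import torch
from models.networks.normalization import ElaIN

elain = ElaIN(norm_nc=64, addition_nc=256)
x = torch.randn(2, 64, 100)          # warped features: (batch, channels, points)
addition = torch.randn(2, 256, 100)  # identity features from the correspondence module
out = elain(x, addition)             # adaptively re-normalized features
print(out.shape)                     # torch.Size([2, 64, 100])
```
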
/models/networks/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from models.networks.base_network import BaseNetwork
3 | from models.networks.generator import *
4 | from models.networks.correspondence import *
5 | import util.util as util
6 |
7 | def find_network_using_name(target_network_name, filename, add=True):
8 | #for netG: elaingenerator
9 | target_class_name = target_network_name + filename if add else target_network_name
10 | module_name = 'models.networks.' + filename
11 | network = util.find_class_in_module(target_class_name, module_name)
12 |
13 | assert issubclass(network, BaseNetwork), \
14 | "Class %s should be a subclass of BaseNetwork" % network
15 |
16 | return network
17 |
18 | def modify_commandline_options(parser, is_train):
19 | opt, _ = parser.parse_known_args()
20 |
21 | netG_cls = find_network_using_name(opt.netG, 'generator')
22 | parser = netG_cls.modify_commandline_options(parser, is_train)
23 |
24 | return parser
25 |
26 | def create_network(cls, opt, stage1=False):
27 | if stage1:
28 | net = cls(opt, stage1=True)
29 | else:
30 | net = cls(opt)
31 | net.print_network()
32 | if len(opt.gpu_ids) > 0:
33 | assert(torch.cuda.is_available())
34 | net.cuda()
35 | net.init_weights(opt.init_type, opt.init_variance)
36 | return net
37 |
38 | def define_G(opt):
39 | netG_cls = find_network_using_name(opt.netG, 'generator') #ElaINGenerator
40 | return create_network(netG_cls, opt)
41 |
42 | def define_Corr(opt): #Correspondence
43 | netCoor_cls = find_network_using_name('', 'correspondence')
44 | return create_network(netCoor_cls, opt)
45 |
--------------------------------------------------------------------------------
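
For reference, with the defaults from base_options.py (`--netG elain`), the name resolution above proceeds as follows:

```python
# find_network_using_name('elain', 'generator'):
#   target_class_name = 'elain' + 'generator' = 'elaingenerator'
#   module_name       = 'models.networks.generator'
#   -> util.find_class_in_module matches ElaINGenerator (case-insensitive)
#
# find_network_using_name('', 'correspondence'):
#   target_class_name = 'correspondence'
#   -> matches the Correspondence class in models.networks.correspondence
```
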
/options/train_options.py:
--------------------------------------------------------------------------------
1 | from .base_options import BaseOptions
2 |
3 |
4 | class TrainOptions(BaseOptions):
5 | def initialize(self, parser):
6 | BaseOptions.initialize(self, parser)
7 | # for displays
8 | parser.add_argument('--display_freq', type=int, default=2000, help='frequency of saving training results')
9 | parser.add_argument('--print_freq', type=int, default=100, help='frequency of printing training losses')
10 | parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
11 | parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
12 |
13 | # for training
14 | parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
15 | parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
16 |         parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate. This is NOT the total #epochs. Total #epochs is niter + niter_decay')
17 | parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
18 | parser.add_argument('--optimizer', type=str, default='adam')
19 | parser.add_argument('--beta1', type=float, default=0, help='momentum term of adam')
20 | parser.add_argument('--beta2', type=float, default=0.9, help='momentum term of adam')
21 | parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
22 |
23 | # for discriminators
24 | parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
25 | parser.add_argument('--lambda_edge', type=float, default=0.5, help='weight for edge loss')
26 | parser.add_argument('--lambda_rec', type=float, default=1000.0, help='weight for rec loss')
27 |
28 | self.isTrain = True
29 | return parser
30 |
--------------------------------------------------------------------------------
/models/networks/base_network.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from torch.nn import init
3 |
4 |
5 | class BaseNetwork(nn.Module):
6 | def __init__(self):
7 | super(BaseNetwork, self).__init__()
8 |
9 | def print_network(self):
10 | if isinstance(self, list):
11 | self = self[0]
12 | num_params = 0
13 | for param in self.parameters():
14 | num_params += param.numel()
15 | print('Network [%s] was created. Total number of parameters: %.1f million. '
16 | 'To see the architecture, do print(network).'
17 | % (type(self).__name__, num_params / 1000000))
18 |
19 | def init_weights(self, init_type='normal', gain=0.02):
20 | def init_func(m):
21 | classname = m.__class__.__name__
22 | if classname.find('BatchNorm2d') != -1:
23 | if hasattr(m, 'weight') and m.weight is not None:
24 | init.normal_(m.weight.data, 1.0, gain)
25 | if hasattr(m, 'bias') and m.bias is not None:
26 | init.constant_(m.bias.data, 0.0)
27 | elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
28 | if init_type == 'normal':
29 | init.normal_(m.weight.data, 0.0, gain)
30 | elif init_type == 'xavier':
31 | init.xavier_normal_(m.weight.data, gain=gain)
32 | elif init_type == 'xavier_uniform':
33 | init.xavier_uniform_(m.weight.data, gain=1.0)
34 | elif init_type == 'kaiming':
35 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
36 | elif init_type == 'orthogonal':
37 | init.orthogonal_(m.weight.data, gain=gain)
38 | elif init_type == 'none': # uses pytorch's default init method
39 | m.reset_parameters()
40 | else:
41 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
42 | if hasattr(m, 'bias') and m.bias is not None:
43 | init.constant_(m.bias.data, 0.0)
44 |
45 | self.apply(init_func)
46 |
47 | # propagate to children
48 | for m in self.children():
49 | if hasattr(m, 'init_weights'):
50 | m.init_weights(init_type, gain)
51 |
--------------------------------------------------------------------------------
/models/networks/generator.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from models.networks.base_network import BaseNetwork
4 | from models.networks.architecture import ElaINResnetBlock as ElaINResnetBlock
5 |
6 | class ElaINGenerator(BaseNetwork):
7 | @staticmethod
8 | def modify_commandline_options(parser, is_train):
9 | return parser
10 |
11 | def __init__(self, opt):
12 | super().__init__()
13 | self.opt = opt
14 | nf = opt.ngf #64
15 | self.fc = nn.Conv1d(3, 16 * nf, 3, padding=1)
16 |
17 | self.conv1 = torch.nn.Conv1d(16 * nf, 16 * nf, 1)
18 | self.conv2 = torch.nn.Conv1d(16 * nf, 8 * nf, 1)
19 | self.conv3 = torch.nn.Conv1d(8 * nf, 4 * nf, 1)
20 | self.conv4 = torch.nn.Conv1d(4 * nf, 3, 1)
21 |
22 | self.elain_block1 = ElaINResnetBlock(16 * nf, 16 * nf, 256)
23 | self.elain_block2 = ElaINResnetBlock(8 * nf, 8 * nf, 256)
24 | self.elain_block3 = ElaINResnetBlock(4 * nf, 4 * nf, 256)
25 |
26 |
27 | def forward(self, identity_features, warp_out):
28 | x = warp_out.transpose(2,1)
29 | addition = identity_features
30 |
31 | x = self.fc(x)
32 | x = self.conv1(x)
33 | x = self.elain_block1(x, addition)
34 | x = self.conv2(x)
35 | x = self.elain_block2(x, addition)
36 | x = self.conv3(x)
37 | x = self.elain_block3(x, addition)
38 | x = 2*torch.tanh(self.conv4(x))
39 |
40 | return x
41 |
42 | class AdaptiveFeatureGenerator(BaseNetwork):
43 | @staticmethod
44 | def modify_commandline_options(parser, is_train):
45 | return parser
46 |
47 | def __init__(self, opt):
48 | super().__init__()
49 | self.opt = opt
50 | ndf = opt.ngf #64
51 |
52 | self.layer1 = nn.Conv1d(3, ndf, 1)
53 | self.layer2 = nn.Conv1d(ndf * 1, ndf * 2, 1)
54 | self.layer3 = nn.Conv1d(ndf * 2, ndf * 4, 1)
55 |
56 | self.norm1 = nn.InstanceNorm1d(ndf)
57 | self.norm2 = nn.InstanceNorm1d(ndf * 2)
58 | self.norm3 = nn.InstanceNorm1d(ndf * 4)
59 |
60 | self.actvn = nn.LeakyReLU(0.2, False)
61 | self.opt = opt
62 |
63 | def forward(self, input):
64 | x1 = self.layer1(input)
65 | x1 = self.norm1(x1)
66 | x2 = self.layer2(self.actvn(x1))
67 | x2 = self.norm2(x2)
68 | x3 = self.layer3(self.actvn(x2))
69 | result = self.norm3(x3)
70 |
71 | return result
72 |
--------------------------------------------------------------------------------
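
A shape trace through ElaINGenerator.forward with the default `--ngf 64` (batch size and point count are illustrative):

```python
# warp_out:          (bs, n, 3)   -> transpose(2, 1) -> (bs, 3, n)
# identity_features: (bs, 256, n) from the correspondence module (ndf * 4)
#
# fc:     Conv1d(3, 1024, 3, padding=1)  -> (bs, 1024, n)
# conv1:  1024 -> 1024, then elain_block1 conditioned on identity_features
# conv2:  1024 ->  512, then elain_block2
# conv3:   512 ->  256, then elain_block3
# conv4:   256 ->    3, then 2 * tanh(.) bounds coordinates to (-2, 2)
# output: (bs, 3, n)
```
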
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/ver2ver_trainer.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 |
4 | from models.networks.sync_batchnorm import DataParallelWithCallback
5 | from models.ver2ver_model import Ver2VerModel
6 |
7 | class Ver2VerTrainer():
8 | """
9 | Trainer creates the model and optimizers, and uses them to
10 |     update the weights of the network while reporting losses
11 | and the latest visuals to visualize the progress in training.
12 | """
13 |
14 | def __init__(self, opt):
15 | self.opt = opt
16 | self.ver2ver_model = Ver2VerModel(opt)
17 | if len(opt.gpu_ids) > 1:
18 | self.ver2ver_model = DataParallelWithCallback(self.ver2ver_model,
19 | device_ids=opt.gpu_ids)
20 | self.ver2ver_model_on_one_gpu = self.ver2ver_model.module
21 | else:
22 | self.ver2ver_model.to(opt.gpu_ids[0])
23 | self.ver2ver_model_on_one_gpu = self.ver2ver_model
24 |
25 | if opt.isTrain:
26 | self.optimizer = self.ver2ver_model_on_one_gpu.create_optimizers(opt)
27 | self.old_lr = opt.lr
28 | if opt.continue_train and opt.which_epoch == 'latest':
29 | checkpoint = torch.load(os.path.join(opt.checkpoints_dir, opt.dataset_mode, 'optimizer.pth'))
30 | self.optimizer.load_state_dict(checkpoint['G'])
31 | self.old_lr = checkpoint['lr']
32 |
33 | def train_model(self, identity_points, pose_points, gt_points, id_face):
34 | self.optimizer.zero_grad()
35 | losses, out = self.ver2ver_model(identity_points, pose_points, gt_points, id_face, mode='train')
36 | loss = sum(losses.values()).mean()
37 | loss.backward()
38 | self.optimizer.step()
39 | self.losses = losses
40 | self.out = out
41 |
42 | def get_latest_losses(self):
43 | return {**self.losses}
44 |
45 | def get_latest_generated(self):
46 | return self.out['fake_points']
47 |
48 |     def update_learning_rate(self, epoch):
49 |         self._update_learning_rate(epoch)
50 |
51 | def save(self, epoch):
52 | self.ver2ver_model_on_one_gpu.save(epoch)
53 | if epoch == 'latest':
54 | torch.save({'G': self.optimizer.state_dict(),
55 | 'lr': self.old_lr,
56 | }, os.path.join(self.opt.checkpoints_dir, self.opt.dataset_mode, 'optimizer.pth'))
57 |
58 | ##################################################################
59 | # Helper functions
60 | ##################################################################
61 |
62 |     def _update_learning_rate(self, epoch):
63 | if epoch > self.opt.niter:
64 | lrd = self.opt.lr / self.opt.niter_decay
65 | new_lr = self.old_lr - lrd
66 | else:
67 | new_lr = self.old_lr
68 |
69 | if new_lr != self.old_lr:
70 | new_lr_G = new_lr
71 |
72 | for param_group in self.optimizer.param_groups:
73 | param_group['lr'] = new_lr_G
74 | print('update learning rate: %f -> %f' % (self.old_lr, new_lr))
75 | self.old_lr = new_lr
76 |
--------------------------------------------------------------------------------
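
With the defaults (`--niter 100 --niter_decay 100 --lr 0.0001`), the helper implements a linear decay: the learning rate stays constant for the first 100 epochs, then drops by `lr / niter_decay = 1e-6` per epoch, reaching roughly zero at epoch 200. A standalone sketch of the schedule:

```python
lr, niter, niter_decay = 1e-4, 100, 100  # default option values
old_lr = lr
for epoch in range(1, niter + niter_decay + 1):
    if epoch > niter:
        old_lr -= lr / niter_decay  # linear decay: 1e-6 per epoch
print(old_lr)  # ~0.0 after epoch 200 (up to floating-point error)
```
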
/util/iter_counter.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 |
5 |
6 | # Helper class that keeps track of training iterations
7 | class IterationCounter():
8 | def __init__(self, opt, dataset_size):
9 | self.opt = opt
10 | self.dataset_size = dataset_size
11 |
12 | self.first_epoch = 1
13 | self.total_epochs = opt.niter + opt.niter_decay #default 100 + 100
14 | self.epoch_iter = 0 # iter number within each epoch
15 | self.iter_record_path = os.path.join(self.opt.checkpoints_dir, self.opt.dataset_mode, 'iter.txt')
16 | if opt.isTrain and opt.continue_train:
17 | try:
18 | self.first_epoch, self.epoch_iter = np.loadtxt(
19 | self.iter_record_path, delimiter=',', dtype=int)
20 | print('Resuming from epoch %d at iteration %d' % (self.first_epoch, self.epoch_iter))
21 |             except Exception:
22 | print('Could not load iteration record at %s. Starting from beginning.' %
23 | self.iter_record_path)
24 |
25 | self.total_steps_so_far = (self.first_epoch - 1) * dataset_size + self.epoch_iter
26 |
27 | # return the iterator of epochs for the training
28 | def training_epochs(self):
29 | return range(self.first_epoch, self.total_epochs + 1)
30 |
31 | def record_epoch_start(self, epoch):
32 | self.epoch_start_time = time.time()
33 | self.epoch_iter = 0
34 | self.last_iter_time = time.time()
35 | self.current_epoch = epoch
36 |
37 | def record_one_iteration(self):
38 | current_time = time.time()
39 |
40 | # the last remaining batch is dropped,
41 | # so we can assume batch size is always opt.batchSize
42 | self.time_per_iter = (current_time - self.last_iter_time) / self.opt.batchSize
43 | self.last_iter_time = current_time
44 | self.total_steps_so_far += self.opt.batchSize
45 | self.epoch_iter += self.opt.batchSize
46 |
47 | def record_epoch_end(self):
48 | current_time = time.time()
49 | self.time_per_epoch = current_time - self.epoch_start_time
50 | print('End of epoch %d / %d \t Time Taken: %d sec' %
51 | (self.current_epoch, self.total_epochs, self.time_per_epoch))
52 | if self.current_epoch % self.opt.save_epoch_freq == 0:
53 | np.savetxt(self.iter_record_path, (self.current_epoch + 1, 0),
54 | delimiter=',', fmt='%d')
55 | print('Saved current iteration count at %s.' % self.iter_record_path)
56 |
57 | def record_current_iter(self):
58 | np.savetxt(self.iter_record_path, (self.current_epoch, self.epoch_iter),
59 | delimiter=',', fmt='%d')
60 | print('Saved current iteration count at %s.' % self.iter_record_path)
61 |
62 | def needs_saving(self):
63 | return (self.total_steps_so_far % self.opt.save_latest_freq) < self.opt.batchSize
64 |
65 | def needs_printing(self):
66 | return (self.total_steps_so_far % self.opt.print_freq) < self.opt.batchSize
67 |
68 | def needs_displaying(self):
69 | return (self.total_steps_so_far % self.opt.display_freq) < self.opt.batchSize
70 |
--------------------------------------------------------------------------------
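
The `needs_*` checks use a modulo window (`< batchSize`) rather than an exact equality because `total_steps_so_far` advances in strides of `batchSize` and may never land exactly on a multiple of the frequency. A small sketch with illustrative values:

```python
batchSize, save_latest_freq = 8, 5000  # illustrative values
steps, hits = 0, []
for _ in range(2000):
    steps += batchSize
    if steps % save_latest_freq < batchSize:  # fires once per 5000-step window
        hits.append(steps)
print(hits)  # [5000, 10000, 15000]
```
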
/data/human_data.py:
--------------------------------------------------------------------------------
1 | import torch.utils.data as data
2 | import torch
3 | import numpy as np
4 | import pymesh
5 |
6 | class SMPL_DATA(data.Dataset):
7 | def __init__(self, dataroot, vertex_num=6890, shuffle_point = True):
8 | self.shuffle_point = shuffle_point
9 | self.vertex_num = vertex_num
10 | self.path= dataroot
11 | self.datapath = []
12 | for _ in range(4000):
13 | identity_i = np.random.randint(15)
14 | identity_p = np.random.randint(200,600)
15 | data_in = [identity_i, identity_p]
16 | self.datapath.append(data_in)
17 |
18 | def __getitem__(self, index):
19 | np.random.seed()
20 | mesh_set = self.datapath[index]
21 | identity_mesh_i = mesh_set[0]
22 | identity_mesh_p = mesh_set[1]
23 | pose_mesh_i = np.random.randint(15)
24 | pose_mesh_p = np.random.randint(200,600)
25 | identity_mesh = pymesh.load_mesh(self.path+'id'+str(identity_mesh_i)+'_'+str(identity_mesh_p)+'.obj')
26 | pose_mesh = pymesh.load_mesh(self.path+'id'+str(pose_mesh_i)+'_'+str(pose_mesh_p)+'.obj')
27 | gt_mesh = pymesh.load_mesh(self.path+'id'+str(identity_mesh_i)+'_'+str(pose_mesh_p)+'.obj')
28 |
29 | identity_points = identity_mesh.vertices
30 | identity_faces = identity_mesh.faces
31 | pose_points = pose_mesh.vertices
32 | pose_faces = pose_mesh.faces
33 | gt_points = gt_mesh.vertices
34 |
35 | pose_points = pose_points - (pose_mesh.bbox[0] + pose_mesh.bbox[1]) / 2
36 | pose_points = torch.from_numpy(pose_points.astype(np.float32))
37 |
38 | identity_points = identity_points-(identity_mesh.bbox[0]+identity_mesh.bbox[1])/2
39 | identity_points = torch.from_numpy(identity_points.astype(np.float32))
40 |
41 | gt_points = gt_points-(gt_mesh.bbox[0]+gt_mesh.bbox[1]) / 2
42 | gt_points = torch.from_numpy(gt_points.astype(np.float32))
43 |
44 | random_sample = np.random.choice(self.vertex_num,size=self.vertex_num,replace=False)
45 | random_sample2 = np.random.choice(self.vertex_num,size=self.vertex_num,replace=False)
46 |
47 | new_id_faces = identity_faces
48 | new_pose_faces = pose_faces
49 |
50 |         # Shuffle the vertices randomly before input, to better match real-world settings.
51 | if self.shuffle_point:
52 | pose_points = pose_points[random_sample2]
53 | identity_points = identity_points[random_sample]
54 | gt_points = gt_points[random_sample]
55 |
56 | face_dict = {}
57 | for i in range(len(random_sample)):
58 | face_dict[random_sample[i]] = i
59 | new_f = []
60 | for i in range(len(identity_faces)):
61 | new_f.append([face_dict[identity_faces[i][0]],face_dict[identity_faces[i][1]],face_dict[identity_faces[i][2]]])
62 | new_id_faces = np.array(new_f)
63 |
64 | face_dict = {}
65 | for i in range(len(random_sample2)):
66 | face_dict[random_sample2[i]] = i
67 | new_f = []
68 | for i in range(len(pose_faces)):
69 | new_f.append([face_dict[pose_faces[i][0]],face_dict[pose_faces[i][1]],face_dict[pose_faces[i][2]]])
70 | new_pose_faces = np.array(new_f)
71 |
72 | return identity_points, pose_points, gt_points, new_id_faces, new_pose_faces
73 |
74 | def __len__(self):
75 | return len(self.datapath)
76 |
--------------------------------------------------------------------------------
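
The `face_dict` loops above build the inverse permutation of `random_sample` so that the faces index into the shuffled vertex array. A vectorized NumPy equivalent (a sketch, not code the repo itself uses; `reindex_faces` is a hypothetical helper name):

```python
import numpy as np

def reindex_faces(faces, random_sample):
    # inverse[v] = new position of old vertex v after shuffling by random_sample,
    # i.e. the same mapping as face_dict[random_sample[i]] = i above.
    inverse = np.empty(len(random_sample), dtype=np.int64)
    inverse[random_sample] = np.arange(len(random_sample))
    return inverse[faces]  # remap all three vertex ids of every face at once
```
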
/data/animal_data.py:
--------------------------------------------------------------------------------
1 | import torch.utils.data as data
2 | import torch
3 | import numpy as np
4 | import pymesh
5 |
6 | class SMAL_DATA(data.Dataset):
7 | def __init__(self, dataroot, vertex_num=3889, shuffle_point = True):
8 | self.shuffle_point = shuffle_point
9 | self.vertex_num = vertex_num
10 | self.path= dataroot
11 | self.datapath = []
12 | self.id_num = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21, 23, 24, 26, 27, 28, 29, 30, 31, 34, 35, 38, 39])
13 | for _ in range(11600):
14 | identity_i = np.random.choice(self.id_num)
15 | identity_p = np.random.randint(400)
16 | data_in = [identity_i, identity_p]
17 | self.datapath.append(data_in)
18 |
19 | def __getitem__(self, index):
20 |
21 | np.random.seed()
22 | mesh_set = self.datapath[index]
23 | identity_mesh_i = mesh_set[0]
24 | identity_mesh_p = mesh_set[1]
25 | pose_mesh_i = np.random.choice(self.id_num)
26 | pose_mesh_p = np.random.randint(400)
27 | identity_mesh = pymesh.load_mesh(self.path+'toy_'+str(identity_mesh_i)+'_'+str(identity_mesh_p)+'.ply')
28 | pose_mesh = pymesh.load_mesh(self.path+'toy_'+str(pose_mesh_i)+'_'+str(pose_mesh_p)+'.ply')
29 | gt_mesh = pymesh.load_mesh(self.path+'toy_'+str(identity_mesh_i)+'_'+str(pose_mesh_p)+'.ply')
30 |
31 | identity_points = identity_mesh.vertices
32 | identity_faces = identity_mesh.faces
33 | pose_points = pose_mesh.vertices
34 | pose_faces = pose_mesh.faces
35 | gt_points = gt_mesh.vertices
36 |
37 | pose_points = pose_points - (pose_mesh.bbox[0] + pose_mesh.bbox[1]) / 2
38 | pose_points = torch.from_numpy(pose_points.astype(np.float32))
39 |
40 | identity_points = identity_points-(identity_mesh.bbox[0]+identity_mesh.bbox[1])/2
41 | identity_points = torch.from_numpy(identity_points.astype(np.float32))
42 |
43 | gt_points = gt_points-(gt_mesh.bbox[0]+gt_mesh.bbox[1]) / 2
44 | gt_points = torch.from_numpy(gt_points.astype(np.float32))
45 |
46 | random_sample = np.random.choice(self.vertex_num,size=self.vertex_num,replace=False)
47 | random_sample2 = np.random.choice(self.vertex_num,size=self.vertex_num,replace=False)
48 |
49 | new_id_faces = identity_faces
50 | new_pose_faces = pose_faces
51 |
52 |         # Shuffle the vertices randomly before input, to better match real-world settings.
53 | if self.shuffle_point:
54 | pose_points = pose_points[random_sample2]
55 | identity_points = identity_points[random_sample]
56 | gt_points = gt_points[random_sample]
57 |
58 | face_dict = {}
59 | for i in range(len(random_sample)):
60 | face_dict[random_sample[i]] = i
61 | new_f = []
62 | for i in range(len(identity_faces)):
63 | new_f.append([face_dict[identity_faces[i][0]],face_dict[identity_faces[i][1]],face_dict[identity_faces[i][2]]])
64 | new_id_faces = np.array(new_f)
65 |
66 | face_dict = {}
67 | for i in range(len(random_sample2)):
68 | face_dict[random_sample2[i]] = i
69 | new_f = []
70 | for i in range(len(pose_faces)):
71 | new_f.append([face_dict[pose_faces[i][0]],face_dict[pose_faces[i][1]],face_dict[pose_faces[i][2]]])
72 | new_pose_faces = np.array(new_f)
73 |
74 | return identity_points, pose_points, gt_points, new_id_faces, new_pose_faces
75 |
76 | def __len__(self):
77 | return len(self.datapath)
78 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import numpy as np
4 | import pymesh
5 | from options.test_options import TestOptions
6 | from models.ver2ver_model import Ver2VerModel
7 |
8 | opt = TestOptions().parse()
9 |
10 | torch.manual_seed(0)
11 |
12 | model = Ver2VerModel(opt)
13 | model.eval()
14 |
15 | def face_reverse(faces, random_sample):
16 | identity_faces=faces
17 | face_dict = {}
18 | for i in range(len(random_sample)):
19 | face_dict[random_sample[i]] = i
20 | new_f = []
21 | for i in range(len(identity_faces)):
22 | new_f.append([face_dict[identity_faces[i][0]],face_dict[identity_faces[i][1]],face_dict[identity_faces[i][2]]])
23 | new_face = np.array(new_f)
24 | return new_face
25 |
26 | if opt.dataset_mode == 'human':
27 | print('test model on unseen data in SMPL')
28 |
29 | save_root = os.path.join(os.path.dirname(opt.checkpoints_dir), opt.results_dir, 'human')
30 | test_list_name = 'human_test_list'
31 | vertex_num = 6890
32 | elif opt.dataset_mode == 'animal':
33 | print('test model on unseen data in SMAL')
34 |
35 | save_root = os.path.join(os.path.dirname(opt.checkpoints_dir), opt.results_dir, 'animal')
36 | test_list_name = 'animal_test_list'
37 | vertex_num = 3889
38 | else:
39 | raise ValueError("|dataset_mode| is invalid")
40 |
41 | if not os.path.exists(save_root):
42 | os.makedirs(save_root)
43 |
44 | data_path = opt.dataroot
45 | PMD_test = 0.0
46 | mesh_num = 0
47 | for line in open(test_list_name, "r"):
48 | mesh_num += 1
49 | data_list = line.strip('\n').split(' ')
50 | id_mesh_name = data_list[0]
51 | pose_mesh_name = data_list[1]
52 | gt_mesh_name = data_list[2]
53 |
54 | identity_mesh = pymesh.load_mesh(data_path + id_mesh_name)
55 | pose_mesh = pymesh.load_mesh(data_path + pose_mesh_name)
56 | gt_mesh = pymesh.load_mesh(data_path + gt_mesh_name)
57 |
58 | random_sample = np.random.choice(vertex_num,size=vertex_num,replace=False)
59 | random_sample2 = np.random.choice(vertex_num,size=vertex_num,replace=False)
60 |
61 | identity_points = identity_mesh.vertices[random_sample]
62 | identity_points = identity_points - (identity_mesh.bbox[0] + identity_mesh.bbox[1]) / 2
63 | identity_points = torch.from_numpy(identity_points.astype(np.float32)).cuda()
64 |
65 | pose_points = pose_mesh.vertices[random_sample2]
66 | pose_points = pose_points - (pose_mesh.bbox[0] + pose_mesh.bbox[1]) / 2
67 | pose_points = torch.from_numpy(pose_points.astype(np.float32)).cuda()
68 |
69 | gt_mesh_points = gt_mesh.vertices[random_sample]
70 | gt_mesh_points = gt_mesh_points - (gt_mesh.bbox[0] + gt_mesh.bbox[1]) / 2
71 |
72 | # generate results
73 | out = model(identity_points.transpose(1,0).unsqueeze(0), pose_points.transpose(1,0).unsqueeze(0), None, None, mode='inference')
74 |
75 | out['fake_points'] = out['fake_points'].squeeze().transpose(1,0).cpu().detach().numpy()
76 | bbox = np.array([[np.max(out['fake_points'][:,0]),np.max(out['fake_points'][:,1]),np.max(out['fake_points'][:,2])],
77 | [np.min(out['fake_points'][:,0]),np.min(out['fake_points'][:,1]),np.min(out['fake_points'][:,2])]])
78 | out['fake_points'] = out['fake_points'] - (bbox[0] + bbox[1] ) / 2
79 |
80 | # calculate PMD
81 | PMD_test = PMD_test + np.mean((out['fake_points'] - gt_mesh_points)**2)
82 |
83 | # save the generated meshes
84 | new_face = face_reverse(identity_mesh.faces, random_sample)
85 | if opt.dataset_mode == 'human':
86 | pymesh.save_mesh_raw(save_root + '/' + gt_mesh_name, out['fake_points'], new_face)
87 | elif opt.dataset_mode == 'animal':
88 |         pymesh.save_mesh_raw(save_root + '/' + os.path.splitext(gt_mesh_name)[0] + '.obj', out['fake_points'], new_face)
89 |
90 | print('Final score for ' + test_list_name + ' is ' + str(PMD_test/mesh_num))
91 |
--------------------------------------------------------------------------------
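
The PMD score accumulated above is the mean squared coordinate error between the generated and ground-truth vertices (both bounding-box centered first), averaged over the test list. A minimal sketch of the per-mesh term:

```python
import numpy as np

def pmd(fake_points, gt_points):
    # per-mesh PMD term, as in the loop above: mean over all vertices
    # and xyz coordinates of the squared difference
    return np.mean((fake_points - gt_points) ** 2)
```
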
/train.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import pymesh
4 | import torch
5 |
6 | from data.human_data import SMPL_DATA
7 | from data.animal_data import SMAL_DATA
8 | from ver2ver_trainer import Ver2VerTrainer
9 | from options.train_options import TrainOptions
10 | from util.iter_counter import IterationCounter
11 | from util.util import print_current_errors
12 |
13 |
14 | # parse options
15 | opt = TrainOptions().parse()
16 |
17 | # print options to help debugging
18 | print(' '.join(sys.argv))
19 |
20 | # load the dataset
21 | if opt.dataset_mode == 'human':
22 | dataset = SMPL_DATA(opt.dataroot, shuffle_point = True)
23 | elif opt.dataset_mode == 'animal':
24 | dataset = SMAL_DATA(opt.dataroot, shuffle_point = True)
25 | else:
26 | raise ValueError("|dataset_mode| is invalid")
27 | dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.nThreads), drop_last=opt.isTrain)
28 |
29 |
30 | # create tool for counting iterations
31 | iter_counter = IterationCounter(opt, len(dataloader))
32 |
33 | # create trainer for our model
34 | trainer = Ver2VerTrainer(opt)
35 |
36 | # save root of the outputs
37 | save_root = os.path.join(os.path.dirname(opt.checkpoints_dir), 'output', opt.dataset_mode)
38 | if not os.path.exists(save_root):
39 | os.makedirs(save_root)
40 |
41 |
42 | for epoch in iter_counter.training_epochs():
43 | iter_counter.record_epoch_start(epoch)
44 | for i, data_i in enumerate(dataloader, start=iter_counter.epoch_iter):
45 | iter_counter.record_one_iteration()
46 |
47 | # get data
48 | identity_points, pose_points, gt_points, id_face, pose_face = data_i
49 |
50 | # training
51 | trainer.train_model(identity_points, pose_points, gt_points, id_face)
52 |
53 | # print loss
54 | if iter_counter.needs_printing():
55 | losses = trainer.get_latest_losses()
56 | try:
57 | print_current_errors(opt, epoch, iter_counter.epoch_iter,
58 | losses, iter_counter.time_per_iter)
59 | except OSError as err:
60 | print(err)
61 |
62 | # save mesh
63 | if iter_counter.needs_displaying():
64 | try:
65 | pymesh.save_mesh_raw(save_root + '/' + str(epoch) + '_' + str(iter_counter.total_steps_so_far) + '_id.obj',
66 | identity_points[0,:,:].cpu().numpy(),id_face[0,:,:].cpu().numpy())
67 | pymesh.save_mesh_raw(save_root + '/' + str(epoch) + '_' + str(iter_counter.total_steps_so_far) + '_pose.obj',
68 | pose_points[0,:,:].cpu().numpy(),pose_face[0,:,:].cpu().numpy())
69 | pymesh.save_mesh_raw(save_root + '/' + str(epoch) + '_' + str(iter_counter.total_steps_so_far) + '_warp.obj',
70 | trainer.out['warp_out'][0,:,:].cpu().detach().numpy(),id_face[0,:,:].cpu().numpy())
71 | pymesh.save_mesh_raw(save_root + '/' + str(epoch) + '_' + str(iter_counter.total_steps_so_far) + '_out.obj',
72 | trainer.get_latest_generated().data[0,:,:].cpu().detach().numpy().transpose(1,0),id_face[0,:,:].cpu().numpy())
73 | except OSError as err:
74 | print(err)
75 |
76 | if iter_counter.needs_saving():
77 | print('saving the latest model (epoch %d, total_steps %d)' %
78 | (epoch, iter_counter.total_steps_so_far))
79 | try:
80 | trainer.save('latest')
81 | iter_counter.record_current_iter()
82 | except OSError as err:
83 | print(err)
84 |
85 | trainer.update_learning_rate(epoch)
86 | iter_counter.record_epoch_end()
87 |
88 | if epoch % opt.save_epoch_freq == 0 or \
89 | epoch == iter_counter.total_epochs:
90 | print('saving the model at the end of epoch %d, iters %d' %
91 | (epoch, iter_counter.total_steps_so_far))
92 | try:
93 | trainer.save('latest')
94 | trainer.save(epoch)
95 | except OSError as err:
96 | print(err)
97 |
98 | print('Training was successfully finished.')
99 |
--------------------------------------------------------------------------------
/models/ver2ver_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import models.networks as networks
3 | import util.util as util
4 |
5 |
6 | class Ver2VerModel(torch.nn.Module):
7 | @staticmethod
8 | def modify_commandline_options(parser, is_train):
9 | networks.modify_commandline_options(parser, is_train)
10 | return parser
11 |
12 | def __init__(self, opt):
13 | super().__init__()
14 |
15 | self.opt = opt
16 | self.net = torch.nn.ModuleDict(self.initialize_networks(opt))
17 |
18 | def forward(self, identity_points, pose_points, gt_points, id_face, mode):
19 | if mode == 'inference':
20 | pass
21 | else:
22 | identity_points=identity_points.transpose(2,1) #(bs, 3, n)
23 | identity_points=identity_points.cuda()
24 |
25 | pose_points=pose_points.transpose(2,1)
26 | pose_points=pose_points.cuda()
27 |
28 | gt_points=gt_points.transpose(2,1)
29 | gt_points=gt_points.cuda()
30 |
31 | generated_out = {}
32 | if mode == 'train':
33 |
34 | loss, generated_out = self.compute_loss(identity_points, pose_points, gt_points, id_face)
35 |
36 | out = {}
37 | out['fake_points'] = generated_out['fake_points']
38 | out['identity_points'] = identity_points
39 | out['pose_points'] = pose_points
40 | out['warp_out'] = None if 'warp_out' not in generated_out else generated_out['warp_out']
41 | return loss, out
42 |
43 | elif mode == 'inference':
44 | out = {}
45 | with torch.no_grad():
46 | out = self.inference(identity_points, pose_points)
47 | out['identity_points'] = identity_points
48 | out['pose_points'] = pose_points
49 | return out
50 | else:
51 | raise ValueError("|mode| is invalid")
52 |
53 | def create_optimizers(self, opt):
54 | G_params = list()
55 | G_params += [{'params': self.net['netG'].parameters(), 'lr': opt.lr}]
56 | G_params += [{'params': self.net['netCorr'].parameters(), 'lr': opt.lr}]
57 |
58 | beta1, beta2 = opt.beta1, opt.beta2
59 | G_lr = opt.lr
60 |
61 | optimizer = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2), eps=1e-3)
62 |
63 | return optimizer
64 |
65 | def save(self, epoch):
66 | util.save_network(self.net['netG'], 'G', epoch, self.opt)
67 | util.save_network(self.net['netCorr'], 'Corr', epoch, self.opt)
68 |
69 | ############################################################################
70 | # Private helper methods
71 | ############################################################################
72 |
73 | def initialize_networks(self, opt):
74 | net = {}
75 | net['netG'] = networks.define_G(opt)
76 | net['netCorr'] = networks.define_Corr(opt)
77 |
78 | if not opt.isTrain or opt.continue_train:
79 | net['netG'] = util.load_network(net['netG'], 'G', opt.which_epoch, opt)
80 | net['netCorr'] = util.load_network(net['netCorr'], 'Corr', opt.which_epoch, opt)
81 |
82 | return net
83 |
84 | def compute_loss(self, identity_points, pose_points, gt_points, id_face):
85 | losses = {}
86 | generate_out = self.generate_fake(identity_points, pose_points)
87 |
88 | # edge loss
89 | losses['edge_loss'] = 0.0
90 | for i in range(len(identity_points)):
91 | f = id_face[i].cpu().numpy()
92 | v = identity_points[i].transpose(0,1).cpu().numpy()
93 | losses['edge_loss'] = losses['edge_loss'] + util.compute_score(generate_out['fake_points'][i].transpose(1,0).unsqueeze(0),f,util.get_target(v,f,1))
94 | losses['edge_loss'] = losses['edge_loss']/len(identity_points) * self.opt.lambda_edge
95 |
96 | # reconstruction loss
97 | losses['rec_loss'] = torch.mean((generate_out['fake_points'] - gt_points)**2) * self.opt.lambda_rec
98 |
99 | return losses, generate_out
100 |
101 | def generate_fake(self, identity_points, pose_points):
102 | generate_out = {}
103 |
104 | corr_out = self.net['netCorr'](pose_points, identity_points)
105 | generate_out['fake_points'] = self.net['netG'](corr_out['id_features'], corr_out['warp_out'])
106 |
107 | generate_out = {**generate_out, **corr_out}
108 | return generate_out
109 |
110 | def inference(self, identity_points, pose_points):
111 | generate_out = {}
112 |
113 | corr_out = self.net['netCorr'](pose_points, identity_points)
114 | generate_out['fake_points'] = self.net['netG'](corr_out['id_features'], corr_out['warp_out'])
115 |
116 | generate_out = {**generate_out, **corr_out}
117 | return generate_out
118 |
119 | def use_gpu(self):
120 | return len(self.opt.gpu_ids) > 0
--------------------------------------------------------------------------------
/models/networks/correspondence.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import torch
3 | import torch.nn as nn
4 | from models.networks.base_network import BaseNetwork
5 | from models.networks.generator import AdaptiveFeatureGenerator
6 | import util.util as util
7 |
8 | class ResidualBlock(nn.Module):
9 |
10 | def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1):
11 | super(ResidualBlock, self).__init__()
12 | self.padding1 = nn.ReflectionPad1d(padding)
13 | self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride)
14 | self.in1 = nn.InstanceNorm1d(out_channels)
15 | self.prelu = nn.PReLU()
16 | self.padding2 = nn.ReflectionPad1d(padding)
17 | self.conv2 = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride)
18 | self.in2 = nn.InstanceNorm1d(out_channels)
19 |
20 | def forward(self, x):
21 | residual = x
22 | out = self.padding1(x)
23 | out = self.conv1(out)
24 | out = self.in1(out)
25 | out = self.prelu(out)
26 | out = self.padding2(out)
27 | out = self.conv2(out)
28 | out = self.in2(out)
29 | out += residual
30 | out = self.prelu(out)
31 | return out
32 |
33 | class Correspondence(BaseNetwork):
34 |
35 | def __init__(self, opt):
36 | self.opt = opt
37 | super().__init__()
38 |
39 | self.adaptive_feature = AdaptiveFeatureGenerator(opt)
40 |
41 | self.feature_channel = 64
42 | self.in_channels = self.feature_channel * 4
43 | self.inter_channels = 256
44 |
45 | self.layer = nn.Sequential(
46 | ResidualBlock(self.feature_channel * 4, self.feature_channel * 4, kernel_size=1, padding=0, stride=1),
47 | ResidualBlock(self.feature_channel * 4, self.feature_channel * 4, kernel_size=1, padding=0, stride=1),
48 | ResidualBlock(self.feature_channel * 4, self.feature_channel * 4, kernel_size=1, padding=0, stride=1),
49 | ResidualBlock(self.feature_channel * 4, self.feature_channel * 4, kernel_size=1, padding=0, stride=1))
50 |
51 | self.phi = nn.Conv1d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
52 | self.theta = nn.Conv1d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
53 |
54 | def forward(self, pose_points, identity_points):
55 | corr_out = {}
56 |
57 | # Extract feature
58 | id_features= self.adaptive_feature(identity_points)
59 | pose_features= self.adaptive_feature(pose_points)
60 | id_features = util.feature_normalize(id_features)
61 | pose_features = util.feature_normalize(pose_features)
62 |
63 | id_features = self.layer(id_features)
64 | pose_features = self.layer(pose_features)
65 | corr_out['id_features'] = id_features
66 | corr_out['pose_features'] = pose_features
67 |
68 | # Correlation matrix C (cosine similarity)
69 | theta = self.theta(id_features)
70 | theta_norm = torch.norm(theta, 2, 1, keepdim=True) + sys.float_info.epsilon
71 | theta = torch.div(theta, theta_norm)
72 | theta_permute = theta.permute(0, 2, 1)
73 |
74 | phi = self.phi(pose_features)
75 | phi_norm = torch.norm(phi, 2, 1, keepdim=True) + sys.float_info.epsilon
76 | phi = torch.div(phi, phi_norm)
77 |
78 | C_Matrix = torch.matmul(theta_permute, phi)
79 |
80 | # Optimal Transport
81 | K = torch.exp(-(1.0 - C_Matrix) / 0.03)
82 |
83 | # Init. of Sinkhorn algorithm
84 |         power = 1  # gamma / (gamma + epsilon)
85 | a = (
86 | torch.ones(
87 | (K.shape[0], K.shape[1], 1), device=theta.device, dtype=theta.dtype
88 | )
89 | / K.shape[1]
90 | )
91 | prob1 = (
92 | torch.ones(
93 | (K.shape[0], K.shape[1], 1), device=theta.device, dtype=theta.dtype
94 | )
95 | / K.shape[1]
96 | )
97 | prob2 = (
98 | torch.ones(
99 | (K.shape[0], K.shape[2], 1), device=phi.device, dtype=phi.dtype
100 | )
101 | / K.shape[2]
102 | )
103 |
104 | # Sinkhorn algorithm
105 | for _ in range(5):
106 | # Update b
107 | KTa = torch.bmm(K.transpose(1, 2), a)
108 | b = torch.pow(prob2 / (KTa + 1e-8), power)
109 | # Update a
110 | Kb = torch.bmm(K, b)
111 | a = torch.pow(prob1 / (Kb + 1e-8), power)
112 |
113 | # Optimal matching matrix Tm
114 | T_m = torch.mul(torch.mul(a, K), b.transpose(1, 2))
115 | T_m_norm = T_m / torch.sum(T_m, dim=2, keepdim=True)
116 | pose_for_warp = pose_points.permute(0, 2, 1)
117 |
118 | # Warped points
119 | corr_out['warp_out'] = torch.matmul(T_m_norm, pose_for_warp)
120 |
121 | return corr_out
122 |
--------------------------------------------------------------------------------
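
For reference, a self-contained sketch of the optimal-transport step above under the same assumptions (uniform marginals, `power = 1`, five Sinkhorn iterations, temperature 0.03); `sinkhorn_matching` is an illustrative name, not a function in the repo:

```python
import torch

def sinkhorn_matching(C, iters=5, temperature=0.03, eps=1e-8):
    # C: (bs, n, m) cosine-similarity matrix between identity and pose features.
    K = torch.exp(-(1.0 - C) / temperature)  # similarity -> transport kernel
    bs, n, m = K.shape
    a = K.new_ones(bs, n, 1) / n             # Sinkhorn scaling vector
    prob1 = K.new_ones(bs, n, 1) / n         # uniform source marginal
    prob2 = K.new_ones(bs, m, 1) / m         # uniform target marginal
    for _ in range(iters):
        b = prob2 / (torch.bmm(K.transpose(1, 2), a) + eps)  # update b
        a = prob1 / (torch.bmm(K, b) + eps)                  # update a
    T = a * K * b.transpose(1, 2)            # transport plan T_m
    return T / T.sum(dim=2, keepdim=True)    # row-normalized matching matrix
```
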
/util/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | import os
4 | import sys
5 | import importlib
6 |
7 | def feature_normalize(feature_in):
8 | feature_in_norm = torch.norm(feature_in, 2, 1, keepdim=True) + sys.float_info.epsilon
9 | feature_in_norm = torch.div(feature_in, feature_in_norm)
10 | return feature_in_norm
11 |
12 | def weighted_l1_loss(input, target, weights):
13 | out = torch.abs(input - target)
14 | out = out * weights.expand_as(out)
15 | loss = out.mean()
16 | return loss
17 |
18 | def mse_loss(input, target=0):
19 | return torch.mean((input - target)**2)
20 |
21 | def mkdirs(paths):
22 | if isinstance(paths, list) and not isinstance(paths, str):
23 | for path in paths:
24 | mkdir(path)
25 | else:
26 | mkdir(paths)
27 |
28 | def mkdir(path):
29 | if not os.path.exists(path):
30 | os.makedirs(path)
31 |
32 | def find_class_in_module(target_cls_name, module):
33 | target_cls_name = target_cls_name.replace('_', '').lower()
34 | clslib = importlib.import_module(module)
35 | cls = None
36 | for name, clsobj in clslib.__dict__.items():
37 | if name.lower() == target_cls_name:
38 | cls = clsobj
39 |
40 | if cls is None:
41 | print("In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name))
42 | exit(0)
43 |
44 | return cls
45 |
46 | def print_network(model):
47 | num_params = 0
48 | for param in model.parameters():
49 | num_params += param.numel()
50 | print('Network [%s] was created. Total number of parameters: %.1f million. '
51 | 'To see the architecture, do print(network).'
52 | % (type(model).__name__, num_params / 1000000))
53 |
54 | def save_network(net, label, epoch, opt):
55 | save_filename = '%s_net_%s.pth' % (epoch, label)
56 | save_path = os.path.join(opt.checkpoints_dir, opt.dataset_mode, save_filename)
57 | torch.save(net.cpu().state_dict(), save_path)
58 | if len(opt.gpu_ids) and torch.cuda.is_available():
59 | net.cuda()
60 |
61 | def load_network(net, label, epoch, opt):
62 | save_filename = '%s_net_%s.pth' % (epoch, label)
63 | save_path = os.path.join(opt.checkpoints_dir, opt.dataset_mode, save_filename)
64 | # save_path = os.path.join(save_dir, save_filename)
65 | if not os.path.exists(save_path):
66 |         print('Could not find model at ' + save_path + ', weights were not loaded!')
67 | return net
68 | weights = torch.load(save_path)
69 | try:
70 | net.load_state_dict(weights)
71 | except KeyError:
72 |         print('Key error, weights were not loaded!')
73 | except RuntimeError as err:
74 | print(err)
75 | net.load_state_dict(weights, strict=False)
76 | print('loaded with strict=False')
77 | return net
78 |
79 | def print_current_errors(opt, epoch, i, errors, t):
80 | message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
81 | for k, v in errors.items():
82 | v = v.mean().float()
83 | message += '%s: %.3f ' % (k, v)
84 |
85 | print(message)
86 | log_name = os.path.join(opt.checkpoints_dir, opt.dataset_mode, 'loss_log.txt')
87 | with open(log_name, "a") as log_file:
88 | log_file.write('%s\n' % message)
89 |
90 | def init_regul(source_vertices, source_faces):
91 | sommet_A_source = source_vertices[source_faces[:, 0]]
92 | sommet_B_source = source_vertices[source_faces[:, 1]]
93 | sommet_C_source = source_vertices[source_faces[:, 2]]
94 | target = []
95 | target.append(np.sqrt( np.sum((sommet_A_source - sommet_B_source) ** 2, axis=1)))
96 | target.append(np.sqrt( np.sum((sommet_B_source - sommet_C_source) ** 2, axis=1)))
97 | target.append(np.sqrt( np.sum((sommet_A_source - sommet_C_source) ** 2, axis=1)))
98 | return target
99 |
100 | def get_target(vertice, face, size):
101 | target = init_regul(vertice,face)
102 | target = np.array(target)
103 | target = torch.from_numpy(target).float().cuda()
104 | #target = target+0.0001
105 | target = target.unsqueeze(1).expand(3,size,-1)
106 | return target
107 |
108 | def compute_score(points, faces, target):
109 | score = 0
110 | sommet_A = points[:,faces[:, 0]]
111 | sommet_B = points[:,faces[:, 1]]
112 | sommet_C = points[:,faces[:, 2]]
113 |
114 | score = torch.abs(torch.sqrt(torch.sum((sommet_A - sommet_B) ** 2, dim=2)) / target[0] -1)
115 | score = score + torch.abs(torch.sqrt(torch.sum((sommet_B - sommet_C) ** 2, dim=2)) / target[1] -1)
116 | score = score + torch.abs(torch.sqrt(torch.sum((sommet_A - sommet_C) ** 2, dim=2)) / target[2] -1)
117 | return torch.mean(score)
118 |
119 | def weights_init(m):
120 | classname = m.__class__.__name__
121 | if classname.find('Conv') != -1:
122 | m.weight.data.normal_(0.0, 0.02)
123 |
124 | def calc_mean_std(feat, eps=1e-5):
125 | # eps is a small value added to the variance to avoid divide-by-zero.
126 | size = feat.size()
127 | assert (len(size) == 3)
128 | N, C = size[:2]
129 | feat_var = feat.view(N, C, -1).var(dim=2) + eps
130 | feat_std = feat_var.sqrt().view(N, C, 1)
131 | feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1)
132 | return feat_mean, feat_std
133 |
--------------------------------------------------------------------------------
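
`compute_score` penalizes relative edge-length distortion: for each triangle edge it measures |len/len_ref - 1| against the reference lengths from `init_regul`. A tiny CPU-only worked example (the reference lengths are built inline here because `get_target` above moves its tensor to CUDA):

```python
import torch

# One triangle: A=(0,0,0), B=(1,0,0), C=(0,1,0); points shaped (1, n_verts, 3)
points = torch.tensor([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])
faces = torch.tensor([[0, 1, 2]])

A, B, C = points[:, faces[:, 0]], points[:, faces[:, 1]], points[:, faces[:, 2]]
lengths = torch.stack([((A - B) ** 2).sum(-1).sqrt(),   # |AB| = 1
                       ((B - C) ** 2).sum(-1).sqrt(),   # |BC| = sqrt(2)
                       ((A - C) ** 2).sum(-1).sqrt()])  # |AC| = 1
target = lengths.clone()                 # reference mesh = identical triangle
score = (lengths / target - 1).abs().mean()
print(score)                             # tensor(0.) -- no edge distortion
```
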
/README.md:
--------------------------------------------------------------------------------
1 | # 3D Pose Transfer with Correspondence Learning and Mesh Refinement
2 |
3 | #### [Project](https://chaoyuesong.github.io/3D-CoreNet/) | [Paper](https://openreview.net/pdf?id=fG01Z_unHC) | [Video](https://www.youtube.com/watch?v=HlrTCtGZPjg) | [Video (in Chinese)](https://www.bilibili.com/video/BV1rq4y1q7pZ?spm_id_from=333.999.0.0)
4 |
5 | **3D Pose Transfer with Correspondence Learning and Mesh Refinement**
6 | Chaoyue Song,
7 | Jiacheng Wei,
8 | Ruibo Li,
9 | [Fayao Liu](https://sites.google.com/site/fayaoliu/),
10 | [Guosheng Lin](https://guosheng.github.io/)
11 | in NeurIPS, 2021.
12 |
13 |
14 |
15 |
16 | ## News
17 | * `21/11/2022` We have released our latest work on unsupervised 3D pose transfer; please check it [here](https://arxiv.org/abs/2211.10278). In that paper, we present X-DualNet, an unsupervised deep learning framework that solves the 3D pose transfer problem in an end-to-end fashion. Through extensive experiments on human and animal meshes, we demonstrate that X-DualNet achieves performance comparable to state-of-the-art supervised approaches, both qualitatively and quantitatively, and even outperforms some of them. The code of X-DualNet will be released soon [here](https://github.com/ChaoyueSong/X-DualNet).
18 |
19 | ## Installation
20 | - Clone this repo:
21 | ```bash
22 | git clone https://github.com/ChaoyueSong/3d-corenet.git
23 | cd 3d-corenet
24 | ```
25 |
26 | - Install the dependencies. Our code has been tested with Python 3.6 and PyTorch 1.8 (earlier versions also work; please install PyTorch according to your CUDA version). We also need pymesh.
27 | ```bash
28 | conda create -n 3d_corenet python=3.6
29 | conda activate 3d_corenet
30 | # install pytorch and pymesh
31 | conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cudatoolkit=11.1 -c pytorch -c conda-forge
32 | conda install -c conda-forge pymesh2
33 | ```
34 |
35 | - Clone the Synchronized-BatchNorm-PyTorch repo.
36 | ```bash
37 | cd models/networks/
38 | git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
39 | cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm .
40 | cd ../../
41 | ```
42 |
43 | ## Dataset preparation
44 | We use [SMPL](https://smpl.is.tue.mpg.de/) for the human mesh data; please download it [here](https://drive.google.com/drive/folders/11LbPXbDg4F_pSIr0sMHzWI08FOC8XvSY). We generate our animal mesh data using [SMAL](https://smal.is.tue.mpg.de/); please download it [here](https://drive.google.com/drive/folders/1uP6H0j7mUJ6utgvXxpT-2rn4EYhJ3el5?usp=sharing).
45 |
46 | ## Generating Meshes Using Pretrained model
47 | By default, the latest checkpoint is loaded; this can be changed with `--which_epoch`.
48 |
49 | #### 1) SMPL (human)
50 | Download the pretrained model from [pretrained model link](https://drive.google.com/drive/folders/1pZqw_AU7VpVOnop6HSv6WeRGfCEPt2lm?usp=sharing) and save it in `checkpoints/human`. Then run:
51 | ```bash
52 | python test.py --dataset_mode human --dataroot [Your data path] --gpu_ids 0
53 | ```
54 | The results will be saved in `test_results/human/` by default. `human_test_list` is a randomly chosen test split.
55 |
56 | #### 2) SMAL (animal)
57 | Download the pretrained model from [pretrained model link](https://drive.google.com/drive/folders/1v3Iz51MtcYsLoKAu9XAdFG6JmhxLHWJ-?usp=sharing) and save it in `checkpoints/animal`. Then run:
58 | ```bash
59 | python test.py --dataset_mode animal --dataroot [Your data path] --gpu_ids 0
60 | ```
61 | The results will be saved in `test_results/animal/` by default. `animal_test_list` is a randomly chosen test split. For the calculation of CD and EMD, please check [TMNet](https://github.com/jnypan/TMNet) and [MSN](https://github.com/Colin97/MSN-Point-Cloud-Completion).
62 |
63 | ## Training
64 | #### 1) SMPL (human)
65 | To train new models on human meshes, please run:
66 | ```bash
67 | python train.py --dataset_mode human --dataroot [Your data path] --niter 100 --niter_decay 100 --batchSize 8 --gpu_ids 0,1
68 | ```
69 | The output meshes in the training process will be saved in `output/human/`.
70 | #### 2) SMAL (animal)
71 | To train new models on animal meshes, please run:
72 | ```bash
73 | python train.py --dataset_mode animal --dataroot [Your data path] --niter 100 --niter_decay 100 --batchSize 8 --gpu_ids 0,1
74 | ```
75 | The meshes generated during training will be saved in `output/animal/`.
76 |
77 |
78 | Please adjust `--batchSize` and `--gpu_ids` as desired.
79 | 
80 | To continue training from a saved checkpoint, add `--continue_train`.
81 | ## Citation
82 | If you use this code for your research, please cite the following work.
83 |
84 | ```bibtex
85 | @inproceedings{song20213d,
86 | title={3D Pose Transfer with Correspondence Learning and Mesh Refinement},
87 | author={Song, Chaoyue and Wei, Jiacheng and Li, Ruibo and Liu, Fayao and Lin, Guosheng},
88 | booktitle={Thirty-Fifth Conference on Neural Information Processing Systems},
89 | year={2021}
90 | }
91 | ```
92 |
93 | ## Acknowledgments
94 |
95 | This code is heavily based on [CoCosNet](https://github.com/microsoft/CoCosNet). We rewrite the [pix2pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) framework into our ver2ver architecture. We also use the Optimal Transport code from [FLOT](https://github.com/valeoai/FLOT), the data and edge loss code from [NPT](https://github.com/jiashunwang/Neural-Pose-Transfer), and [Synchronized Batch Normalization](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch).
96 |
97 | We thank all authors for the wonderful code!
98 |
99 |
100 |
101 |
--------------------------------------------------------------------------------
/options/base_options.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import argparse
3 | import os
4 | from util import util
5 | import torch
6 | import models
7 | import pickle
8 |
9 |
10 | class BaseOptions():
11 | def __init__(self):
12 | self.initialized = False
13 |
14 | def initialize(self, parser):
15 | # experiment specifics
16 |         parser.add_argument('--gpu_ids', type=str, default='0,1,2,3', help='gpu ids, e.g. 0 or 0,1,2; use -1 for CPU')
17 | parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
18 | parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
19 | parser.add_argument('--batchSize', type=int, default=8, help='input batch size')
20 | parser.add_argument('--model', type=str, default='ver2ver', help='which model to use')
21 |
22 | # for setting inputs
23 | parser.add_argument('--dataroot', type=str, default='./npt-data')
24 | parser.add_argument('--dataset_mode', type=str, default='human')
25 | parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')
26 | parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
27 | parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default')
28 |
29 | # for generator
30 | parser.add_argument('--netG', type=str, default='elain', help='selects model to use for netG')
31 | parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
32 | parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
33 | parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')
34 |
35 | self.initialized = True
36 | return parser
37 |
38 | def gather_options(self):
39 | # initialize parser with basic options
40 | if not self.initialized:
41 | parser = argparse.ArgumentParser(
42 | formatter_class=argparse.ArgumentDefaultsHelpFormatter)
43 | parser = self.initialize(parser)
44 |
45 | # get the basic options
46 | opt, unknown = parser.parse_known_args()
47 |
48 | # modify model-related parser options
49 | model_name = opt.model
50 | model_option_setter = models.get_option_setter(model_name)
51 | parser = model_option_setter(parser, self.isTrain)
52 |
53 | opt, unknown = parser.parse_known_args()
54 |
55 | # if there is opt_file, load it.
56 | # The previous default options will be overwritten
57 | if opt.load_from_opt_file:
58 | parser = self.update_options_from_file(parser, opt)
59 |
60 | opt = parser.parse_args()
61 | self.parser = parser
62 | return opt
63 |
64 | def print_options(self, opt):
65 | message = ''
66 | message += '----------------- Options ---------------\n'
67 | for k, v in sorted(vars(opt).items()):
68 | comment = ''
69 | default = self.parser.get_default(k)
70 | if v != default:
71 | comment = '\t[default: %s]' % str(default)
72 | message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
73 | message += '----------------- End -------------------'
74 | print(message)
75 |
76 | def option_file_path(self, opt, makedir=False):
77 | expr_dir = opt.checkpoints_dir
78 | if makedir:
79 | util.mkdirs(expr_dir)
80 | util.mkdirs(os.path.join(expr_dir, opt.dataset_mode))
81 | file_name = os.path.join(expr_dir, opt.dataset_mode, 'opt')
82 | return file_name
83 |
84 | def save_options(self, opt):
85 | file_name = self.option_file_path(opt, makedir=True)
86 | with open(file_name + '.txt', 'wt') as opt_file:
87 | for k, v in sorted(vars(opt).items()):
88 | comment = ''
89 | default = self.parser.get_default(k)
90 | if v != default:
91 | comment = '\t[default: %s]' % str(default)
92 | opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
93 |
94 | with open(file_name + '.pkl', 'wb') as opt_file:
95 | pickle.dump(opt, opt_file)
96 |
97 | def update_options_from_file(self, parser, opt):
98 | new_opt = self.load_options(opt)
99 | for k, v in sorted(vars(opt).items()):
100 | if hasattr(new_opt, k) and v != getattr(new_opt, k):
101 | new_val = getattr(new_opt, k)
102 | parser.set_defaults(**{k: new_val})
103 | return parser
104 |
105 | def load_options(self, opt):
106 | file_name = self.option_file_path(opt, makedir=False)
107 | new_opt = pickle.load(open(file_name + '.pkl', 'rb'))
108 | return new_opt
109 |
110 | def parse(self, save=False):
111 |
112 | opt = self.gather_options() #gather options from base, train, dataset, model
113 | opt.isTrain = self.isTrain # train or test
114 |
115 | self.print_options(opt)
116 | if opt.isTrain:
117 | self.save_options(opt)
118 |
119 | # set gpu ids
120 | str_ids = opt.gpu_ids.split(',')
121 | opt.gpu_ids = []
122 | for str_id in str_ids:
123 |             gpu_id = int(str_id)
124 |             if gpu_id >= 0:
125 |                 opt.gpu_ids.append(gpu_id)
126 | if len(opt.gpu_ids) > 0:
127 | torch.cuda.set_device(opt.gpu_ids[0])
128 |
129 | assert len(opt.gpu_ids) == 0 or opt.batchSize % len(opt.gpu_ids) == 0, \
130 | "Batch size %d is wrong. It must be a multiple of # GPUs %d." \
131 | % (opt.batchSize, len(opt.gpu_ids))
132 |
133 | self.opt = opt
134 | return self.opt
135 |
--------------------------------------------------------------------------------
/human_test_list:
--------------------------------------------------------------------------------
1 | id29_752.obj id21_779.obj id29_779.obj
2 | id24_618.obj id25_780.obj id24_780.obj
3 | id24_636.obj id16_638.obj id24_638.obj
4 | id26_773.obj id17_649.obj id26_649.obj
5 | id17_665.obj id21_736.obj id17_736.obj
6 | id23_718.obj id24_712.obj id23_712.obj
7 | id19_768.obj id25_612.obj id19_612.obj
8 | id16_616.obj id28_604.obj id16_604.obj
9 | id28_767.obj id23_628.obj id28_628.obj
10 | id25_787.obj id20_730.obj id25_730.obj
11 | id21_607.obj id25_614.obj id21_614.obj
12 | id25_743.obj id26_665.obj id25_665.obj
13 | id19_626.obj id25_759.obj id19_759.obj
14 | id24_753.obj id22_633.obj id24_633.obj
15 | id23_686.obj id20_713.obj id23_713.obj
16 | id16_616.obj id23_623.obj id16_623.obj
17 | id23_702.obj id19_759.obj id23_759.obj
18 | id26_644.obj id24_714.obj id26_714.obj
19 | id25_696.obj id25_656.obj id25_656.obj
20 | id21_656.obj id20_697.obj id21_697.obj
21 | id17_768.obj id24_719.obj id17_719.obj
22 | id23_659.obj id17_730.obj id23_730.obj
23 | id23_730.obj id18_765.obj id23_765.obj
24 | id18_730.obj id26_777.obj id18_777.obj
25 | id27_674.obj id21_658.obj id27_658.obj
26 | id17_662.obj id26_656.obj id17_656.obj
27 | id22_601.obj id17_752.obj id22_752.obj
28 | id21_783.obj id23_613.obj id21_613.obj
29 | id24_732.obj id25_736.obj id24_736.obj
30 | id26_706.obj id28_697.obj id26_697.obj
31 | id28_674.obj id27_627.obj id28_627.obj
32 | id27_653.obj id29_627.obj id27_627.obj
33 | id22_672.obj id28_731.obj id22_731.obj
34 | id27_738.obj id21_766.obj id27_766.obj
35 | id19_682.obj id16_649.obj id19_649.obj
36 | id27_602.obj id23_618.obj id27_618.obj
37 | id18_784.obj id20_640.obj id18_640.obj
38 | id26_724.obj id19_716.obj id26_716.obj
39 | id19_684.obj id26_698.obj id19_698.obj
40 | id28_704.obj id29_692.obj id28_692.obj
41 | id20_607.obj id29_779.obj id20_779.obj
42 | id23_618.obj id16_747.obj id23_747.obj
43 | id23_775.obj id19_767.obj id23_767.obj
44 | id21_722.obj id16_706.obj id21_706.obj
45 | id20_764.obj id22_640.obj id20_640.obj
46 | id24_669.obj id27_724.obj id24_724.obj
47 | id22_638.obj id20_788.obj id22_788.obj
48 | id22_616.obj id25_657.obj id22_657.obj
49 | id26_741.obj id16_717.obj id26_717.obj
50 | id26_797.obj id17_660.obj id26_660.obj
51 | id22_651.obj id27_614.obj id22_614.obj
52 | id27_740.obj id18_699.obj id27_699.obj
53 | id26_651.obj id20_745.obj id26_745.obj
54 | id29_736.obj id29_672.obj id29_672.obj
55 | id24_740.obj id23_757.obj id24_757.obj
56 | id27_770.obj id27_629.obj id27_629.obj
57 | id28_638.obj id19_751.obj id28_751.obj
58 | id27_691.obj id27_740.obj id27_740.obj
59 | id21_611.obj id20_792.obj id21_792.obj
60 | id28_659.obj id17_621.obj id28_621.obj
61 | id19_644.obj id26_692.obj id19_692.obj
62 | id25_731.obj id25_764.obj id25_764.obj
63 | id20_635.obj id25_763.obj id20_763.obj
64 | id21_610.obj id22_735.obj id21_735.obj
65 | id25_604.obj id17_677.obj id25_677.obj
66 | id25_652.obj id18_781.obj id25_781.obj
67 | id21_765.obj id25_728.obj id21_728.obj
68 | id25_722.obj id21_735.obj id25_735.obj
69 | id20_660.obj id28_683.obj id20_683.obj
70 | id23_774.obj id16_603.obj id23_603.obj
71 | id18_709.obj id20_745.obj id18_745.obj
72 | id18_696.obj id26_725.obj id18_725.obj
73 | id26_788.obj id17_615.obj id26_615.obj
74 | id24_622.obj id17_773.obj id24_773.obj
75 | id25_771.obj id25_638.obj id25_638.obj
76 | id20_686.obj id17_782.obj id20_782.obj
77 | id19_679.obj id20_665.obj id19_665.obj
78 | id18_710.obj id18_637.obj id18_637.obj
79 | id18_694.obj id17_663.obj id18_663.obj
80 | id29_769.obj id22_681.obj id29_681.obj
81 | id27_708.obj id20_760.obj id27_760.obj
82 | id18_645.obj id24_681.obj id18_681.obj
83 | id24_723.obj id28_609.obj id24_609.obj
84 | id29_750.obj id19_754.obj id29_754.obj
85 | id26_626.obj id22_685.obj id26_685.obj
86 | id26_785.obj id22_733.obj id26_733.obj
87 | id18_693.obj id18_706.obj id18_706.obj
88 | id26_659.obj id18_783.obj id26_783.obj
89 | id22_643.obj id29_633.obj id22_633.obj
90 | id23_614.obj id24_640.obj id23_640.obj
91 | id17_700.obj id26_666.obj id17_666.obj
92 | id28_664.obj id20_792.obj id28_792.obj
93 | id19_780.obj id22_789.obj id19_789.obj
94 | id26_793.obj id21_776.obj id26_776.obj
95 | id17_639.obj id23_798.obj id17_798.obj
96 | id24_707.obj id27_622.obj id24_622.obj
97 | id28_659.obj id22_759.obj id28_759.obj
98 | id25_671.obj id20_617.obj id25_617.obj
99 | id18_607.obj id29_757.obj id18_757.obj
100 | id26_718.obj id25_692.obj id26_692.obj
101 | id17_603.obj id21_643.obj id17_643.obj
102 | id23_717.obj id20_750.obj id23_750.obj
103 | id29_693.obj id20_673.obj id29_673.obj
104 | id23_602.obj id16_676.obj id23_676.obj
105 | id16_713.obj id18_754.obj id16_754.obj
106 | id26_721.obj id19_638.obj id26_638.obj
107 | id21_641.obj id20_764.obj id21_764.obj
108 | id26_677.obj id29_620.obj id26_620.obj
109 | id19_698.obj id25_747.obj id19_747.obj
110 | id28_638.obj id29_734.obj id28_734.obj
111 | id25_689.obj id16_763.obj id25_763.obj
112 | id17_655.obj id19_728.obj id17_728.obj
113 | id23_768.obj id25_745.obj id23_745.obj
114 | id16_651.obj id16_735.obj id16_735.obj
115 | id21_605.obj id21_764.obj id21_764.obj
116 | id19_777.obj id17_628.obj id19_628.obj
117 | id24_736.obj id26_740.obj id24_740.obj
118 | id23_697.obj id26_605.obj id23_605.obj
119 | id21_735.obj id26_729.obj id21_729.obj
120 | id20_701.obj id20_651.obj id20_651.obj
121 | id28_785.obj id16_656.obj id28_656.obj
122 | id18_610.obj id18_747.obj id18_747.obj
123 | id24_641.obj id17_603.obj id24_603.obj
124 | id21_762.obj id20_683.obj id21_683.obj
125 | id23_728.obj id21_737.obj id23_737.obj
126 | id21_736.obj id29_748.obj id21_748.obj
127 | id24_629.obj id19_637.obj id24_637.obj
128 | id27_703.obj id29_786.obj id27_786.obj
129 | id20_794.obj id28_680.obj id20_680.obj
130 | id21_791.obj id16_606.obj id21_606.obj
131 | id27_745.obj id27_702.obj id27_702.obj
132 | id29_751.obj id23_708.obj id29_708.obj
133 | id21_795.obj id27_749.obj id21_749.obj
134 | id26_656.obj id22_752.obj id26_752.obj
135 | id16_716.obj id23_686.obj id16_686.obj
136 | id21_685.obj id23_741.obj id21_741.obj
137 | id21_605.obj id26_619.obj id21_619.obj
138 | id17_612.obj id27_757.obj id17_757.obj
139 | id23_760.obj id17_746.obj id23_746.obj
140 | id29_683.obj id17_718.obj id29_718.obj
141 | id23_759.obj id23_788.obj id23_788.obj
142 | id24_634.obj id22_620.obj id24_620.obj
143 | id24_774.obj id25_759.obj id24_759.obj
144 | id20_616.obj id17_791.obj id20_791.obj
145 | id20_794.obj id21_747.obj id20_747.obj
146 | id23_628.obj id17_750.obj id23_750.obj
147 | id18_750.obj id19_703.obj id18_703.obj
148 | id16_700.obj id25_718.obj id16_718.obj
149 | id23_649.obj id25_618.obj id23_618.obj
150 | id23_763.obj id27_634.obj id23_634.obj
151 | id20_724.obj id17_745.obj id20_745.obj
152 | id17_639.obj id17_767.obj id17_767.obj
153 | id23_644.obj id20_743.obj id23_743.obj
154 | id20_680.obj id24_626.obj id20_626.obj
155 | id28_749.obj id21_611.obj id28_611.obj
156 | id27_724.obj id25_667.obj id27_667.obj
157 | id26_668.obj id19_630.obj id26_630.obj
158 | id19_794.obj id29_608.obj id19_608.obj
159 | id29_623.obj id25_747.obj id29_747.obj
160 | id25_606.obj id29_693.obj id25_693.obj
161 | id21_606.obj id26_791.obj id21_791.obj
162 | id22_739.obj id26_608.obj id22_608.obj
163 | id20_668.obj id23_686.obj id20_686.obj
164 | id24_656.obj id25_733.obj id24_733.obj
165 | id29_737.obj id22_639.obj id29_639.obj
166 | id16_790.obj id18_655.obj id16_655.obj
167 | id25_785.obj id18_610.obj id25_610.obj
168 | id28_755.obj id28_703.obj id28_703.obj
169 | id28_747.obj id17_766.obj id28_766.obj
170 | id21_735.obj id26_601.obj id21_601.obj
171 | id27_697.obj id25_639.obj id27_639.obj
172 | id18_603.obj id20_675.obj id18_675.obj
173 | id24_677.obj id19_612.obj id24_612.obj
174 | id28_679.obj id20_710.obj id28_710.obj
175 | id29_749.obj id24_726.obj id29_726.obj
176 | id18_706.obj id26_651.obj id18_651.obj
177 | id28_776.obj id28_632.obj id28_632.obj
178 | id26_715.obj id28_624.obj id26_624.obj
179 | id27_680.obj id19_784.obj id27_784.obj
180 | id16_751.obj id24_759.obj id16_759.obj
181 | id26_710.obj id25_644.obj id26_644.obj
182 | id19_767.obj id27_668.obj id19_668.obj
183 | id16_729.obj id22_646.obj id16_646.obj
184 | id24_713.obj id28_697.obj id24_697.obj
185 | id16_607.obj id24_788.obj id16_788.obj
186 | id25_747.obj id26_668.obj id25_668.obj
187 | id24_738.obj id25_644.obj id24_644.obj
188 | id23_667.obj id20_659.obj id23_659.obj
189 | id18_648.obj id25_702.obj id18_702.obj
190 | id22_774.obj id22_641.obj id22_641.obj
191 | id16_686.obj id25_791.obj id16_791.obj
192 | id17_618.obj id23_697.obj id17_697.obj
193 | id18_691.obj id23_779.obj id18_779.obj
194 | id24_762.obj id27_690.obj id24_690.obj
195 | id21_678.obj id21_775.obj id21_775.obj
196 | id18_611.obj id18_779.obj id18_779.obj
197 | id21_677.obj id23_604.obj id21_604.obj
198 | id24_624.obj id24_617.obj id24_617.obj
199 | id24_612.obj id17_698.obj id24_698.obj
200 | id29_747.obj id27_615.obj id29_615.obj
201 | id24_687.obj id25_794.obj id24_794.obj
202 | id25_773.obj id17_624.obj id25_624.obj
203 | id20_764.obj id29_665.obj id20_665.obj
204 | id20_688.obj id21_647.obj id20_647.obj
205 | id18_648.obj id26_602.obj id18_602.obj
206 | id20_604.obj id24_687.obj id20_687.obj
207 | id19_732.obj id21_625.obj id19_625.obj
208 | id18_758.obj id22_654.obj id18_654.obj
209 | id17_766.obj id28_634.obj id17_634.obj
210 | id26_681.obj id18_684.obj id26_684.obj
211 | id16_750.obj id21_696.obj id16_696.obj
212 | id27_741.obj id25_628.obj id27_628.obj
213 | id18_674.obj id19_781.obj id18_781.obj
214 | id26_661.obj id29_612.obj id26_612.obj
215 | id23_708.obj id29_708.obj id23_708.obj
216 | id16_643.obj id29_623.obj id16_623.obj
217 | id29_725.obj id16_614.obj id29_614.obj
218 | id19_750.obj id25_618.obj id19_618.obj
219 | id23_623.obj id23_677.obj id23_677.obj
220 | id19_756.obj id16_630.obj id19_630.obj
221 | id28_643.obj id17_614.obj id28_614.obj
222 | id20_778.obj id29_610.obj id20_610.obj
223 | id28_713.obj id21_631.obj id28_631.obj
224 | id24_647.obj id28_680.obj id24_680.obj
225 | id20_631.obj id16_678.obj id20_678.obj
226 | id25_628.obj id19_772.obj id25_772.obj
227 | id23_768.obj id27_652.obj id23_652.obj
228 | id24_763.obj id19_700.obj id24_700.obj
229 | id19_668.obj id18_661.obj id19_661.obj
230 | id25_792.obj id24_651.obj id25_651.obj
231 | id18_796.obj id18_653.obj id18_653.obj
232 | id22_655.obj id22_737.obj id22_737.obj
233 | id19_747.obj id16_735.obj id19_735.obj
234 | id17_670.obj id23_704.obj id17_704.obj
235 | id16_728.obj id25_789.obj id16_789.obj
236 | id29_653.obj id18_726.obj id29_726.obj
237 | id29_690.obj id17_766.obj id29_766.obj
238 | id27_645.obj id23_712.obj id27_712.obj
239 | id21_731.obj id18_647.obj id21_647.obj
240 | id25_602.obj id16_775.obj id25_775.obj
241 | id21_692.obj id26_648.obj id21_648.obj
242 | id27_669.obj id16_768.obj id27_768.obj
243 | id25_648.obj id16_762.obj id25_762.obj
244 | id24_743.obj id22_765.obj id24_765.obj
245 | id18_749.obj id18_615.obj id18_615.obj
246 | id24_734.obj id28_723.obj id24_723.obj
247 | id20_639.obj id18_653.obj id20_653.obj
248 | id21_778.obj id22_620.obj id21_620.obj
249 | id19_613.obj id24_630.obj id19_630.obj
250 | id23_659.obj id26_733.obj id23_733.obj
251 | id24_645.obj id16_758.obj id24_758.obj
252 | id22_626.obj id21_716.obj id22_716.obj
253 | id29_688.obj id18_699.obj id29_699.obj
254 | id19_685.obj id28_709.obj id19_709.obj
255 | id29_798.obj id24_779.obj id29_779.obj
256 | id24_669.obj id22_754.obj id24_754.obj
257 | id18_772.obj id20_760.obj id18_760.obj
258 | id16_750.obj id20_731.obj id16_731.obj
259 | id26_739.obj id18_710.obj id26_710.obj
260 | id28_697.obj id19_708.obj id28_708.obj
261 | id20_707.obj id24_610.obj id20_610.obj
262 | id24_673.obj id27_620.obj id24_620.obj
263 | id22_758.obj id16_781.obj id22_781.obj
264 | id29_763.obj id16_733.obj id29_733.obj
265 | id27_751.obj id29_661.obj id27_661.obj
266 | id28_665.obj id22_677.obj id28_677.obj
267 | id24_679.obj id16_789.obj id24_789.obj
268 | id23_634.obj id29_641.obj id23_641.obj
269 | id23_672.obj id20_628.obj id23_628.obj
270 | id24_691.obj id20_674.obj id24_674.obj
271 | id25_626.obj id29_766.obj id25_766.obj
272 | id28_778.obj id26_779.obj id28_779.obj
273 | id29_772.obj id23_779.obj id29_779.obj
274 | id17_652.obj id20_742.obj id17_742.obj
275 | id29_630.obj id24_739.obj id29_739.obj
276 | id23_701.obj id18_681.obj id23_681.obj
277 | id29_779.obj id29_620.obj id29_620.obj
278 | id29_629.obj id26_722.obj id29_722.obj
279 | id29_703.obj id20_615.obj id29_615.obj
280 | id22_634.obj id29_651.obj id22_651.obj
281 | id19_714.obj id26_778.obj id19_778.obj
282 | id19_776.obj id20_626.obj id19_626.obj
283 | id27_642.obj id27_640.obj id27_640.obj
284 | id21_795.obj id28_755.obj id21_755.obj
285 | id25_620.obj id16_628.obj id25_628.obj
286 | id19_630.obj id27_734.obj id19_734.obj
287 | id26_785.obj id28_658.obj id26_658.obj
288 | id20_746.obj id22_609.obj id20_609.obj
289 | id28_710.obj id28_683.obj id28_683.obj
290 | id29_682.obj id26_621.obj id29_621.obj
291 | id25_728.obj id24_622.obj id25_622.obj
292 | id20_789.obj id18_737.obj id20_737.obj
293 | id24_675.obj id23_779.obj id24_779.obj
294 | id17_661.obj id23_731.obj id17_731.obj
295 | id21_737.obj id27_712.obj id21_712.obj
296 | id16_624.obj id28_771.obj id16_771.obj
297 | id21_786.obj id29_602.obj id21_602.obj
298 | id27_787.obj id19_762.obj id27_762.obj
299 | id27_724.obj id18_738.obj id27_738.obj
300 | id24_621.obj id29_795.obj id24_795.obj
301 | id23_772.obj id25_780.obj id23_780.obj
302 | id21_730.obj id20_687.obj id21_687.obj
303 | id18_729.obj id25_625.obj id18_625.obj
304 | id25_648.obj id19_612.obj id25_612.obj
305 | id22_777.obj id19_769.obj id22_769.obj
306 | id25_792.obj id20_689.obj id25_689.obj
307 | id17_677.obj id18_735.obj id17_735.obj
308 | id19_794.obj id17_764.obj id19_764.obj
309 | id21_748.obj id21_651.obj id21_651.obj
310 | id21_703.obj id27_743.obj id21_743.obj
311 | id27_617.obj id18_726.obj id27_726.obj
312 | id19_762.obj id26_788.obj id19_788.obj
313 | id28_752.obj id17_610.obj id28_610.obj
314 | id28_759.obj id24_666.obj id28_666.obj
315 | id28_747.obj id29_794.obj id28_794.obj
316 | id17_616.obj id24_788.obj id17_788.obj
317 | id17_754.obj id27_632.obj id17_632.obj
318 | id20_671.obj id27_725.obj id20_725.obj
319 | id22_676.obj id23_784.obj id22_784.obj
320 | id27_768.obj id17_789.obj id27_789.obj
321 | id27_786.obj id24_776.obj id27_776.obj
322 | id22_762.obj id19_704.obj id22_704.obj
323 | id21_611.obj id22_785.obj id21_785.obj
324 | id18_735.obj id28_764.obj id18_764.obj
325 | id24_765.obj id17_700.obj id24_700.obj
326 | id20_648.obj id23_744.obj id20_744.obj
327 | id26_668.obj id22_774.obj id26_774.obj
328 | id28_704.obj id23_734.obj id28_734.obj
329 | id17_619.obj id17_710.obj id17_710.obj
330 | id19_692.obj id29_622.obj id19_622.obj
331 | id20_638.obj id19_730.obj id20_730.obj
332 | id20_752.obj id28_744.obj id20_744.obj
333 | id21_779.obj id22_625.obj id21_625.obj
334 | id19_676.obj id24_641.obj id19_641.obj
335 | id24_708.obj id22_658.obj id24_658.obj
336 | id27_672.obj id17_676.obj id27_676.obj
337 | id27_604.obj id19_717.obj id27_717.obj
338 | id23_734.obj id26_743.obj id23_743.obj
339 | id25_781.obj id17_683.obj id25_683.obj
340 | id26_667.obj id23_619.obj id26_619.obj
341 | id26_664.obj id22_723.obj id26_723.obj
342 | id28_657.obj id19_674.obj id28_674.obj
343 | id18_777.obj id19_690.obj id18_690.obj
344 | id28_671.obj id23_664.obj id28_664.obj
345 | id26_691.obj id21_606.obj id26_606.obj
346 | id24_712.obj id27_657.obj id24_657.obj
347 | id23_799.obj id29_684.obj id23_684.obj
348 | id27_619.obj id29_622.obj id27_622.obj
349 | id18_778.obj id27_668.obj id18_668.obj
350 | id29_646.obj id26_635.obj id29_635.obj
351 | id24_721.obj id27_758.obj id24_758.obj
352 | id21_739.obj id25_735.obj id21_735.obj
353 | id18_614.obj id25_737.obj id18_737.obj
354 | id26_779.obj id23_787.obj id26_787.obj
355 | id29_712.obj id18_723.obj id29_723.obj
356 | id22_608.obj id28_666.obj id22_666.obj
357 | id26_629.obj id23_665.obj id26_665.obj
358 | id22_644.obj id27_644.obj id22_644.obj
359 | id17_747.obj id18_764.obj id17_764.obj
360 | id24_610.obj id26_647.obj id24_647.obj
361 | id20_716.obj id29_798.obj id20_798.obj
362 | id29_798.obj id28_634.obj id29_634.obj
363 | id20_685.obj id24_615.obj id20_615.obj
364 | id25_695.obj id23_781.obj id25_781.obj
365 | id20_748.obj id20_756.obj id20_756.obj
366 | id22_629.obj id19_623.obj id22_623.obj
367 | id27_749.obj id28_788.obj id27_788.obj
368 | id20_747.obj id27_742.obj id20_742.obj
369 | id18_753.obj id22_703.obj id18_703.obj
370 | id26_631.obj id24_679.obj id26_679.obj
371 | id20_744.obj id29_764.obj id20_764.obj
372 | id27_720.obj id21_780.obj id27_780.obj
373 | id19_761.obj id23_773.obj id19_773.obj
374 | id20_779.obj id17_672.obj id20_672.obj
375 | id18_758.obj id26_659.obj id18_659.obj
376 | id22_606.obj id28_672.obj id22_672.obj
377 | id22_770.obj id28_699.obj id22_699.obj
378 | id24_749.obj id27_706.obj id24_706.obj
379 | id26_654.obj id25_784.obj id26_784.obj
380 | id18_667.obj id21_771.obj id18_771.obj
381 | id19_607.obj id24_658.obj id19_658.obj
382 | id21_698.obj id28_757.obj id21_757.obj
383 | id18_639.obj id16_626.obj id18_626.obj
384 | id20_798.obj id17_701.obj id20_701.obj
385 | id16_613.obj id28_788.obj id16_788.obj
386 | id27_780.obj id16_627.obj id27_627.obj
387 | id19_702.obj id24_787.obj id19_787.obj
388 | id16_754.obj id21_735.obj id16_735.obj
389 | id16_640.obj id25_624.obj id16_624.obj
390 | id22_656.obj id29_630.obj id22_630.obj
391 | id16_640.obj id26_631.obj id16_631.obj
392 | id25_720.obj id16_704.obj id25_704.obj
393 | id18_615.obj id29_736.obj id18_736.obj
394 | id29_698.obj id16_785.obj id29_785.obj
395 | id24_781.obj id18_769.obj id24_769.obj
396 | id19_675.obj id24_711.obj id19_711.obj
397 | id18_607.obj id29_673.obj id18_673.obj
398 | id25_723.obj id29_758.obj id25_758.obj
399 | id26_765.obj id22_779.obj id26_779.obj
400 | id23_662.obj id18_786.obj id23_786.obj
401 |
--------------------------------------------------------------------------------
/animal_test_list:
--------------------------------------------------------------------------------
1 | toy_40_540.ply toy_20_503.ply toy_40_503.ply
2 | toy_40_521.ply toy_25_496.ply toy_40_496.ply
3 | toy_33_478.ply toy_17_597.ply toy_33_597.ply
4 | toy_32_450.ply toy_16_483.ply toy_32_483.ply
5 | toy_33_447.ply toy_40_573.ply toy_33_573.ply
6 | toy_22_477.ply toy_40_475.ply toy_22_475.ply
7 | toy_37_520.ply toy_36_492.ply toy_37_492.ply
8 | toy_37_464.ply toy_40_471.ply toy_37_471.ply
9 | toy_18_535.ply toy_40_593.ply toy_18_593.ply
10 | toy_37_593.ply toy_37_545.ply toy_37_545.ply
11 | toy_18_469.ply toy_37_439.ply toy_18_439.ply
12 | toy_22_504.ply toy_16_495.ply toy_22_495.ply
13 | toy_40_583.ply toy_40_405.ply toy_40_405.ply
14 | toy_37_480.ply toy_22_440.ply toy_37_440.ply
15 | toy_36_577.ply toy_37_431.ply toy_36_431.ply
16 | toy_18_485.ply toy_20_506.ply toy_18_506.ply
17 | toy_32_553.ply toy_16_464.ply toy_32_464.ply
18 | toy_18_545.ply toy_22_518.ply toy_18_518.ply
19 | toy_25_412.ply toy_22_499.ply toy_25_499.ply
20 | toy_33_484.ply toy_22_549.ply toy_33_549.ply
21 | toy_32_439.ply toy_36_533.ply toy_32_533.ply
22 | toy_19_526.ply toy_16_446.ply toy_19_446.ply
23 | toy_33_539.ply toy_32_526.ply toy_33_526.ply
24 | toy_25_532.ply toy_17_521.ply toy_25_521.ply
25 | toy_40_566.ply toy_37_520.ply toy_40_520.ply
26 | toy_33_464.ply toy_17_487.ply toy_33_487.ply
27 | toy_37_423.ply toy_32_482.ply toy_37_482.ply
28 | toy_33_464.ply toy_33_524.ply toy_33_524.ply
29 | toy_17_530.ply toy_32_448.ply toy_17_448.ply
30 | toy_20_593.ply toy_37_578.ply toy_20_578.ply
31 | toy_16_486.ply toy_36_560.ply toy_16_560.ply
32 | toy_36_566.ply toy_22_478.ply toy_36_478.ply
33 | toy_19_422.ply toy_22_455.ply toy_19_455.ply
34 | toy_40_431.ply toy_18_529.ply toy_40_529.ply
35 | toy_33_537.ply toy_19_494.ply toy_33_494.ply
36 | toy_36_598.ply toy_22_581.ply toy_36_581.ply
37 | toy_22_439.ply toy_19_536.ply toy_22_536.ply
38 | toy_25_581.ply toy_37_560.ply toy_25_560.ply
39 | toy_32_511.ply toy_40_511.ply toy_32_511.ply
40 | toy_20_449.ply toy_20_599.ply toy_20_599.ply
41 | toy_20_468.ply toy_33_473.ply toy_20_473.ply
42 | toy_16_577.ply toy_33_478.ply toy_16_478.ply
43 | toy_20_419.ply toy_22_533.ply toy_20_533.ply
44 | toy_18_402.ply toy_32_449.ply toy_18_449.ply
45 | toy_16_499.ply toy_33_561.ply toy_16_561.ply
46 | toy_25_504.ply toy_19_488.ply toy_25_488.ply
47 | toy_22_525.ply toy_22_503.ply toy_22_503.ply
48 | toy_19_548.ply toy_20_554.ply toy_19_554.ply
49 | toy_32_492.ply toy_19_520.ply toy_32_520.ply
50 | toy_17_430.ply toy_36_546.ply toy_17_546.ply
51 | toy_33_538.ply toy_37_451.ply toy_33_451.ply
52 | toy_22_570.ply toy_32_597.ply toy_22_597.ply
53 | toy_19_440.ply toy_20_595.ply toy_19_595.ply
54 | toy_17_503.ply toy_40_568.ply toy_17_568.ply
55 | toy_16_422.ply toy_25_498.ply toy_16_498.ply
56 | toy_32_402.ply toy_32_403.ply toy_32_403.ply
57 | toy_18_565.ply toy_20_497.ply toy_18_497.ply
58 | toy_16_590.ply toy_17_581.ply toy_16_581.ply
59 | toy_20_512.ply toy_33_556.ply toy_20_556.ply
60 | toy_25_583.ply toy_32_427.ply toy_25_427.ply
61 | toy_16_583.ply toy_40_426.ply toy_16_426.ply
62 | toy_40_425.ply toy_36_538.ply toy_40_538.ply
63 | toy_32_432.ply toy_20_432.ply toy_32_432.ply
64 | toy_19_598.ply toy_32_502.ply toy_19_502.ply
65 | toy_20_516.ply toy_32_582.ply toy_20_582.ply
66 | toy_33_412.ply toy_20_589.ply toy_33_589.ply
67 | toy_20_407.ply toy_36_442.ply toy_20_442.ply
68 | toy_19_562.ply toy_40_443.ply toy_19_443.ply
69 | toy_17_474.ply toy_33_481.ply toy_17_481.ply
70 | toy_17_412.ply toy_16_531.ply toy_17_531.ply
71 | toy_37_421.ply toy_25_530.ply toy_37_530.ply
72 | toy_18_480.ply toy_33_402.ply toy_18_402.ply
73 | toy_32_579.ply toy_37_569.ply toy_32_569.ply
74 | toy_20_415.ply toy_36_591.ply toy_20_591.ply
75 | toy_36_416.ply toy_20_435.ply toy_36_435.ply
76 | toy_33_579.ply toy_36_575.ply toy_33_575.ply
77 | toy_19_555.ply toy_20_413.ply toy_19_413.ply
78 | toy_19_412.ply toy_16_419.ply toy_19_419.ply
79 | toy_22_446.ply toy_36_572.ply toy_22_572.ply
80 | toy_32_553.ply toy_22_579.ply toy_32_579.ply
81 | toy_22_549.ply toy_20_507.ply toy_22_507.ply
82 | toy_37_541.ply toy_16_516.ply toy_37_516.ply
83 | toy_19_483.ply toy_40_510.ply toy_19_510.ply
84 | toy_17_509.ply toy_17_514.ply toy_17_514.ply
85 | toy_25_457.ply toy_33_557.ply toy_25_557.ply
86 | toy_32_469.ply toy_40_419.ply toy_32_419.ply
87 | toy_40_484.ply toy_32_577.ply toy_40_577.ply
88 | toy_36_592.ply toy_19_576.ply toy_36_576.ply
89 | toy_18_597.ply toy_40_406.ply toy_18_406.ply
90 | toy_19_526.ply toy_37_465.ply toy_19_465.ply
91 | toy_32_431.ply toy_18_503.ply toy_32_503.ply
92 | toy_25_576.ply toy_18_404.ply toy_25_404.ply
93 | toy_18_495.ply toy_20_594.ply toy_18_594.ply
94 | toy_19_591.ply toy_33_588.ply toy_19_588.ply
95 | toy_17_471.ply toy_33_566.ply toy_17_566.ply
96 | toy_36_576.ply toy_19_426.ply toy_36_426.ply
97 | toy_17_469.ply toy_16_597.ply toy_17_597.ply
98 | toy_19_429.ply toy_32_566.ply toy_19_566.ply
99 | toy_22_480.ply toy_36_561.ply toy_22_561.ply
100 | toy_37_445.ply toy_18_409.ply toy_37_409.ply
101 | toy_36_460.ply toy_33_512.ply toy_36_512.ply
102 | toy_19_517.ply toy_25_518.ply toy_19_518.ply
103 | toy_18_408.ply toy_17_424.ply toy_18_424.ply
104 | toy_40_443.ply toy_40_426.ply toy_40_426.ply
105 | toy_17_561.ply toy_19_473.ply toy_17_473.ply
106 | toy_40_412.ply toy_33_431.ply toy_40_431.ply
107 | toy_20_453.ply toy_25_484.ply toy_20_484.ply
108 | toy_16_589.ply toy_32_513.ply toy_16_513.ply
109 | toy_25_400.ply toy_25_463.ply toy_25_463.ply
110 | toy_22_421.ply toy_22_414.ply toy_22_414.ply
111 | toy_25_507.ply toy_20_516.ply toy_25_516.ply
112 | toy_16_574.ply toy_22_482.ply toy_16_482.ply
113 | toy_20_596.ply toy_19_574.ply toy_20_574.ply
114 | toy_16_530.ply toy_37_551.ply toy_16_551.ply
115 | toy_37_522.ply toy_33_548.ply toy_37_548.ply
116 | toy_19_522.ply toy_33_592.ply toy_19_592.ply
117 | toy_40_570.ply toy_18_473.ply toy_40_473.ply
118 | toy_19_460.ply toy_22_418.ply toy_19_418.ply
119 | toy_25_522.ply toy_22_495.ply toy_25_495.ply
120 | toy_36_443.ply toy_25_402.ply toy_36_402.ply
121 | toy_22_576.ply toy_18_502.ply toy_22_502.ply
122 | toy_32_514.ply toy_22_502.ply toy_32_502.ply
123 | toy_40_431.ply toy_32_410.ply toy_40_410.ply
124 | toy_37_470.ply toy_22_509.ply toy_37_509.ply
125 | toy_22_538.ply toy_22_454.ply toy_22_454.ply
126 | toy_18_407.ply toy_22_599.ply toy_18_599.ply
127 | toy_16_418.ply toy_20_509.ply toy_16_509.ply
128 | toy_40_495.ply toy_19_514.ply toy_40_514.ply
129 | toy_22_483.ply toy_25_406.ply toy_22_406.ply
130 | toy_37_576.ply toy_19_554.ply toy_37_554.ply
131 | toy_40_405.ply toy_22_442.ply toy_40_442.ply
132 | toy_19_443.ply toy_33_583.ply toy_19_583.ply
133 | toy_40_451.ply toy_25_476.ply toy_40_476.ply
134 | toy_33_585.ply toy_37_523.ply toy_33_523.ply
135 | toy_16_593.ply toy_37_573.ply toy_16_573.ply
136 | toy_40_511.ply toy_25_479.ply toy_40_479.ply
137 | toy_36_549.ply toy_20_589.ply toy_36_589.ply
138 | toy_17_530.ply toy_40_439.ply toy_17_439.ply
139 | toy_40_579.ply toy_36_539.ply toy_40_539.ply
140 | toy_40_486.ply toy_22_571.ply toy_40_571.ply
141 | toy_37_432.ply toy_17_520.ply toy_37_520.ply
142 | toy_17_425.ply toy_22_442.ply toy_17_442.ply
143 | toy_17_578.ply toy_22_453.ply toy_17_453.ply
144 | toy_32_488.ply toy_18_509.ply toy_32_509.ply
145 | toy_20_499.ply toy_22_450.ply toy_20_450.ply
146 | toy_33_574.ply toy_16_508.ply toy_33_508.ply
147 | toy_32_579.ply toy_25_578.ply toy_32_578.ply
148 | toy_16_533.ply toy_36_508.ply toy_16_508.ply
149 | toy_37_449.ply toy_36_534.ply toy_37_534.ply
150 | toy_37_537.ply toy_18_555.ply toy_37_555.ply
151 | toy_32_577.ply toy_20_544.ply toy_32_544.ply
152 | toy_25_530.ply toy_25_524.ply toy_25_524.ply
153 | toy_22_499.ply toy_20_583.ply toy_22_583.ply
154 | toy_20_437.ply toy_33_588.ply toy_20_588.ply
155 | toy_36_546.ply toy_37_452.ply toy_36_452.ply
156 | toy_36_406.ply toy_37_427.ply toy_36_427.ply
157 | toy_25_449.ply toy_32_449.ply toy_25_449.ply
158 | toy_18_575.ply toy_33_590.ply toy_18_590.ply
159 | toy_33_405.ply toy_18_436.ply toy_33_436.ply
160 | toy_17_477.ply toy_37_559.ply toy_17_559.ply
161 | toy_36_560.ply toy_25_550.ply toy_36_550.ply
162 | toy_16_513.ply toy_36_403.ply toy_16_403.ply
163 | toy_22_505.ply toy_25_510.ply toy_22_510.ply
164 | toy_33_548.ply toy_16_545.ply toy_33_545.ply
165 | toy_19_522.ply toy_36_547.ply toy_19_547.ply
166 | toy_17_563.ply toy_40_560.ply toy_17_560.ply
167 | toy_36_536.ply toy_25_501.ply toy_36_501.ply
168 | toy_22_501.ply toy_19_516.ply toy_22_516.ply
169 | toy_20_587.ply toy_40_500.ply toy_20_500.ply
170 | toy_37_467.ply toy_36_571.ply toy_37_571.ply
171 | toy_20_446.ply toy_40_556.ply toy_20_556.ply
172 | toy_40_483.ply toy_20_546.ply toy_40_546.ply
173 | toy_18_434.ply toy_25_402.ply toy_18_402.ply
174 | toy_17_526.ply toy_18_526.ply toy_17_526.ply
175 | toy_16_506.ply toy_22_538.ply toy_16_538.ply
176 | toy_25_488.ply toy_22_417.ply toy_25_417.ply
177 | toy_22_424.ply toy_22_486.ply toy_22_486.ply
178 | toy_19_479.ply toy_20_405.ply toy_19_405.ply
179 | toy_33_422.ply toy_18_533.ply toy_33_533.ply
180 | toy_16_489.ply toy_33_563.ply toy_16_563.ply
181 | toy_16_426.ply toy_20_473.ply toy_16_473.ply
182 | toy_20_501.ply toy_19_524.ply toy_20_524.ply
183 | toy_18_411.ply toy_18_463.ply toy_18_463.ply
184 | toy_33_598.ply toy_16_570.ply toy_33_570.ply
185 | toy_40_428.ply toy_25_474.ply toy_40_474.ply
186 | toy_32_513.ply toy_25_470.ply toy_32_470.ply
187 | toy_18_409.ply toy_22_435.ply toy_18_435.ply
188 | toy_33_580.ply toy_36_490.ply toy_33_490.ply
189 | toy_19_434.ply toy_37_586.ply toy_19_586.ply
190 | toy_22_572.ply toy_25_482.ply toy_22_482.ply
191 | toy_20_455.ply toy_40_582.ply toy_20_582.ply
192 | toy_40_471.ply toy_25_472.ply toy_40_472.ply
193 | toy_20_471.ply toy_20_432.ply toy_20_432.ply
194 | toy_18_546.ply toy_33_402.ply toy_18_402.ply
195 | toy_17_477.ply toy_22_433.ply toy_17_433.ply
196 | toy_40_423.ply toy_25_472.ply toy_40_472.ply
197 | toy_32_486.ply toy_40_442.ply toy_32_442.ply
198 | toy_19_469.ply toy_32_584.ply toy_19_584.ply
199 | toy_37_506.ply toy_16_423.ply toy_37_423.ply
200 | toy_17_519.ply toy_25_492.ply toy_17_492.ply
201 | toy_37_577.ply toy_18_584.ply toy_37_584.ply
202 | toy_19_420.ply toy_18_534.ply toy_19_534.ply
203 | toy_36_409.ply toy_32_417.ply toy_36_417.ply
204 | toy_17_518.ply toy_37_489.ply toy_17_489.ply
205 | toy_18_402.ply toy_37_591.ply toy_18_591.ply
206 | toy_22_497.ply toy_17_501.ply toy_22_501.ply
207 | toy_37_495.ply toy_20_427.ply toy_37_427.ply
208 | toy_20_479.ply toy_22_404.ply toy_20_404.ply
209 | toy_20_492.ply toy_22_411.ply toy_20_411.ply
210 | toy_40_456.ply toy_22_514.ply toy_40_514.ply
211 | toy_16_575.ply toy_33_579.ply toy_16_579.ply
212 | toy_36_419.ply toy_37_455.ply toy_36_455.ply
213 | toy_20_406.ply toy_20_549.ply toy_20_549.ply
214 | toy_22_513.ply toy_32_491.ply toy_22_491.ply
215 | toy_19_595.ply toy_36_511.ply toy_19_511.ply
216 | toy_37_513.ply toy_40_550.ply toy_37_550.ply
217 | toy_32_432.ply toy_17_418.ply toy_32_418.ply
218 | toy_40_563.ply toy_19_464.ply toy_40_464.ply
219 | toy_33_561.ply toy_16_517.ply toy_33_517.ply
220 | toy_32_429.ply toy_16_434.ply toy_32_434.ply
221 | toy_16_556.ply toy_37_498.ply toy_16_498.ply
222 | toy_25_559.ply toy_20_574.ply toy_25_574.ply
223 | toy_40_575.ply toy_33_431.ply toy_40_431.ply
224 | toy_19_468.ply toy_22_561.ply toy_19_561.ply
225 | toy_37_411.ply toy_17_566.ply toy_37_566.ply
226 | toy_19_582.ply toy_16_480.ply toy_19_480.ply
227 | toy_37_517.ply toy_17_411.ply toy_37_411.ply
228 | toy_16_555.ply toy_33_588.ply toy_16_588.ply
229 | toy_40_586.ply toy_25_547.ply toy_40_547.ply
230 | toy_16_446.ply toy_16_585.ply toy_16_585.ply
231 | toy_22_555.ply toy_32_512.ply toy_22_512.ply
232 | toy_18_522.ply toy_22_504.ply toy_18_504.ply
233 | toy_40_576.ply toy_33_594.ply toy_40_594.ply
234 | toy_20_539.ply toy_32_468.ply toy_20_468.ply
235 | toy_22_422.ply toy_16_455.ply toy_22_455.ply
236 | toy_20_573.ply toy_40_435.ply toy_20_435.ply
237 | toy_33_472.ply toy_36_454.ply toy_33_454.ply
238 | toy_22_521.ply toy_22_591.ply toy_22_591.ply
239 | toy_36_517.ply toy_17_500.ply toy_36_500.ply
240 | toy_36_594.ply toy_22_521.ply toy_36_521.ply
241 | toy_17_561.ply toy_40_429.ply toy_17_429.ply
242 | toy_25_581.ply toy_36_527.ply toy_25_527.ply
243 | toy_37_481.ply toy_32_506.ply toy_37_506.ply
244 | toy_18_416.ply toy_22_408.ply toy_18_408.ply
245 | toy_37_598.ply toy_19_474.ply toy_37_474.ply
246 | toy_36_524.ply toy_33_451.ply toy_36_451.ply
247 | toy_22_499.ply toy_19_517.ply toy_22_517.ply
248 | toy_36_441.ply toy_33_436.ply toy_36_436.ply
249 | toy_33_537.ply toy_36_561.ply toy_33_561.ply
250 | toy_40_437.ply toy_25_583.ply toy_40_583.ply
251 | toy_19_580.ply toy_25_488.ply toy_19_488.ply
252 | toy_37_570.ply toy_18_444.ply toy_37_444.ply
253 | toy_37_499.ply toy_16_550.ply toy_37_550.ply
254 | toy_18_470.ply toy_40_574.ply toy_18_574.ply
255 | toy_18_425.ply toy_25_435.ply toy_18_435.ply
256 | toy_33_555.ply toy_16_452.ply toy_33_452.ply
257 | toy_32_422.ply toy_17_556.ply toy_32_556.ply
258 | toy_40_436.ply toy_37_400.ply toy_40_400.ply
259 | toy_36_580.ply toy_18_592.ply toy_36_592.ply
260 | toy_32_510.ply toy_18_453.ply toy_32_453.ply
261 | toy_22_572.ply toy_18_505.ply toy_22_505.ply
262 | toy_17_573.ply toy_17_411.ply toy_17_411.ply
263 | toy_16_592.ply toy_37_468.ply toy_16_468.ply
264 | toy_25_568.ply toy_22_523.ply toy_25_523.ply
265 | toy_33_505.ply toy_36_482.ply toy_33_482.ply
266 | toy_32_417.ply toy_25_438.ply toy_32_438.ply
267 | toy_22_542.ply toy_33_513.ply toy_22_513.ply
268 | toy_20_444.ply toy_37_591.ply toy_20_591.ply
269 | toy_25_491.ply toy_33_597.ply toy_25_597.ply
270 | toy_25_542.ply toy_20_565.ply toy_25_565.ply
271 | toy_33_578.ply toy_17_522.ply toy_33_522.ply
272 | toy_22_462.ply toy_36_594.ply toy_22_594.ply
273 | toy_17_592.ply toy_33_481.ply toy_17_481.ply
274 | toy_19_593.ply toy_36_475.ply toy_19_475.ply
275 | toy_22_554.ply toy_18_458.ply toy_22_458.ply
276 | toy_18_521.ply toy_33_579.ply toy_18_579.ply
277 | toy_32_467.ply toy_18_452.ply toy_32_452.ply
278 | toy_33_568.ply toy_22_503.ply toy_33_503.ply
279 | toy_17_561.ply toy_22_471.ply toy_17_471.ply
280 | toy_32_534.ply toy_40_545.ply toy_32_545.ply
281 | toy_22_489.ply toy_18_589.ply toy_22_589.ply
282 | toy_22_509.ply toy_25_557.ply toy_22_557.ply
283 | toy_22_455.ply toy_40_438.ply toy_22_438.ply
284 | toy_36_516.ply toy_25_512.ply toy_36_512.ply
285 | toy_22_590.ply toy_16_507.ply toy_22_507.ply
286 | toy_32_421.ply toy_16_553.ply toy_32_553.ply
287 | toy_17_419.ply toy_36_505.ply toy_17_505.ply
288 | toy_22_588.ply toy_22_581.ply toy_22_581.ply
289 | toy_18_435.ply toy_19_418.ply toy_18_418.ply
290 | toy_16_424.ply toy_19_476.ply toy_16_476.ply
291 | toy_16_426.ply toy_25_559.ply toy_16_559.ply
292 | toy_18_573.ply toy_18_404.ply toy_18_404.ply
293 | toy_32_506.ply toy_32_408.ply toy_32_408.ply
294 | toy_40_542.ply toy_32_528.ply toy_40_528.ply
295 | toy_19_472.ply toy_22_591.ply toy_19_591.ply
296 | toy_19_448.ply toy_32_406.ply toy_19_406.ply
297 | toy_18_458.ply toy_32_475.ply toy_18_475.ply
298 | toy_37_465.ply toy_18_444.ply toy_37_444.ply
299 | toy_40_421.ply toy_19_485.ply toy_40_485.ply
300 | toy_18_565.ply toy_19_449.ply toy_18_449.ply
301 | toy_36_441.ply toy_18_430.ply toy_36_430.ply
302 | toy_17_592.ply toy_22_549.ply toy_17_549.ply
303 | toy_36_430.ply toy_36_436.ply toy_36_436.ply
304 | toy_18_508.ply toy_19_461.ply toy_18_461.ply
305 | toy_17_557.ply toy_17_489.ply toy_17_489.ply
306 | toy_18_437.ply toy_37_537.ply toy_18_537.ply
307 | toy_37_595.ply toy_32_523.ply toy_37_523.ply
308 | toy_16_529.ply toy_19_458.ply toy_16_458.ply
309 | toy_33_484.ply toy_17_586.ply toy_33_586.ply
310 | toy_19_400.ply toy_20_598.ply toy_19_598.ply
311 | toy_18_593.ply toy_18_445.ply toy_18_445.ply
312 | toy_40_458.ply toy_19_529.ply toy_40_529.ply
313 | toy_36_491.ply toy_40_435.ply toy_36_435.ply
314 | toy_19_551.ply toy_16_458.ply toy_19_458.ply
315 | toy_20_478.ply toy_36_465.ply toy_20_465.ply
316 | toy_17_569.ply toy_17_500.ply toy_17_500.ply
317 | toy_40_446.ply toy_17_503.ply toy_40_503.ply
318 | toy_18_490.ply toy_18_402.ply toy_18_402.ply
319 | toy_36_474.ply toy_33_568.ply toy_36_568.ply
320 | toy_20_414.ply toy_17_581.ply toy_20_581.ply
321 | toy_22_470.ply toy_37_412.ply toy_22_412.ply
322 | toy_33_455.ply toy_22_597.ply toy_33_597.ply
323 | toy_33_554.ply toy_33_465.ply toy_33_465.ply
324 | toy_37_412.ply toy_18_543.ply toy_37_543.ply
325 | toy_16_464.ply toy_37_544.ply toy_16_544.ply
326 | toy_36_579.ply toy_32_426.ply toy_36_426.ply
327 | toy_16_578.ply toy_36_433.ply toy_16_433.ply
328 | toy_20_438.ply toy_18_551.ply toy_20_551.ply
329 | toy_20_457.ply toy_32_548.ply toy_20_548.ply
330 | toy_33_436.ply toy_16_477.ply toy_33_477.ply
331 | toy_33_505.ply toy_19_527.ply toy_33_527.ply
332 | toy_32_506.ply toy_40_406.ply toy_32_406.ply
333 | toy_36_525.ply toy_18_466.ply toy_36_466.ply
334 | toy_20_445.ply toy_16_578.ply toy_20_578.ply
335 | toy_36_446.ply toy_37_473.ply toy_36_473.ply
336 | toy_36_429.ply toy_36_529.ply toy_36_529.ply
337 | toy_20_579.ply toy_17_512.ply toy_20_512.ply
338 | toy_16_532.ply toy_22_412.ply toy_16_412.ply
339 | toy_36_549.ply toy_40_494.ply toy_36_494.ply
340 | toy_32_580.ply toy_22_568.ply toy_32_568.ply
341 | toy_19_513.ply toy_19_527.ply toy_19_527.ply
342 | toy_25_476.ply toy_25_509.ply toy_25_509.ply
343 | toy_17_534.ply toy_16_453.ply toy_17_453.ply
344 | toy_33_586.ply toy_36_597.ply toy_33_597.ply
345 | toy_22_598.ply toy_16_526.ply toy_22_526.ply
346 | toy_37_497.ply toy_16_575.ply toy_37_575.ply
347 | toy_16_455.ply toy_16_443.ply toy_16_443.ply
348 | toy_32_417.ply toy_16_560.ply toy_32_560.ply
349 | toy_18_538.ply toy_22_582.ply toy_18_582.ply
350 | toy_22_535.ply toy_22_596.ply toy_22_596.ply
351 | toy_17_585.ply toy_22_438.ply toy_17_438.ply
352 | toy_32_501.ply toy_32_589.ply toy_32_589.ply
353 | toy_40_573.ply toy_33_529.ply toy_40_529.ply
354 | toy_36_511.ply toy_33_448.ply toy_36_448.ply
355 | toy_19_427.ply toy_36_577.ply toy_19_577.ply
356 | toy_36_585.ply toy_36_459.ply toy_36_459.ply
357 | toy_37_493.ply toy_19_564.ply toy_37_564.ply
358 | toy_32_557.ply toy_16_464.ply toy_32_464.ply
359 | toy_20_508.ply toy_37_400.ply toy_20_400.ply
360 | toy_37_574.ply toy_25_440.ply toy_37_440.ply
361 | toy_19_407.ply toy_40_554.ply toy_19_554.ply
362 | toy_18_551.ply toy_33_478.ply toy_18_478.ply
363 | toy_36_578.ply toy_18_416.ply toy_36_416.ply
364 | toy_19_532.ply toy_19_442.ply toy_19_442.ply
365 | toy_36_588.ply toy_33_468.ply toy_36_468.ply
366 | toy_32_433.ply toy_20_414.ply toy_32_414.ply
367 | toy_16_456.ply toy_40_462.ply toy_16_462.ply
368 | toy_25_597.ply toy_40_575.ply toy_25_575.ply
369 | toy_33_576.ply toy_36_559.ply toy_33_559.ply
370 | toy_17_459.ply toy_19_465.ply toy_17_465.ply
371 | toy_22_497.ply toy_32_437.ply toy_22_437.ply
372 | toy_20_411.ply toy_18_488.ply toy_20_488.ply
373 | toy_37_502.ply toy_20_565.ply toy_37_565.ply
374 | toy_22_435.ply toy_18_555.ply toy_22_555.ply
375 | toy_17_489.ply toy_18_516.ply toy_17_516.ply
376 | toy_17_472.ply toy_40_579.ply toy_17_579.ply
377 | toy_33_439.ply toy_22_423.ply toy_33_423.ply
378 | toy_40_521.ply toy_36_449.ply toy_40_449.ply
379 | toy_36_591.ply toy_20_511.ply toy_36_511.ply
380 | toy_32_580.ply toy_36_460.ply toy_32_460.ply
381 | toy_18_591.ply toy_32_490.ply toy_18_490.ply
382 | toy_22_426.ply toy_16_402.ply toy_22_402.ply
383 | toy_37_448.ply toy_37_511.ply toy_37_511.ply
384 | toy_16_535.ply toy_40_436.ply toy_16_436.ply
385 | toy_25_475.ply toy_40_576.ply toy_25_576.ply
386 | toy_16_489.ply toy_16_529.ply toy_16_529.ply
387 | toy_25_441.ply toy_36_500.ply toy_25_500.ply
388 | toy_20_580.ply toy_20_556.ply toy_20_556.ply
389 | toy_20_514.ply toy_19_539.ply toy_20_539.ply
390 | toy_25_592.ply toy_16_572.ply toy_25_572.ply
391 | toy_19_589.ply toy_17_513.ply toy_19_513.ply
392 | toy_17_419.ply toy_40_517.ply toy_17_517.ply
393 | toy_16_520.ply toy_20_490.ply toy_16_490.ply
394 | toy_25_524.ply toy_25_552.ply toy_25_552.ply
395 | toy_37_406.ply toy_22_548.ply toy_37_548.ply
396 | toy_16_414.ply toy_25_493.ply toy_16_493.ply
397 | toy_33_405.ply toy_40_551.ply toy_33_551.ply
398 | toy_37_590.ply toy_18_469.ply toy_37_469.ply
399 | toy_17_423.ply toy_18_405.ply toy_17_405.ply
400 | toy_33_487.ply toy_33_460.ply toy_33_460.ply
401 |
--------------------------------------------------------------------------------