├── .gitignore ├── AccelerateFix.py ├── LICENSE ├── LMDBDataset_jpeg.py ├── Main.py ├── PreProcess.py ├── README.md ├── README_md_files ├── 0b7854f0-1cf7-11ef-b8f1-596730d257b3.jpeg ├── 0de42f60-1cf8-11ef-b8f1-596730d257b3.jpeg ├── 26b6d1b0-1cf7-11ef-b8f1-596730d257b3.jpeg ├── 3f840050-1cf7-11ef-b8f1-596730d257b3.jpeg ├── 4bd0ac70-3e74-11ef-b06d-69973bdd91b6.jpeg ├── 4ffc0130-1cf7-11ef-b8f1-596730d257b3.jpeg ├── 5c029c40-3e77-11ef-b06d-69973bdd91b6.jpeg ├── 5fcbb420-1cf7-11ef-b8f1-596730d257b3.jpeg ├── 67158a70-3e77-11ef-b06d-69973bdd91b6.jpeg ├── 7d6dc170-3e77-11ef-b06d-69973bdd91b6.jpeg ├── 839ab800-3e77-11ef-b06d-69973bdd91b6.jpeg ├── aeb5db90-1cef-11ef-b8f1-596730d257b3.jpeg ├── b7bc1030-1cf7-11ef-b8f1-596730d257b3.jpeg ├── c070ff40-1cf9-11ef-b8f1-596730d257b3.jpeg ├── caabc7d0-1cf7-11ef-b8f1-596730d257b3.jpeg ├── e53350a0-1cf7-11ef-b8f1-596730d257b3.jpeg └── f9942470-1cf7-11ef-b8f1-596730d257b3.jpeg ├── calvin2lmdb.py ├── configs.json ├── evaluate_calvin.py ├── evaluation ├── calvin_env_wrapper_raw.py └── calvin_evaluation.py ├── evaluation_logs ├── result.txt └── success_rate.txt ├── fake_dataset ├── training │ ├── .hydra │ │ ├── config.yaml │ │ ├── hydra.yaml │ │ ├── merged_config.yaml │ │ └── overrides.yaml │ └── statistics.yaml └── validation │ ├── .hydra │ ├── config.yaml │ ├── hydra.yaml │ ├── merged_config.yaml │ └── overrides.yaml │ └── statistics.yaml ├── install.sh ├── models ├── gr1.py ├── transformer_utils.py └── vision_transformer.py ├── modify.md ├── requirements.txt └── utils └── calvin_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.zip -------------------------------------------------------------------------------- /AccelerateFix.py: -------------------------------------------------------------------------------- 1 | def AsyncStep(self, closure=None): 2 | if self.gradient_state.sync_gradients: 3 | if self.scaler is not None: 4 | self.scaler.step(self.optimizer, closure) 5 | self.scaler.update() 6 | else: 7 | self.optimizer.step(closure) 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LMDBDataset_jpeg.py: -------------------------------------------------------------------------------- 1 | import io 2 | import gc 3 | from time import time 4 | import lmdb 5 | from pickle import loads 6 | import numpy as np 7 | import torch 8 | from torch.utils.data import Dataset 9 | from torchvision.io import decode_jpeg 10 | 11 | ORIGINAL_STATIC_RES = 200 12 | ORIGINAL_GRIPPER_RES = 84 13 | 14 | class DataPrefetcher(): 15 | def __init__(self, loader, device): 16 | self.device = device 17 | self.loader = loader 18 | self.iter = iter(self.loader) 19 | self.stream = torch.cuda.Stream() 20 | self.preload() 21 | 22 | def preload(self): 23 | try: 24 | # Dataloader will prefetch data to cpu so this step is very quick 25 | self.batch = next(self.iter) 26 | except StopIteration: 27 | self.batch = None 28 | self.iter = iter(self.loader) 29 | return 30 | with torch.cuda.stream(self.stream): 31 | for key in self.batch: 32 | self.batch[key] = self.batch[key].to(self.device, non_blocking=True) 33 | 34 | def next(self): 35 | clock = time() 36 | batch = self.batch 37 | if batch is not None: 38 | for key in batch: 39 | if batch[key] is not None: 40 | batch[key].record_stream(torch.cuda.current_stream()) 41 | self.preload() 42 | return batch, time()-clock 43 | 44 | def next_without_none(self): 45 | batch, time = self.next() 46 | if batch is None: 47 | batch, time = self.next() 48 | return batch, time 49 | 50 | class LMDBDataset(Dataset): 51 | def __init__(self, lmdb_dir, sequence_length, chunk_size, action_mode, action_dim, start_ratio, end_ratio): 52 | super(LMDBDataset).__init__() 53 | self.sequence_length = sequence_length 54 | self.chunk_size = chunk_size 55 | self.action_mode = action_mode 56 | self.action_dim = action_dim 57 | self.dummy_rgb_static = torch.zeros(sequence_length, 3, ORIGINAL_STATIC_RES, ORIGINAL_STATIC_RES, dtype=torch.uint8) 58 | self.dummy_rgb_gripper = torch.zeros(sequence_length, 3, ORIGINAL_GRIPPER_RES, ORIGINAL_GRIPPER_RES, dtype=torch.uint8) 59 | self.dummy_arm_state = torch.zeros(sequence_length, 6) 60 | self.dummy_gripper_state = torch.zeros(sequence_length, 2) 61 | self.dummy_actions = torch.zeros(sequence_length, chunk_size, action_dim) 62 | self.dummy_mask = torch.zeros(sequence_length, chunk_size) 63 | self.lmdb_dir = lmdb_dir 64 | env = lmdb.open(lmdb_dir, readonly=True, create=False, lock=False) 65 | with env.begin() as txn: 66 | dataset_len = loads(txn.get('cur_step'.encode())) + 1 67 | self.start_step = int(dataset_len * start_ratio) 68 | self.end_step = int(dataset_len * end_ratio) - sequence_length - chunk_size 69 | env.close() 70 | 71 | def open_lmdb(self): 72 | self.env = lmdb.open(self.lmdb_dir, readonly=True, create=False, lock=False) 73 | self.txn = self.env.begin() 74 | 75 | def __getitem__(self, idx): 76 | if hasattr(self, 'env') == 0: 77 | self.open_lmdb() 78 | 79 | idx = idx + self.start_step 80 | 81 | rgb_static = self.dummy_rgb_static.clone() 82 | rgb_gripper = self.dummy_rgb_gripper.clone() 83 | arm_state = self.dummy_arm_state.clone() 84 | gripper_state = self.dummy_gripper_state.clone() 85 | actions = self.dummy_actions.clone() 86 | mask = self.dummy_mask.clone() 87 | 88 | cur_episode = loads(self.txn.get(f'cur_episode_{idx}'.encode())) 89 | inst_token = loads(self.txn.get(f'inst_token_{cur_episode}'.encode())) 90 | for i in range(self.sequence_length): 91 | if loads(self.txn.get(f'cur_episode_{idx+i}'.encode())) == cur_episode: 92 | rgb_static[i] = 
decode_jpeg(loads(self.txn.get(f'rgb_static_{idx+i}'.encode()))) 93 | rgb_gripper[i] = decode_jpeg(loads(self.txn.get(f'rgb_gripper_{idx+i}'.encode()))) 94 | robot_obs = loads(self.txn.get(f'robot_obs_{idx+i}'.encode())) 95 | arm_state[i, :6] = robot_obs[:6] 96 | gripper_state[i, ((robot_obs[-1] + 1) / 2).long()] = 1 97 | for j in range(self.chunk_size): 98 | if loads(self.txn.get(f'cur_episode_{idx+i+j}'.encode())) == cur_episode: 99 | mask[i, j] = 1 100 | if self.action_mode == 'ee_rel_pose': 101 | actions[i, j] = loads(self.txn.get(f'rel_action_{idx+i+j}'.encode())) 102 | elif self.action_mode == 'ee_abs_pose': 103 | actions[i, j] = loads(self.txn.get(f'abs_action_{idx+i+j}'.encode())) 104 | actions[i, j, -1] = (actions[i, j, -1] + 1) / 2 105 | return { 106 | 'rgb_static': rgb_static, 107 | 'rgb_gripper': rgb_gripper, 108 | 'inst_token': inst_token, 109 | 'arm_state': arm_state, 110 | 'gripper_state': gripper_state, 111 | 'actions': actions, 112 | 'mask': mask, 113 | } 114 | 115 | def __len__(self): 116 | return self.end_step - self.start_step 117 | -------------------------------------------------------------------------------- /Main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import math 3 | import json 4 | from time import time 5 | from datetime import timedelta 6 | import numpy as np 7 | import torch 8 | import torch.nn.functional as F 9 | from torch.utils.data import DataLoader, random_split 10 | from transformers import get_cosine_schedule_with_warmup 11 | from accelerate import Accelerator 12 | from accelerate.utils import DistributedDataParallelKwargs, InitProcessGroupKwargs 13 | from torch.profiler import profile, record_function, ProfilerActivity, tensorboard_trace_handler 14 | from torch.utils.tensorboard import SummaryWriter 15 | import clip 16 | from LMDBDataset_jpeg import LMDBDataset as LMDBdst_jpeg 17 | from LMDBDataset_jpeg import DataPrefetcher as DataPrefetcher_jpeg 18 | from PreProcess import PreProcess 19 | import models.vision_transformer as vits 20 | from models.gr1 import GR1 21 | from AccelerateFix import AsyncStep 22 | 23 | def masked_loss(pred, target, mask, skip_frame=0, loss_func=F.mse_loss): 24 | if skip_frame == 0: 25 | new_pred = pred 26 | else: 27 | new_pred = pred[:, :-skip_frame] 28 | new_target = target[:, skip_frame:] 29 | new_mask = mask[:, skip_frame:] 30 | data_shape, mask_shape = new_pred.shape, new_mask.shape 31 | loss = loss_func(new_pred, new_target, reduction='none') 32 | for _ in range(len(data_shape) - len(mask_shape)): 33 | new_mask = new_mask.unsqueeze(-1) 34 | loss = (loss*new_mask).sum() / new_mask.sum() / math.prod(data_shape[len(mask_shape):]) 35 | return loss 36 | 37 | def train(acc, train_prefetcher, test_prefetcher, preprocessor, model, env, eva, eval_dir, optimizer, scheduler, device, cfg, step, writer): 38 | ''' 39 | prof = profile( 40 | schedule = torch.profiler.schedule( 41 | wait=20, 42 | warmup=3, 43 | active=4, 44 | repeat=1, 45 | ), 46 | activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], 47 | on_trace_ready=tensorboard_trace_handler(cfg['save_path']+'prof'), 48 | record_shapes=True, 49 | profile_memory=True, 50 | with_stack=True, 51 | with_flops=True, 52 | with_modules=True, 53 | ) 54 | prof.start() 55 | ''' 56 | 57 | train_dataset_len = len(train_prefetcher.loader.dataset) 58 | test_dataset_len = len(test_prefetcher.loader.dataset) 59 | eval_steps = train_dataset_len // test_dataset_len 60 | avg_reward = 0.0 61 | for epoch in range(cfg['num_epochs']): 62 | 
if epoch % cfg['save_epochs'] == 0: 63 | # in the 1st epoch, policy.ema has not been initialized. You may also load the wrong ckpt and modify the right one 64 | if epoch != 0: 65 | acc.wait_for_everyone() 66 | unwrapped_model = acc.unwrap_model(model) 67 | modules_to_exclude = ['model_mae', 'model_clip'] 68 | if hasattr(unwrapped_model, '_orig_mod'): 69 | state_dict = {k: v for k, v in unwrapped_model._orig_mod.state_dict().items() if not any(module_name in k for module_name in modules_to_exclude)} 70 | else: 71 | state_dict = {k: v for k, v in unwrapped_model.state_dict().items() if not any(module_name in k for module_name in modules_to_exclude)} 72 | acc.save({'state_dict': state_dict}, cfg['save_path']+'GR1_{}.pth'.format(epoch+cfg['load_epoch'])) 73 | 74 | if cfg['evaluate_during_training']: 75 | model.eval() 76 | avg_reward = torch.tensor(evaluate_policy( 77 | eva, 78 | env, 79 | cfg['save_path']+'success_rate.txt', 80 | cfg['save_path']+'result.txt', 81 | cfg['ep_len'], 82 | cfg['num_sequences'], 83 | acc.num_processes, 84 | acc.process_index, 85 | eval_dir, 86 | debug=cfg['record_evaluation_video'], 87 | )).float().mean().to(device) 88 | avg_reward = acc.gather_for_metrics(avg_reward).mean() 89 | 90 | log_loss = { 91 | 'rgb_static': 0, 92 | 'rgb_gripper': 0, 93 | 'action_arm': 0, 94 | 'action_gripper': 0, 95 | } 96 | eval_log_loss = { 97 | 'rgb_static': 0, 98 | 'rgb_gripper': 0, 99 | 'action_arm': 0, 100 | 'action_gripper': 0, 101 | } 102 | for key in log_loss: 103 | log_loss[key] = torch.tensor(0).float().to(device) 104 | for key in eval_log_loss: 105 | eval_log_loss[key] = torch.tensor(0).float().to(device) 106 | cum_load_time = 0 107 | clock = time() 108 | batch_idx = 0 109 | batch, load_time = train_prefetcher.next() 110 | while batch is not None: 111 | with acc.accumulate(model): 112 | model.train() 113 | optimizer.zero_grad() 114 | rgb_static, rgb_gripper = preprocessor.rgb_process(batch['rgb_static'], batch['rgb_gripper'], train=True) 115 | obs_mask = batch['mask'][..., 0] 116 | pred = model( 117 | rgb=rgb_static, 118 | hand_rgb=rgb_gripper, 119 | state={'arm': batch['arm_state'], 'gripper': batch['gripper_state']}, 120 | language=batch['inst_token'], 121 | attention_mask=obs_mask, 122 | ) 123 | loss = {} 124 | loss['rgb_static'] = masked_loss(pred['obs_preds'], pred['obs_targets'], obs_mask, cfg['skip_frame'], F.mse_loss) 125 | loss['rgb_gripper'] = masked_loss(pred['obs_hand_preds'], pred['obs_hand_targets'], obs_mask, cfg['skip_frame'], F.mse_loss) 126 | loss['action_arm'] = masked_loss(pred['arm_action_preds'], batch['actions'][..., :6], batch['mask'], 0, F.smooth_l1_loss) 127 | loss['action_gripper'] = masked_loss(pred['gripper_action_preds'], batch['actions'][..., -1:], batch['mask'], 0, F.binary_cross_entropy_with_logits) 128 | total_loss = loss['rgb_static'] + loss['rgb_gripper'] + cfg['arm_loss_ratio']*loss['action_arm'] + loss['action_gripper'] 129 | acc.backward(total_loss) 130 | optimizer.step(optimizer) 131 | for key in log_loss: 132 | log_loss[key] += loss[key].detach() / cfg['print_steps'] 133 | cum_load_time += load_time / cfg['print_steps'] 134 | 135 | if batch_idx % eval_steps == 0: 136 | with torch.no_grad(): 137 | model.eval() 138 | batch, _ = test_prefetcher.next_without_none() 139 | rgb_static, rgb_gripper = preprocessor.rgb_process(batch['rgb_static'], batch['rgb_gripper'], train=False) 140 | obs_mask = batch['mask'][..., 0] 141 | pred = model( 142 | rgb=rgb_static, 143 | hand_rgb=rgb_gripper, 144 | state={'arm': batch['arm_state'], 'gripper': 
batch['gripper_state']}, 145 | language=batch['inst_token'], 146 | attention_mask=obs_mask, 147 | ) 148 | loss = {} 149 | loss['rgb_static'] = masked_loss(pred['obs_preds'], pred['obs_targets'], obs_mask, cfg['skip_frame'], F.mse_loss) 150 | loss['rgb_gripper'] = masked_loss(pred['obs_hand_preds'], pred['obs_hand_targets'], obs_mask, cfg['skip_frame'], F.mse_loss) 151 | loss['action_arm'] = masked_loss(pred['arm_action_preds'], batch['actions'][..., :6], batch['mask'], 0, F.smooth_l1_loss) 152 | loss['action_gripper'] = masked_loss(pred['gripper_action_preds'], batch['actions'][..., -1:], batch['mask'], 0, F.binary_cross_entropy_with_logits) 153 | for key in eval_log_loss: 154 | eval_log_loss[key] += loss[key].detach() / cfg['print_steps'] * eval_steps 155 | 156 | if batch_idx % cfg['print_steps'] == 0 and batch_idx != 0: 157 | for key in log_loss: 158 | log_loss[key] = acc.gather_for_metrics(log_loss[key]).mean() 159 | for key in eval_log_loss: 160 | eval_log_loss[key] = acc.gather_for_metrics(eval_log_loss[key]).mean() 161 | load_pecnt = torch.tensor(cum_load_time / (time()-clock)).to(device) 162 | load_pecnt = acc.gather_for_metrics(load_pecnt).mean() 163 | fps = (cfg['bs_per_gpu']*cfg['print_steps']*cfg['seq_len']) / (time()-clock) 164 | fps = acc.gather_for_metrics(torch.tensor(fps).to(device)).sum() 165 | 166 | text = 'Train Epoch: {} [{}/{} ({:.0f}%)] Reward: {:.5f} FPS:{:.5f} Load Pertentage:{:.5f} LR:{}'.format( 167 | epoch, 168 | batch_idx * cfg['bs_per_gpu'] * acc.num_processes, 169 | train_dataset_len, 170 | 100. * batch_idx * cfg['bs_per_gpu'] * acc.num_processes / train_dataset_len, 171 | avg_reward, 172 | fps, 173 | load_pecnt, 174 | scheduler.get_last_lr()[0], 175 | ) 176 | for key in log_loss: 177 | text = text + ' {}_loss: {:.5f}'.format(key, log_loss[key]) 178 | for key in eval_log_loss: 179 | text = text + ' eval_{}_loss: {:.5f}'.format(key, eval_log_loss[key]) 180 | acc.print(text) 181 | if acc.is_main_process: 182 | for key in log_loss: 183 | writer.add_scalar(key+'_loss', log_loss[key], step) 184 | for key in eval_log_loss: 185 | writer.add_scalar('eval_'+key+'_loss', eval_log_loss[key], step) 186 | writer.add_scalar("reward", avg_reward, step) 187 | writer.add_scalar("learning rate", scheduler.get_last_lr()[0], step) 188 | writer.add_scalar("FPS", fps, step) 189 | writer.add_scalar("loading time in total time", load_pecnt, step) 190 | with open(cfg['save_path']+'step.json', 'w') as json_file: 191 | json.dump(step, json_file) 192 | 193 | for key in log_loss: 194 | log_loss[key] = torch.tensor(0).float().to(device) 195 | for key in eval_log_loss: 196 | eval_log_loss[key] = torch.tensor(0).float().to(device) 197 | cum_load_time = 0 198 | clock = time() 199 | scheduler.step() 200 | 201 | batch_idx += 1 202 | step += 1 203 | batch, load_time = train_prefetcher.next() 204 | ''' 205 | prof.step() 206 | if batch_idx == 28: 207 | prof.stop() 208 | ''' 209 | 210 | if __name__ == '__main__': 211 | # Preparation 212 | cfg = json.load(open('configs.json')) 213 | # The timeout here is 3600s to wait for other processes to finish the simulation 214 | init_pg_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=3600)) 215 | ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) 216 | acc = Accelerator( 217 | gradient_accumulation_steps=cfg['gradient_accumulation_steps'], 218 | kwargs_handlers=[init_pg_kwargs, ddp_kwargs] 219 | ) 220 | device = acc.device 221 | preprocessor = PreProcess( 222 | cfg['rgb_static_pad'], 223 | cfg['rgb_gripper_pad'], 224 | 
cfg['rgb_shape'], 225 | cfg['rgb_mean'], 226 | cfg['rgb_std'], 227 | device, 228 | ) 229 | train_dataset = LMDBdst_jpeg( 230 | cfg['LMDB_path'], 231 | cfg['seq_len'], 232 | cfg['chunk_size'], 233 | cfg['action_mode'], 234 | cfg['act_dim'], 235 | start_ratio = 0, 236 | end_ratio = 0.9, 237 | ) 238 | test_dataset = LMDBdst_jpeg( 239 | cfg['LMDB_path'], 240 | cfg['seq_len'], 241 | cfg['chunk_size'], 242 | cfg['action_mode'], 243 | cfg['act_dim'], 244 | start_ratio = 0.9, 245 | end_ratio = 1, 246 | ) 247 | train_loader = DataLoader( 248 | train_dataset, 249 | batch_size=cfg['bs_per_gpu'], # to be flattened in prefetcher 250 | num_workers=cfg['workers_per_gpu'], 251 | pin_memory=True, # Accelerate data reading 252 | shuffle=True, 253 | prefetch_factor=cfg['prefetch_factor'], 254 | persistent_workers=True, 255 | ) 256 | test_loader = DataLoader( 257 | test_dataset, 258 | batch_size=cfg['bs_per_gpu'], # to be flattened in prefetcher 259 | num_workers=cfg['workers_per_gpu'], 260 | pin_memory=True, # Accelerate data reading 261 | shuffle=True, 262 | prefetch_factor=cfg['prefetch_factor'], 263 | persistent_workers=True, 264 | ) 265 | model_clip, _ = clip.load(cfg['clip_backbone'], device=device) 266 | model_mae = vits.__dict__['vit_base'](patch_size=16, num_classes=0).to(device) 267 | checkpoint = torch.load(cfg['mae_ckpt']) 268 | model_mae.load_state_dict(checkpoint['model'], strict=False) 269 | model = GR1( 270 | model_clip, 271 | model_mae, 272 | rgb_shape=cfg['rgb_shape'], 273 | patch_size=cfg['patch_size'], 274 | state_dim=cfg['state_dim'], 275 | act_dim=cfg['act_dim'], 276 | hidden_size=cfg['embed_dim'], 277 | sequence_length=cfg['seq_len'], 278 | chunk_size=cfg['chunk_size'], 279 | training_target=['act_pred', 'fwd_pred', 'fwd_pred_hand'], 280 | img_feat_dim=cfg['img_feat_dim'], 281 | patch_feat_dim=cfg['patch_feat_dim'], 282 | lang_feat_dim=cfg['lang_feat_dim'], 283 | resampler_params={ 284 | 'depth': cfg['resampler_depth'], 285 | 'dim_head': cfg['resampler_dim_head'], 286 | 'heads': cfg['resampler_heads'], 287 | 'num_latents': cfg['resampler_num_latents'], 288 | 'num_media_embeds': cfg['resampler_num_media_embeds'], 289 | }, 290 | without_norm_pixel_loss=cfg['without_norm_pixel_loss'], 291 | use_hand_rgb=True, 292 | n_layer=cfg['n_layer'], 293 | n_head=cfg['n_head'], 294 | n_inner=4*cfg['embed_dim'], 295 | activation_function=cfg['activation_function'], 296 | n_positions=cfg['n_positions'], 297 | resid_pdrop=cfg['dropout'], 298 | attn_pdrop=cfg['dropout'], 299 | ).to(device) # for fused optimizer 300 | if cfg['load_bytedance_ckpt']: 301 | missing_keys, unexpected_keys = model.load_state_dict(torch.load(cfg['bytedance_ckpt_path'])['state_dict'], strict=False) 302 | acc.print('load ', cfg['bytedance_ckpt_path'], '\nmissing ', missing_keys, '\nunexpected ', unexpected_keys) 303 | elif os.path.isfile(cfg['save_path']+'GR1_{}.pth'.format(cfg['load_epoch'])): 304 | state_dict = torch.load(cfg['save_path']+'GR1_{}.pth'.format(cfg['load_epoch']))['state_dict'] 305 | missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) 306 | acc.print('load ', cfg['save_path']+'GR1_{}.pth'.format(cfg['load_epoch']), '\nmissing ', missing_keys, '\nunexpected ', unexpected_keys) 307 | if cfg['compile_model']: 308 | model = torch.compile(model) 309 | if os.path.isfile(cfg['save_path']+'step.json'): 310 | with open(cfg['save_path']+'step.json', 'r') as json_file: 311 | step = json.load(open(cfg['save_path']+'step.json')) 312 | else: 313 | step = 0 314 | optimizer = 
torch.optim.AdamW(model.parameters(), lr=cfg['lr_max'], weight_decay=cfg['weight_decay'], fused=True) 315 | total_prints_per_epoch = len(train_dataset) // (cfg['print_steps'] * cfg['bs_per_gpu'] * acc.num_processes) 316 | scheduler = get_cosine_schedule_with_warmup( 317 | optimizer, 318 | num_warmup_steps=cfg['num_warmup_epochs']*total_prints_per_epoch, 319 | num_training_steps=cfg['num_epochs']*total_prints_per_epoch, 320 | ) 321 | model, optimizer, train_loader, test_loader = acc.prepare( 322 | model, 323 | optimizer, 324 | train_loader, 325 | test_loader, 326 | device_placement=[True, True, False, False], 327 | ) 328 | optimizer.step = AsyncStep 329 | train_prefetcher = DataPrefetcher_jpeg(train_loader, device) 330 | test_prefetcher = DataPrefetcher_jpeg(test_loader, device) 331 | observation_space = { 332 | 'rgb_obs': ['rgb_static', 'rgb_gripper'], 333 | 'depth_obs': [], 334 | 'state_obs': ['robot_obs'], 335 | 'actions': ['rel_actions'], 336 | 'language': ['language']} 337 | eval_dir = cfg['save_path']+f'eval{torch.cuda.current_device()}/' 338 | os.makedirs(eval_dir, exist_ok=True) 339 | if cfg['evaluate_during_training']: 340 | from evaluate_calvin import make_env, evaluate_policy 341 | from evaluation.calvin_evaluation import GR1CalvinEvaluation 342 | env = make_env('./fake_dataset', observation_space, device) 343 | eva = GR1CalvinEvaluation(model, cfg, preprocessor, device) 344 | else: 345 | env = None 346 | eva = None 347 | writer = SummaryWriter(cfg['save_path'] + 'logs') 348 | 349 | # Train 350 | train(acc, train_prefetcher, test_prefetcher, preprocessor, model, env, eva, eval_dir, optimizer, scheduler, device, cfg, step, writer) 351 | -------------------------------------------------------------------------------- /PreProcess.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import torch 3 | import torch.nn.functional as F 4 | from torchvision.transforms.v2 import Resize 5 | 6 | def RandomShiftsAug(x, pad): 7 | x = x.float() 8 | b, t, c, h, w = x.size() 9 | assert h == w 10 | x = x.view(b*t, c, h, w) # reshape x to [B*T, C, H, W] 11 | padding = tuple([pad] * 4) 12 | x = F.pad(x, padding, "replicate") 13 | h_pad, w_pad = h + 2*pad, w + 2*pad # calculate the height and width after padding 14 | eps = 1.0 / (h_pad) 15 | arange = torch.linspace(-1.0 + eps, 1.0 - eps, h_pad, device=x.device, dtype=x.dtype)[:h] 16 | arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2) 17 | base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2) 18 | base_grid = base_grid.unsqueeze(0).repeat(b*t, 1, 1, 1) 19 | 20 | shift = torch.randint(0, 2 * pad + 1, size=(b, 1, 1, 1, 2), device=x.device, dtype=x.dtype) 21 | shift = shift.repeat(1, t, 1, 1, 1) # repeat the shift for each image in the sequence 22 | shift = shift.view(b*t, 1, 1, 2) # reshape shift to match the size of base_grid 23 | shift *= 2.0 / (h_pad) 24 | 25 | grid = base_grid + shift 26 | output = F.grid_sample(x, grid, padding_mode="zeros", align_corners=False) 27 | output = output.view(b, t, c, h, w) # reshape output back to [B, T, C, H, W] 28 | return output 29 | 30 | class PreProcess(): 31 | def __init__( 32 | self, 33 | rgb_static_pad, 34 | rgb_gripper_pad, 35 | rgb_shape, 36 | rgb_mean, 37 | rgb_std, 38 | device, 39 | ): 40 | self.resize = Resize([rgb_shape, rgb_shape], interpolation=Image.BICUBIC, antialias=True).to(device) 41 | self.rgb_static_pad = rgb_static_pad 42 | self.rgb_gripper_pad = rgb_gripper_pad 43 | self.rgb_mean = torch.tensor(rgb_mean, 
device=device).view(1, 1, -1, 1, 1)
44 |         self.rgb_std = torch.tensor(rgb_std, device=device).view(1, 1, -1, 1, 1)
45 | 
46 |     def rgb_process(self, rgb_static, rgb_gripper, train=False):
47 |         rgb_static = rgb_static.float()*(1/255.)
48 |         rgb_gripper = rgb_gripper.float()*(1/255.)
49 |         if train:
50 |             rgb_static = RandomShiftsAug(rgb_static, self.rgb_static_pad)
51 |             rgb_gripper = RandomShiftsAug(rgb_gripper, self.rgb_gripper_pad)
52 |         rgb_static = self.resize(rgb_static)
53 |         rgb_gripper = self.resize(rgb_gripper)
54 |         # torchvision Normalize forces sync between CPU and GPU, so we use our own
55 |         rgb_static = (rgb_static - self.rgb_mean) / (self.rgb_std + 1e-6)
56 |         rgb_gripper = (rgb_gripper - self.rgb_mean) / (self.rgb_std + 1e-6)
57 |         return rgb_static, rgb_gripper
58 | 
59 |     def rgb_back_process(self, rgb_static, rgb_gripper):
60 |         rgb_static = rgb_static * (self.rgb_std + 1e-6) + self.rgb_mean
61 |         rgb_gripper = rgb_gripper * (self.rgb_std + 1e-6) + self.rgb_mean
62 |         rgb_static = torch.clamp(rgb_static, 0, 1)
63 |         rgb_gripper = torch.clamp(rgb_gripper, 0, 1)
64 |         rgb_static *= 255.
65 |         rgb_gripper *= 255.
66 |         return rgb_static, rgb_gripper
67 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GR1-Training
2 | 
3 | 
4 | ![](README_md_files/aeb5db90-1cef-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
5 | 
6 | Note: Bytedance released GR-MG, which uses an image diffusion model to generate goal images for GR-1. They also released a pretrained GR-1 trained on the Ego4D dataset only. Please see this [link](https://gr-mg.github.io/).
7 | 
8 | A variant of GR-1: "Unleashing Large-Scale Video Generative Pre-training for Visual Robot Manipulation". It performs well on the [CALVIN Manipulation Benchmark](http://calvin.cs.uni-freiburg.de/) without using depth information. The original implementation is [here](https://github.com/bytedance/GR-1), but there is no training script.
9 | 
10 | **This variant has higher performance than the original implementation on the CALVIN benchmark (current SOTA on the ABC->D scenario; I may test ABCD->D later).** The details, data, multi-GPU training and evaluation code are fully open-source.
11 | 
12 | Another nice implementation on the CALVIN benchmark is [MDT](https://github.com/intuitive-robots/mdt_policy), which uses a DiT action head + image-language alignment loss + masked future image prediction. For differences between MDT and my GR-1 variants, please refer to this [issue](https://github.com/intuitive-robots/mdt_policy/issues/3).
13 | 
14 | Please remember I build systems for you ヾ(^▽\^*)). Feel free to ask [@StarCycle](https://github.com/StarCycle) if you have any question!
15 | 
16 | Also star and cite this repository (and of course the original implementation) if you find it helpful!
17 | 
18 | ## News
19 | **[2024.7.31]** We also release [mimictest](https://github.com/EDiRobotics/mimictest), which includes RT-1, Diffusion Policy, and Florence policy on the robomimic benchmark. Florence policy is modified from Microsoft's Florence2 VLM, which is trained on 900M images with VQA, detection, segmentation, and OCR tasks. We add a linear action head or a diffusion transformer action head to it. Since Florence policy only contains 0.2/0.7B parameters, it is more lightweight than OpenVLA, RT-2, Roboflamingo, etc.
20 | 
21 | **[2024.7.10]** Now it can render the predicted videos.
Here are some examples:
22 | 
23 | | Real | Predicted |
24 | |--|--|
25 | | ![input image description](README_md_files/5c029c40-3e77-11ef-b06d-69973bdd91b6.jpeg?v=1&type=image) | ![input image description](README_md_files/67158a70-3e77-11ef-b06d-69973bdd91b6.jpeg?v=1&type=image) |
26 | | ![input image description](README_md_files/7d6dc170-3e77-11ef-b06d-69973bdd91b6.jpeg?v=1&type=image) | ![](README_md_files/839ab800-3e77-11ef-b06d-69973bdd91b6.jpeg?v=1&type=image) |
27 | 
28 | To render the videos, please set `without_norm_pixel_loss=true` during training and `record_evaluation_video=true` during inference. Training GR-Chunk with `without_norm_pixel_loss=true` seems to yield a lower average length (3.46 on ABC->D; the checkpoint is [here](https://huggingface.co/datasets/StarCycle/calvin_lmdb/blob/main/GR1_chunksize10_withoutpixnorm.pth)).
29 | 
30 | **[2024.6.17]** Release the initial version of **GR-Diffusion**, which denoises both the predicted images and actions. It can directly load Bytedance's GR-1 checkpoint, but its performance is worse than GR-Chunk.
31 | 
32 | Please refer to the [grdiffusion](https://github.com/EDiRobotics/GR1-Training/tree/grdiffusion) branch.
33 | 
34 | **[2024.5.28]** Release **GR-Chunk**, which has higher performance. Specifically, the following are updated:
35 | 
36 | - The actions predicted by GR-Chunk have shape (sequence length, chunk size, action dim), which improves the average length from 3.25 to 3.556 on CALVIN's ABC->D scenario (I uploaded the log to the `evaluation_logs` folder). See the method section. However, you can always load Bytedance's weights and use their settings by modifying `configs.json`.
37 | - This implementation can be directly used for multi-GPU training and evaluation. I run it on 12*4090 GPUs, but it can be easily scaled if you have more computing resources.
38 | - Unlike the original implementation, GR-Chunk does not have old dependencies like `transformers==4.5.1`. Other dependencies mainly come from CALVIN, so you can discard them if you use other environments.
39 | - I use the same image shifting approach as the original implementation. The hyper-parameters (except for chunking) are as close as possible.
40 | - Add an independent evaluation script and modify some APIs.
41 | 
42 | **[2024.5.16]** Initial data, checkpoint, training & evaluation code released.
43 | 
44 | 
45 | ## Installation
46 | 
47 | Set up the conda environment and install the CALVIN benchmark. Notice that you do not need this step if you don't want to use the CALVIN simulation (in that case, just install my repository).
48 | ```
49 | source activate
50 | conda create -n gr python=3.8
51 | conda activate gr
52 | git clone --recurse-submodules https://github.com/mees/calvin.git
53 | pip install setuptools==57.5.0
54 | cd calvin
55 | cd calvin_env; git checkout main
56 | cd ../calvin_models
57 | sed -i 's/pytorch-lightning==1.8.6/pytorch-lightning/g' requirements.txt
58 | sed -i 's/torch==1.13.1/torch/g' requirements.txt
59 | cd ..
60 | sh ./install.sh
61 | cd ..
62 | ```
63 | Install this repository:
64 | ```
65 | git clone https://github.com/EDiRobotics/GR1-Training
66 | cd ./GR1-Training
67 | pip install -r requirements.txt
68 | apt-get install -y libegl1-mesa libegl1
69 | apt-get install -y libgl1
70 | apt-get install -y libosmesa6-dev
71 | apt-get install -y patchelf
72 | ```
73 | 
74 | ## Prepare Dataset
75 | You do not need to download any datasets if you just want to evaluate the checkpoint.
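For reference, each training sample yielded by `LMDBDataset_jpeg.py` is a dict of tensors. The following sketch prints their shapes; it is not part of the repo and assumes the default `configs.json` hyper-parameters plus an already-converted `./calvin_lmdb` folder (the conversion commands are given below):

```
# Minimal sanity check of a converted LMDB dataset (a sketch, not part of the repo).
# Assumes ./calvin_lmdb exists and the default configs.json hyper-parameters.
from LMDBDataset_jpeg import LMDBDataset

dataset = LMDBDataset(
    './calvin_lmdb',
    sequence_length=10,       # seq_len in configs.json
    chunk_size=10,
    action_mode='ee_rel_pose',
    action_dim=7,
    start_ratio=0,
    end_ratio=0.9,
)
sample = dataset[0]
for key, value in sample.items():
    print(key, tuple(value.shape))
# Expected: rgb_static (10, 3, 200, 200), rgb_gripper (10, 3, 84, 84),
# inst_token (77,), arm_state (10, 6), gripper_state (10, 2),
# actions (10, 10, 7), mask (10, 10).
```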
76 | 
77 | If you want to train it, the [original CALVIN dataset](https://github.com/mees/calvin/tree/main/dataset) is too large (~500GB for each task) and contains trajectory data without any language annotation. Here, we only use the trajectories with annotations for training (the same as GR-1 and 3D Diffuser Actor).
78 | 
79 | As an example, let's download the CALVIN debug dataset (1.3GB) and convert it to our LMDB format.
80 | ```
81 | wget http://calvin.cs.uni-freiburg.de/dataset/calvin_debug_dataset.zip
82 | unzip calvin_debug_dataset.zip
83 | python calvin2lmdb.py --input_dir ./calvin_debug_dataset --output_dir ./calvin_lmdb
84 | ```
85 | You can also download the processed LMDB dataset (ABC->D split) from HuggingFace. The LMDB dataset only takes ~23GB, while the original ABC->D split takes 517GB. In this example, I use the [HF-Mirror](https://hf-mirror.com/) tool. You can set the environment variable `export HF_ENDPOINT=https://hf-mirror.com` to avoid connection problems in some regions.
86 | ```
87 | rm -rf calvin_lmdb
88 | apt install git-lfs aria2
89 | wget https://hf-mirror.com/hfd/hfd.sh
90 | chmod a+x hfd.sh
91 | ./hfd.sh StarCycle/calvin_lmdb --dataset --tool aria2c -x 4
92 | ```
93 | ## Config HuggingFace Accelerate & Setup CALVIN Simulation
94 | 
95 | To configure accelerate, run this command and follow its guidance. I use `bf16` in training.
96 | ```
97 | accelerate config
98 | ```
99 | 
100 | To set up CALVIN, use
101 | ```
102 | export CALVIN_ROOT=
103 | ```
104 | 
105 | ## Prepare Weights
106 | You need to download the weights of the ViT visual encoder. The ViT weights are [here](https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth). The CLIP weights will also be downloaded automatically when the model is loaded.
107 | 
108 | For the policy weights, you can use [Bytedance's weights for CALVIN's ABC->D](https://lf-robot-opensource.bytetos.com/obj/lab-robot-public/gr1_code_release/snapshot_ABC.pt) or my weights. My weights (chunk size==1 or 10) are within the HuggingFace dataset mentioned above.
109 | 
110 | If you choose to use Bytedance's weights, please set the following in `configs.json`. My implementation is always compatible with Bytedance's weights no matter what chunk size you choose.
111 | ```
112 | "bytedance_ckpt_path": "",
113 | "load_bytedance_ckpt": true,
114 | "mae_ckpt": "",
115 | ```
116 | 
117 | If you choose to use my weights, please set
118 | ```
119 | "load_bytedance_ckpt": false,
120 | "load_epoch": ,
121 | "save_path": "",
122 | "mae_ckpt": "",
123 | ```
124 | It loads `GR1_<load_epoch>.pth` from the `save_path` folder (`./Save/` by default). Notice that:
125 | - To use my weights, copy the checkpoint from the downloaded LMDB dataset and rename it as `GR1_<load_epoch>.pth`.
126 | - My weights with different `chunk_size` are incompatible, but you can slightly modify `state_dict['action_chunk_queries.weight']` to convert between them (see the sketch below).
127 | - `GR1_0_chunksize1.pth` (197MB) is trained with transformers 4.11, which will save `attn.bias` and `attn.mask_bias`. By contrast, `GR1_0_chunksize10.pth` (186MB) is trained with the newest transformers, which will not save such parameters.
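As a concrete example, here is a minimal, hypothetical sketch of converting a `chunk_size==1` checkpoint for use with `chunk_size==10`. The exact layout of `action_chunk_queries.weight` is an assumption (check `models/gr1.py` before relying on it), and the file paths are only examples:

```
# Hypothetical sketch: adapt a chunk_size==1 checkpoint to chunk_size==10.
# Assumption: action_chunk_queries.weight holds one learned query row per chunk slot.
import torch

ckpt = torch.load('./calvin_lmdb/GR1_0_chunksize1.pth', map_location='cpu')
state_dict = ckpt['state_dict']

# Checkpoints saved with old transformers versions also contain attn.bias /
# attn.mask_bias buffers; drop them so they do not clash with newer versions.
state_dict = {k: v for k, v in state_dict.items()
              if not (k.endswith('attn.bias') or k.endswith('attn.mask_bias'))}

# Tile the single learned action query so each of the 10 chunk slots starts
# from the same initialization.
new_chunk_size = 10
queries = state_dict['action_chunk_queries.weight']   # assumed shape: (1, embed_dim)
state_dict['action_chunk_queries.weight'] = queries.repeat(new_chunk_size, 1)

torch.save({'state_dict': state_dict}, './Save/GR1_0.pth')
```

A checkpoint converted this way is only a starting point; some finetuning with the new `chunk_size` is still expected.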
128 | 
129 | ## Evaluation
130 | 
131 | Remember to set these in `configs.json` (you do not have to use my hyper-parameters):
132 | ```
133 | "record_evaluation_video": "",
134 | "bytedance_ckpt_path": "",
135 | "load_bytedance_ckpt": ,
136 | "load_epoch": ,
137 | "num_sequences": ,
138 | "ep_len": ,
139 | "chunk_size": ,
140 | "test_chunk_size": ,
141 | ```
142 | 
143 | Then simply run
144 | ```
145 | accelerate launch evaluate_calvin.py
146 | ```
147 | It will run N simulations in parallel on N GPUs (depending on what you set in `accelerate config`). If you choose to record the evaluation video, the videos will be saved in a per-GPU `eval` folder (e.g. `eval0`) under the `save_path` you specify.
148 | 
149 | **If you have EGL-related errors**: these can usually be fixed by installing some packages, depending on your system. I recommend using `conda install -c conda-forge <package>` instead of `apt install`, since that usually fixes the error for me. You can contact me if it's still not solved.
150 | 
151 | ## Training
152 | 
153 | After setting `configs.json`, you can simply launch training with
154 | ```
155 | accelerate launch Main.py
156 | ```
157 | If `"evaluate_during_training": true` is set in `configs.json`, the policy will be evaluated after every epoch of training.
158 | 
159 | ## GR-Chunk Method
160 | 
161 | Let's assume `sequence_len==4`, `chunk_size==3`, `test_chunk_size==2`, and the current timestep is 4. The input and output of the original GR-1 are simply (s1, s2, s3, s4) and (a1, a2, a3, a4), respectively. In the environment, we take the predicted action a4.
162 | 
163 | By contrast, the output in GR-Chunk is ((a1, a2, a3), (a2, a3, a4), (a3, a4, a5), (a4, a5, a6)). In the environment, we take the predicted actions a4 and a5 in consecutive timesteps, and then run the policy to predict future actions again.
164 | 
165 | A similar approach is taken in Meta's recent [4-token prediction LLM](https://arxiv.org/pdf/2404.19737), [ACT/Aloha](https://github.com/tonyzhaozh/act) of @[tonyzhaozh](https://github.com/tonyzhaozh/act/commits?author=tonyzhaozh), [Diffusion Policy](https://github.com/real-stanford/diffusion_policy) of @[cheng-chi](https://github.com/real-stanford/diffusion_policy/commits?author=cheng-chi), and [Octo](https://github.com/octo-models/octo).
166 | 
167 | In my experiments, the temporal ensembling of ACT does not improve the success rate. Octo reaches a similar conclusion. As reported by @[tonyzhaozh](https://github.com/tonyzhaozh/act/commits?author=tonyzhaozh), temporal ensembling seems to work better with small data.
168 | 
169 | The best `test_chunk_size` seems to be 1 when `chunk_size==10`; see the following ablation:
170 | 
171 | | Configuration | Avg. Len of ABC->D |
172 | |--|--|
173 | | chunk_size=1, test_chunk_size=1 | 3.257 |
174 | | chunk_size=10, test_chunk_size=1 | 3.556 |
175 | | chunk_size=10, test_chunk_size=2 | 2.145 |
176 | 
177 | ## Detailed Analysis of the Original Network and API
178 | Please refer to this [issue](https://github.com/bytedance/GR-1/issues/4).
179 | 
180 | ## Training Curves
181 | 
182 | I first finetune the policy with `chunk_size==1` and then keep finetuning it with `chunk_size==10`. Each phase takes 20 epochs. I guess you can directly train it with `chunk_size==10`.
183 | 
184 | I do not evaluate the policy during training because the server I use now does not have Nvidia EGL support.
185 | 
186 | First, finetune it with `chunk_size==1` (8*4090 GPUs, batch size per GPU 22, gradient accumulation step 3):
187 | 
188 | Static camera reconstruction loss
189 | ![](README_md_files/5fcbb420-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
190 | 
191 | Wrist camera reconstruction loss
192 | ![](README_md_files/4ffc0130-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
193 | 
194 | Arm action loss
195 | ![](README_md_files/0b7854f0-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
196 | 
197 | Gripper action loss
198 | ![](README_md_files/26b6d1b0-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
199 | 
200 | Learning rate
201 | ![](README_md_files/3f840050-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
202 | 
203 | Then finetune it with `chunk_size==10` (8*4090 GPUs, batch size per GPU 19, gradient accumulation step 3):
204 | 
205 | Static camera reconstruction loss
206 | ![](README_md_files/b7bc1030-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
207 | 
208 | Wrist camera reconstruction loss
209 | ![](README_md_files/caabc7d0-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
210 | 
211 | Arm action loss
212 | ![](README_md_files/e53350a0-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
213 | 
214 | Gripper action loss
215 | ![](README_md_files/f9942470-1cf7-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
216 | 
217 | Learning rate
218 | ![](README_md_files/0de42f60-1cf8-11ef-b8f1-596730d257b3.jpeg?v=1&type=image)
219 | 
220 | ## Notice
221 | - I have not trained it from scratch yet ([according to the authors](https://github.com/bytedance/GR-1/issues/2), the Ego4D pretraining takes 4 days on 32 V100 16GB GPUs, and the CALVIN finetuning takes 1 day on 32 V100 16GB GPUs). I just finetuned their checkpoint to adapt it to my training pipeline.
222 | - When I measure the success rate of Bytedance's weights, I get an average length of 3.25 instead of the 3.06 reported in their original paper. Perhaps they did not fix this [issue](https://github.com/mees/calvin/issues/32#issuecomment-1363352121) in the CALVIN benchmark, while 3D Diffuser Actor fixed it.
223 | 
224 | ## Acknowledgement
225 | Great thanks to [@bdrhtw](https://github.com/bdrhtw) for making it open-source!
226 | 
227 | ## Feel Free to Contact Me!
228 | 
229 | 
230 | Email: zhuohengli@foxmail.com
231 | 
232 | Find Zhuoheng Li in the HuggingFace LeRobot server: [![Discord](https://dcbadge.vercel.app/api/server/C5P34WJ68S?style=flat)](https://discord.gg/s3KuuzsPFb)
233 | (I'm trying to merge this repo into LeRobot)
234 | 
235 | Wechat group:
236 | 
237 | ![image](https://github.com/EDiRobotics/GR1-Training/assets/33491471/1250bcc6-52a8-4da0-89d2-c8ed55bb4613)
238 | 
239 | Or feel free to open an issue here.
240 | -------------------------------------------------------------------------------- /README_md_files/0b7854f0-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/0b7854f0-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/0de42f60-1cf8-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/0de42f60-1cf8-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/26b6d1b0-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/26b6d1b0-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/3f840050-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/3f840050-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/4bd0ac70-3e74-11ef-b06d-69973bdd91b6.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/4bd0ac70-3e74-11ef-b06d-69973bdd91b6.jpeg -------------------------------------------------------------------------------- /README_md_files/4ffc0130-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/4ffc0130-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/5c029c40-3e77-11ef-b06d-69973bdd91b6.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/5c029c40-3e77-11ef-b06d-69973bdd91b6.jpeg -------------------------------------------------------------------------------- /README_md_files/5fcbb420-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/5fcbb420-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/67158a70-3e77-11ef-b06d-69973bdd91b6.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/67158a70-3e77-11ef-b06d-69973bdd91b6.jpeg -------------------------------------------------------------------------------- 
/README_md_files/7d6dc170-3e77-11ef-b06d-69973bdd91b6.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/7d6dc170-3e77-11ef-b06d-69973bdd91b6.jpeg -------------------------------------------------------------------------------- /README_md_files/839ab800-3e77-11ef-b06d-69973bdd91b6.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/839ab800-3e77-11ef-b06d-69973bdd91b6.jpeg -------------------------------------------------------------------------------- /README_md_files/aeb5db90-1cef-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/aeb5db90-1cef-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/b7bc1030-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/b7bc1030-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/c070ff40-1cf9-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/c070ff40-1cf9-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/caabc7d0-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/caabc7d0-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/e53350a0-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/e53350a0-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /README_md_files/f9942470-1cf7-11ef-b8f1-596730d257b3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/README_md_files/f9942470-1cf7-11ef-b8f1-596730d257b3.jpeg -------------------------------------------------------------------------------- /calvin2lmdb.py: -------------------------------------------------------------------------------- 1 | import os 2 | import io 3 | import argparse 4 | import lmdb 5 | from pickle import dumps, loads 6 | import numpy as np 7 | import torch 8 | from torchvision.transforms.functional import resize 9 | from torchvision.io import encode_jpeg 10 | import clip 11 | from einops import rearrange, repeat 12 | 13 | def save_to_lmdb(output_dir, input_dir): 14 | env = lmdb.open(output_dir, map_size=int(3e12), 
readonly=False, lock=False) # maximum size of memory map is 3TB 15 | annotations = np.load(os.path.join(input_dir, 'lang_annotations/auto_lang_ann.npy'), allow_pickle=True).tolist()['language']['ann'] 16 | start_end_ids = np.load(os.path.join(input_dir, 'lang_annotations/auto_lang_ann.npy'), allow_pickle=True).tolist()['info']['indx'] 17 | with env.begin(write=True) as txn: 18 | if txn.get('cur_step'.encode()) is not None: 19 | cur_step = loads(txn.get('cur_step'.encode())) + 1 20 | cur_episode = loads(txn.get(f'cur_episode_{cur_step - 1}'.encode())) + 1 21 | else: 22 | cur_step = 0 23 | cur_episode = 0 24 | 25 | for index, (start, end) in enumerate(start_end_ids): 26 | print(f'{index/len(start_end_ids)}') 27 | inst = annotations[index] 28 | txn.put(f'inst_{cur_episode}'.encode(), dumps(inst)) 29 | with torch.no_grad(): 30 | inst_token = clip.tokenize(inst) 31 | inst_emb = model_clip.encode_text(inst_token.cuda()).cpu() 32 | txn.put(f'inst_token_{cur_episode}'.encode(), dumps(inst_token[0])) 33 | txn.put(f'inst_emb_{cur_episode}'.encode(), dumps(inst_emb[0])) 34 | for i in range(start, end+1): 35 | frame = np.load(os.path.join(input_dir, f'episode_{i:07}.npz')) 36 | txn.put('cur_step'.encode(), dumps(cur_step)) 37 | txn.put(f'cur_episode_{cur_step}'.encode(), dumps(cur_episode)) 38 | txn.put(f'done_{cur_step}'.encode(), dumps(False)) 39 | rgb_static = torch.from_numpy(rearrange(frame['rgb_static'], 'h w c -> c h w')) 40 | txn.put(f'rgb_static_{cur_step}'.encode(), dumps(encode_jpeg(rgb_static))) 41 | rgb_gripper = torch.from_numpy(rearrange(frame['rgb_gripper'], 'h w c -> c h w')) 42 | txn.put(f'rgb_gripper_{cur_step}'.encode(), dumps(encode_jpeg(rgb_gripper))) 43 | txn.put(f'abs_action_{cur_step}'.encode(), dumps(torch.from_numpy(frame['actions']))) 44 | txn.put(f'rel_action_{cur_step}'.encode(), dumps(torch.from_numpy(frame['rel_actions']))) 45 | txn.put(f'scene_obs_{cur_step}'.encode(), dumps(torch.from_numpy(frame['scene_obs']))) 46 | txn.put(f'robot_obs_{cur_step}'.encode(), dumps(torch.from_numpy(frame['robot_obs']))) 47 | cur_step += 1 48 | txn.put(f'done_{cur_step-1}'.encode(), dumps(True)) 49 | cur_episode += 1 50 | env.close() 51 | 52 | if __name__ == '__main__': 53 | parser = argparse.ArgumentParser(description="Transfer CALVIN dataset to lmdb format.") 54 | parser.add_argument("--input_dir", default='./ABC_D', type=str, help="Original dataset directory.") 55 | parser.add_argument("--output_dir", default='./calvin_lmdb', type=str, help="Output lmdb dataset directory.") 56 | args = parser.parse_args() 57 | model_clip, _ = clip.load('ViT-B/32', device='cuda:0') 58 | if not os.path.exists(args.output_dir): 59 | os.makedirs(args.output_dir) 60 | save_to_lmdb(args.output_dir, os.path.join(args.input_dir, 'training')) 61 | save_to_lmdb(args.output_dir, os.path.join(args.input_dir, 'validation')) -------------------------------------------------------------------------------- /configs.json: -------------------------------------------------------------------------------- 1 | { 2 | "evaluate_during_training": true, 3 | "record_evaluation_video": false, 4 | "compile_model": false, 5 | "bytedance_ckpt_path": "./snapshot_ABC.pt", 6 | "load_bytedance_ckpt": false, 7 | "load_epoch": 0, 8 | "save_epochs": 1, 9 | "num_epochs": 20, 10 | "num_sequences": 1000, 11 | "ep_len": 360, 12 | "print_steps": 100, 13 | "lr_max": 0.0009, 14 | "weight_decay": 0.0001, 15 | "arm_loss_ratio": 100, 16 | "num_warmup_epochs": 1, 17 | "bs_per_gpu": 19, 18 | "workers_per_gpu": 7, 19 | "prefetch_factor": 2, 20 |
"gradient_accumulation_steps": 3, 21 | "rgb_static_pad": 10, 22 | "rgb_gripper_pad": 4, 23 | "rgb_shape": 224, 24 | "patch_size": 16, 25 | "rgb_mean": [0.485, 0.456, 0.406], 26 | "rgb_std": [0.229, 0.224, 0.225], 27 | "LMDB_path": "../calvin_lmdb/", 28 | "save_path": "./Save/", 29 | "mae_ckpt": "./mae_pretrain_vit_base.pth", 30 | "embed_dim": 384, 31 | "n_layer": 12, 32 | "n_head": 12, 33 | "activation_function": "relu", 34 | "dropout": 0.1, 35 | "n_positions": 1024, 36 | "device": "cuda", 37 | "resampler_depth": 3, 38 | "resampler_dim_head": 128, 39 | "resampler_heads": 4, 40 | "resampler_num_media_embeds": 1, 41 | "resampler_num_latents": 9, 42 | "seq_len": 10, 43 | "chunk_size": 10, 44 | "test_chunk_size": 1, 45 | "skip_frame": 3, 46 | "action_mode": "ee_rel_pose", 47 | "act_dim": 7, 48 | "state_dim": 7, 49 | "use_hand_rgb": true, 50 | "clip_backbone": "ViT-B/32", 51 | "act_pred": true, 52 | "fwd_pred": true, 53 | "fwd_pred_hand": true, 54 | "fwd_pred_next_n": 3, 55 | "without_norm_pixel_loss": false, 56 | "img_feat_dim": 768, 57 | "patch_feat_dim": 768, 58 | "lang_feat_dim": 512 59 | } 60 | -------------------------------------------------------------------------------- /evaluate_calvin.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2021 Oier Mees 4 | # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates 5 | 6 | # Permission is hereby granted, free of charge, to any person obtaining a copy 7 | # of this software and associated documentation files (the "Software"), to deal 8 | # in the Software without restriction, including without limitation the rights 9 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | # copies of the Software, and to permit persons to whom the Software is 11 | # furnished to do so, subject to the following conditions: 12 | 13 | # The above copyright notice and this permission notice shall be included in all 14 | # copies or substantial portions of the Software. 15 | 16 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | # SOFTWARE. 
23 | 24 | """Code to evaluate Calvin.""" 25 | import argparse 26 | import json 27 | import logging 28 | import os 29 | from pathlib import Path 30 | import sys 31 | import time 32 | import copy 33 | from moviepy.editor import ImageSequenceClip 34 | from accelerate import Accelerator 35 | from datetime import timedelta 36 | from accelerate.utils import InitProcessGroupKwargs 37 | 38 | # This is for using the locally installed repo clone when using slurm 39 | from calvin_agent.models.calvin_base_model import CalvinBaseModel 40 | 41 | sys.path.insert(0, Path(__file__).absolute().parents[2].as_posix()) 42 | 43 | from calvin_agent.evaluation.multistep_sequences import get_sequences 44 | from calvin_agent.evaluation.utils import ( 45 | count_success, 46 | get_env_state_for_initial_condition, 47 | get_log_dir, 48 | ) 49 | import hydra 50 | import numpy as np 51 | from omegaconf import OmegaConf 52 | from pytorch_lightning import seed_everything 53 | from termcolor import colored 54 | import torch 55 | from tqdm.auto import tqdm 56 | 57 | from evaluation.calvin_evaluation import GR1CalvinEvaluation 58 | from utils.calvin_utils import print_and_save 59 | import clip 60 | from PreProcess import PreProcess 61 | import models.vision_transformer as vits 62 | from models.gr1 import GR1 63 | 64 | logger = logging.getLogger(__name__) 65 | 66 | os.environ["FFMPEG_BINARY"] = "auto-detect" 67 | CALVIN_ROOT = os.environ['CALVIN_ROOT'] 68 | 69 | def make_env(dataset_path, observation_space, device): 70 | val_folder = Path(dataset_path) / "validation" 71 | from evaluation.calvin_env_wrapper_raw import CalvinEnvWrapperRaw 72 | env = CalvinEnvWrapperRaw(val_folder, observation_space, device) 73 | return env 74 | 75 | 76 | def evaluate_policy(model, env, eval_sr_path, eval_result_path, ep_len, num_sequences, num_procs, procs_id, eval_dir=None, debug=False): 77 | conf_dir = Path(f"{CALVIN_ROOT}/calvin_models") / "conf" 78 | task_cfg = OmegaConf.load(conf_dir / "callbacks/rollout/tasks/new_playtable_tasks.yaml") 79 | task_oracle = hydra.utils.instantiate(task_cfg) 80 | val_annotations = OmegaConf.load(conf_dir / "annotations/new_playtable_validation.yaml") 81 | eval_dir = get_log_dir(eval_dir) 82 | eval_sequences = get_sequences(num_sequences) 83 | num_seq_per_procs = num_sequences // num_procs 84 | eval_sequences = eval_sequences[num_seq_per_procs*procs_id:num_seq_per_procs*(procs_id+1)] 85 | 86 | results = [] 87 | if not debug: 88 | eval_sequences = tqdm(eval_sequences, position=0, leave=True) 89 | 90 | sequence_i = 0 91 | for initial_state, eval_sequence in eval_sequences: 92 | result = evaluate_sequence(env, model, task_oracle, initial_state, eval_sequence, val_annotations, debug, eval_dir, sequence_i, ep_len) 93 | results.append(result) 94 | if not debug: 95 | success_list = count_success(results) 96 | with open(eval_sr_path, 'a') as f: 97 | line =f"{sequence_i}/{num_sequences}: " 98 | for sr in success_list: 99 | line += f"{sr:.3f} | " 100 | sequence_i += 1 101 | line += "\n" 102 | f.write(line) 103 | eval_sequences.set_description( 104 | " ".join([f"{i + 1}/5 : {v * 100:.1f}% |" for i, v in enumerate(success_list)]) + "|" 105 | ) 106 | else: 107 | sequence_i += 1 108 | print_and_save(results, eval_sequences, eval_result_path, None) 109 | return results 110 | 111 | 112 | def evaluate_sequence(env, model, task_checker, initial_state, eval_sequence, val_annotations, debug, eval_dir, sequence_i, ep_len): 113 | robot_obs, scene_obs = get_env_state_for_initial_condition(initial_state) 114 | 
env.reset(robot_obs=robot_obs, scene_obs=scene_obs) 115 | success_counter = 0 116 | if debug: 117 | time.sleep(1) 118 | print() 119 | print() 120 | print(f"Evaluating sequence: {' -> '.join(eval_sequence)}") 121 | print("Subtask: ", end="") 122 | for subtask_i, subtask in enumerate(eval_sequence): 123 | success = rollout(env, model, task_checker, subtask, val_annotations, debug, eval_dir, subtask_i, sequence_i, ep_len) 124 | if success: 125 | success_counter += 1 126 | else: 127 | return success_counter 128 | return success_counter 129 | 130 | 131 | def rollout(env, model, task_oracle, subtask, val_annotations, debug, eval_dir, subtask_i, sequence_i, ep_len): 132 | if debug: 133 | print(f"{subtask} ", end="") 134 | time.sleep(0.5) 135 | obs = env.get_obs() 136 | lang_annotation = val_annotations[subtask][0] 137 | model.reset() 138 | start_info = env.get_info() 139 | if debug: 140 | img_dict = { 141 | 'static': [], 142 | 'gripper': [], 143 | 'pred_static': [], 144 | 'pred_gripper': [], 145 | } 146 | unfinished = 0 147 | for step in range(ep_len): 148 | if unfinished == 0: 149 | output = model.step(obs, lang_annotation) 150 | action = output['action_pred'] 151 | unfinished = action.shape[0] 152 | obs, _, _, current_info = env.step(action[-unfinished]) 153 | unfinished -= 1 154 | if debug: 155 | img_dict['static'].append(copy.deepcopy(obs['rgb_obs']['rgb_static'])) 156 | img_dict['gripper'].append(copy.deepcopy(obs['rgb_obs']['rgb_gripper'])) 157 | img_dict['pred_static'].append(copy.deepcopy(output['obs_preds'][0, -1].astype(np.uint8))) 158 | img_dict['pred_gripper'].append(copy.deepcopy(output['obs_hand_preds'][0, -1].astype(np.uint8))) 159 | # check if current step solves a task 160 | current_task_info = task_oracle.get_task_info_for_set(start_info, current_info, {subtask}) 161 | if len(current_task_info) > 0: 162 | if debug: 163 | print(colored("success", "green"), end=" ") 164 | for key in img_dict.keys(): 165 | clip = ImageSequenceClip(img_dict[key], fps=30) 166 | clip.write_gif(os.path.join(eval_dir, f'{sequence_i}-{subtask_i}-{subtask}-{key}-succ.gif'), fps=30) 167 | return True 168 | if debug: 169 | print(colored("fail", "red"), end=" ") 170 | for key in img_dict.keys(): 171 | clip = ImageSequenceClip(img_dict[key], fps=30) 172 | clip.write_gif(os.path.join(eval_dir, f'{sequence_i}-{subtask_i}-{subtask}-{key}-fail.gif'), fps=30) 173 | return False 174 | 175 | 176 | def main(): 177 | # Preparation 178 | cfg = json.load(open('configs.json')) 179 | # The timeout here is 3600s to wait for other processes to finish the simulation 180 | kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=3600)) 181 | acc = Accelerator(kwargs_handlers=[kwargs]) 182 | device = acc.device 183 | preprocessor = PreProcess( 184 | cfg['rgb_static_pad'], 185 | cfg['rgb_gripper_pad'], 186 | cfg['rgb_shape'], 187 | cfg['rgb_mean'], 188 | cfg['rgb_std'], 189 | device, 190 | ) 191 | model_clip, _ = clip.load(cfg['clip_backbone'], device=device) 192 | model_mae = vits.__dict__['vit_base'](patch_size=16, num_classes=0).to(device) 193 | checkpoint = torch.load(cfg['mae_ckpt']) 194 | model_mae.load_state_dict(checkpoint['model'], strict=False) 195 | model = GR1( 196 | model_clip, 197 | model_mae, 198 | rgb_shape=cfg['rgb_shape'], 199 | patch_size=cfg['patch_size'], 200 | state_dim=cfg['state_dim'], 201 | act_dim=cfg['act_dim'], 202 | hidden_size=cfg['embed_dim'], 203 | sequence_length=cfg['seq_len'], 204 | chunk_size=cfg['chunk_size'], 205 | training_target=['act_pred', 'fwd_pred', 'fwd_pred_hand'], 206 | 
img_feat_dim=cfg['img_feat_dim'], 207 | patch_feat_dim=cfg['patch_feat_dim'], 208 | lang_feat_dim=cfg['lang_feat_dim'], 209 | resampler_params={ 210 | 'depth': cfg['resampler_depth'], 211 | 'dim_head': cfg['resampler_dim_head'], 212 | 'heads': cfg['resampler_heads'], 213 | 'num_latents': cfg['resampler_num_latents'], 214 | 'num_media_embeds': cfg['resampler_num_media_embeds'], 215 | }, 216 | without_norm_pixel_loss=cfg['without_norm_pixel_loss'], 217 | skip_frame=cfg['skip_frame'], 218 | use_hand_rgb=True, 219 | n_layer=cfg['n_layer'], 220 | n_head=cfg['n_head'], 221 | n_inner=4*cfg['embed_dim'], 222 | activation_function=cfg['activation_function'], 223 | n_positions=cfg['n_positions'], 224 | resid_pdrop=cfg['dropout'], 225 | attn_pdrop=cfg['dropout'], 226 | ).to(device) # for fused optimizer 227 | if cfg['load_bytedance_ckpt']: 228 | model.load_state_dict(torch.load(cfg['bytedance_ckpt_path'])['state_dict'], strict=False) 229 | acc.print('load ', cfg['bytedance_ckpt_path'] ) 230 | elif os.path.isfile(cfg['save_path']+'GR1_{}.pth'.format(cfg['load_epoch'])): 231 | state_dict = torch.load(cfg['save_path']+'GR1_{}.pth'.format(cfg['load_epoch']))['state_dict'] 232 | model.load_state_dict(state_dict, strict=False) 233 | acc.print('load ', cfg['save_path']+'GR1_{}.pth'.format(cfg['load_epoch'])) 234 | if cfg['compile_model']: 235 | model = torch.compile(model) 236 | model = acc.prepare(model, device_placement=[True]) 237 | observation_space = { 238 | 'rgb_obs': ['rgb_static', 'rgb_gripper'], 239 | 'depth_obs': [], 240 | 'state_obs': ['robot_obs'], 241 | 'actions': ['rel_actions'], 242 | 'language': ['language']} 243 | eval_dir = cfg['save_path']+f'eval{torch.cuda.current_device()}/' 244 | os.makedirs(eval_dir, exist_ok=True) 245 | env = make_env('./fake_dataset', observation_space, device) 246 | eva = GR1CalvinEvaluation(model, cfg, preprocessor, device) 247 | model.eval() 248 | avg_reward = torch.tensor(evaluate_policy( 249 | eva, 250 | env, 251 | cfg['save_path']+'success_rate.txt', 252 | cfg['save_path']+'result.txt', 253 | cfg['ep_len'], 254 | cfg['num_sequences'], 255 | acc.num_processes, 256 | acc.process_index, 257 | eval_dir, 258 | debug=cfg['record_evaluation_video'], 259 | )).float().mean().to(device) 260 | acc.wait_for_everyone() 261 | avg_reward = acc.gather_for_metrics(avg_reward).mean() 262 | if acc.is_main_process: 263 | print('average success rate ', avg_reward) 264 | 265 | if __name__ == "__main__": 266 | main() 267 | -------------------------------------------------------------------------------- /evaluation/calvin_env_wrapper_raw.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2021 Oier Mees 4 | # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates 5 | 6 | # Permission is hereby granted, free of charge, to any person obtaining a copy 7 | # of this software and associated documentation files (the "Software"), to deal 8 | # in the Software without restriction, including without limitation the rights 9 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | # copies of the Software, and to permit persons to whom the Software is 11 | # furnished to do so, subject to the following conditions: 12 | 13 | # The above copyright notice and this permission notice shall be included in all 14 | # copies or substantial portions of the Software. 
15 | 16 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | # SOFTWARE. 23 | 24 | import logging 25 | import os 26 | from typing import Any, Dict, Tuple, Union 27 | 28 | import gym 29 | import numpy as np 30 | import torch 31 | 32 | from calvin_env.envs.play_table_env import get_env 33 | from calvin_env.utils.utils import EglDeviceNotFoundError, get_egl_device_id 34 | 35 | logger = logging.getLogger(__name__) 36 | 37 | 38 | class CalvinEnvWrapperRaw(gym.Wrapper): 39 | def __init__(self, abs_datasets_dir, observation_space, device, show_gui=False, **kwargs): 40 | """Environment wrapper which returns raw observations. 41 | 42 | Args: 43 | abs_datasets_dir: absolute dataset directory 44 | observation_space: {'rgb_obs': ['rgb_static', 'rgb_gripper'], 'depth_obs': [], 'state_obs': ['robot_obs'], 'actions': ['rel_actions'], 'language': ['language']} 45 | """ 46 | self.set_egl_device(device) 47 | env = get_env( 48 | abs_datasets_dir, show_gui=show_gui, obs_space=observation_space, **kwargs 49 | ) 50 | super(CalvinEnvWrapperRaw, self).__init__(env) 51 | self.observation_space_keys = observation_space 52 | self.device = device 53 | self.relative_actions = "rel_actions" in self.observation_space_keys["actions"] 54 | logger.info(f"Initialized PlayTableEnv for device {self.device}") 55 | 56 | @staticmethod 57 | def set_egl_device(device): 58 | if "EGL_VISIBLE_DEVICES" in os.environ: 59 | logger.warning("Environment variable EGL_VISIBLE_DEVICES is already set. Is this intended?") 60 | # modified: cuda_id = device.index if device.type == "cuda" else 0 61 | cuda_id = torch.cuda.current_device() 62 | try: 63 | egl_id = get_egl_device_id(cuda_id) 64 | except EglDeviceNotFoundError: 65 | logger.warning(
"Couldn't find correct EGL device. Setting EGL_VISIBLE_DEVICES=0. " 67 | "When using DDP with many GPUs this can lead to OOM errors. " 68 | "Did you install PyBullet correctly? Please refer to calvin env README" 69 | ) 70 | egl_id = 0 71 | os.environ["EGL_VISIBLE_DEVICES"] = str(egl_id) 72 | logger.info(f"EGL_DEVICE_ID {egl_id} <==> CUDA_DEVICE_ID {cuda_id}") 73 | 74 | def step( 75 | self, action_tensor: torch.Tensor 76 | ) -> Tuple[Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]], int, bool, Dict]: 77 | if self.relative_actions: 78 | action = action_tensor.squeeze().cpu().detach().numpy() 79 | assert len(action) == 7 80 | else: 81 | if action_tensor.shape[-1] == 7: 82 | slice_ids = [3, 6] 83 | elif action_tensor.shape[-1] == 8: 84 | slice_ids = [3, 7] 85 | else: 86 | logger.error("actions are required to have length 7 (for euler angles) or 8 (for quaternions)") 87 | raise NotImplementedError 88 | action = np.split(action_tensor.squeeze().cpu().detach().numpy(), slice_ids) 89 | action[-1] = 1 if action[-1] > 0 else -1 90 | o, r, d, i = self.env.step(action) 91 | 92 | # obs = self.transform_observation(o) 93 | obs = o # use raw observation 94 | return obs, r, d, i 95 | 96 | def reset( 97 | self, 98 | reset_info: Dict[str, Any] = None, 99 | batch_idx: int = 0, 100 | seq_idx: int = 0, 101 | scene_obs: Any = None, 102 | robot_obs: Any = None, 103 | ) -> Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]]: 104 | if reset_info is not None: 105 | obs = self.env.reset( 106 | robot_obs=reset_info["robot_obs"][batch_idx, seq_idx], 107 | scene_obs=reset_info["scene_obs"][batch_idx, seq_idx], 108 | ) 109 | elif scene_obs is not None or robot_obs is not None: 110 | obs = self.env.reset(scene_obs=scene_obs, robot_obs=robot_obs) 111 | else: 112 | obs = self.env.reset() 113 | 114 | # return self.transform_observation(obs) 115 | return obs # use raw observation 116 | 117 | def get_info(self): 118 | return self.env.get_info() 119 | 120 | def get_obs(self): 121 | obs = self.env.get_obs() 122 | # return self.transform_observation(obs) 123 | return obs # use raw observation 124 | -------------------------------------------------------------------------------- /evaluation/calvin_evaluation.py: -------------------------------------------------------------------------------- 1 | # Copyright (2024) Bytedance Ltd. and/or its affiliates 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
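A minimal, illustrative sketch (not a file in this repository) of the history-padding pattern used by GR1CalvinEvaluation.step below: it keeps only the newest seq_len frames, zero-pads shorter histories, and flags the real frames with an attention mask. The pad_history helper and its names are hypothetical.

import torch

def pad_history(frames, seq_len):
    # frames: list of (c, h, w) tensors, oldest first; keep only the newest seq_len
    frames = frames[-seq_len:]
    c, h, w = frames[0].shape
    data = torch.zeros((1, seq_len, c, h, w))
    data[0, :len(frames)] = torch.stack(frames, dim=0)
    mask = torch.zeros(1, seq_len, dtype=torch.long)
    mask[0, :len(frames)] = 1  # 1 marks a real frame, 0 marks padding
    return data, mask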
14 | 15 | """Class for evaluating GR-1 on Calvin Benchmark.""" 16 | import torch 17 | import torchvision.transforms as T 18 | import torch.nn.functional as F 19 | import numpy as np 20 | from einops import rearrange 21 | 22 | import clip 23 | 24 | import models.vision_transformer as vits 25 | from models.gr1 import GR1 26 | 27 | from calvin_agent.models.calvin_base_model import CalvinBaseModel 28 | 29 | 30 | class GR1CalvinEvaluation(CalvinBaseModel): 31 | def __init__(self, 32 | policy, 33 | variant, 34 | preprocessor, 35 | device 36 | ): 37 | """Constructor.""" 38 | self.tokenizer = clip.tokenize 39 | self.seq_len = variant['seq_len'] 40 | self.chunk_size = variant['chunk_size'] 41 | self.test_chunk_size = variant['test_chunk_size'] 42 | self.use_hand_rgb = variant['use_hand_rgb'] 43 | self.act_dim = variant['act_dim'] 44 | self.rgb_shape = variant['rgb_shape'] 45 | self.patch_size = variant['patch_size'] 46 | self.state_dim = variant['state_dim'] 47 | self.device = device 48 | 49 | # Preprocess 50 | self.preprocessor = preprocessor 51 | self.policy = policy 52 | 53 | def reset(self): 54 | """Reset function.""" 55 | self.rgb_list = [] 56 | self.hand_rgb_list = [] 57 | self.state_list = [] 58 | self.rollout_step_counter = 0 59 | 60 | def step(self, obs, goal): 61 | """Step function.""" 62 | # Language 63 | text = goal 64 | tokenized_text = self.tokenizer(text) 65 | 66 | # RGB 67 | rgb = rearrange(torch.from_numpy(obs['rgb_obs']['rgb_static']), 'h w c -> c h w') 68 | hand_rgb = rearrange(torch.from_numpy(obs['rgb_obs']['rgb_gripper']), 'h w c -> c h w') 69 | self.rgb_list.append(rgb) 70 | self.hand_rgb_list.append(hand_rgb) 71 | 72 | # State 73 | state = obs['robot_obs'] 74 | arm_state = state[:6] 75 | gripper_state = state[-1] 76 | state = torch.from_numpy(np.hstack([arm_state, gripper_state])) 77 | self.state_list.append(state) 78 | 79 | # Buffer 80 | buffer_len = len(self.rgb_list) 81 | if buffer_len > self.seq_len: 82 | self.rgb_list.pop(0) 83 | self.hand_rgb_list.pop(0) 84 | self.state_list.pop(0) 85 | assert len(self.rgb_list) == self.seq_len 86 | assert len(self.hand_rgb_list) == self.seq_len 87 | assert len(self.state_list) == self.seq_len 88 | buffer_len = len(self.rgb_list) 89 | 90 | # Static RGB 91 | c, h, w = rgb.shape 92 | rgb_data = torch.zeros((1, self.seq_len, c, h, w)) 93 | rgb_tensor = torch.stack(self.rgb_list, dim=0) # (t, c, h, w) 94 | rgb_data[0, :buffer_len] = rgb_tensor 95 | 96 | # Hand RGB 97 | c, h, w = hand_rgb.shape 98 | hand_rgb_data = torch.zeros((1, self.seq_len, c, h, w)) 99 | hand_rgb_tensor = torch.stack(self.hand_rgb_list, dim=0) # (t, c, h, w) 100 | hand_rgb_data[0, :buffer_len] = hand_rgb_tensor 101 | 102 | # State 103 | state_tensor = torch.stack(self.state_list, dim=0) # (l, act_dim) 104 | gripper_state_data = - torch.ones((1, self.seq_len)).float() 105 | gripper_state_data[0, :buffer_len] = state_tensor[:, 6] 106 | gripper_state_data = (gripper_state_data + 1.0) / 2 107 | gripper_state_data = gripper_state_data.long() 108 | gripper_state_data = F.one_hot(gripper_state_data, num_classes=2).float() # (1, t, 2) 109 | arm_state_data = torch.zeros((1, self.seq_len, self.act_dim - 1)).float() # (1, t, act_dim - 1) 110 | arm_state_data[0, :buffer_len] = state_tensor[:, :6] 111 | 112 | # Attention mask 113 | attention_mask = torch.zeros(1, self.seq_len).long() 114 | attention_mask[0, :buffer_len] = 1 115 | 116 | # Forward pass 117 | tokenized_text = tokenized_text.to(self.device) 118 | rgb_data = rgb_data.to(self.device) 119 | hand_rgb_data = 
hand_rgb_data.to(self.device) 120 | arm_state_data = arm_state_data.to(self.device) 121 | gripper_state_data = gripper_state_data.to(self.device) 122 | state_data = {'arm': arm_state_data, 'gripper': gripper_state_data} 123 | attention_mask = attention_mask.to(self.device) 124 | 125 | rgb_data, hand_rgb_data = self.preprocessor.rgb_process(rgb_data, hand_rgb_data, train=False) 126 | 127 | with torch.no_grad(): 128 | prediction = self.policy( 129 | rgb=rgb_data, 130 | hand_rgb=hand_rgb_data, 131 | state=state_data, 132 | language=tokenized_text, 133 | attention_mask=attention_mask 134 | ) 135 | 136 | # Arm action 137 | arm_action_preds = prediction['arm_action_preds'] # (1, t, chunk_size, act_dim - 1) 138 | arm_action_preds = arm_action_preds.view(-1, self.chunk_size, self.act_dim - 1) # (t, chunk_size, act_dim - 1) 139 | arm_action_preds = arm_action_preds[attention_mask.flatten() > 0] 140 | 141 | # Gripper action 142 | gripper_action_preds = prediction['gripper_action_preds'] # (1, t, chunk_size, 1) 143 | gripper_action_preds = gripper_action_preds.view(-1, self.chunk_size, 1) # (t, chunk_size, 1) 144 | gripper_action_preds = gripper_action_preds[attention_mask.flatten() > 0] 145 | 146 | # Use the last action 147 | arm_action_pred = arm_action_preds[-1, :self.test_chunk_size] # (test_chunk_size, act_dim - 1) 148 | gripper_action_pred = gripper_action_preds[-1, :self.test_chunk_size] # (test_chunk_size, 1) 149 | gripper_action_pred = torch.nn.Sigmoid()(gripper_action_pred) 150 | gripper_action_pred = gripper_action_pred > 0.5 151 | gripper_action_pred = gripper_action_pred.int().float() 152 | gripper_action_pred = gripper_action_pred * 2.0 - 1.0 153 | action_pred = torch.cat((arm_action_pred, gripper_action_pred), dim=-1) # (act_dim,) 154 | action_pred = action_pred.detach().cpu() 155 | 156 | self.rollout_step_counter += 1 157 | 158 | ps = self.patch_size 159 | pn = self.rgb_shape // ps 160 | obs_preds = rearrange(prediction['obs_preds'], 'b s (hp wp) (p1 p2 c) -> b s c (hp p1) (wp p2)', hp=pn, wp=pn, p1=ps, p2=ps, c=3) 161 | obs_hand_preds = rearrange(prediction['obs_hand_preds'], 'b s (hp wp) (p1 p2 c) -> b s c (hp p1) (wp p2)', hp=pn, wp=pn, p1=ps, p2=ps, c=3) 162 | obs_preds, obs_hand_preds = self.preprocessor.rgb_back_process(obs_preds, obs_hand_preds) 163 | obs_preds = rearrange(obs_preds, 'b s c h w -> b s h w c') 164 | obs_hand_preds = rearrange(obs_hand_preds, 'b s c h w -> b s h w c') 165 | 166 | output = { 167 | 'obs_preds': obs_preds.cpu().numpy(), 168 | 'obs_hand_preds': obs_hand_preds.cpu().numpy(), 169 | 'action_pred': action_pred, 170 | } 171 | return output 172 | -------------------------------------------------------------------------------- /evaluation_logs/result.txt: -------------------------------------------------------------------------------- 1 | {"null": {"avg_seq_len": 3.424, "chain_sr": {"1": 0.928, "2": 0.792, "3": 0.672, "4": 0.576, "5": 0.456}, "task_info": {"lift_pink_block_slider": {"success": 16, "total": 17}, "place_in_drawer": {"success": 17, "total": 19}, "push_blue_block_right": {"success": 6, "total": 8}, "open_drawer": {"success": 36, "total": 36}, "lift_blue_block_table": {"success": 17, "total": 19}, "stack_block": {"success": 14, "total": 18}, "turn_on_led": {"success": 20, "total": 22}, "close_drawer": {"success": 20, "total": 20}, "place_in_slider": {"success": 32, "total": 40}, "rotate_blue_block_left": {"success": 6, "total": 6}, "lift_red_block_table": {"success": 24, "total": 25}, "push_blue_block_left": {"success": 4, "total": 5}, 
"move_slider_left": {"success": 27, "total": 30}, "rotate_pink_block_left": {"success": 4, "total": 4}, "turn_off_led": {"success": 13, "total": 13}, "push_into_drawer": {"success": 11, "total": 12}, "move_slider_right": {"success": 34, "total": 35}, "push_pink_block_left": {"success": 12, "total": 12}, "lift_blue_block_slider": {"success": 10, "total": 10}, "lift_pink_block_table": {"success": 18, "total": 23}, "unstack_block": {"success": 8, "total": 8}, "rotate_red_block_right": {"success": 8, "total": 11}, "turn_off_lightbulb": {"success": 11, "total": 16}, "rotate_red_block_left": {"success": 6, "total": 7}, "rotate_pink_block_right": {"success": 10, "total": 12}, "turn_on_lightbulb": {"success": 13, "total": 26}, "push_red_block_right": {"success": 4, "total": 7}, "lift_red_block_slider": {"success": 7, "total": 8}, "rotate_blue_block_right": {"success": 9, "total": 11}, "push_red_block_left": {"success": 6, "total": 8}, "lift_red_block_drawer": {"success": 3, "total": 3}, "lift_blue_block_drawer": {"success": 2, "total": 2}, "push_pink_block_right": {"success": 0, "total": 3}}}} -------------------------------------------------------------------------------- /evaluation_logs/success_rate.txt: -------------------------------------------------------------------------------- 1 | 0/1000: 0.000 | 0.000 | 0.000 | 0.000 | 0.000 | 2 | 0/1000: 1.000 | 1.000 | 1.000 | 1.000 | 1.000 | 3 | 0/1000: 1.000 | 1.000 | 0.000 | 0.000 | 0.000 | 4 | 0/1000: 1.000 | 1.000 | 0.000 | 0.000 | 0.000 | 5 | 0/1000: 1.000 | 1.000 | 1.000 | 1.000 | 1.000 | 6 | 0/1000: 1.000 | 1.000 | 1.000 | 1.000 | 0.000 | 7 | 0/1000: 1.000 | 1.000 | 1.000 | 1.000 | 0.000 | 8 | 0/1000: 1.000 | 1.000 | 1.000 | 0.000 | 0.000 | 9 | 1/1000: 0.500 | 0.500 | 0.000 | 0.000 | 0.000 | 10 | 1/1000: 1.000 | 1.000 | 0.500 | 0.500 | 0.500 | 11 | 1/1000: 1.000 | 1.000 | 1.000 | 1.000 | 1.000 | 12 | 1/1000: 1.000 | 1.000 | 1.000 | 0.500 | 0.500 | 13 | 1/1000: 1.000 | 1.000 | 1.000 | 1.000 | 0.500 | 14 | 1/1000: 1.000 | 1.000 | 1.000 | 0.500 | 0.000 | 15 | 1/1000: 0.500 | 0.500 | 0.500 | 0.000 | 0.000 | 16 | 1/1000: 1.000 | 1.000 | 1.000 | 1.000 | 1.000 | 17 | 2/1000: 1.000 | 1.000 | 0.667 | 0.667 | 0.667 | 18 | 2/1000: 0.667 | 0.667 | 0.333 | 0.333 | 0.333 | 19 | 2/1000: 1.000 | 1.000 | 1.000 | 1.000 | 0.667 | 20 | 2/1000: 1.000 | 1.000 | 0.667 | 0.333 | 0.333 | 21 | 2/1000: 1.000 | 1.000 | 1.000 | 0.667 | 0.333 | 22 | 2/1000: 1.000 | 1.000 | 1.000 | 1.000 | 1.000 | 23 | 2/1000: 0.667 | 0.667 | 0.667 | 0.333 | 0.333 | 24 | 3/1000: 0.750 | 0.750 | 0.500 | 0.500 | 0.500 | 25 | 3/1000: 0.750 | 0.750 | 0.500 | 0.500 | 0.500 | 26 | 2/1000: 1.000 | 0.667 | 0.667 | 0.667 | 0.667 | 27 | 3/1000: 1.000 | 1.000 | 0.750 | 0.500 | 0.500 | 28 | 3/1000: 1.000 | 0.750 | 0.750 | 0.750 | 0.500 | 29 | 3/1000: 1.000 | 1.000 | 1.000 | 0.750 | 0.500 | 30 | 3/1000: 1.000 | 1.000 | 1.000 | 1.000 | 1.000 | 31 | 4/1000: 0.800 | 0.800 | 0.600 | 0.600 | 0.600 | 32 | 3/1000: 1.000 | 0.750 | 0.750 | 0.750 | 0.750 | 33 | 4/1000: 1.000 | 1.000 | 0.800 | 0.600 | 0.600 | 34 | 4/1000: 1.000 | 0.600 | 0.600 | 0.600 | 0.400 | 35 | 4/1000: 1.000 | 1.000 | 1.000 | 1.000 | 1.000 | 36 | 4/1000: 0.800 | 0.800 | 0.600 | 0.600 | 0.600 | 37 | 4/1000: 1.000 | 1.000 | 0.800 | 0.600 | 0.400 | 38 | 3/1000: 0.750 | 0.750 | 0.500 | 0.250 | 0.250 | 39 | 4/1000: 1.000 | 0.800 | 0.800 | 0.800 | 0.800 | 40 | 5/1000: 0.833 | 0.833 | 0.667 | 0.667 | 0.500 | 41 | 5/1000: 1.000 | 0.833 | 0.667 | 0.500 | 0.500 | 42 | 5/1000: 1.000 | 0.667 | 0.667 | 0.667 | 0.500 | 43 | 5/1000: 0.833 | 0.833 | 0.667 | 
0.667 | 0.667 | 44 | 5/1000: 0.833 | 0.833 | 0.667 | 0.500 | 0.333 | 45 | 5/1000: 1.000 | 1.000 | 0.833 | 0.833 | 0.833 | 46 | 6/1000: 1.000 | 0.857 | 0.714 | 0.571 | 0.571 | 47 | 6/1000: 0.857 | 0.714 | 0.571 | 0.571 | 0.429 | 48 | 4/1000: 0.800 | 0.800 | 0.600 | 0.400 | 0.200 | 49 | 5/1000: 1.000 | 0.667 | 0.667 | 0.667 | 0.667 | 50 | 6/1000: 0.857 | 0.714 | 0.571 | 0.571 | 0.571 | 51 | 6/1000: 1.000 | 0.714 | 0.714 | 0.571 | 0.429 | 52 | 6/1000: 1.000 | 1.000 | 0.857 | 0.857 | 0.857 | 53 | 6/1000: 0.857 | 0.857 | 0.714 | 0.571 | 0.286 | 54 | 7/1000: 0.875 | 0.750 | 0.625 | 0.625 | 0.625 | 55 | 5/1000: 0.833 | 0.833 | 0.667 | 0.500 | 0.333 | 56 | 7/1000: 1.000 | 0.750 | 0.750 | 0.625 | 0.500 | 57 | 7/1000: 1.000 | 0.875 | 0.750 | 0.625 | 0.500 | 58 | 6/1000: 1.000 | 0.714 | 0.714 | 0.714 | 0.714 | 59 | 6/1000: 0.857 | 0.857 | 0.714 | 0.571 | 0.429 | 60 | 8/1000: 0.778 | 0.667 | 0.556 | 0.556 | 0.556 | 61 | 7/1000: 1.000 | 1.000 | 0.875 | 0.750 | 0.750 | 62 | 7/1000: 0.875 | 0.750 | 0.625 | 0.625 | 0.375 | 63 | 8/1000: 1.000 | 0.889 | 0.778 | 0.667 | 0.556 | 64 | 8/1000: 1.000 | 0.778 | 0.667 | 0.556 | 0.444 | 65 | 7/1000: 0.875 | 0.875 | 0.750 | 0.500 | 0.250 | 66 | 8/1000: 1.000 | 0.889 | 0.778 | 0.667 | 0.667 | 67 | 9/1000: 1.000 | 0.800 | 0.700 | 0.600 | 0.500 | 68 | 9/1000: 0.800 | 0.700 | 0.600 | 0.600 | 0.600 | 69 | 7/1000: 0.875 | 0.875 | 0.750 | 0.625 | 0.375 | 70 | 8/1000: 0.889 | 0.778 | 0.667 | 0.444 | 0.222 | 71 | 8/1000: 0.889 | 0.778 | 0.667 | 0.667 | 0.444 | 72 | 7/1000: 1.000 | 0.750 | 0.750 | 0.750 | 0.750 | 73 | 9/1000: 1.000 | 0.900 | 0.700 | 0.600 | 0.500 | 74 | 9/1000: 0.900 | 0.800 | 0.700 | 0.600 | 0.600 | 75 | 8/1000: 0.889 | 0.889 | 0.778 | 0.667 | 0.444 | 76 | 10/1000: 1.000 | 0.818 | 0.727 | 0.636 | 0.545 | 77 | 9/1000: 0.900 | 0.800 | 0.700 | 0.700 | 0.500 | 78 | 9/1000: 0.900 | 0.800 | 0.700 | 0.500 | 0.300 | 79 | 8/1000: 1.000 | 0.778 | 0.778 | 0.778 | 0.778 | 80 | 10/1000: 0.818 | 0.727 | 0.636 | 0.636 | 0.636 | 81 | 10/1000: 0.909 | 0.818 | 0.636 | 0.545 | 0.455 | 82 | 10/1000: 0.818 | 0.727 | 0.636 | 0.545 | 0.545 | 83 | 10/1000: 0.909 | 0.818 | 0.727 | 0.727 | 0.545 | 84 | 10/1000: 0.909 | 0.818 | 0.727 | 0.545 | 0.364 | 85 | 11/1000: 1.000 | 0.833 | 0.750 | 0.583 | 0.500 | 86 | 9/1000: 0.900 | 0.900 | 0.700 | 0.600 | 0.400 | 87 | 11/1000: 0.750 | 0.667 | 0.583 | 0.500 | 0.500 | 88 | 9/1000: 1.000 | 0.700 | 0.700 | 0.700 | 0.700 | 89 | 11/1000: 0.833 | 0.750 | 0.583 | 0.583 | 0.583 | 90 | 11/1000: 0.917 | 0.833 | 0.583 | 0.500 | 0.417 | 91 | 11/1000: 0.917 | 0.833 | 0.750 | 0.583 | 0.417 | 92 | 11/1000: 0.917 | 0.833 | 0.750 | 0.750 | 0.500 | 93 | 10/1000: 0.909 | 0.909 | 0.727 | 0.636 | 0.455 | 94 | 12/1000: 1.000 | 0.769 | 0.692 | 0.538 | 0.462 | 95 | 12/1000: 0.769 | 0.692 | 0.615 | 0.538 | 0.538 | 96 | 12/1000: 0.923 | 0.846 | 0.769 | 0.615 | 0.462 | 97 | 12/1000: 0.923 | 0.846 | 0.615 | 0.538 | 0.462 | 98 | 12/1000: 0.846 | 0.769 | 0.538 | 0.538 | 0.538 | 99 | 13/1000: 0.786 | 0.714 | 0.643 | 0.571 | 0.571 | 100 | 13/1000: 1.000 | 0.786 | 0.714 | 0.571 | 0.500 | 101 | 13/1000: 0.929 | 0.857 | 0.643 | 0.571 | 0.500 | 102 | 12/1000: 0.923 | 0.846 | 0.692 | 0.692 | 0.462 | 103 | 10/1000: 1.000 | 0.727 | 0.727 | 0.636 | 0.636 | 104 | 13/1000: 0.786 | 0.714 | 0.500 | 0.500 | 0.500 | 105 | 13/1000: 0.929 | 0.857 | 0.714 | 0.571 | 0.429 | 106 | 14/1000: 0.733 | 0.667 | 0.600 | 0.533 | 0.533 | 107 | 14/1000: 1.000 | 0.800 | 0.667 | 0.533 | 0.467 | 108 | 11/1000: 0.917 | 0.917 | 0.750 | 0.667 | 0.417 | 109 | 13/1000: 0.929 | 0.857 | 0.714 | 0.714 | 0.500 | 
110 | 14/1000: 0.933 | 0.867 | 0.667 | 0.600 | 0.533 | 111 | 14/1000: 0.933 | 0.867 | 0.733 | 0.600 | 0.467 | 112 | 15/1000: 0.750 | 0.688 | 0.625 | 0.562 | 0.562 | 113 | 11/1000: 1.000 | 0.667 | 0.667 | 0.583 | 0.583 | 114 | 14/1000: 0.800 | 0.733 | 0.533 | 0.533 | 0.467 | 115 | 14/1000: 0.933 | 0.867 | 0.733 | 0.733 | 0.533 | 116 | 15/1000: 0.938 | 0.875 | 0.688 | 0.625 | 0.562 | 117 | 15/1000: 1.000 | 0.812 | 0.688 | 0.562 | 0.500 | 118 | 12/1000: 0.923 | 0.923 | 0.692 | 0.615 | 0.385 | 119 | 16/1000: 0.765 | 0.706 | 0.647 | 0.588 | 0.588 | 120 | 16/1000: 0.941 | 0.882 | 0.706 | 0.647 | 0.588 | 121 | 15/1000: 0.938 | 0.875 | 0.750 | 0.750 | 0.562 | 122 | 15/1000: 0.938 | 0.875 | 0.750 | 0.562 | 0.438 | 123 | 16/1000: 1.000 | 0.765 | 0.647 | 0.529 | 0.471 | 124 | 15/1000: 0.812 | 0.750 | 0.500 | 0.500 | 0.438 | 125 | 13/1000: 0.929 | 0.929 | 0.714 | 0.643 | 0.429 | 126 | 17/1000: 0.722 | 0.667 | 0.611 | 0.556 | 0.556 | 127 | 12/1000: 1.000 | 0.692 | 0.692 | 0.538 | 0.538 | 128 | 17/1000: 0.944 | 0.889 | 0.722 | 0.667 | 0.611 | 129 | 16/1000: 0.882 | 0.824 | 0.706 | 0.706 | 0.529 | 130 | 16/1000: 0.882 | 0.824 | 0.706 | 0.529 | 0.412 | 131 | 16/1000: 0.824 | 0.765 | 0.529 | 0.529 | 0.471 | 132 | 17/1000: 1.000 | 0.778 | 0.611 | 0.500 | 0.444 | 133 | 18/1000: 0.684 | 0.632 | 0.579 | 0.526 | 0.526 | 134 | 18/1000: 0.947 | 0.895 | 0.737 | 0.684 | 0.632 | 135 | 17/1000: 0.833 | 0.778 | 0.556 | 0.556 | 0.500 | 136 | 14/1000: 0.933 | 0.933 | 0.733 | 0.600 | 0.400 | 137 | 19/1000: 0.700 | 0.650 | 0.600 | 0.550 | 0.550 | 138 | 18/1000: 1.000 | 0.789 | 0.632 | 0.526 | 0.474 | 139 | 13/1000: 1.000 | 0.714 | 0.714 | 0.571 | 0.500 | 140 | 17/1000: 0.889 | 0.833 | 0.722 | 0.722 | 0.500 | 141 | 17/1000: 0.889 | 0.833 | 0.722 | 0.556 | 0.389 | 142 | 19/1000: 0.950 | 0.900 | 0.750 | 0.700 | 0.650 | 143 | 15/1000: 0.938 | 0.875 | 0.688 | 0.562 | 0.375 | 144 | 19/1000: 1.000 | 0.800 | 0.650 | 0.550 | 0.500 | 145 | 18/1000: 0.895 | 0.842 | 0.737 | 0.737 | 0.526 | 146 | 20/1000: 0.714 | 0.667 | 0.619 | 0.571 | 0.571 | 147 | 14/1000: 1.000 | 0.733 | 0.733 | 0.600 | 0.533 | 148 | 18/1000: 0.842 | 0.789 | 0.579 | 0.579 | 0.474 | 149 | 16/1000: 0.882 | 0.824 | 0.647 | 0.529 | 0.353 | 150 | 20/1000: 1.000 | 0.810 | 0.667 | 0.571 | 0.524 | 151 | 15/1000: 1.000 | 0.750 | 0.750 | 0.625 | 0.562 | 152 | 20/1000: 0.952 | 0.905 | 0.714 | 0.667 | 0.619 | 153 | 19/1000: 0.850 | 0.800 | 0.600 | 0.600 | 0.500 | 154 | 21/1000: 0.727 | 0.636 | 0.591 | 0.545 | 0.545 | 155 | 21/1000: 1.000 | 0.818 | 0.682 | 0.591 | 0.545 | 156 | 16/1000: 0.941 | 0.706 | 0.706 | 0.588 | 0.529 | 157 | 18/1000: 0.895 | 0.842 | 0.737 | 0.579 | 0.368 | 158 | 19/1000: 0.900 | 0.850 | 0.750 | 0.750 | 0.500 | 159 | 17/1000: 0.889 | 0.833 | 0.667 | 0.556 | 0.389 | 160 | 21/1000: 0.955 | 0.909 | 0.727 | 0.682 | 0.591 | 161 | 22/1000: 0.739 | 0.652 | 0.609 | 0.565 | 0.565 | 162 | 19/1000: 0.900 | 0.850 | 0.750 | 0.600 | 0.400 | 163 | 22/1000: 1.000 | 0.826 | 0.652 | 0.565 | 0.522 | 164 | 20/1000: 0.905 | 0.857 | 0.762 | 0.762 | 0.524 | 165 | 17/1000: 0.944 | 0.722 | 0.722 | 0.611 | 0.556 | 166 | 20/1000: 0.857 | 0.810 | 0.619 | 0.619 | 0.476 | 167 | 22/1000: 0.913 | 0.870 | 0.696 | 0.652 | 0.565 | 168 | 23/1000: 1.000 | 0.833 | 0.667 | 0.583 | 0.542 | 169 | 18/1000: 0.895 | 0.842 | 0.684 | 0.579 | 0.421 | 170 | 21/1000: 0.864 | 0.818 | 0.727 | 0.727 | 0.500 | 171 | 20/1000: 0.905 | 0.857 | 0.714 | 0.571 | 0.381 | 172 | 23/1000: 0.750 | 0.625 | 0.583 | 0.542 | 0.542 | 173 | 23/1000: 0.917 | 0.875 | 0.708 | 0.667 | 0.583 | 174 | 22/1000: 0.870 | 0.826 | 
0.739 | 0.739 | 0.522 | 175 | 19/1000: 0.900 | 0.850 | 0.700 | 0.600 | 0.450 | 176 | 21/1000: 0.909 | 0.864 | 0.727 | 0.591 | 0.409 | 177 | 18/1000: 0.947 | 0.737 | 0.737 | 0.632 | 0.526 | 178 | 21/1000: 0.864 | 0.818 | 0.591 | 0.591 | 0.455 | 179 | 24/1000: 0.760 | 0.640 | 0.600 | 0.560 | 0.560 | 180 | 24/1000: 1.000 | 0.840 | 0.640 | 0.560 | 0.520 | 181 | 24/1000: 0.880 | 0.840 | 0.680 | 0.640 | 0.560 | 182 | 22/1000: 0.870 | 0.826 | 0.696 | 0.565 | 0.391 | 183 | 23/1000: 0.875 | 0.792 | 0.708 | 0.708 | 0.500 | 184 | 19/1000: 0.950 | 0.750 | 0.750 | 0.650 | 0.550 | 185 | 20/1000: 0.905 | 0.857 | 0.714 | 0.619 | 0.476 | 186 | 22/1000: 0.870 | 0.783 | 0.565 | 0.565 | 0.435 | 187 | 25/1000: 0.885 | 0.846 | 0.692 | 0.654 | 0.577 | 188 | 25/1000: 1.000 | 0.846 | 0.654 | 0.538 | 0.500 | 189 | 25/1000: 0.769 | 0.654 | 0.577 | 0.538 | 0.538 | 190 | 24/1000: 0.880 | 0.800 | 0.720 | 0.720 | 0.520 | 191 | 20/1000: 0.952 | 0.762 | 0.762 | 0.667 | 0.571 | 192 | 21/1000: 0.864 | 0.818 | 0.682 | 0.591 | 0.455 | 193 | 23/1000: 0.875 | 0.833 | 0.708 | 0.583 | 0.417 | 194 | 23/1000: 0.875 | 0.792 | 0.583 | 0.583 | 0.458 | 195 | 26/1000: 0.889 | 0.852 | 0.704 | 0.667 | 0.593 | 196 | 26/1000: 0.778 | 0.667 | 0.593 | 0.556 | 0.556 | 197 | 25/1000: 0.885 | 0.808 | 0.692 | 0.692 | 0.500 | 198 | 21/1000: 0.955 | 0.773 | 0.727 | 0.636 | 0.545 | 199 | 22/1000: 0.870 | 0.826 | 0.652 | 0.565 | 0.435 | 200 | 24/1000: 0.880 | 0.840 | 0.720 | 0.600 | 0.440 | 201 | 26/1000: 1.000 | 0.852 | 0.667 | 0.556 | 0.519 | 202 | 27/1000: 0.786 | 0.679 | 0.607 | 0.571 | 0.571 | 203 | 24/1000: 0.880 | 0.800 | 0.560 | 0.560 | 0.440 | 204 | 27/1000: 0.893 | 0.857 | 0.679 | 0.643 | 0.571 | 205 | 23/1000: 0.833 | 0.792 | 0.625 | 0.542 | 0.417 | 206 | 26/1000: 0.889 | 0.815 | 0.704 | 0.704 | 0.519 | 207 | 27/1000: 0.964 | 0.821 | 0.643 | 0.536 | 0.500 | 208 | 28/1000: 0.793 | 0.690 | 0.621 | 0.586 | 0.586 | 209 | 22/1000: 0.957 | 0.783 | 0.739 | 0.652 | 0.565 | 210 | 25/1000: 0.885 | 0.769 | 0.538 | 0.538 | 0.423 | 211 | 27/1000: 0.893 | 0.821 | 0.714 | 0.714 | 0.536 | 212 | 28/1000: 0.897 | 0.862 | 0.690 | 0.621 | 0.552 | 213 | 28/1000: 0.966 | 0.828 | 0.655 | 0.552 | 0.517 | 214 | 24/1000: 0.840 | 0.800 | 0.640 | 0.560 | 0.440 | 215 | 23/1000: 0.958 | 0.792 | 0.750 | 0.667 | 0.583 | 216 | 25/1000: 0.885 | 0.846 | 0.731 | 0.615 | 0.423 | 217 | 29/1000: 0.800 | 0.700 | 0.633 | 0.600 | 0.600 | 218 | 26/1000: 0.889 | 0.778 | 0.556 | 0.556 | 0.444 | 219 | 28/1000: 0.862 | 0.793 | 0.690 | 0.690 | 0.517 | 220 | 26/1000: 0.852 | 0.815 | 0.704 | 0.593 | 0.407 | 221 | 29/1000: 0.900 | 0.867 | 0.700 | 0.600 | 0.533 | 222 | 30/1000: 0.806 | 0.710 | 0.645 | 0.613 | 0.613 | 223 | 29/1000: 0.967 | 0.833 | 0.667 | 0.533 | 0.500 | 224 | 24/1000: 0.960 | 0.800 | 0.760 | 0.640 | 0.560 | 225 | 27/1000: 0.893 | 0.786 | 0.571 | 0.571 | 0.464 | 226 | 29/1000: 0.867 | 0.800 | 0.700 | 0.700 | 0.533 | 227 | 25/1000: 0.846 | 0.808 | 0.654 | 0.577 | 0.423 | 228 | 27/1000: 0.821 | 0.786 | 0.679 | 0.571 | 0.393 | 229 | 30/1000: 0.968 | 0.839 | 0.677 | 0.548 | 0.516 | 230 | 30/1000: 0.871 | 0.806 | 0.710 | 0.710 | 0.548 | 231 | 25/1000: 0.962 | 0.808 | 0.769 | 0.654 | 0.577 | 232 | 31/1000: 0.812 | 0.719 | 0.656 | 0.625 | 0.625 | 233 | 30/1000: 0.903 | 0.871 | 0.710 | 0.581 | 0.516 | 234 | 26/1000: 0.852 | 0.815 | 0.667 | 0.593 | 0.444 | 235 | 28/1000: 0.897 | 0.759 | 0.552 | 0.552 | 0.448 | 236 | 28/1000: 0.793 | 0.759 | 0.655 | 0.552 | 0.379 | 237 | 31/1000: 0.875 | 0.812 | 0.719 | 0.719 | 0.562 | 238 | 31/1000: 0.969 | 0.844 | 0.688 | 0.562 | 0.531 | 239 | 
27/1000: 0.857 | 0.821 | 0.679 | 0.607 | 0.464 | 240 | 26/1000: 0.926 | 0.778 | 0.741 | 0.630 | 0.556 | 241 | 32/1000: 0.788 | 0.697 | 0.636 | 0.606 | 0.606 | 242 | 31/1000: 0.906 | 0.875 | 0.719 | 0.594 | 0.531 | 243 | 29/1000: 0.800 | 0.767 | 0.667 | 0.567 | 0.400 | 244 | 29/1000: 0.900 | 0.767 | 0.567 | 0.567 | 0.467 | 245 | 27/1000: 0.929 | 0.786 | 0.750 | 0.643 | 0.571 | 246 | 32/1000: 0.879 | 0.818 | 0.727 | 0.727 | 0.576 | 247 | 28/1000: 0.862 | 0.828 | 0.690 | 0.621 | 0.483 | 248 | 32/1000: 0.970 | 0.848 | 0.667 | 0.545 | 0.515 | 249 | 30/1000: 0.806 | 0.774 | 0.677 | 0.581 | 0.419 | 250 | 33/1000: 0.794 | 0.676 | 0.618 | 0.588 | 0.588 | 251 | 32/1000: 0.909 | 0.879 | 0.727 | 0.606 | 0.545 | 252 | 28/1000: 0.931 | 0.793 | 0.759 | 0.655 | 0.586 | 253 | 33/1000: 0.882 | 0.824 | 0.735 | 0.735 | 0.588 | 254 | 30/1000: 0.903 | 0.774 | 0.581 | 0.548 | 0.452 | 255 | 31/1000: 0.812 | 0.781 | 0.688 | 0.594 | 0.438 | 256 | 29/1000: 0.933 | 0.800 | 0.767 | 0.667 | 0.600 | 257 | 29/1000: 0.867 | 0.833 | 0.700 | 0.600 | 0.467 | 258 | 33/1000: 0.971 | 0.853 | 0.647 | 0.529 | 0.500 | 259 | 34/1000: 0.886 | 0.829 | 0.743 | 0.743 | 0.600 | 260 | 34/1000: 0.800 | 0.686 | 0.629 | 0.600 | 0.571 | 261 | 30/1000: 0.871 | 0.839 | 0.710 | 0.613 | 0.484 | 262 | 34/1000: 0.971 | 0.857 | 0.657 | 0.543 | 0.514 | 263 | 31/1000: 0.906 | 0.781 | 0.562 | 0.531 | 0.438 | 264 | 35/1000: 0.889 | 0.833 | 0.750 | 0.750 | 0.611 | 265 | 33/1000: 0.912 | 0.882 | 0.735 | 0.588 | 0.529 | 266 | 30/1000: 0.935 | 0.806 | 0.774 | 0.677 | 0.581 | 267 | 32/1000: 0.818 | 0.788 | 0.697 | 0.576 | 0.424 | 268 | 35/1000: 0.972 | 0.861 | 0.667 | 0.556 | 0.528 | 269 | 35/1000: 0.806 | 0.694 | 0.639 | 0.611 | 0.583 | 270 | 32/1000: 0.909 | 0.788 | 0.576 | 0.545 | 0.455 | 271 | 31/1000: 0.875 | 0.812 | 0.688 | 0.594 | 0.469 | 272 | 36/1000: 0.892 | 0.838 | 0.757 | 0.757 | 0.622 | 273 | 31/1000: 0.938 | 0.812 | 0.781 | 0.688 | 0.594 | 274 | 34/1000: 0.914 | 0.886 | 0.743 | 0.600 | 0.543 | 275 | 33/1000: 0.912 | 0.794 | 0.588 | 0.559 | 0.471 | 276 | 36/1000: 0.973 | 0.865 | 0.676 | 0.568 | 0.541 | 277 | 32/1000: 0.848 | 0.788 | 0.667 | 0.576 | 0.455 | 278 | 36/1000: 0.811 | 0.703 | 0.649 | 0.622 | 0.595 | 279 | 33/1000: 0.824 | 0.794 | 0.706 | 0.588 | 0.441 | 280 | 32/1000: 0.939 | 0.818 | 0.788 | 0.697 | 0.606 | 281 | 35/1000: 0.917 | 0.889 | 0.750 | 0.611 | 0.556 | 282 | 37/1000: 0.895 | 0.842 | 0.737 | 0.737 | 0.605 | 283 | 37/1000: 0.816 | 0.711 | 0.658 | 0.632 | 0.605 | 284 | 34/1000: 0.914 | 0.800 | 0.600 | 0.571 | 0.486 | 285 | 37/1000: 0.974 | 0.868 | 0.684 | 0.579 | 0.553 | 286 | 33/1000: 0.853 | 0.794 | 0.647 | 0.559 | 0.441 | 287 | 33/1000: 0.941 | 0.824 | 0.794 | 0.706 | 0.618 | 288 | 38/1000: 0.897 | 0.846 | 0.744 | 0.744 | 0.615 | 289 | 35/1000: 0.917 | 0.806 | 0.611 | 0.583 | 0.500 | 290 | 36/1000: 0.919 | 0.892 | 0.757 | 0.622 | 0.568 | 291 | 38/1000: 0.821 | 0.718 | 0.667 | 0.641 | 0.615 | 292 | 34/1000: 0.857 | 0.800 | 0.657 | 0.571 | 0.457 | 293 | 34/1000: 0.943 | 0.829 | 0.800 | 0.714 | 0.629 | 294 | 34/1000: 0.829 | 0.800 | 0.714 | 0.600 | 0.429 | 295 | 39/1000: 0.900 | 0.850 | 0.750 | 0.750 | 0.625 | 296 | 38/1000: 0.974 | 0.872 | 0.692 | 0.564 | 0.538 | 297 | 35/1000: 0.944 | 0.833 | 0.806 | 0.722 | 0.639 | 298 | 35/1000: 0.833 | 0.806 | 0.722 | 0.611 | 0.444 | 299 | 36/1000: 0.919 | 0.811 | 0.595 | 0.568 | 0.486 | 300 | 40/1000: 0.902 | 0.854 | 0.756 | 0.756 | 0.634 | 301 | 39/1000: 0.825 | 0.700 | 0.650 | 0.625 | 0.600 | 302 | 39/1000: 0.975 | 0.875 | 0.700 | 0.575 | 0.550 | 303 | 37/1000: 0.895 | 0.789 | 0.579 | 
0.553 | 0.474 | 304 | 37/1000: 0.921 | 0.895 | 0.763 | 0.632 | 0.553 | 305 | 36/1000: 0.838 | 0.784 | 0.703 | 0.595 | 0.432 | 306 | 36/1000: 0.946 | 0.838 | 0.811 | 0.730 | 0.649 | 307 | 40/1000: 0.829 | 0.707 | 0.659 | 0.634 | 0.610 | 308 | 35/1000: 0.861 | 0.806 | 0.667 | 0.583 | 0.444 | 309 | 41/1000: 0.905 | 0.833 | 0.738 | 0.738 | 0.619 | 310 | 37/1000: 0.947 | 0.842 | 0.816 | 0.737 | 0.658 | 311 | 38/1000: 0.923 | 0.897 | 0.769 | 0.641 | 0.564 | 312 | 40/1000: 0.976 | 0.878 | 0.707 | 0.561 | 0.537 | 313 | 38/1000: 0.897 | 0.769 | 0.564 | 0.538 | 0.462 | 314 | 37/1000: 0.842 | 0.789 | 0.684 | 0.579 | 0.421 | 315 | 41/1000: 0.810 | 0.690 | 0.643 | 0.619 | 0.595 | 316 | 36/1000: 0.838 | 0.784 | 0.649 | 0.568 | 0.432 | 317 | 38/1000: 0.949 | 0.846 | 0.821 | 0.744 | 0.667 | 318 | 39/1000: 0.900 | 0.775 | 0.575 | 0.550 | 0.475 | 319 | 41/1000: 0.976 | 0.881 | 0.714 | 0.571 | 0.548 | 320 | 39/1000: 0.925 | 0.900 | 0.775 | 0.650 | 0.575 | 321 | 42/1000: 0.907 | 0.837 | 0.744 | 0.744 | 0.605 | 322 | 37/1000: 0.842 | 0.789 | 0.658 | 0.579 | 0.447 | 323 | 42/1000: 0.814 | 0.674 | 0.628 | 0.605 | 0.581 | 324 | 39/1000: 0.950 | 0.850 | 0.825 | 0.750 | 0.675 | 325 | 38/1000: 0.846 | 0.795 | 0.667 | 0.564 | 0.410 | 326 | 42/1000: 0.977 | 0.884 | 0.721 | 0.581 | 0.558 | 327 | 43/1000: 0.909 | 0.818 | 0.727 | 0.727 | 0.591 | 328 | 40/1000: 0.951 | 0.854 | 0.829 | 0.756 | 0.683 | 329 | 38/1000: 0.846 | 0.769 | 0.641 | 0.564 | 0.436 | 330 | 40/1000: 0.927 | 0.902 | 0.780 | 0.659 | 0.585 | 331 | 43/1000: 0.818 | 0.682 | 0.614 | 0.591 | 0.568 | 332 | 40/1000: 0.902 | 0.756 | 0.561 | 0.537 | 0.463 | 333 | 44/1000: 0.911 | 0.822 | 0.733 | 0.733 | 0.600 | 334 | 43/1000: 0.977 | 0.864 | 0.705 | 0.568 | 0.545 | 335 | 39/1000: 0.825 | 0.750 | 0.625 | 0.550 | 0.425 | 336 | 41/1000: 0.952 | 0.833 | 0.810 | 0.738 | 0.667 | 337 | 44/1000: 0.800 | 0.667 | 0.600 | 0.578 | 0.556 | 338 | 45/1000: 0.891 | 0.804 | 0.717 | 0.717 | 0.587 | 339 | 41/1000: 0.929 | 0.905 | 0.786 | 0.667 | 0.595 | 340 | 39/1000: 0.850 | 0.800 | 0.675 | 0.575 | 0.400 | 341 | 41/1000: 0.905 | 0.762 | 0.548 | 0.524 | 0.452 | 342 | 44/1000: 0.978 | 0.867 | 0.711 | 0.578 | 0.556 | 343 | 40/1000: 0.805 | 0.732 | 0.610 | 0.537 | 0.415 | 344 | 42/1000: 0.930 | 0.814 | 0.791 | 0.721 | 0.651 | 345 | 45/1000: 0.804 | 0.652 | 0.587 | 0.565 | 0.543 | 346 | 46/1000: 0.872 | 0.787 | 0.702 | 0.702 | 0.574 | 347 | 40/1000: 0.854 | 0.805 | 0.683 | 0.585 | 0.415 | 348 | 42/1000: 0.884 | 0.744 | 0.535 | 0.512 | 0.442 | 349 | 47/1000: 0.875 | 0.792 | 0.708 | 0.708 | 0.583 | 350 | 41/1000: 0.810 | 0.738 | 0.619 | 0.548 | 0.429 | 351 | 45/1000: 0.978 | 0.870 | 0.717 | 0.587 | 0.565 | 352 | 43/1000: 0.932 | 0.818 | 0.795 | 0.727 | 0.659 | 353 | 46/1000: 0.809 | 0.660 | 0.596 | 0.574 | 0.553 | 354 | 41/1000: 0.857 | 0.810 | 0.690 | 0.595 | 0.429 | 355 | 42/1000: 0.930 | 0.907 | 0.791 | 0.674 | 0.605 | 356 | 42/1000: 0.814 | 0.744 | 0.628 | 0.558 | 0.442 | 357 | 44/1000: 0.933 | 0.822 | 0.800 | 0.733 | 0.667 | 358 | 46/1000: 0.979 | 0.872 | 0.723 | 0.596 | 0.574 | 359 | 47/1000: 0.812 | 0.646 | 0.583 | 0.562 | 0.542 | 360 | 42/1000: 0.860 | 0.791 | 0.674 | 0.581 | 0.419 | 361 | 43/1000: 0.886 | 0.750 | 0.545 | 0.523 | 0.432 | 362 | 48/1000: 0.878 | 0.796 | 0.714 | 0.714 | 0.571 | 363 | 43/1000: 0.818 | 0.750 | 0.636 | 0.568 | 0.455 | 364 | 43/1000: 0.932 | 0.909 | 0.795 | 0.682 | 0.614 | 365 | 45/1000: 0.935 | 0.804 | 0.783 | 0.717 | 0.652 | 366 | 47/1000: 0.979 | 0.875 | 0.729 | 0.604 | 0.583 | 367 | 44/1000: 0.889 | 0.756 | 0.556 | 0.533 | 0.444 | 368 | 48/1000: 
0.816 | 0.653 | 0.592 | 0.571 | 0.551 | 369 | 49/1000: 0.860 | 0.780 | 0.700 | 0.700 | 0.560 | 370 | 43/1000: 0.864 | 0.795 | 0.682 | 0.591 | 0.432 | 371 | 44/1000: 0.911 | 0.889 | 0.778 | 0.667 | 0.600 | 372 | 44/1000: 0.822 | 0.756 | 0.644 | 0.578 | 0.467 | 373 | 46/1000: 0.936 | 0.809 | 0.787 | 0.723 | 0.660 | 374 | 48/1000: 0.959 | 0.857 | 0.714 | 0.592 | 0.571 | 375 | 49/1000: 0.820 | 0.660 | 0.600 | 0.580 | 0.560 | 376 | 44/1000: 0.844 | 0.778 | 0.667 | 0.578 | 0.422 | 377 | 50/1000: 0.863 | 0.784 | 0.686 | 0.686 | 0.549 | 378 | 45/1000: 0.891 | 0.761 | 0.565 | 0.543 | 0.457 | 379 | 47/1000: 0.938 | 0.812 | 0.792 | 0.729 | 0.667 | 380 | 49/1000: 0.960 | 0.860 | 0.720 | 0.600 | 0.580 | 381 | 50/1000: 0.824 | 0.667 | 0.608 | 0.588 | 0.569 | 382 | 45/1000: 0.826 | 0.761 | 0.652 | 0.587 | 0.478 | 383 | 45/1000: 0.913 | 0.891 | 0.783 | 0.652 | 0.587 | 384 | 48/1000: 0.939 | 0.816 | 0.796 | 0.735 | 0.673 | 385 | 45/1000: 0.848 | 0.783 | 0.652 | 0.565 | 0.413 | 386 | 50/1000: 0.961 | 0.843 | 0.706 | 0.588 | 0.569 | 387 | 51/1000: 0.827 | 0.673 | 0.615 | 0.596 | 0.577 | 388 | 46/1000: 0.809 | 0.745 | 0.638 | 0.574 | 0.468 | 389 | 46/1000: 0.894 | 0.766 | 0.574 | 0.553 | 0.447 | 390 | 51/1000: 0.865 | 0.788 | 0.692 | 0.692 | 0.538 | 391 | 46/1000: 0.851 | 0.787 | 0.660 | 0.574 | 0.426 | 392 | 46/1000: 0.915 | 0.894 | 0.787 | 0.638 | 0.574 | 393 | 49/1000: 0.940 | 0.820 | 0.800 | 0.740 | 0.680 | 394 | 51/1000: 0.962 | 0.846 | 0.692 | 0.577 | 0.558 | 395 | 52/1000: 0.868 | 0.792 | 0.698 | 0.698 | 0.547 | 396 | 47/1000: 0.896 | 0.750 | 0.562 | 0.542 | 0.438 | 397 | 52/1000: 0.830 | 0.679 | 0.623 | 0.604 | 0.566 | 398 | 47/1000: 0.812 | 0.750 | 0.646 | 0.583 | 0.458 | 399 | 52/1000: 0.962 | 0.849 | 0.698 | 0.585 | 0.566 | 400 | 47/1000: 0.917 | 0.896 | 0.771 | 0.625 | 0.562 | 401 | 53/1000: 0.870 | 0.796 | 0.704 | 0.704 | 0.556 | 402 | 47/1000: 0.854 | 0.792 | 0.667 | 0.583 | 0.417 | 403 | 50/1000: 0.941 | 0.824 | 0.804 | 0.745 | 0.686 | 404 | 48/1000: 0.898 | 0.735 | 0.551 | 0.531 | 0.429 | 405 | 53/1000: 0.963 | 0.852 | 0.704 | 0.593 | 0.574 | 406 | 53/1000: 0.815 | 0.667 | 0.611 | 0.593 | 0.556 | 407 | 54/1000: 0.873 | 0.800 | 0.709 | 0.709 | 0.564 | 408 | 48/1000: 0.898 | 0.878 | 0.755 | 0.612 | 0.551 | 409 | 48/1000: 0.857 | 0.796 | 0.673 | 0.592 | 0.429 | 410 | 48/1000: 0.816 | 0.735 | 0.633 | 0.571 | 0.449 | 411 | 51/1000: 0.942 | 0.827 | 0.808 | 0.750 | 0.692 | 412 | 49/1000: 0.900 | 0.740 | 0.560 | 0.540 | 0.440 | 413 | 54/1000: 0.800 | 0.655 | 0.600 | 0.582 | 0.545 | 414 | 54/1000: 0.964 | 0.855 | 0.709 | 0.600 | 0.582 | 415 | 49/1000: 0.860 | 0.800 | 0.680 | 0.600 | 0.440 | 416 | 49/1000: 0.820 | 0.740 | 0.640 | 0.580 | 0.460 | 417 | 52/1000: 0.925 | 0.811 | 0.792 | 0.736 | 0.679 | 418 | 49/1000: 0.900 | 0.880 | 0.760 | 0.620 | 0.560 | 419 | 55/1000: 0.804 | 0.661 | 0.607 | 0.589 | 0.554 | 420 | 55/1000: 0.875 | 0.804 | 0.714 | 0.696 | 0.554 | 421 | 50/1000: 0.902 | 0.745 | 0.569 | 0.549 | 0.451 | 422 | 55/1000: 0.964 | 0.857 | 0.714 | 0.607 | 0.589 | 423 | 50/1000: 0.863 | 0.784 | 0.667 | 0.588 | 0.431 | 424 | 50/1000: 0.824 | 0.725 | 0.627 | 0.569 | 0.451 | 425 | 56/1000: 0.807 | 0.667 | 0.614 | 0.596 | 0.561 | 426 | 53/1000: 0.926 | 0.796 | 0.778 | 0.722 | 0.667 | 427 | 56/1000: 0.860 | 0.789 | 0.702 | 0.684 | 0.544 | 428 | 50/1000: 0.902 | 0.882 | 0.765 | 0.627 | 0.569 | 429 | 51/1000: 0.865 | 0.788 | 0.673 | 0.596 | 0.442 | 430 | 56/1000: 0.965 | 0.860 | 0.719 | 0.596 | 0.579 | 431 | 51/1000: 0.904 | 0.750 | 0.577 | 0.558 | 0.442 | 432 | 54/1000: 0.909 | 0.782 | 0.764 | 0.709 | 
0.655 | 433 | 51/1000: 0.827 | 0.731 | 0.635 | 0.577 | 0.462 | 434 | 57/1000: 0.810 | 0.655 | 0.603 | 0.586 | 0.552 | 435 | 51/1000: 0.904 | 0.885 | 0.769 | 0.635 | 0.577 | 436 | 57/1000: 0.966 | 0.862 | 0.724 | 0.603 | 0.586 | 437 | 52/1000: 0.830 | 0.736 | 0.642 | 0.585 | 0.472 | 438 | 57/1000: 0.862 | 0.793 | 0.707 | 0.690 | 0.552 | 439 | 52/1000: 0.868 | 0.792 | 0.660 | 0.585 | 0.434 | 440 | 58/1000: 0.814 | 0.661 | 0.610 | 0.593 | 0.559 | 441 | 55/1000: 0.911 | 0.786 | 0.768 | 0.696 | 0.643 | 442 | 53/1000: 0.833 | 0.741 | 0.648 | 0.593 | 0.481 | 443 | 52/1000: 0.887 | 0.868 | 0.755 | 0.623 | 0.566 | 444 | 58/1000: 0.949 | 0.847 | 0.712 | 0.593 | 0.576 | 445 | 53/1000: 0.870 | 0.796 | 0.667 | 0.593 | 0.444 | 446 | 58/1000: 0.864 | 0.797 | 0.712 | 0.695 | 0.559 | 447 | 56/1000: 0.912 | 0.789 | 0.772 | 0.702 | 0.649 | 448 | 59/1000: 0.817 | 0.667 | 0.617 | 0.600 | 0.567 | 449 | 52/1000: 0.906 | 0.755 | 0.585 | 0.547 | 0.434 | 450 | 54/1000: 0.836 | 0.727 | 0.636 | 0.582 | 0.473 | 451 | 54/1000: 0.873 | 0.800 | 0.673 | 0.600 | 0.455 | 452 | 57/1000: 0.914 | 0.793 | 0.776 | 0.707 | 0.655 | 453 | 59/1000: 0.867 | 0.783 | 0.700 | 0.683 | 0.550 | 454 | 53/1000: 0.889 | 0.741 | 0.574 | 0.537 | 0.426 | 455 | 53/1000: 0.889 | 0.870 | 0.741 | 0.611 | 0.556 | 456 | 60/1000: 0.820 | 0.672 | 0.623 | 0.607 | 0.574 | 457 | 59/1000: 0.950 | 0.850 | 0.717 | 0.600 | 0.567 | 458 | 61/1000: 0.823 | 0.677 | 0.629 | 0.613 | 0.581 | 459 | 55/1000: 0.875 | 0.804 | 0.679 | 0.607 | 0.464 | 460 | 58/1000: 0.898 | 0.780 | 0.763 | 0.695 | 0.644 | 461 | 54/1000: 0.891 | 0.745 | 0.582 | 0.545 | 0.436 | 462 | 55/1000: 0.839 | 0.732 | 0.625 | 0.571 | 0.464 | 463 | 60/1000: 0.869 | 0.787 | 0.705 | 0.672 | 0.541 | 464 | 54/1000: 0.891 | 0.873 | 0.745 | 0.600 | 0.545 | 465 | 56/1000: 0.877 | 0.807 | 0.684 | 0.614 | 0.474 | 466 | 60/1000: 0.951 | 0.852 | 0.705 | 0.590 | 0.557 | 467 | 55/1000: 0.893 | 0.750 | 0.589 | 0.554 | 0.446 | 468 | 62/1000: 0.825 | 0.683 | 0.635 | 0.619 | 0.587 | 469 | 59/1000: 0.900 | 0.783 | 0.750 | 0.683 | 0.633 | 470 | 56/1000: 0.842 | 0.719 | 0.614 | 0.561 | 0.456 | 471 | 55/1000: 0.893 | 0.875 | 0.750 | 0.607 | 0.554 | 472 | 56/1000: 0.895 | 0.754 | 0.596 | 0.561 | 0.456 | 473 | 61/1000: 0.952 | 0.855 | 0.710 | 0.597 | 0.565 | 474 | 57/1000: 0.879 | 0.793 | 0.672 | 0.603 | 0.466 | 475 | 60/1000: 0.885 | 0.770 | 0.738 | 0.672 | 0.623 | 476 | 61/1000: 0.871 | 0.790 | 0.694 | 0.661 | 0.532 | 477 | 63/1000: 0.828 | 0.672 | 0.625 | 0.609 | 0.578 | 478 | 56/1000: 0.895 | 0.877 | 0.754 | 0.614 | 0.561 | 479 | 62/1000: 0.952 | 0.857 | 0.714 | 0.603 | 0.571 | 480 | 57/1000: 0.845 | 0.724 | 0.621 | 0.569 | 0.448 | 481 | 58/1000: 0.881 | 0.780 | 0.661 | 0.593 | 0.458 | 482 | 62/1000: 0.873 | 0.778 | 0.683 | 0.651 | 0.524 | 483 | 57/1000: 0.897 | 0.759 | 0.603 | 0.552 | 0.448 | 484 | 61/1000: 0.887 | 0.774 | 0.742 | 0.677 | 0.629 | 485 | 63/1000: 0.953 | 0.859 | 0.719 | 0.609 | 0.578 | 486 | 64/1000: 0.831 | 0.677 | 0.631 | 0.615 | 0.585 | 487 | 57/1000: 0.897 | 0.879 | 0.759 | 0.621 | 0.569 | 488 | 58/1000: 0.831 | 0.712 | 0.610 | 0.559 | 0.441 | 489 | 59/1000: 0.883 | 0.783 | 0.667 | 0.600 | 0.467 | 490 | 65/1000: 0.833 | 0.682 | 0.636 | 0.621 | 0.591 | 491 | 64/1000: 0.954 | 0.862 | 0.723 | 0.615 | 0.585 | 492 | 58/1000: 0.898 | 0.881 | 0.763 | 0.627 | 0.576 | 493 | 62/1000: 0.889 | 0.778 | 0.746 | 0.667 | 0.619 | 494 | 58/1000: 0.898 | 0.763 | 0.610 | 0.559 | 0.441 | 495 | 63/1000: 0.875 | 0.781 | 0.688 | 0.656 | 0.516 | 496 | 60/1000: 0.869 | 0.770 | 0.656 | 0.590 | 0.459 | 497 | 59/1000: 0.833 | 
0.717 | 0.617 | 0.567 | 0.433 | 498 | 66/1000: 0.836 | 0.687 | 0.627 | 0.612 | 0.582 | 499 | 59/1000: 0.900 | 0.883 | 0.767 | 0.633 | 0.583 | 500 | 61/1000: 0.855 | 0.758 | 0.645 | 0.581 | 0.452 | 501 | 63/1000: 0.891 | 0.766 | 0.734 | 0.656 | 0.609 | 502 | 65/1000: 0.955 | 0.864 | 0.727 | 0.606 | 0.576 | 503 | 64/1000: 0.877 | 0.785 | 0.692 | 0.662 | 0.523 | 504 | 60/1000: 0.902 | 0.885 | 0.770 | 0.639 | 0.590 | 505 | 59/1000: 0.900 | 0.767 | 0.617 | 0.550 | 0.433 | 506 | 67/1000: 0.824 | 0.676 | 0.618 | 0.603 | 0.574 | 507 | 62/1000: 0.857 | 0.762 | 0.651 | 0.587 | 0.460 | 508 | 60/1000: 0.836 | 0.721 | 0.623 | 0.574 | 0.443 | 509 | 64/1000: 0.892 | 0.769 | 0.738 | 0.662 | 0.615 | 510 | 65/1000: 0.864 | 0.773 | 0.682 | 0.652 | 0.515 | 511 | 66/1000: 0.955 | 0.866 | 0.731 | 0.612 | 0.582 | 512 | 60/1000: 0.902 | 0.770 | 0.623 | 0.557 | 0.443 | 513 | 61/1000: 0.903 | 0.887 | 0.774 | 0.645 | 0.597 | 514 | 63/1000: 0.859 | 0.766 | 0.656 | 0.594 | 0.469 | 515 | 66/1000: 0.866 | 0.776 | 0.687 | 0.657 | 0.522 | 516 | 65/1000: 0.894 | 0.758 | 0.727 | 0.652 | 0.606 | 517 | 61/1000: 0.903 | 0.774 | 0.629 | 0.565 | 0.452 | 518 | 68/1000: 0.826 | 0.681 | 0.623 | 0.609 | 0.580 | 519 | 62/1000: 0.905 | 0.889 | 0.778 | 0.651 | 0.603 | 520 | 61/1000: 0.839 | 0.710 | 0.613 | 0.565 | 0.435 | 521 | 64/1000: 0.862 | 0.754 | 0.646 | 0.585 | 0.462 | 522 | 67/1000: 0.956 | 0.868 | 0.735 | 0.618 | 0.588 | 523 | 67/1000: 0.868 | 0.779 | 0.691 | 0.662 | 0.529 | 524 | 62/1000: 0.905 | 0.778 | 0.635 | 0.571 | 0.460 | 525 | 69/1000: 0.829 | 0.686 | 0.629 | 0.614 | 0.586 | 526 | 62/1000: 0.841 | 0.714 | 0.619 | 0.571 | 0.444 | 527 | 66/1000: 0.896 | 0.761 | 0.731 | 0.657 | 0.612 | 528 | 65/1000: 0.864 | 0.758 | 0.652 | 0.591 | 0.470 | 529 | 68/1000: 0.870 | 0.783 | 0.696 | 0.667 | 0.536 | 530 | 68/1000: 0.957 | 0.870 | 0.739 | 0.623 | 0.594 | 531 | 63/1000: 0.906 | 0.891 | 0.781 | 0.656 | 0.594 | 532 | 70/1000: 0.831 | 0.690 | 0.620 | 0.606 | 0.577 | 533 | 67/1000: 0.882 | 0.750 | 0.721 | 0.647 | 0.603 | 534 | 63/1000: 0.844 | 0.719 | 0.625 | 0.578 | 0.453 | 535 | 69/1000: 0.871 | 0.786 | 0.700 | 0.671 | 0.543 | 536 | 69/1000: 0.957 | 0.871 | 0.743 | 0.629 | 0.600 | 537 | 63/1000: 0.906 | 0.781 | 0.641 | 0.578 | 0.453 | 538 | 64/1000: 0.908 | 0.892 | 0.785 | 0.662 | 0.600 | 539 | 68/1000: 0.884 | 0.754 | 0.725 | 0.652 | 0.609 | 540 | 70/1000: 0.873 | 0.789 | 0.704 | 0.676 | 0.549 | 541 | 66/1000: 0.866 | 0.761 | 0.657 | 0.597 | 0.463 | 542 | 70/1000: 0.958 | 0.859 | 0.732 | 0.620 | 0.592 | 543 | 65/1000: 0.909 | 0.894 | 0.788 | 0.667 | 0.606 | 544 | 64/1000: 0.908 | 0.785 | 0.646 | 0.585 | 0.462 | 545 | 71/1000: 0.875 | 0.792 | 0.708 | 0.681 | 0.556 | 546 | 71/1000: 0.833 | 0.694 | 0.625 | 0.611 | 0.569 | 547 | 69/1000: 0.886 | 0.757 | 0.729 | 0.657 | 0.614 | 548 | 64/1000: 0.846 | 0.723 | 0.631 | 0.569 | 0.446 | 549 | 67/1000: 0.868 | 0.750 | 0.647 | 0.588 | 0.456 | 550 | 72/1000: 0.836 | 0.699 | 0.630 | 0.616 | 0.575 | 551 | 66/1000: 0.910 | 0.896 | 0.791 | 0.672 | 0.612 | 552 | 72/1000: 0.863 | 0.781 | 0.699 | 0.671 | 0.548 | 553 | 65/1000: 0.894 | 0.773 | 0.636 | 0.576 | 0.455 | 554 | 70/1000: 0.887 | 0.761 | 0.732 | 0.662 | 0.620 | 555 | 65/1000: 0.848 | 0.727 | 0.636 | 0.576 | 0.455 | 556 | 68/1000: 0.870 | 0.739 | 0.638 | 0.580 | 0.449 | 557 | 71/1000: 0.958 | 0.861 | 0.736 | 0.625 | 0.583 | 558 | 73/1000: 0.838 | 0.703 | 0.635 | 0.622 | 0.581 | 559 | 71/1000: 0.889 | 0.764 | 0.736 | 0.667 | 0.625 | 560 | 66/1000: 0.851 | 0.731 | 0.642 | 0.582 | 0.463 | 561 | 66/1000: 0.896 | 0.761 | 0.627 | 0.567 | 0.448 | 
562 | 73/1000: 0.865 | 0.784 | 0.703 | 0.676 | 0.554 | 563 | 67/1000: 0.912 | 0.897 | 0.794 | 0.662 | 0.603 | 564 | 69/1000: 0.871 | 0.743 | 0.643 | 0.586 | 0.457 | 565 | 72/1000: 0.959 | 0.863 | 0.740 | 0.630 | 0.589 | 566 | 67/1000: 0.897 | 0.765 | 0.632 | 0.574 | 0.456 | 567 | 74/1000: 0.840 | 0.693 | 0.627 | 0.613 | 0.573 | 568 | 72/1000: 0.890 | 0.753 | 0.726 | 0.658 | 0.616 | 569 | 74/1000: 0.867 | 0.787 | 0.707 | 0.680 | 0.560 | 570 | 67/1000: 0.853 | 0.735 | 0.632 | 0.574 | 0.456 | 571 | 68/1000: 0.913 | 0.899 | 0.797 | 0.667 | 0.609 | 572 | 73/1000: 0.959 | 0.851 | 0.730 | 0.622 | 0.581 | 573 | 75/1000: 0.855 | 0.776 | 0.697 | 0.671 | 0.553 | 574 | 68/1000: 0.899 | 0.768 | 0.638 | 0.580 | 0.464 | 575 | 75/1000: 0.842 | 0.697 | 0.632 | 0.605 | 0.566 | 576 | 70/1000: 0.873 | 0.746 | 0.648 | 0.592 | 0.451 | 577 | 73/1000: 0.892 | 0.757 | 0.716 | 0.649 | 0.608 | 578 | 68/1000: 0.855 | 0.725 | 0.623 | 0.565 | 0.449 | 579 | 74/1000: 0.960 | 0.853 | 0.733 | 0.627 | 0.587 | 580 | 69/1000: 0.914 | 0.900 | 0.800 | 0.671 | 0.600 | 581 | 74/1000: 0.893 | 0.760 | 0.720 | 0.653 | 0.613 | 582 | 71/1000: 0.875 | 0.736 | 0.639 | 0.583 | 0.444 | 583 | 75/1000: 0.961 | 0.855 | 0.737 | 0.632 | 0.592 | 584 | 76/1000: 0.857 | 0.779 | 0.701 | 0.675 | 0.545 | 585 | 76/1000: 0.844 | 0.701 | 0.636 | 0.597 | 0.558 | 586 | 69/1000: 0.857 | 0.729 | 0.629 | 0.557 | 0.443 | 587 | 70/1000: 0.915 | 0.901 | 0.803 | 0.676 | 0.606 | 588 | 75/1000: 0.895 | 0.763 | 0.724 | 0.658 | 0.618 | 589 | 69/1000: 0.900 | 0.771 | 0.643 | 0.586 | 0.457 | 590 | 72/1000: 0.877 | 0.740 | 0.644 | 0.589 | 0.438 | 591 | 76/1000: 0.896 | 0.766 | 0.727 | 0.662 | 0.623 | 592 | 77/1000: 0.859 | 0.782 | 0.692 | 0.667 | 0.538 | 593 | 76/1000: 0.961 | 0.857 | 0.740 | 0.636 | 0.584 | 594 | 70/1000: 0.859 | 0.732 | 0.634 | 0.549 | 0.437 | 595 | 71/1000: 0.917 | 0.903 | 0.792 | 0.667 | 0.597 | 596 | 77/1000: 0.846 | 0.705 | 0.641 | 0.590 | 0.551 | 597 | 78/1000: 0.861 | 0.785 | 0.696 | 0.671 | 0.544 | 598 | 73/1000: 0.878 | 0.743 | 0.649 | 0.595 | 0.446 | 599 | 77/1000: 0.962 | 0.846 | 0.731 | 0.628 | 0.577 | 600 | 79/1000: 0.863 | 0.787 | 0.700 | 0.675 | 0.550 | 601 | 70/1000: 0.901 | 0.775 | 0.648 | 0.592 | 0.451 | 602 | 77/1000: 0.897 | 0.769 | 0.731 | 0.667 | 0.615 | 603 | 72/1000: 0.918 | 0.904 | 0.795 | 0.671 | 0.603 | 604 | 71/1000: 0.861 | 0.736 | 0.625 | 0.542 | 0.431 | 605 | 78/1000: 0.848 | 0.709 | 0.646 | 0.595 | 0.557 | 606 | 74/1000: 0.880 | 0.747 | 0.653 | 0.600 | 0.453 | 607 | 80/1000: 0.864 | 0.790 | 0.704 | 0.679 | 0.556 | 608 | 78/1000: 0.962 | 0.848 | 0.734 | 0.633 | 0.582 | 609 | 78/1000: 0.899 | 0.772 | 0.734 | 0.671 | 0.620 | 610 | 71/1000: 0.903 | 0.764 | 0.639 | 0.583 | 0.444 | 611 | 72/1000: 0.863 | 0.740 | 0.630 | 0.548 | 0.438 | 612 | 73/1000: 0.919 | 0.905 | 0.784 | 0.662 | 0.595 | 613 | 81/1000: 0.866 | 0.793 | 0.707 | 0.683 | 0.561 | 614 | 75/1000: 0.882 | 0.750 | 0.658 | 0.605 | 0.461 | 615 | 79/1000: 0.963 | 0.850 | 0.738 | 0.637 | 0.588 | 616 | 72/1000: 0.904 | 0.753 | 0.630 | 0.575 | 0.438 | 617 | 79/1000: 0.850 | 0.713 | 0.650 | 0.600 | 0.550 | 618 | 82/1000: 0.855 | 0.783 | 0.699 | 0.675 | 0.554 | 619 | 79/1000: 0.900 | 0.775 | 0.738 | 0.675 | 0.613 | 620 | 73/1000: 0.865 | 0.743 | 0.635 | 0.554 | 0.446 | 621 | 80/1000: 0.963 | 0.852 | 0.741 | 0.642 | 0.593 | 622 | 74/1000: 0.920 | 0.907 | 0.787 | 0.667 | 0.600 | 623 | 83/1000: 0.857 | 0.774 | 0.690 | 0.667 | 0.548 | 624 | 80/1000: 0.901 | 0.778 | 0.741 | 0.679 | 0.617 | 625 | 74/1000: 0.853 | 0.733 | 0.627 | 0.547 | 0.440 | 626 | 76/1000: 0.883 | 0.753 | 
0.662 | 0.597 | 0.455 | 627 | 81/1000: 0.951 | 0.841 | 0.732 | 0.634 | 0.585 | 628 | 73/1000: 0.905 | 0.757 | 0.635 | 0.581 | 0.446 | 629 | 84/1000: 0.859 | 0.776 | 0.694 | 0.671 | 0.553 | 630 | 80/1000: 0.852 | 0.716 | 0.654 | 0.605 | 0.543 | 631 | 81/1000: 0.890 | 0.768 | 0.732 | 0.671 | 0.610 | 632 | 77/1000: 0.885 | 0.756 | 0.667 | 0.603 | 0.462 | 633 | 82/1000: 0.952 | 0.843 | 0.735 | 0.639 | 0.590 | 634 | 75/1000: 0.921 | 0.908 | 0.789 | 0.658 | 0.592 | 635 | 74/1000: 0.907 | 0.760 | 0.640 | 0.587 | 0.453 | 636 | 75/1000: 0.855 | 0.737 | 0.632 | 0.553 | 0.447 | 637 | 85/1000: 0.860 | 0.779 | 0.698 | 0.674 | 0.558 | 638 | 81/1000: 0.854 | 0.720 | 0.659 | 0.610 | 0.549 | 639 | 76/1000: 0.922 | 0.909 | 0.792 | 0.662 | 0.597 | 640 | 76/1000: 0.857 | 0.740 | 0.636 | 0.558 | 0.455 | 641 | 75/1000: 0.895 | 0.750 | 0.632 | 0.579 | 0.447 | 642 | 78/1000: 0.886 | 0.759 | 0.671 | 0.595 | 0.456 | 643 | 83/1000: 0.952 | 0.845 | 0.726 | 0.631 | 0.583 | 644 | 82/1000: 0.892 | 0.771 | 0.735 | 0.675 | 0.602 | 645 | 82/1000: 0.855 | 0.723 | 0.663 | 0.614 | 0.554 | 646 | 77/1000: 0.910 | 0.897 | 0.782 | 0.654 | 0.590 | 647 | 77/1000: 0.859 | 0.744 | 0.641 | 0.564 | 0.462 | 648 | 86/1000: 0.862 | 0.782 | 0.690 | 0.667 | 0.552 | 649 | 84/1000: 0.953 | 0.847 | 0.729 | 0.635 | 0.588 | 650 | 79/1000: 0.887 | 0.762 | 0.662 | 0.588 | 0.450 | 651 | 83/1000: 0.857 | 0.726 | 0.667 | 0.619 | 0.560 | 652 | 78/1000: 0.911 | 0.899 | 0.785 | 0.658 | 0.595 | 653 | 78/1000: 0.861 | 0.734 | 0.633 | 0.557 | 0.456 | 654 | 83/1000: 0.893 | 0.774 | 0.738 | 0.679 | 0.607 | 655 | 76/1000: 0.896 | 0.753 | 0.636 | 0.571 | 0.442 | 656 | 87/1000: 0.864 | 0.784 | 0.693 | 0.659 | 0.545 | 657 | 85/1000: 0.953 | 0.837 | 0.721 | 0.628 | 0.581 | 658 | 80/1000: 0.889 | 0.765 | 0.667 | 0.593 | 0.457 | 659 | 84/1000: 0.859 | 0.729 | 0.671 | 0.624 | 0.565 | 660 | 79/1000: 0.912 | 0.900 | 0.787 | 0.662 | 0.600 | 661 | 79/1000: 0.863 | 0.725 | 0.625 | 0.550 | 0.450 | 662 | 77/1000: 0.897 | 0.756 | 0.641 | 0.577 | 0.449 | 663 | 88/1000: 0.865 | 0.787 | 0.697 | 0.663 | 0.551 | 664 | 81/1000: 0.890 | 0.768 | 0.671 | 0.598 | 0.463 | 665 | 84/1000: 0.894 | 0.776 | 0.729 | 0.671 | 0.600 | 666 | 86/1000: 0.954 | 0.839 | 0.724 | 0.632 | 0.586 | 667 | 85/1000: 0.860 | 0.733 | 0.674 | 0.628 | 0.570 | 668 | 80/1000: 0.914 | 0.889 | 0.778 | 0.654 | 0.593 | 669 | 80/1000: 0.864 | 0.728 | 0.630 | 0.556 | 0.457 | 670 | 78/1000: 0.899 | 0.759 | 0.646 | 0.582 | 0.456 | 671 | 89/1000: 0.867 | 0.789 | 0.700 | 0.667 | 0.556 | 672 | 82/1000: 0.892 | 0.771 | 0.675 | 0.602 | 0.470 | 673 | 85/1000: 0.895 | 0.779 | 0.733 | 0.674 | 0.605 | 674 | 81/1000: 0.915 | 0.890 | 0.780 | 0.659 | 0.598 | 675 | 81/1000: 0.866 | 0.732 | 0.634 | 0.561 | 0.463 | 676 | 86/1000: 0.862 | 0.736 | 0.678 | 0.632 | 0.575 | 677 | 90/1000: 0.868 | 0.791 | 0.703 | 0.670 | 0.560 | 678 | 87/1000: 0.955 | 0.841 | 0.716 | 0.625 | 0.580 | 679 | 83/1000: 0.893 | 0.774 | 0.679 | 0.607 | 0.476 | 680 | 79/1000: 0.900 | 0.762 | 0.637 | 0.575 | 0.450 | 681 | 86/1000: 0.897 | 0.782 | 0.736 | 0.678 | 0.609 | 682 | 91/1000: 0.870 | 0.793 | 0.707 | 0.674 | 0.565 | 683 | 82/1000: 0.904 | 0.880 | 0.771 | 0.651 | 0.590 | 684 | 88/1000: 0.955 | 0.843 | 0.719 | 0.629 | 0.584 | 685 | 82/1000: 0.867 | 0.735 | 0.639 | 0.566 | 0.470 | 686 | 87/1000: 0.864 | 0.739 | 0.670 | 0.625 | 0.568 | 687 | 92/1000: 0.871 | 0.796 | 0.710 | 0.677 | 0.570 | 688 | 84/1000: 0.894 | 0.776 | 0.671 | 0.600 | 0.471 | 689 | 80/1000: 0.901 | 0.765 | 0.630 | 0.568 | 0.444 | 690 | 89/1000: 0.956 | 0.844 | 0.722 | 0.633 | 0.589 | 691 | 
83/1000: 0.905 | 0.881 | 0.774 | 0.655 | 0.595 | 692 | 87/1000: 0.898 | 0.784 | 0.739 | 0.682 | 0.614 | 693 | 88/1000: 0.854 | 0.730 | 0.663 | 0.618 | 0.562 | 694 | 85/1000: 0.895 | 0.779 | 0.674 | 0.605 | 0.477 | 695 | 84/1000: 0.906 | 0.882 | 0.776 | 0.659 | 0.600 | 696 | 81/1000: 0.902 | 0.768 | 0.634 | 0.573 | 0.451 | 697 | 83/1000: 0.869 | 0.738 | 0.643 | 0.571 | 0.464 | 698 | 90/1000: 0.956 | 0.846 | 0.725 | 0.637 | 0.593 | 699 | 86/1000: 0.897 | 0.782 | 0.678 | 0.609 | 0.483 | 700 | 88/1000: 0.899 | 0.787 | 0.742 | 0.674 | 0.607 | 701 | 93/1000: 0.872 | 0.798 | 0.713 | 0.670 | 0.564 | 702 | 82/1000: 0.892 | 0.759 | 0.627 | 0.566 | 0.446 | 703 | 91/1000: 0.946 | 0.837 | 0.717 | 0.630 | 0.587 | 704 | 87/1000: 0.886 | 0.773 | 0.670 | 0.602 | 0.477 | 705 | 85/1000: 0.907 | 0.884 | 0.779 | 0.651 | 0.593 | 706 | 84/1000: 0.871 | 0.741 | 0.647 | 0.576 | 0.471 | 707 | 89/1000: 0.856 | 0.733 | 0.667 | 0.611 | 0.556 | 708 | 83/1000: 0.893 | 0.762 | 0.631 | 0.571 | 0.452 | 709 | 89/1000: 0.900 | 0.778 | 0.733 | 0.667 | 0.600 | 710 | 94/1000: 0.874 | 0.800 | 0.716 | 0.674 | 0.568 | 711 | 88/1000: 0.888 | 0.775 | 0.674 | 0.607 | 0.483 | 712 | 85/1000: 0.872 | 0.744 | 0.651 | 0.581 | 0.477 | 713 | 90/1000: 0.890 | 0.769 | 0.725 | 0.659 | 0.593 | 714 | 86/1000: 0.908 | 0.885 | 0.782 | 0.655 | 0.598 | 715 | 90/1000: 0.857 | 0.736 | 0.659 | 0.604 | 0.549 | 716 | 92/1000: 0.946 | 0.839 | 0.720 | 0.634 | 0.581 | 717 | 89/1000: 0.889 | 0.778 | 0.678 | 0.611 | 0.489 | 718 | 95/1000: 0.875 | 0.802 | 0.719 | 0.677 | 0.573 | 719 | 86/1000: 0.874 | 0.747 | 0.655 | 0.586 | 0.483 | 720 | 84/1000: 0.894 | 0.765 | 0.624 | 0.565 | 0.447 | 721 | 91/1000: 0.880 | 0.761 | 0.717 | 0.652 | 0.587 | 722 | 87/1000: 0.909 | 0.886 | 0.784 | 0.659 | 0.602 | 723 | 91/1000: 0.859 | 0.739 | 0.663 | 0.609 | 0.554 | 724 | 93/1000: 0.947 | 0.840 | 0.723 | 0.638 | 0.585 | 725 | 90/1000: 0.890 | 0.780 | 0.681 | 0.615 | 0.495 | 726 | 92/1000: 0.882 | 0.763 | 0.720 | 0.656 | 0.591 | 727 | 87/1000: 0.875 | 0.750 | 0.659 | 0.580 | 0.477 | 728 | 96/1000: 0.876 | 0.804 | 0.722 | 0.670 | 0.567 | 729 | 85/1000: 0.895 | 0.767 | 0.628 | 0.570 | 0.442 | 730 | 94/1000: 0.947 | 0.842 | 0.726 | 0.642 | 0.589 | 731 | 92/1000: 0.860 | 0.742 | 0.667 | 0.613 | 0.559 | 732 | 91/1000: 0.891 | 0.783 | 0.685 | 0.620 | 0.500 | 733 | 88/1000: 0.910 | 0.888 | 0.775 | 0.652 | 0.596 | 734 | 88/1000: 0.865 | 0.742 | 0.652 | 0.573 | 0.472 | 735 | 97/1000: 0.878 | 0.806 | 0.724 | 0.673 | 0.571 | 736 | 93/1000: 0.883 | 0.766 | 0.713 | 0.649 | 0.585 | 737 | 95/1000: 0.948 | 0.844 | 0.729 | 0.646 | 0.594 | 738 | 93/1000: 0.862 | 0.745 | 0.670 | 0.617 | 0.564 | 739 | 86/1000: 0.897 | 0.770 | 0.632 | 0.575 | 0.448 | 740 | 89/1000: 0.856 | 0.733 | 0.644 | 0.567 | 0.467 | 741 | 89/1000: 0.911 | 0.889 | 0.778 | 0.656 | 0.600 | 742 | 92/1000: 0.892 | 0.785 | 0.677 | 0.613 | 0.495 | 743 | 98/1000: 0.869 | 0.798 | 0.717 | 0.667 | 0.566 | 744 | 94/1000: 0.884 | 0.768 | 0.716 | 0.653 | 0.589 | 745 | 96/1000: 0.948 | 0.845 | 0.732 | 0.649 | 0.598 | 746 | 94/1000: 0.853 | 0.737 | 0.663 | 0.611 | 0.558 | 747 | 90/1000: 0.857 | 0.736 | 0.648 | 0.571 | 0.473 | 748 | 90/1000: 0.912 | 0.890 | 0.780 | 0.659 | 0.604 | 749 | 87/1000: 0.898 | 0.773 | 0.636 | 0.580 | 0.455 | 750 | 93/1000: 0.883 | 0.777 | 0.670 | 0.606 | 0.489 | 751 | 99/1000: 0.870 | 0.800 | 0.720 | 0.670 | 0.570 | 752 | 95/1000: 0.885 | 0.760 | 0.708 | 0.646 | 0.583 | 753 | 95/1000: 0.854 | 0.740 | 0.667 | 0.615 | 0.562 | 754 | 97/1000: 0.949 | 0.847 | 0.724 | 0.643 | 0.592 | 755 | 88/1000: 0.899 | 0.775 | 0.640 | 
0.584 | 0.461 | 756 | 91/1000: 0.859 | 0.728 | 0.641 | 0.565 | 0.467 | 757 | 94/1000: 0.884 | 0.779 | 0.674 | 0.611 | 0.495 | 758 | 91/1000: 0.913 | 0.891 | 0.772 | 0.652 | 0.598 | 759 | 100/1000: 0.871 | 0.802 | 0.723 | 0.673 | 0.574 | 760 | 96/1000: 0.887 | 0.763 | 0.711 | 0.639 | 0.577 | 761 | 92/1000: 0.860 | 0.731 | 0.645 | 0.570 | 0.473 | 762 | 98/1000: 0.949 | 0.848 | 0.717 | 0.636 | 0.586 | 763 | 96/1000: 0.856 | 0.742 | 0.670 | 0.608 | 0.557 | 764 | 95/1000: 0.885 | 0.781 | 0.677 | 0.615 | 0.500 | 765 | 89/1000: 0.900 | 0.778 | 0.644 | 0.589 | 0.456 | 766 | 97/1000: 0.888 | 0.755 | 0.704 | 0.633 | 0.571 | 767 | 97/1000: 0.857 | 0.735 | 0.663 | 0.602 | 0.551 | 768 | 96/1000: 0.887 | 0.784 | 0.680 | 0.608 | 0.495 | 769 | 93/1000: 0.862 | 0.734 | 0.649 | 0.564 | 0.468 | 770 | 90/1000: 0.901 | 0.769 | 0.637 | 0.582 | 0.451 | 771 | 99/1000: 0.950 | 0.850 | 0.720 | 0.640 | 0.580 | 772 | 101/1000: 0.873 | 0.804 | 0.725 | 0.676 | 0.569 | 773 | 92/1000: 0.914 | 0.892 | 0.774 | 0.645 | 0.591 | 774 | 98/1000: 0.889 | 0.758 | 0.707 | 0.636 | 0.566 | 775 | 98/1000: 0.859 | 0.737 | 0.667 | 0.606 | 0.556 | 776 | 91/1000: 0.902 | 0.772 | 0.641 | 0.587 | 0.457 | 777 | 94/1000: 0.863 | 0.726 | 0.642 | 0.558 | 0.463 | 778 | 100/1000: 0.950 | 0.851 | 0.723 | 0.644 | 0.584 | 779 | 102/1000: 0.874 | 0.796 | 0.718 | 0.670 | 0.563 | 780 | 99/1000: 0.890 | 0.760 | 0.710 | 0.640 | 0.570 | 781 | 97/1000: 0.888 | 0.786 | 0.684 | 0.612 | 0.490 | 782 | 93/1000: 0.915 | 0.894 | 0.777 | 0.649 | 0.596 | 783 | 99/1000: 0.850 | 0.730 | 0.660 | 0.600 | 0.550 | 784 | 92/1000: 0.903 | 0.763 | 0.634 | 0.581 | 0.452 | 785 | 103/1000: 0.875 | 0.798 | 0.721 | 0.673 | 0.567 | 786 | 100/1000: 0.891 | 0.762 | 0.713 | 0.644 | 0.574 | 787 | 101/1000: 0.951 | 0.853 | 0.725 | 0.647 | 0.588 | 788 | 93/1000: 0.904 | 0.766 | 0.638 | 0.585 | 0.457 | 789 | 98/1000: 0.889 | 0.788 | 0.677 | 0.606 | 0.485 | 790 | 95/1000: 0.865 | 0.729 | 0.646 | 0.562 | 0.458 | 791 | 100/1000: 0.851 | 0.733 | 0.663 | 0.594 | 0.545 | 792 | 104/1000: 0.876 | 0.800 | 0.724 | 0.676 | 0.571 | 793 | 101/1000: 0.892 | 0.765 | 0.716 | 0.647 | 0.578 | 794 | 94/1000: 0.916 | 0.895 | 0.779 | 0.653 | 0.589 | 795 | 102/1000: 0.942 | 0.845 | 0.718 | 0.641 | 0.583 | 796 | 94/1000: 0.905 | 0.768 | 0.642 | 0.589 | 0.463 | 797 | 99/1000: 0.890 | 0.790 | 0.680 | 0.610 | 0.490 | 798 | 102/1000: 0.893 | 0.767 | 0.718 | 0.650 | 0.583 | 799 | 96/1000: 0.866 | 0.732 | 0.649 | 0.567 | 0.464 | 800 | 103/1000: 0.942 | 0.846 | 0.721 | 0.644 | 0.587 | 801 | 105/1000: 0.877 | 0.802 | 0.726 | 0.679 | 0.575 | 802 | 101/1000: 0.853 | 0.735 | 0.657 | 0.588 | 0.539 | 803 | 95/1000: 0.917 | 0.896 | 0.781 | 0.646 | 0.583 | 804 | 104/1000: 0.943 | 0.848 | 0.724 | 0.648 | 0.590 | 805 | 100/1000: 0.891 | 0.792 | 0.683 | 0.604 | 0.485 | 806 | 97/1000: 0.867 | 0.735 | 0.653 | 0.571 | 0.469 | 807 | 102/1000: 0.845 | 0.728 | 0.650 | 0.583 | 0.534 | 808 | 103/1000: 0.894 | 0.769 | 0.721 | 0.654 | 0.577 | 809 | 106/1000: 0.879 | 0.794 | 0.720 | 0.673 | 0.570 | 810 | 96/1000: 0.907 | 0.887 | 0.773 | 0.639 | 0.577 | 811 | 101/1000: 0.892 | 0.794 | 0.686 | 0.608 | 0.490 | 812 | 95/1000: 0.906 | 0.771 | 0.646 | 0.594 | 0.458 | 813 | 105/1000: 0.943 | 0.840 | 0.717 | 0.642 | 0.585 | 814 | 103/1000: 0.837 | 0.721 | 0.644 | 0.577 | 0.529 | 815 | 107/1000: 0.880 | 0.796 | 0.722 | 0.676 | 0.574 | 816 | 104/1000: 0.895 | 0.771 | 0.724 | 0.657 | 0.581 | 817 | 102/1000: 0.893 | 0.796 | 0.689 | 0.612 | 0.495 | 818 | 97/1000: 0.908 | 0.888 | 0.776 | 0.643 | 0.582 | 819 | 98/1000: 0.869 | 0.737 | 0.657 | 0.566 | 
0.465 | 820 | 104/1000: 0.838 | 0.724 | 0.648 | 0.581 | 0.533 | 821 | 96/1000: 0.907 | 0.773 | 0.649 | 0.588 | 0.454 | 822 | 108/1000: 0.881 | 0.798 | 0.725 | 0.679 | 0.578 | 823 | 106/1000: 0.944 | 0.832 | 0.710 | 0.636 | 0.579 | 824 | 105/1000: 0.896 | 0.774 | 0.726 | 0.651 | 0.575 | 825 | 98/1000: 0.909 | 0.889 | 0.778 | 0.646 | 0.586 | 826 | 103/1000: 0.894 | 0.788 | 0.683 | 0.606 | 0.490 | 827 | 107/1000: 0.944 | 0.833 | 0.713 | 0.639 | 0.583 | 828 | 109/1000: 0.882 | 0.800 | 0.727 | 0.682 | 0.582 | 829 | 106/1000: 0.897 | 0.776 | 0.729 | 0.654 | 0.579 | 830 | 105/1000: 0.840 | 0.726 | 0.651 | 0.575 | 0.528 | 831 | 97/1000: 0.908 | 0.765 | 0.643 | 0.582 | 0.449 | 832 | 99/1000: 0.870 | 0.740 | 0.660 | 0.570 | 0.460 | 833 | 99/1000: 0.910 | 0.890 | 0.780 | 0.650 | 0.590 | 834 | 104/1000: 0.895 | 0.790 | 0.686 | 0.610 | 0.495 | 835 | 107/1000: 0.898 | 0.778 | 0.731 | 0.657 | 0.583 | 836 | 110/1000: 0.883 | 0.793 | 0.721 | 0.676 | 0.577 | 837 | 106/1000: 0.841 | 0.729 | 0.654 | 0.579 | 0.533 | 838 | 108/1000: 0.945 | 0.835 | 0.716 | 0.633 | 0.578 | 839 | 105/1000: 0.896 | 0.792 | 0.689 | 0.613 | 0.500 | 840 | 100/1000: 0.911 | 0.881 | 0.772 | 0.644 | 0.584 | 841 | 111/1000: 0.875 | 0.786 | 0.714 | 0.670 | 0.571 | 842 | 107/1000: 0.843 | 0.731 | 0.657 | 0.583 | 0.537 | 843 | 98/1000: 0.909 | 0.768 | 0.646 | 0.586 | 0.455 | 844 | 100/1000: 0.871 | 0.743 | 0.653 | 0.564 | 0.455 | 845 | 108/1000: 0.899 | 0.780 | 0.725 | 0.651 | 0.578 | 846 | 109/1000: 0.945 | 0.827 | 0.709 | 0.627 | 0.573 | 847 | 106/1000: 0.897 | 0.785 | 0.682 | 0.607 | 0.495 | 848 | 101/1000: 0.912 | 0.882 | 0.775 | 0.647 | 0.588 | 849 | 112/1000: 0.876 | 0.788 | 0.717 | 0.673 | 0.575 | 850 | 101/1000: 0.873 | 0.745 | 0.657 | 0.569 | 0.461 | 851 | 108/1000: 0.844 | 0.725 | 0.651 | 0.578 | 0.532 | 852 | 109/1000: 0.900 | 0.782 | 0.727 | 0.655 | 0.582 | 853 | 110/1000: 0.946 | 0.829 | 0.712 | 0.631 | 0.577 | 854 | 113/1000: 0.877 | 0.789 | 0.719 | 0.675 | 0.579 | 855 | 107/1000: 0.898 | 0.778 | 0.676 | 0.602 | 0.491 | 856 | 99/1000: 0.910 | 0.770 | 0.650 | 0.590 | 0.450 | 857 | 110/1000: 0.901 | 0.784 | 0.730 | 0.658 | 0.586 | 858 | 102/1000: 0.913 | 0.883 | 0.777 | 0.641 | 0.583 | 859 | 111/1000: 0.946 | 0.830 | 0.714 | 0.634 | 0.580 | 860 | 114/1000: 0.878 | 0.791 | 0.722 | 0.678 | 0.583 | 861 | 109/1000: 0.845 | 0.727 | 0.655 | 0.582 | 0.536 | 862 | 108/1000: 0.890 | 0.771 | 0.670 | 0.596 | 0.486 | 863 | 102/1000: 0.874 | 0.748 | 0.660 | 0.563 | 0.456 | 864 | 103/1000: 0.913 | 0.885 | 0.779 | 0.644 | 0.587 | 865 | 100/1000: 0.911 | 0.772 | 0.653 | 0.584 | 0.446 | 866 | 112/1000: 0.947 | 0.832 | 0.717 | 0.637 | 0.584 | 867 | 115/1000: 0.879 | 0.793 | 0.724 | 0.681 | 0.586 | 868 | 110/1000: 0.847 | 0.730 | 0.658 | 0.586 | 0.541 | 869 | 111/1000: 0.902 | 0.786 | 0.732 | 0.652 | 0.580 | 870 | 101/1000: 0.912 | 0.775 | 0.657 | 0.588 | 0.451 | 871 | 109/1000: 0.891 | 0.773 | 0.673 | 0.600 | 0.482 | 872 | 113/1000: 0.947 | 0.833 | 0.719 | 0.640 | 0.588 | 873 | 104/1000: 0.914 | 0.876 | 0.771 | 0.638 | 0.581 | 874 | 103/1000: 0.875 | 0.750 | 0.663 | 0.567 | 0.462 | 875 | 116/1000: 0.880 | 0.795 | 0.726 | 0.684 | 0.590 | 876 | 111/1000: 0.848 | 0.723 | 0.652 | 0.580 | 0.536 | 877 | 105/1000: 0.915 | 0.877 | 0.774 | 0.642 | 0.585 | 878 | 102/1000: 0.913 | 0.777 | 0.660 | 0.592 | 0.456 | 879 | 112/1000: 0.903 | 0.788 | 0.735 | 0.655 | 0.584 | 880 | 110/1000: 0.892 | 0.775 | 0.667 | 0.595 | 0.477 | 881 | 104/1000: 0.876 | 0.752 | 0.667 | 0.571 | 0.467 | 882 | 114/1000: 0.948 | 0.835 | 0.722 | 0.643 | 0.591 | 883 | 106/1000: 0.907 | 
0.869 | 0.766 | 0.636 | 0.579 | 884 | 103/1000: 0.913 | 0.779 | 0.663 | 0.596 | 0.462 | 885 | 117/1000: 0.881 | 0.797 | 0.729 | 0.686 | 0.593 | 886 | 113/1000: 0.904 | 0.781 | 0.728 | 0.649 | 0.579 | 887 | 105/1000: 0.877 | 0.755 | 0.670 | 0.575 | 0.472 | 888 | 112/1000: 0.850 | 0.726 | 0.655 | 0.575 | 0.531 | 889 | 111/1000: 0.893 | 0.777 | 0.670 | 0.598 | 0.482 | 890 | 114/1000: 0.904 | 0.783 | 0.730 | 0.652 | 0.583 | 891 | 104/1000: 0.914 | 0.781 | 0.667 | 0.600 | 0.467 | 892 | 115/1000: 0.948 | 0.836 | 0.716 | 0.638 | 0.586 | 893 | 107/1000: 0.907 | 0.861 | 0.759 | 0.630 | 0.574 | 894 | 118/1000: 0.882 | 0.798 | 0.731 | 0.689 | 0.597 | 895 | 115/1000: 0.905 | 0.784 | 0.733 | 0.655 | 0.586 | 896 | 106/1000: 0.879 | 0.757 | 0.673 | 0.579 | 0.467 | 897 | 116/1000: 0.949 | 0.838 | 0.718 | 0.641 | 0.590 | 898 | 112/1000: 0.894 | 0.779 | 0.664 | 0.593 | 0.478 | 899 | 113/1000: 0.851 | 0.728 | 0.658 | 0.570 | 0.526 | 900 | 105/1000: 0.915 | 0.783 | 0.670 | 0.604 | 0.472 | 901 | 108/1000: 0.908 | 0.862 | 0.761 | 0.624 | 0.569 | 902 | 119/1000: 0.883 | 0.800 | 0.733 | 0.692 | 0.600 | 903 | 107/1000: 0.880 | 0.759 | 0.676 | 0.583 | 0.472 | 904 | 117/1000: 0.949 | 0.839 | 0.712 | 0.636 | 0.585 | 905 | 116/1000: 0.906 | 0.786 | 0.735 | 0.658 | 0.581 | 906 | 114/1000: 0.852 | 0.722 | 0.652 | 0.565 | 0.522 | 907 | 108/1000: 0.881 | 0.761 | 0.679 | 0.587 | 0.477 | 908 | 113/1000: 0.895 | 0.781 | 0.658 | 0.588 | 0.474 | 909 | 120/1000: 0.884 | 0.802 | 0.736 | 0.686 | 0.595 | 910 | 118/1000: 0.950 | 0.840 | 0.714 | 0.639 | 0.588 | 911 | 115/1000: 0.853 | 0.724 | 0.655 | 0.569 | 0.526 | 912 | 109/1000: 0.909 | 0.864 | 0.764 | 0.627 | 0.573 | 913 | 106/1000: 0.916 | 0.785 | 0.673 | 0.598 | 0.467 | 914 | 114/1000: 0.896 | 0.783 | 0.661 | 0.591 | 0.478 | 915 | 109/1000: 0.882 | 0.764 | 0.682 | 0.591 | 0.482 | 916 | 121/1000: 0.885 | 0.803 | 0.738 | 0.689 | 0.598 | 917 | 117/1000: 0.907 | 0.788 | 0.737 | 0.653 | 0.576 | 918 | 110/1000: 0.910 | 0.865 | 0.766 | 0.631 | 0.577 | 919 | 119/1000: 0.950 | 0.842 | 0.717 | 0.642 | 0.592 | 920 | 107/1000: 0.917 | 0.787 | 0.676 | 0.602 | 0.472 | 921 | 110/1000: 0.883 | 0.766 | 0.685 | 0.595 | 0.486 | 922 | 122/1000: 0.886 | 0.805 | 0.740 | 0.691 | 0.602 | 923 | 118/1000: 0.908 | 0.790 | 0.739 | 0.655 | 0.580 | 924 | 120/1000: 0.950 | 0.843 | 0.719 | 0.645 | 0.595 | 925 | 111/1000: 0.911 | 0.866 | 0.768 | 0.634 | 0.580 | 926 | 115/1000: 0.897 | 0.784 | 0.664 | 0.586 | 0.474 | 927 | 116/1000: 0.855 | 0.726 | 0.658 | 0.573 | 0.521 | 928 | 108/1000: 0.917 | 0.780 | 0.670 | 0.596 | 0.468 | 929 | 123/1000: 0.887 | 0.806 | 0.742 | 0.694 | 0.605 | 930 | 111/1000: 0.884 | 0.768 | 0.688 | 0.598 | 0.491 | 931 | 119/1000: 0.908 | 0.792 | 0.742 | 0.658 | 0.583 | 932 | 121/1000: 0.951 | 0.844 | 0.721 | 0.648 | 0.598 | 933 | 117/1000: 0.856 | 0.729 | 0.661 | 0.576 | 0.525 | 934 | 116/1000: 0.897 | 0.786 | 0.667 | 0.590 | 0.479 | 935 | 109/1000: 0.918 | 0.782 | 0.673 | 0.600 | 0.473 | 936 | 124/1000: 0.888 | 0.808 | 0.744 | 0.696 | 0.608 | 937 | 112/1000: 0.876 | 0.761 | 0.681 | 0.593 | 0.487 | 938 | 120/1000: 0.901 | 0.785 | 0.736 | 0.653 | 0.579 | 939 | 122/1000: 0.951 | 0.846 | 0.724 | 0.650 | 0.602 | 940 | 112/1000: 0.912 | 0.867 | 0.770 | 0.637 | 0.575 | 941 | 117/1000: 0.890 | 0.780 | 0.661 | 0.585 | 0.475 | 942 | 118/1000: 0.857 | 0.723 | 0.655 | 0.571 | 0.521 | 943 | 113/1000: 0.877 | 0.763 | 0.684 | 0.596 | 0.491 | 944 | 110/1000: 0.919 | 0.784 | 0.676 | 0.604 | 0.477 | 945 | 113/1000: 0.912 | 0.868 | 0.772 | 0.640 | 0.579 | 946 | 121/1000: 0.902 | 0.779 | 0.730 | 0.648 | 
0.574 | 947 | 118/1000: 0.891 | 0.782 | 0.664 | 0.588 | 0.471 | 948 | 119/1000: 0.858 | 0.725 | 0.658 | 0.575 | 0.525 | 949 | 123/1000: 0.952 | 0.847 | 0.726 | 0.653 | 0.605 | 950 | 111/1000: 0.920 | 0.786 | 0.679 | 0.607 | 0.482 | 951 | 114/1000: 0.878 | 0.765 | 0.687 | 0.600 | 0.496 | 952 | 114/1000: 0.913 | 0.870 | 0.774 | 0.635 | 0.574 | 953 | 122/1000: 0.902 | 0.780 | 0.732 | 0.650 | 0.577 | 954 | 124/1000: 0.952 | 0.848 | 0.728 | 0.656 | 0.608 | 955 | 119/1000: 0.892 | 0.775 | 0.658 | 0.583 | 0.467 | 956 | 120/1000: 0.851 | 0.719 | 0.653 | 0.570 | 0.521 | 957 | 112/1000: 0.920 | 0.779 | 0.673 | 0.602 | 0.478 | 958 | 115/1000: 0.879 | 0.767 | 0.690 | 0.603 | 0.500 | 959 | 123/1000: 0.895 | 0.774 | 0.726 | 0.645 | 0.573 | 960 | 121/1000: 0.852 | 0.721 | 0.656 | 0.574 | 0.525 | 961 | 113/1000: 0.921 | 0.781 | 0.675 | 0.605 | 0.482 | 962 | 120/1000: 0.893 | 0.777 | 0.653 | 0.579 | 0.463 | 963 | 115/1000: 0.914 | 0.871 | 0.776 | 0.638 | 0.569 | 964 | 124/1000: 0.896 | 0.776 | 0.728 | 0.648 | 0.576 | 965 | 122/1000: 0.854 | 0.724 | 0.659 | 0.577 | 0.528 | 966 | 116/1000: 0.880 | 0.769 | 0.692 | 0.598 | 0.496 | 967 | 121/1000: 0.893 | 0.779 | 0.656 | 0.582 | 0.467 | 968 | 122/1000: 0.894 | 0.780 | 0.659 | 0.585 | 0.472 | 969 | 116/1000: 0.915 | 0.872 | 0.778 | 0.641 | 0.564 | 970 | 123/1000: 0.855 | 0.726 | 0.661 | 0.581 | 0.532 | 971 | 114/1000: 0.922 | 0.783 | 0.678 | 0.600 | 0.478 | 972 | 117/1000: 0.881 | 0.771 | 0.695 | 0.593 | 0.492 | 973 | 123/1000: 0.895 | 0.782 | 0.661 | 0.589 | 0.476 | 974 | 124/1000: 0.856 | 0.728 | 0.664 | 0.584 | 0.536 | 975 | 115/1000: 0.922 | 0.784 | 0.672 | 0.595 | 0.474 | 976 | 117/1000: 0.915 | 0.873 | 0.780 | 0.636 | 0.559 | 977 | 118/1000: 0.882 | 0.773 | 0.697 | 0.597 | 0.496 | 978 | 124/1000: 0.896 | 0.784 | 0.664 | 0.584 | 0.472 | 979 | 118/1000: 0.916 | 0.874 | 0.782 | 0.639 | 0.563 | 980 | 116/1000: 0.923 | 0.786 | 0.675 | 0.590 | 0.470 | 981 | 119/1000: 0.883 | 0.775 | 0.700 | 0.600 | 0.492 | 982 | 119/1000: 0.917 | 0.875 | 0.783 | 0.642 | 0.558 | 983 | 120/1000: 0.884 | 0.769 | 0.694 | 0.595 | 0.488 | 984 | 117/1000: 0.924 | 0.788 | 0.678 | 0.585 | 0.466 | 985 | 121/1000: 0.885 | 0.770 | 0.697 | 0.598 | 0.492 | 986 | 120/1000: 0.917 | 0.868 | 0.777 | 0.636 | 0.554 | 987 | 118/1000: 0.924 | 0.790 | 0.681 | 0.580 | 0.462 | 988 | 122/1000: 0.886 | 0.764 | 0.691 | 0.593 | 0.488 | 989 | 121/1000: 0.918 | 0.861 | 0.770 | 0.631 | 0.549 | 990 | 123/1000: 0.887 | 0.766 | 0.694 | 0.597 | 0.492 | 991 | 119/1000: 0.925 | 0.792 | 0.683 | 0.583 | 0.467 | 992 | 122/1000: 0.919 | 0.854 | 0.764 | 0.626 | 0.545 | 993 | 120/1000: 0.926 | 0.793 | 0.686 | 0.587 | 0.471 | 994 | 124/1000: 0.888 | 0.768 | 0.696 | 0.592 | 0.488 | 995 | 123/1000: 0.919 | 0.855 | 0.758 | 0.621 | 0.540 | 996 | 121/1000: 0.926 | 0.795 | 0.680 | 0.582 | 0.467 | 997 | 124/1000: 0.912 | 0.848 | 0.752 | 0.616 | 0.536 | 998 | 122/1000: 0.927 | 0.797 | 0.675 | 0.577 | 0.463 | 999 | 123/1000: 0.927 | 0.798 | 0.677 | 0.581 | 0.460 | 1000 | 124/1000: 0.928 | 0.792 | 0.672 | 0.576 | 0.456 | 1001 | -------------------------------------------------------------------------------- /fake_dataset/training/.hydra/config.yaml: -------------------------------------------------------------------------------- 1 | cameras: 2 | static: 3 | _target_: vr_env.camera.static_camera.StaticCamera 4 | name: static 5 | fov: 10 6 | aspect: 1 7 | nearval: 0.01 8 | farval: 10 9 | width: 200 10 | height: 200 11 | look_at: 12 | - -0.026242351159453392 13 | - -0.0302329882979393 14 | - 0.3920000493526459 15 | look_from: 16 | - 
2.871459009488717 17 | - -2.166602199425597 18 | - 2.555159848480571 19 | up_vector: 20 | - 0.4041403970338857 21 | - 0.22629790978217404 22 | - 0.8862616969685161 23 | gripper: 24 | _target_: vr_env.camera.gripper_camera.GripperCamera 25 | name: gripper 26 | fov: 75 27 | aspect: 1 28 | nearval: 0.01 29 | farval: 2 30 | width: 84 31 | height: 84 32 | tactile: 33 | _target_: vr_env.camera.tactile_sensor.TactileSensor 34 | name: tactile 35 | width: 120 36 | height: 160 37 | digit_link_ids: 38 | - 10 39 | - 12 40 | visualize_gui: false 41 | config_path: conf/digit_sensor/config_digit.yml 42 | load_dir: /work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-10-06/16-23-57/ 43 | data_path: data 44 | save_dir: ??? 45 | show_gui: false 46 | processes: 16 47 | max_episode_frames: 1 48 | save_body_infos: true 49 | set_static_cam: false 50 | env: 51 | cameras: ${cameras} 52 | show_gui: ${show_gui} 53 | use_vr: false 54 | scene: 55 | _target_: vr_env.scene.play_table_scene.PlayTableScene 56 | _recursive_: false 57 | name: calvin_scene_A 58 | data_path: ${data_path} 59 | global_scaling: 0.8 60 | euler_obs: ${robot.euler_obs} 61 | robot_base_position: 62 | - -0.34 63 | - -0.46 64 | - 0.24 65 | robot_base_orientation: 66 | - 0 67 | - 0 68 | - 0 69 | robot_initial_joint_positions: 70 | - -1.21779206 71 | - 1.03987646 72 | - 2.11978261 73 | - -2.34205014 74 | - -0.87015947 75 | - 1.64119353 76 | - 0.55344866 77 | surfaces: 78 | table: 79 | - - 0.0 80 | - -0.15 81 | - 0.46 82 | - - 0.35 83 | - -0.03 84 | - 0.46 85 | slider_left: 86 | - - -0.32 87 | - 0.05 88 | - 0.46 89 | - - -0.16 90 | - 0.12 91 | - 0.46 92 | slider_right: 93 | - - -0.05 94 | - 0.05 95 | - 0.46 96 | - - 0.13 97 | - 0.12 98 | - 0.46 99 | objects: 100 | fixed_objects: 101 | table: 102 | file: calvin_table_A/urdf/calvin_table_A.urdf 103 | initial_pos: 104 | - 0 105 | - 0 106 | - 0 107 | initial_orn: 108 | - 0 109 | - 0 110 | - 0 111 | joints: 112 | base__slide: 113 | initial_state: 0 114 | base__drawer: 115 | initial_state: 0 116 | buttons: 117 | base__button: 118 | initial_state: 0 119 | effect: led 120 | switches: 121 | base__switch: 122 | initial_state: 0 123 | effect: lightbulb 124 | lights: 125 | lightbulb: 126 | link: light_link 127 | color: 128 | - 1 129 | - 1 130 | - 0 131 | - 1 132 | led: 133 | link: led_link 134 | color: 135 | - 0 136 | - 1 137 | - 0 138 | - 1 139 | movable_objects: 140 | block_red: 141 | file: blocks/block_red_middle.urdf 142 | initial_pos: any 143 | initial_orn: any 144 | block_blue: 145 | file: blocks/block_blue_small.urdf 146 | initial_pos: any 147 | initial_orn: any 148 | block_pink: 149 | file: blocks/block_pink_big.urdf 150 | initial_pos: any 151 | initial_orn: any 152 | -------------------------------------------------------------------------------- /fake_dataset/training/.hydra/hydra.yaml: -------------------------------------------------------------------------------- 1 | hydra: 2 | run: 3 | dir: /work/dlclarge2/meeso-lfp/calvin_data/task_A_A/ 4 | sweep: 5 | dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} 6 | subdir: ${hydra.job.num} 7 | launcher: 8 | _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher 9 | sweeper: 10 | _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper 11 | max_batch_size: null 12 | help: 13 | app_name: ${hydra.job.name} 14 | header: '${hydra.help.app_name} is powered by Hydra. 
15 | 16 | ' 17 | footer: 'Powered by Hydra (https://hydra.cc) 18 | 19 | Use --hydra-help to view Hydra specific help 20 | 21 | ' 22 | template: '${hydra.help.header} 23 | 24 | == Configuration groups == 25 | 26 | Compose your configuration from those groups (group=option) 27 | 28 | 29 | $APP_CONFIG_GROUPS 30 | 31 | 32 | == Config == 33 | 34 | Override anything in the config (foo.bar=value) 35 | 36 | 37 | $CONFIG 38 | 39 | 40 | ${hydra.help.footer} 41 | 42 | ' 43 | hydra_help: 44 | template: 'Hydra (${hydra.runtime.version}) 45 | 46 | See https://hydra.cc for more info. 47 | 48 | 49 | == Flags == 50 | 51 | $FLAGS_HELP 52 | 53 | 54 | == Configuration groups == 55 | 56 | Compose your configuration from those groups (For example, append hydra/job_logging=disabled 57 | to command line) 58 | 59 | 60 | $HYDRA_CONFIG_GROUPS 61 | 62 | 63 | Use ''--cfg hydra'' to Show the Hydra config. 64 | 65 | ' 66 | hydra_help: ??? 67 | hydra_logging: 68 | version: 1 69 | formatters: 70 | colorlog: 71 | (): colorlog.ColoredFormatter 72 | format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' 73 | handlers: 74 | console: 75 | class: logging.StreamHandler 76 | formatter: colorlog 77 | stream: ext://sys.stdout 78 | root: 79 | level: INFO 80 | handlers: 81 | - console 82 | disable_existing_loggers: false 83 | job_logging: 84 | version: 1 85 | formatters: 86 | simple: 87 | format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' 88 | colorlog: 89 | (): colorlog.ColoredFormatter 90 | format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] 91 | - %(message)s' 92 | log_colors: 93 | DEBUG: purple 94 | INFO: green 95 | WARNING: yellow 96 | ERROR: red 97 | CRITICAL: red 98 | handlers: 99 | console: 100 | class: logging.StreamHandler 101 | formatter: colorlog 102 | stream: ext://sys.stdout 103 | file: 104 | class: logging.FileHandler 105 | formatter: simple 106 | filename: ${hydra.job.name}.log 107 | root: 108 | level: INFO 109 | handlers: 110 | - console 111 | - file 112 | disable_existing_loggers: false 113 | env: {} 114 | searchpath: [] 115 | callbacks: {} 116 | output_subdir: .hydra 117 | overrides: 118 | hydra: 119 | - hydra.run.dir=/work/dlclarge2/meeso-lfp/calvin_data/task_A_A/ 120 | task: 121 | - load_dir=/work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-10-06/16-23-57/ 122 | - set_static_cam=false 123 | - processes=16 124 | - +scene=calvin_scene_A 125 | - show_gui=false 126 | job: 127 | name: datarenderer 128 | override_dirname: +scene=calvin_scene_A,load_dir=/work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-10-06/16-23-57/,processes=16,set_static_cam=false,show_gui=false 129 | id: ??? 130 | num: ??? 
131 | config_name: config_rendering 132 | env_set: {} 133 | env_copy: [] 134 | config: 135 | override_dirname: 136 | kv_sep: '=' 137 | item_sep: ',' 138 | exclude_keys: [] 139 | runtime: 140 | version: 1.1.1 141 | cwd: /home/meeso/VRENVCalvinRender/vr_env 142 | config_sources: 143 | - path: hydra.conf 144 | schema: pkg 145 | provider: hydra 146 | - path: /home/meeso/VRENVCalvinRender/conf 147 | schema: file 148 | provider: main 149 | - path: hydra_plugins.hydra_colorlog.conf 150 | schema: pkg 151 | provider: hydra-colorlog 152 | - path: '' 153 | schema: structured 154 | provider: schema 155 | choices: 156 | scene: calvin_scene_A 157 | cameras: static_gripper_tactile 158 | cameras/cameras@cameras.tactile: tactile 159 | cameras/cameras@cameras.gripper: gripper 160 | cameras/cameras@cameras.static: static 161 | hydra/env: default 162 | hydra/callbacks: null 163 | hydra/job_logging: colorlog 164 | hydra/hydra_logging: colorlog 165 | hydra/hydra_help: default 166 | hydra/help: default 167 | hydra/sweeper: basic 168 | hydra/launcher: basic 169 | hydra/output: default 170 | verbose: false 171 | -------------------------------------------------------------------------------- /fake_dataset/training/.hydra/merged_config.yaml: -------------------------------------------------------------------------------- 1 | cameras: 2 | static: 3 | _target_: calvin_env.camera.static_camera.StaticCamera 4 | name: static 5 | fov: 10 6 | aspect: 1 7 | nearval: 0.01 8 | farval: 10 9 | width: 200 10 | height: 200 11 | look_at: 12 | - -0.026242351159453392 13 | - -0.0302329882979393 14 | - 0.3920000493526459 15 | look_from: 16 | - 2.871459009488717 17 | - -2.166602199425597 18 | - 2.555159848480571 19 | up_vector: 20 | - 0.4041403970338857 21 | - 0.22629790978217404 22 | - 0.8862616969685161 23 | gripper: 24 | _target_: calvin_env.camera.gripper_camera.GripperCamera 25 | name: gripper 26 | fov: 75 27 | aspect: 1 28 | nearval: 0.01 29 | farval: 2 30 | width: 84 31 | height: 84 32 | tactile: 33 | _target_: calvin_env.camera.tactile_sensor.TactileSensor 34 | name: tactile 35 | width: 120 36 | height: 160 37 | digit_link_ids: 38 | - 10 39 | - 12 40 | visualize_gui: false 41 | config_path: conf/digit_sensor/config_digit.yml 42 | vr_input: 43 | vr_controller: 44 | POSITION: 1 45 | ORIENTATION: 2 46 | ANALOG: 3 47 | BUTTONS: 6 48 | BUTTON_A: 2 49 | BUTTON_B: 1 50 | vr_controller_id: 3 51 | gripper_orientation_offset: 52 | - 0 53 | - 3 54 | - 3.14 55 | gripper_position_offset: 56 | - -0.2 57 | - 0.3 58 | - 0 59 | _target_: calvin_env.io_utils.vr_input.VrInput 60 | limit_angle: 61 | - 90 62 | - 0 63 | - 0 64 | - -1 65 | visualize_vr_pos: true 66 | reset_button_queue_len: 60 67 | env: 68 | _target_: calvin_env.envs.play_table_env.PlayTableSimEnv 69 | _recursive_: false 70 | cameras: ${cameras} 71 | seed: 0 72 | bullet_time_step: 240.0 73 | use_vr: false 74 | show_gui: ${show_gui} 75 | robot_cfg: ${robot} 76 | scene_cfg: ${scene} 77 | use_scene_info: false 78 | use_egl: true 79 | control_freq: 30 80 | scene: 81 | _target_: calvin_env.scene.play_table_scene.PlayTableScene 82 | _recursive_: false 83 | data_path: ${data_path} 84 | global_scaling: 0.8 85 | euler_obs: ${robot.euler_obs} 86 | robot_base_position: 87 | - -0.34 88 | - -0.46 89 | - 0.24 90 | robot_base_orientation: 91 | - 0 92 | - 0 93 | - 0 94 | robot_initial_joint_positions: 95 | - -1.21779206 96 | - 1.03987646 97 | - 2.11978261 98 | - -2.34205014 99 | - -0.87015947 100 | - 1.64119353 101 | - 0.55344866 102 | surfaces: 103 | table: 104 | - - 0.0 105 | - -0.15 106 | - 
0.46 107 | - - 0.35 108 | - -0.03 109 | - 0.46 110 | slider_left: 111 | - - -0.32 112 | - 0.05 113 | - 0.46 114 | - - -0.16 115 | - 0.12 116 | - 0.46 117 | slider_right: 118 | - - -0.05 119 | - 0.05 120 | - 0.46 121 | - - 0.13 122 | - 0.12 123 | - 0.46 124 | objects: 125 | fixed_objects: 126 | table: 127 | file: calvin_table_D/urdf/calvin_table_D.urdf 128 | initial_pos: 129 | - 0 130 | - 0 131 | - 0 132 | initial_orn: 133 | - 0 134 | - 0 135 | - 0 136 | joints: 137 | base__slide: 138 | initial_state: 0 139 | base__drawer: 140 | initial_state: 0 141 | buttons: 142 | base__button: 143 | initial_state: 0 144 | effect: led 145 | switches: 146 | base__switch: 147 | initial_state: 0 148 | effect: lightbulb 149 | lights: 150 | lightbulb: 151 | link: light_link 152 | color: 153 | - 1 154 | - 1 155 | - 0 156 | - 1 157 | led: 158 | link: led_link 159 | color: 160 | - 0 161 | - 1 162 | - 0 163 | - 1 164 | movable_objects: 165 | block_red: 166 | file: blocks/block_red_middle.urdf 167 | initial_pos: any 168 | initial_orn: any 169 | block_blue: 170 | file: blocks/block_blue_small.urdf 171 | initial_pos: any 172 | initial_orn: any 173 | block_pink: 174 | file: blocks/block_pink_big.urdf 175 | initial_pos: any 176 | initial_orn: any 177 | name: calvin_scene_D 178 | robot: 179 | _target_: calvin_env.robot.robot.Robot 180 | filename: franka_panda/panda_longer_finger.urdf 181 | base_position: ${scene.robot_base_position} 182 | base_orientation: ${scene.robot_base_orientation} 183 | initial_joint_positions: ${scene.robot_initial_joint_positions} 184 | max_joint_force: 200.0 185 | gripper_force: 200 186 | arm_joint_ids: 187 | - 0 188 | - 1 189 | - 2 190 | - 3 191 | - 4 192 | - 5 193 | - 6 194 | gripper_joint_ids: 195 | - 9 196 | - 11 197 | gripper_joint_limits: 198 | - 0 199 | - 0.04 200 | tcp_link_id: 15 201 | end_effector_link_id: 7 202 | gripper_cam_link: 12 203 | use_nullspace: true 204 | max_velocity: 2 205 | use_ik_fast: false 206 | magic_scaling_factor_pos: 1 207 | magic_scaling_factor_orn: 1 208 | use_target_pose: true 209 | euler_obs: true 210 | tasks: 211 | _target_: calvin_env.envs.tasks.Tasks 212 | tasks: 213 | rotate_red_block_right: 214 | - rotate_object 215 | - block_red 216 | - -60 217 | rotate_red_block_left: 218 | - rotate_object 219 | - block_red 220 | - 60 221 | rotate_blue_block_right: 222 | - rotate_object 223 | - block_blue 224 | - -60 225 | rotate_blue_block_left: 226 | - rotate_object 227 | - block_blue 228 | - 60 229 | rotate_pink_block_right: 230 | - rotate_object 231 | - block_pink 232 | - -60 233 | rotate_pink_block_left: 234 | - rotate_object 235 | - block_pink 236 | - 60 237 | push_red_block_right: 238 | - push_object 239 | - block_red 240 | - 0.1 241 | - 0 242 | push_red_block_left: 243 | - push_object 244 | - block_red 245 | - -0.1 246 | - 0 247 | push_blue_block_right: 248 | - push_object 249 | - block_blue 250 | - 0.1 251 | - 0 252 | push_blue_block_left: 253 | - push_object 254 | - block_blue 255 | - -0.1 256 | - 0 257 | push_pink_block_right: 258 | - push_object 259 | - block_pink 260 | - 0.1 261 | - 0 262 | push_pink_block_left: 263 | - push_object 264 | - block_pink 265 | - -0.1 266 | - 0 267 | move_slider_left: 268 | - move_door_rel 269 | - base__slide 270 | - 0.15 271 | move_slider_right: 272 | - move_door_rel 273 | - base__slide 274 | - -0.15 275 | open_drawer: 276 | - move_door_rel 277 | - base__drawer 278 | - 0.12 279 | close_drawer: 280 | - move_door_rel 281 | - base__drawer 282 | - -0.12 283 | lift_red_block_table: 284 | - lift_object 285 | - block_red 286 | - 0.05 
287 | - table 288 | - base_link 289 | lift_red_block_slider: 290 | - lift_object 291 | - block_red 292 | - 0.03 293 | - table 294 | - plank_link 295 | lift_red_block_drawer: 296 | - lift_object 297 | - block_red 298 | - 0.05 299 | - table 300 | - drawer_link 301 | lift_blue_block_table: 302 | - lift_object 303 | - block_blue 304 | - 0.05 305 | - table 306 | - base_link 307 | lift_blue_block_slider: 308 | - lift_object 309 | - block_blue 310 | - 0.03 311 | - table 312 | - plank_link 313 | lift_blue_block_drawer: 314 | - lift_object 315 | - block_blue 316 | - 0.05 317 | - table 318 | - drawer_link 319 | lift_pink_block_table: 320 | - lift_object 321 | - block_pink 322 | - 0.05 323 | - table 324 | - base_link 325 | lift_pink_block_slider: 326 | - lift_object 327 | - block_pink 328 | - 0.03 329 | - table 330 | - plank_link 331 | lift_pink_block_drawer: 332 | - lift_object 333 | - block_pink 334 | - 0.05 335 | - table 336 | - drawer_link 337 | place_in_slider: 338 | - place_object 339 | - table 340 | - plank_link 341 | place_in_drawer: 342 | - place_object 343 | - table 344 | - drawer_link 345 | stack_block: 346 | - stack_objects 347 | unstack_block: 348 | - unstack_objects 349 | turn_on_lightbulb: 350 | - toggle_light 351 | - lightbulb 352 | - 0 353 | - 1 354 | turn_off_lightbulb: 355 | - toggle_light 356 | - lightbulb 357 | - 1 358 | - 0 359 | turn_on_led: 360 | - toggle_light 361 | - led 362 | - 0 363 | - 1 364 | turn_off_led: 365 | - toggle_light 366 | - led 367 | - 1 368 | - 0 369 | push_into_drawer: 370 | - push_object_into 371 | - - block_red 372 | - block_blue 373 | - block_pink 374 | - table 375 | - base_link 376 | - table 377 | - drawer_link 378 | recorder: 379 | record: ${record} 380 | record_fps: 30.0 381 | show_fps: false 382 | enable_tts: true 383 | seed: 0 384 | use_vr: true 385 | data_path: data 386 | save_dir: /home/meeso/recordings 387 | record: true 388 | load_dir: /work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-09-12/11-36-05/ 389 | show_gui: false 390 | processes: 16 391 | max_episode_frames: 1 392 | save_body_infos: true 393 | set_static_cam: false 394 | -------------------------------------------------------------------------------- /fake_dataset/training/.hydra/overrides.yaml: -------------------------------------------------------------------------------- 1 | - load_dir=/work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-10-06/16-23-57/ 2 | - set_static_cam=false 3 | - processes=16 4 | - +scene=calvin_scene_A 5 | - show_gui=false 6 | -------------------------------------------------------------------------------- /fake_dataset/training/statistics.yaml: -------------------------------------------------------------------------------- 1 | robot_obs: 2 | - _target_: calvin_agent.utils.transforms.NormalizeVector 3 | mean: [0.039233, -0.118554, 0.507826, 1.079174, -0.083069, 1.579753, 4 | 0.054622, -0.736859, 1.017769, 1.792879, -2.099604, -0.993738, 5 | 1.790842, 0.586534, 0.095367] 6 | std: [0.150769, 0.1104 , 0.06253 , 2.883517, 0.126405, 0.377196, 7 | 0.030152, 0.334392, 0.172714, 0.240513, 0.3842 , 0.198596, 8 | 0.158712, 0.346865, 0.995442] 9 | scene_obs: 10 | - _target_: calvin_agent.utils.transforms.NormalizeVector 11 | mean: [0.150934, 0.119917, 0.000239, 0.042049, 0.487755, 0.47448 , 12 | 0.057482, -0.088074, 0.431237, 0.046034, 0.030599, 0.027333, 13 | 0.062103, -0.092833, 0.430236, -0.054962, 0.019381, 0.096546, 14 | 0.064944, -0.093058, 0.428381, 0.024941, 0.002746, -0.031589] 15 | std: [ 0.125757, 0.09654 , 0.002148, 0.041916, 0.49985 , 
0.499348, 16 | 0.146225, 0.119266, 0.050408, 1.430807, 0.676023, 2.017468, 17 | 0.142979, 0.113236, 0.049651, 1.545888, 0.3906 , 1.763569, 18 | 0.143077, 0.11546 , 0.050363, 1.514873, 0.431664, 1.860245] 19 | 20 | act_min_bound: [-0.432188, -0.545456, 0.293439, -3.141593, -0.811348, -3.141573, -1. ] 21 | act_max_bound: [0.42977 , 0.139396, 0.796262, 3.141592, 0.638583, 3.141551, 1. ] 22 | -------------------------------------------------------------------------------- /fake_dataset/validation/.hydra/config.yaml: -------------------------------------------------------------------------------- 1 | cameras: 2 | static: 3 | _target_: vr_env.camera.static_camera.StaticCamera 4 | name: static 5 | fov: 10 6 | aspect: 1 7 | nearval: 0.01 8 | farval: 10 9 | width: 200 10 | height: 200 11 | look_at: 12 | - -0.026242351159453392 13 | - -0.0302329882979393 14 | - 0.3920000493526459 15 | look_from: 16 | - 2.871459009488717 17 | - -2.166602199425597 18 | - 2.555159848480571 19 | up_vector: 20 | - 0.4041403970338857 21 | - 0.22629790978217404 22 | - 0.8862616969685161 23 | gripper: 24 | _target_: vr_env.camera.gripper_camera.GripperCamera 25 | name: gripper 26 | fov: 75 27 | aspect: 1 28 | nearval: 0.01 29 | farval: 2 30 | width: 84 31 | height: 84 32 | tactile: 33 | _target_: vr_env.camera.tactile_sensor.TactileSensor 34 | name: tactile 35 | width: 120 36 | height: 160 37 | digit_link_ids: 38 | - 10 39 | - 12 40 | visualize_gui: false 41 | config_path: conf/digit_sensor/config_digit.yml 42 | load_dir: /work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-10-06/16-23-57/ 43 | data_path: data 44 | save_dir: ??? 45 | show_gui: false 46 | processes: 16 47 | max_episode_frames: 1 48 | save_body_infos: true 49 | set_static_cam: false 50 | env: 51 | cameras: ${cameras} 52 | show_gui: ${show_gui} 53 | use_vr: false 54 | scene: 55 | _target_: vr_env.scene.play_table_scene.PlayTableScene 56 | _recursive_: false 57 | name: calvin_scene_A 58 | data_path: ${data_path} 59 | global_scaling: 0.8 60 | euler_obs: ${robot.euler_obs} 61 | robot_base_position: 62 | - -0.34 63 | - -0.46 64 | - 0.24 65 | robot_base_orientation: 66 | - 0 67 | - 0 68 | - 0 69 | robot_initial_joint_positions: 70 | - -1.21779206 71 | - 1.03987646 72 | - 2.11978261 73 | - -2.34205014 74 | - -0.87015947 75 | - 1.64119353 76 | - 0.55344866 77 | surfaces: 78 | table: 79 | - - 0.0 80 | - -0.15 81 | - 0.46 82 | - - 0.35 83 | - -0.03 84 | - 0.46 85 | slider_left: 86 | - - -0.32 87 | - 0.05 88 | - 0.46 89 | - - -0.16 90 | - 0.12 91 | - 0.46 92 | slider_right: 93 | - - -0.05 94 | - 0.05 95 | - 0.46 96 | - - 0.13 97 | - 0.12 98 | - 0.46 99 | objects: 100 | fixed_objects: 101 | table: 102 | file: calvin_table_A/urdf/calvin_table_A.urdf 103 | initial_pos: 104 | - 0 105 | - 0 106 | - 0 107 | initial_orn: 108 | - 0 109 | - 0 110 | - 0 111 | joints: 112 | base__slide: 113 | initial_state: 0 114 | base__drawer: 115 | initial_state: 0 116 | buttons: 117 | base__button: 118 | initial_state: 0 119 | effect: led 120 | switches: 121 | base__switch: 122 | initial_state: 0 123 | effect: lightbulb 124 | lights: 125 | lightbulb: 126 | link: light_link 127 | color: 128 | - 1 129 | - 1 130 | - 0 131 | - 1 132 | led: 133 | link: led_link 134 | color: 135 | - 0 136 | - 1 137 | - 0 138 | - 1 139 | movable_objects: 140 | block_red: 141 | file: blocks/block_red_middle.urdf 142 | initial_pos: any 143 | initial_orn: any 144 | block_blue: 145 | file: blocks/block_blue_small.urdf 146 | initial_pos: any 147 | initial_orn: any 148 | block_pink: 149 | file: 
blocks/block_pink_big.urdf 150 | initial_pos: any 151 | initial_orn: any 152 | -------------------------------------------------------------------------------- /fake_dataset/validation/.hydra/hydra.yaml: -------------------------------------------------------------------------------- 1 | hydra: 2 | run: 3 | dir: /work/dlclarge2/meeso-lfp/calvin_data/task_A_A/ 4 | sweep: 5 | dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} 6 | subdir: ${hydra.job.num} 7 | launcher: 8 | _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher 9 | sweeper: 10 | _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper 11 | max_batch_size: null 12 | help: 13 | app_name: ${hydra.job.name} 14 | header: '${hydra.help.app_name} is powered by Hydra. 15 | 16 | ' 17 | footer: 'Powered by Hydra (https://hydra.cc) 18 | 19 | Use --hydra-help to view Hydra specific help 20 | 21 | ' 22 | template: '${hydra.help.header} 23 | 24 | == Configuration groups == 25 | 26 | Compose your configuration from those groups (group=option) 27 | 28 | 29 | $APP_CONFIG_GROUPS 30 | 31 | 32 | == Config == 33 | 34 | Override anything in the config (foo.bar=value) 35 | 36 | 37 | $CONFIG 38 | 39 | 40 | ${hydra.help.footer} 41 | 42 | ' 43 | hydra_help: 44 | template: 'Hydra (${hydra.runtime.version}) 45 | 46 | See https://hydra.cc for more info. 47 | 48 | 49 | == Flags == 50 | 51 | $FLAGS_HELP 52 | 53 | 54 | == Configuration groups == 55 | 56 | Compose your configuration from those groups (For example, append hydra/job_logging=disabled 57 | to command line) 58 | 59 | 60 | $HYDRA_CONFIG_GROUPS 61 | 62 | 63 | Use ''--cfg hydra'' to Show the Hydra config. 64 | 65 | ' 66 | hydra_help: ??? 67 | hydra_logging: 68 | version: 1 69 | formatters: 70 | colorlog: 71 | (): colorlog.ColoredFormatter 72 | format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' 73 | handlers: 74 | console: 75 | class: logging.StreamHandler 76 | formatter: colorlog 77 | stream: ext://sys.stdout 78 | root: 79 | level: INFO 80 | handlers: 81 | - console 82 | disable_existing_loggers: false 83 | job_logging: 84 | version: 1 85 | formatters: 86 | simple: 87 | format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' 88 | colorlog: 89 | (): colorlog.ColoredFormatter 90 | format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] 91 | - %(message)s' 92 | log_colors: 93 | DEBUG: purple 94 | INFO: green 95 | WARNING: yellow 96 | ERROR: red 97 | CRITICAL: red 98 | handlers: 99 | console: 100 | class: logging.StreamHandler 101 | formatter: colorlog 102 | stream: ext://sys.stdout 103 | file: 104 | class: logging.FileHandler 105 | formatter: simple 106 | filename: ${hydra.job.name}.log 107 | root: 108 | level: INFO 109 | handlers: 110 | - console 111 | - file 112 | disable_existing_loggers: false 113 | env: {} 114 | searchpath: [] 115 | callbacks: {} 116 | output_subdir: .hydra 117 | overrides: 118 | hydra: 119 | - hydra.run.dir=/work/dlclarge2/meeso-lfp/calvin_data/task_A_A/ 120 | task: 121 | - load_dir=/work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-10-06/16-23-57/ 122 | - set_static_cam=false 123 | - processes=16 124 | - +scene=calvin_scene_A 125 | - show_gui=false 126 | job: 127 | name: datarenderer 128 | override_dirname: +scene=calvin_scene_A,load_dir=/work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-10-06/16-23-57/,processes=16,set_static_cam=false,show_gui=false 129 | id: ??? 130 | num: ??? 
131 | config_name: config_rendering 132 | env_set: {} 133 | env_copy: [] 134 | config: 135 | override_dirname: 136 | kv_sep: '=' 137 | item_sep: ',' 138 | exclude_keys: [] 139 | runtime: 140 | version: 1.1.1 141 | cwd: /home/meeso/VRENVCalvinRender/vr_env 142 | config_sources: 143 | - path: hydra.conf 144 | schema: pkg 145 | provider: hydra 146 | - path: /home/meeso/VRENVCalvinRender/conf 147 | schema: file 148 | provider: main 149 | - path: hydra_plugins.hydra_colorlog.conf 150 | schema: pkg 151 | provider: hydra-colorlog 152 | - path: '' 153 | schema: structured 154 | provider: schema 155 | choices: 156 | scene: calvin_scene_A 157 | cameras: static_gripper_tactile 158 | cameras/cameras@cameras.tactile: tactile 159 | cameras/cameras@cameras.gripper: gripper 160 | cameras/cameras@cameras.static: static 161 | hydra/env: default 162 | hydra/callbacks: null 163 | hydra/job_logging: colorlog 164 | hydra/hydra_logging: colorlog 165 | hydra/hydra_help: default 166 | hydra/help: default 167 | hydra/sweeper: basic 168 | hydra/launcher: basic 169 | hydra/output: default 170 | verbose: false 171 | -------------------------------------------------------------------------------- /fake_dataset/validation/.hydra/merged_config.yaml: -------------------------------------------------------------------------------- 1 | cameras: 2 | static: 3 | _target_: calvin_env.camera.static_camera.StaticCamera 4 | name: static 5 | fov: 10 6 | aspect: 1 7 | nearval: 0.01 8 | farval: 10 9 | width: 200 10 | height: 200 11 | look_at: 12 | - -0.026242351159453392 13 | - -0.0302329882979393 14 | - 0.3920000493526459 15 | look_from: 16 | - 2.871459009488717 17 | - -2.166602199425597 18 | - 2.555159848480571 19 | up_vector: 20 | - 0.4041403970338857 21 | - 0.22629790978217404 22 | - 0.8862616969685161 23 | gripper: 24 | _target_: calvin_env.camera.gripper_camera.GripperCamera 25 | name: gripper 26 | fov: 75 27 | aspect: 1 28 | nearval: 0.01 29 | farval: 2 30 | width: 84 31 | height: 84 32 | tactile: 33 | _target_: calvin_env.camera.tactile_sensor.TactileSensor 34 | name: tactile 35 | width: 120 36 | height: 160 37 | digit_link_ids: 38 | - 10 39 | - 12 40 | visualize_gui: false 41 | config_path: conf/digit_sensor/config_digit.yml 42 | vr_input: 43 | vr_controller: 44 | POSITION: 1 45 | ORIENTATION: 2 46 | ANALOG: 3 47 | BUTTONS: 6 48 | BUTTON_A: 2 49 | BUTTON_B: 1 50 | vr_controller_id: 3 51 | gripper_orientation_offset: 52 | - 0 53 | - 3 54 | - 3.14 55 | gripper_position_offset: 56 | - -0.2 57 | - 0.3 58 | - 0 59 | _target_: calvin_env.io_utils.vr_input.VrInput 60 | limit_angle: 61 | - 90 62 | - 0 63 | - 0 64 | - -1 65 | visualize_vr_pos: true 66 | reset_button_queue_len: 60 67 | env: 68 | _target_: calvin_env.envs.play_table_env.PlayTableSimEnv 69 | _recursive_: false 70 | cameras: ${cameras} 71 | seed: 0 72 | bullet_time_step: 240.0 73 | use_vr: false 74 | show_gui: ${show_gui} 75 | robot_cfg: ${robot} 76 | scene_cfg: ${scene} 77 | use_scene_info: false 78 | use_egl: true 79 | control_freq: 30 80 | scene: 81 | _target_: calvin_env.scene.play_table_scene.PlayTableScene 82 | _recursive_: false 83 | data_path: ${data_path} 84 | global_scaling: 0.8 85 | euler_obs: ${robot.euler_obs} 86 | robot_base_position: 87 | - -0.34 88 | - -0.46 89 | - 0.24 90 | robot_base_orientation: 91 | - 0 92 | - 0 93 | - 0 94 | robot_initial_joint_positions: 95 | - -1.21779206 96 | - 1.03987646 97 | - 2.11978261 98 | - -2.34205014 99 | - -0.87015947 100 | - 1.64119353 101 | - 0.55344866 102 | surfaces: 103 | table: 104 | - - 0.0 105 | - -0.15 106 | - 
0.46 107 | - - 0.35 108 | - -0.03 109 | - 0.46 110 | slider_left: 111 | - - -0.32 112 | - 0.05 113 | - 0.46 114 | - - -0.16 115 | - 0.12 116 | - 0.46 117 | slider_right: 118 | - - -0.05 119 | - 0.05 120 | - 0.46 121 | - - 0.13 122 | - 0.12 123 | - 0.46 124 | objects: 125 | fixed_objects: 126 | table: 127 | file: calvin_table_D/urdf/calvin_table_D.urdf 128 | initial_pos: 129 | - 0 130 | - 0 131 | - 0 132 | initial_orn: 133 | - 0 134 | - 0 135 | - 0 136 | joints: 137 | base__slide: 138 | initial_state: 0 139 | base__drawer: 140 | initial_state: 0 141 | buttons: 142 | base__button: 143 | initial_state: 0 144 | effect: led 145 | switches: 146 | base__switch: 147 | initial_state: 0 148 | effect: lightbulb 149 | lights: 150 | lightbulb: 151 | link: light_link 152 | color: 153 | - 1 154 | - 1 155 | - 0 156 | - 1 157 | led: 158 | link: led_link 159 | color: 160 | - 0 161 | - 1 162 | - 0 163 | - 1 164 | movable_objects: 165 | block_red: 166 | file: blocks/block_red_middle.urdf 167 | initial_pos: any 168 | initial_orn: any 169 | block_blue: 170 | file: blocks/block_blue_small.urdf 171 | initial_pos: any 172 | initial_orn: any 173 | block_pink: 174 | file: blocks/block_pink_big.urdf 175 | initial_pos: any 176 | initial_orn: any 177 | name: calvin_scene_D 178 | robot: 179 | _target_: calvin_env.robot.robot.Robot 180 | filename: franka_panda/panda_longer_finger.urdf 181 | base_position: ${scene.robot_base_position} 182 | base_orientation: ${scene.robot_base_orientation} 183 | initial_joint_positions: ${scene.robot_initial_joint_positions} 184 | max_joint_force: 200.0 185 | gripper_force: 200 186 | arm_joint_ids: 187 | - 0 188 | - 1 189 | - 2 190 | - 3 191 | - 4 192 | - 5 193 | - 6 194 | gripper_joint_ids: 195 | - 9 196 | - 11 197 | gripper_joint_limits: 198 | - 0 199 | - 0.04 200 | tcp_link_id: 15 201 | end_effector_link_id: 7 202 | gripper_cam_link: 12 203 | use_nullspace: true 204 | max_velocity: 2 205 | use_ik_fast: false 206 | magic_scaling_factor_pos: 1 207 | magic_scaling_factor_orn: 1 208 | use_target_pose: true 209 | euler_obs: true 210 | tasks: 211 | _target_: calvin_env.envs.tasks.Tasks 212 | tasks: 213 | rotate_red_block_right: 214 | - rotate_object 215 | - block_red 216 | - -60 217 | rotate_red_block_left: 218 | - rotate_object 219 | - block_red 220 | - 60 221 | rotate_blue_block_right: 222 | - rotate_object 223 | - block_blue 224 | - -60 225 | rotate_blue_block_left: 226 | - rotate_object 227 | - block_blue 228 | - 60 229 | rotate_pink_block_right: 230 | - rotate_object 231 | - block_pink 232 | - -60 233 | rotate_pink_block_left: 234 | - rotate_object 235 | - block_pink 236 | - 60 237 | push_red_block_right: 238 | - push_object 239 | - block_red 240 | - 0.1 241 | - 0 242 | push_red_block_left: 243 | - push_object 244 | - block_red 245 | - -0.1 246 | - 0 247 | push_blue_block_right: 248 | - push_object 249 | - block_blue 250 | - 0.1 251 | - 0 252 | push_blue_block_left: 253 | - push_object 254 | - block_blue 255 | - -0.1 256 | - 0 257 | push_pink_block_right: 258 | - push_object 259 | - block_pink 260 | - 0.1 261 | - 0 262 | push_pink_block_left: 263 | - push_object 264 | - block_pink 265 | - -0.1 266 | - 0 267 | move_slider_left: 268 | - move_door_rel 269 | - base__slide 270 | - 0.15 271 | move_slider_right: 272 | - move_door_rel 273 | - base__slide 274 | - -0.15 275 | open_drawer: 276 | - move_door_rel 277 | - base__drawer 278 | - 0.12 279 | close_drawer: 280 | - move_door_rel 281 | - base__drawer 282 | - -0.12 283 | lift_red_block_table: 284 | - lift_object 285 | - block_red 286 | - 0.05 
287 | - table 288 | - base_link 289 | lift_red_block_slider: 290 | - lift_object 291 | - block_red 292 | - 0.03 293 | - table 294 | - plank_link 295 | lift_red_block_drawer: 296 | - lift_object 297 | - block_red 298 | - 0.05 299 | - table 300 | - drawer_link 301 | lift_blue_block_table: 302 | - lift_object 303 | - block_blue 304 | - 0.05 305 | - table 306 | - base_link 307 | lift_blue_block_slider: 308 | - lift_object 309 | - block_blue 310 | - 0.03 311 | - table 312 | - plank_link 313 | lift_blue_block_drawer: 314 | - lift_object 315 | - block_blue 316 | - 0.05 317 | - table 318 | - drawer_link 319 | lift_pink_block_table: 320 | - lift_object 321 | - block_pink 322 | - 0.05 323 | - table 324 | - base_link 325 | lift_pink_block_slider: 326 | - lift_object 327 | - block_pink 328 | - 0.03 329 | - table 330 | - plank_link 331 | lift_pink_block_drawer: 332 | - lift_object 333 | - block_pink 334 | - 0.05 335 | - table 336 | - drawer_link 337 | place_in_slider: 338 | - place_object 339 | - table 340 | - plank_link 341 | place_in_drawer: 342 | - place_object 343 | - table 344 | - drawer_link 345 | stack_block: 346 | - stack_objects 347 | unstack_block: 348 | - unstack_objects 349 | turn_on_lightbulb: 350 | - toggle_light 351 | - lightbulb 352 | - 0 353 | - 1 354 | turn_off_lightbulb: 355 | - toggle_light 356 | - lightbulb 357 | - 1 358 | - 0 359 | turn_on_led: 360 | - toggle_light 361 | - led 362 | - 0 363 | - 1 364 | turn_off_led: 365 | - toggle_light 366 | - led 367 | - 1 368 | - 0 369 | push_into_drawer: 370 | - push_object_into 371 | - - block_red 372 | - block_blue 373 | - block_pink 374 | - table 375 | - base_link 376 | - table 377 | - drawer_link 378 | recorder: 379 | record: ${record} 380 | record_fps: 30.0 381 | show_fps: false 382 | enable_tts: true 383 | seed: 0 384 | use_vr: true 385 | data_path: data 386 | save_dir: /home/meeso/recordings 387 | record: true 388 | load_dir: /work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-09-12/11-36-05/ 389 | show_gui: false 390 | processes: 16 391 | max_episode_frames: 1 392 | save_body_infos: true 393 | set_static_cam: false 394 | -------------------------------------------------------------------------------- /fake_dataset/validation/.hydra/overrides.yaml: -------------------------------------------------------------------------------- 1 | - load_dir=/work/dlclarge2/meeso-lfp/calvin_recordings/play_env_A/2021-10-06/16-23-57/ 2 | - set_static_cam=false 3 | - processes=16 4 | - +scene=calvin_scene_A 5 | - show_gui=false 6 | -------------------------------------------------------------------------------- /fake_dataset/validation/statistics.yaml: -------------------------------------------------------------------------------- 1 | robot_obs: 2 | - _target_: calvin_agent.utils.transforms.NormalizeVector 3 | mean: [0.039233, -0.118554, 0.507826, 1.079174, -0.083069, 1.579753, 4 | 0.054622, -0.736859, 1.017769, 1.792879, -2.099604, -0.993738, 5 | 1.790842, 0.586534, 0.095367] 6 | std: [0.150769, 0.1104 , 0.06253 , 2.883517, 0.126405, 0.377196, 7 | 0.030152, 0.334392, 0.172714, 0.240513, 0.3842 , 0.198596, 8 | 0.158712, 0.346865, 0.995442] 9 | scene_obs: 10 | - _target_: calvin_agent.utils.transforms.NormalizeVector 11 | mean: [0.150934, 0.119917, 0.000239, 0.042049, 0.487755, 0.47448 , 12 | 0.057482, -0.088074, 0.431237, 0.046034, 0.030599, 0.027333, 13 | 0.062103, -0.092833, 0.430236, -0.054962, 0.019381, 0.096546, 14 | 0.064944, -0.093058, 0.428381, 0.024941, 0.002746, -0.031589] 15 | std: [ 0.125757, 0.09654 , 0.002148, 0.041916, 0.49985 , 
0.499348, 16 | 0.146225, 0.119266, 0.050408, 1.430807, 0.676023, 2.017468, 17 | 0.142979, 0.113236, 0.049651, 1.545888, 0.3906 , 1.763569, 18 | 0.143077, 0.11546 , 0.050363, 1.514873, 0.431664, 1.860245] 19 | 20 | act_min_bound: [-0.432188, -0.545456, 0.293439, -3.141593, -0.811348, -3.141573, -1. ] 21 | act_max_bound: [0.42977 , 0.139396, 0.796262, 3.141592, 0.638583, 3.141551, 1. ] 22 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EDiRobotics/GR1-Training/673bb814a03e6d93053046103060ee47d5c8d8e1/install.sh -------------------------------------------------------------------------------- /models/gr1.py: -------------------------------------------------------------------------------- 1 | # Copyright (2024) Bytedance Ltd. and/or its affiliates 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """GR-1 model.""" 16 | import torch 17 | import torch.nn as nn 18 | 19 | import transformers 20 | from flamingo_pytorch import PerceiverResampler 21 | 22 | from transformers import GPT2Model 23 | from models.vision_transformer import Block 24 | from models.transformer_utils import get_2d_sincos_pos_embed 25 | 26 | 27 | class GR1(nn.Module): 28 | def __init__( 29 | self, 30 | model_clip, 31 | model_mae, 32 | rgb_shape, 33 | patch_size, 34 | state_dim, 35 | act_dim, 36 | hidden_size, 37 | sequence_length, 38 | chunk_size, 39 | training_target, 40 | img_feat_dim, 41 | patch_feat_dim, 42 | lang_feat_dim, 43 | resampler_params, 44 | without_norm_pixel_loss, 45 | use_hand_rgb=True, 46 | **kwargs 47 | ): 48 | super().__init__() 49 | self.state_dim = state_dim 50 | self.act_dim = act_dim 51 | self.sequence_length = sequence_length 52 | self.chunk_size = chunk_size 53 | 54 | # GPT 55 | self.hidden_size = hidden_size 56 | config = transformers.GPT2Config( 57 | vocab_size=1, 58 | n_embd=hidden_size, 59 | **kwargs 60 | ) 61 | self.transformer = GPT2Model(config) 62 | 63 | # Perciever resampler 64 | self.n_patch_latents = resampler_params['num_latents'] 65 | self.perceiver_resampler = PerceiverResampler( 66 | dim=patch_feat_dim, 67 | depth=resampler_params['depth'], 68 | dim_head=resampler_params['dim_head'], 69 | heads=resampler_params['heads'], 70 | num_latents=self.n_patch_latents, 71 | num_media_embeds=resampler_params['num_media_embeds']) 72 | 73 | # CLIP for language encoding 74 | self.model_clip = model_clip 75 | for _, param in self.model_clip.named_parameters(): 76 | param.requires_grad = False 77 | 78 | # MAE for image encoding 79 | self.model_mae = model_mae 80 | for _, param in self.model_mae.named_parameters(): 81 | param.requires_grad = False 82 | 83 | self.patch_size = patch_size 84 | self.image_size = rgb_shape 85 | self.img_feat_dim = img_feat_dim 86 | self.lang_feat_dim = lang_feat_dim 87 | self.patch_feat_dim = patch_feat_dim 88 | self.use_hand_rgb = use_hand_rgb 89 | 90 | self.act_pred = False 91 | 
self.fwd_pred = False 92 | self.fwd_pred_hand = False 93 | if 'act_pred' in training_target: 94 | self.act_pred = True 95 | if 'fwd_pred' in training_target: 96 | self.fwd_pred = True 97 | if 'fwd_pred_hand' in training_target: 98 | self.fwd_pred_hand = True 99 | 100 | self.without_norm_pixel_loss = without_norm_pixel_loss 101 | 102 | # Embedding functions for states 103 | self.embed_arm_state = torch.nn.Linear(self.state_dim - 1, hidden_size) 104 | self.embed_gripper_state = torch.nn.Linear(2, hidden_size) # one-hot gripper state 105 | self.embed_state = torch.nn.Linear(2*hidden_size, hidden_size) 106 | 107 | # Relative timestep embedding 108 | self.embed_timestep = nn.Embedding(self.sequence_length, hidden_size) 109 | 110 | # Embedding function for languages 111 | self.embed_lang = torch.nn.Linear(self.lang_feat_dim, hidden_size) 112 | 113 | # Embedding functions for images 114 | self.embed_hand_img = torch.nn.Linear(self.img_feat_dim, hidden_size) 115 | self.embed_img = torch.nn.Linear(self.img_feat_dim, hidden_size) 116 | self.embed_hand_patch = torch.nn.Linear(self.patch_feat_dim, hidden_size) 117 | self.embed_patch = torch.nn.Linear(self.patch_feat_dim, hidden_size) 118 | 119 | # Layer norm 120 | self.embed_ln = nn.LayerNorm(hidden_size) 121 | 122 | # Action query token 123 | self.action_queries = nn.Embedding(1, hidden_size) # arm + gripper, weight from bytedance 124 | self.action_chunk_queries = nn.Embedding(chunk_size, hidden_size) 125 | self.action_chunk_queries.weight.data.fill_(0) # finetune it from zero weight 126 | 127 | # Observation query token 128 | self.obs_queries = nn.Embedding(self.n_patch_latents + 1, self.hidden_size) 129 | self.obs_hand_queries = nn.Embedding(self.n_patch_latents + 1, self.hidden_size) 130 | 131 | # Action prediction 132 | self.pred_act_mlps = nn.ModuleList([ 133 | nn.Linear(hidden_size, hidden_size//2), 134 | nn.Linear(hidden_size//2, hidden_size//2)]) 135 | self.pred_arm_act = nn.Linear(hidden_size//2, self.act_dim-1) # arm action 136 | self.pred_gripper_act = nn.Linear(hidden_size//2, 1) # gripper action (binary) 137 | 138 | # Forward prediction 139 | self.decoder_embed = nn.Linear(hidden_size, hidden_size, bias=True) 140 | self.mask_token = nn.Parameter(torch.zeros(1, 1, 1, hidden_size)) 141 | decoder_depth = 2 142 | self.decoder_blocks = nn.ModuleList([ 143 | Block(hidden_size, 16, 4, qkv_bias=True, qk_scale=None, norm_layer=nn.LayerNorm) 144 | for i in range(decoder_depth)]) 145 | self.decoder_norm = nn.LayerNorm(hidden_size) 146 | self.decoder_pred = nn.Linear(hidden_size, self.patch_size**2 * 3, bias=True) # decoder to patch 147 | self.decoder_pos_embed = nn.Parameter(torch.zeros(1, (self.image_size//self.patch_size)**2, 148 | hidden_size), requires_grad=False) # (1, n_patch, h) 149 | decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], (self.image_size//self.patch_size)) 150 | self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0)) 151 | 152 | def forward(self, 153 | rgb, 154 | hand_rgb, 155 | state, 156 | language, 157 | attention_mask 158 | ): 159 | obs_preds = None 160 | obs_hand_preds = None 161 | obs_targets = None 162 | obs_hand_targets = None 163 | arm_action_preds = None 164 | gripper_action_preds = None 165 | 166 | batch_size, sequence_length, c, h, w = rgb.shape 167 | 168 | # Embed state 169 | arm_state = state['arm'] 170 | gripper_state = state['gripper'] 171 | arm_state_embeddings = self.embed_arm_state(arm_state.view(batch_size, sequence_length, self.state_dim-1)) 172 
| gripper_state_embeddings = self.embed_gripper_state(gripper_state) 173 | state_embeddings = torch.cat((arm_state_embeddings, gripper_state_embeddings), dim=2) 174 | state_embeddings = self.embed_state(state_embeddings) # (b, t, h) 175 | 176 | # Embed language 177 | lang_embeddings = self.model_clip.encode_text(language) 178 | lang_embeddings = lang_embeddings / (lang_embeddings.norm(dim=1, keepdim=True) + 1e-6) # normalization 179 | lang_embeddings = self.embed_lang(lang_embeddings.float()) # (b, h) 180 | 181 | # Get obs and patch feature from MAE 182 | obs_embeddings, patch_embeddings = self.model_mae( 183 | rgb.view(batch_size*sequence_length, c, h, w)) # (b * t, img_feat_dim), (b * t, n_patches, patch_feat_dim) 184 | obs_embeddings = obs_embeddings.view(batch_size, sequence_length, -1) # (b, t, img_feat_dim) 185 | if self.use_hand_rgb: 186 | hand_obs_embeddings, hand_patch_embeddings = self.model_mae( 187 | hand_rgb.view(batch_size*sequence_length, c, h, w)) 188 | hand_obs_embeddings = hand_obs_embeddings.view(batch_size, sequence_length, -1) # (b, t, img_feat_dim) 189 | if self.fwd_pred: 190 | p = self.patch_size 191 | h_p = h // p 192 | w_p = w // p 193 | rgb = rgb.reshape(shape=(batch_size, sequence_length, 3, h_p, p, w_p, p)) 194 | obs_targets = rgb.permute(0, 1, 3, 5, 4, 6, 2) 195 | obs_targets = obs_targets.reshape(shape=(batch_size, sequence_length, h_p * w_p, (p**2) * 3)) # (b, t, n_patches, p*p*3) 196 | if not self.without_norm_pixel_loss: 197 | # norm the target 198 | obs_targets = (obs_targets - obs_targets.mean(dim=-1, keepdim=True) 199 | ) / (obs_targets.var(dim=-1, unbiased=True, keepdim=True).sqrt() + 1e-6) 200 | if self.fwd_pred_hand: 201 | hand_rgb = hand_rgb.reshape(shape=(batch_size, sequence_length, 3, h_p, p, w_p, p)) 202 | obs_hand_targets = hand_rgb.permute(0, 1, 3, 5, 4, 6, 2) 203 | obs_hand_targets = obs_hand_targets.reshape(shape=(batch_size, sequence_length, h_p * w_p, (p**2)*3)) # (b, t, n_patches, p*p*3) 204 | if not self.without_norm_pixel_loss: 205 | # norm the target 206 | obs_hand_targets = (obs_hand_targets - obs_hand_targets.mean(dim=-1, keepdim=True) 207 | ) / (obs_hand_targets.var(dim=-1, unbiased=True, keepdim=True).sqrt() + 1e-6) 208 | 209 | # Use resampler to process patch embeddings 210 | patch_embeddings = patch_embeddings.unsqueeze(1) # (b * t, 1, n_patches, patch_feat_dim) 211 | patch_embeddings = self.perceiver_resampler(patch_embeddings) # (b * t, 1, n_patch_latents, patch_feat_dim) 212 | patch_embeddings = patch_embeddings.squeeze(1) # (b * t, n_patch_latents, patch_feat_dim) 213 | patch_embeddings = patch_embeddings.view(batch_size, sequence_length, self.n_patch_latents, self.patch_feat_dim) # (b, t, n_patch_latents, patch_feat_dim) 214 | if self.use_hand_rgb: 215 | hand_patch_embeddings = hand_patch_embeddings.unsqueeze(1) 216 | hand_patch_embeddings = self.perceiver_resampler(hand_patch_embeddings) 217 | hand_patch_embeddings = hand_patch_embeddings.squeeze(1) 218 | hand_patch_embeddings = hand_patch_embeddings.view(batch_size, sequence_length, self.n_patch_latents, self.patch_feat_dim) # (b, t, n_patch_latents, patch_feat_dim) 219 | 220 | # Embed images and patches 221 | obs_embeddings = self.embed_img(obs_embeddings.float()) # (b, t, h) 222 | patch_embeddings = self.embed_patch(patch_embeddings.float()) # (b, t, n_patch_latents, h) 223 | if self.use_hand_rgb: 224 | hand_obs_embeddings = self.embed_hand_img(hand_obs_embeddings.float()) # (b, t, h) 225 | hand_patch_embeddings = self.embed_hand_patch(hand_patch_embeddings.float()) # (b, 
t, n_patch_latents, h) 226 | 227 | # Add timestep embeddings 228 | time_embeddings = self.embed_timestep.weight # (l, h) 229 | lang_embeddings = lang_embeddings.view(batch_size, 1, -1) + time_embeddings 230 | state_embeddings = state_embeddings + time_embeddings 231 | patch_embeddings = patch_embeddings + time_embeddings.view(sequence_length, 1, self.hidden_size) 232 | obs_embeddings = obs_embeddings + time_embeddings 233 | if self.use_hand_rgb: 234 | hand_obs_embeddings = hand_obs_embeddings + time_embeddings 235 | hand_patch_embeddings = hand_patch_embeddings + time_embeddings.view(sequence_length, 1, self.hidden_size) 236 | 237 | # Format sequence: lang, state, patch, obs, hand_patch, hand_obs, [ACT], [OBS], [OBS_HAND] 238 | lang_embeddings = lang_embeddings.view(batch_size, sequence_length, 1, self.hidden_size) 239 | state_embeddings = state_embeddings.view(batch_size, sequence_length, 1, self.hidden_size) 240 | obs_embeddings = obs_embeddings.view(batch_size, sequence_length, 1, self.hidden_size) 241 | stacked_inputs = torch.cat( 242 | (lang_embeddings, 243 | state_embeddings, 244 | patch_embeddings, 245 | obs_embeddings), dim=2) # (b, t, n_tokens, h) 246 | if self.use_hand_rgb: 247 | hand_obs_embeddings = hand_obs_embeddings.view(batch_size, sequence_length, 1, self.hidden_size) 248 | stacked_inputs = torch.cat( 249 | (stacked_inputs, 250 | hand_patch_embeddings, 251 | hand_obs_embeddings), dim=2) # (b, t, n_tokens, h) 252 | if self.act_pred: 253 | action_queries = self.action_queries.weight # (1, h) 254 | action_chunk_queries = self.action_chunk_queries.weight + action_queries # (chunk_size, h) 255 | action_chunk_queries = action_chunk_queries.view(1, 1, self.chunk_size, self.hidden_size).repeat(batch_size, sequence_length, 1, 1) # (b, t, chunk_size, h) 256 | stacked_inputs = torch.cat((stacked_inputs, action_chunk_queries), dim=2) # (b, t, n_tokens, h) 257 | if self.fwd_pred: 258 | obs_queries = self.obs_queries.weight # (n_patch_latents + 1, h) 259 | obs_queries = obs_queries.view(1, 1, self.n_patch_latents + 1, self.hidden_size).repeat(batch_size, sequence_length, 1, 1) # (b, t, n_patch_latents + 1, h) 260 | stacked_inputs = torch.cat((stacked_inputs, obs_queries), dim=2) 261 | if self.fwd_pred_hand: 262 | obs_hand_queries = self.obs_hand_queries.weight # 10, h 263 | obs_hand_queries = obs_hand_queries.view(1, 1, self.n_patch_latents+1, self.hidden_size).repeat(batch_size, sequence_length, 1, 1) 264 | stacked_inputs = torch.cat((stacked_inputs, obs_hand_queries), dim=2) 265 | 266 | # Number of tokens 267 | n_lang_tokens = 1 268 | n_state_tokens = 1 269 | n_patch_tokens = self.n_patch_latents 270 | n_obs_tokens = 1 271 | n_hand_patch_tokens = self.n_patch_latents 272 | n_hand_obs_tokens = 1 273 | n_tokens = n_lang_tokens + n_state_tokens + n_patch_tokens + n_obs_tokens 274 | if self.use_hand_rgb: 275 | n_tokens += n_hand_obs_tokens 276 | n_tokens += n_hand_patch_tokens 277 | n_act_pred_tokens = self.chunk_size 278 | if self.act_pred: 279 | act_query_token_start_i = n_tokens 280 | n_tokens += self.chunk_size 281 | if self.fwd_pred: 282 | obs_query_token_start_i = n_tokens 283 | n_tokens += (n_patch_tokens + n_obs_tokens) 284 | if self.fwd_pred_hand: 285 | obs_hand_query_token_start_i = n_tokens 286 | n_tokens += (n_patch_tokens + n_obs_tokens) 287 | 288 | # Layer norm 289 | stacked_inputs = stacked_inputs.reshape(batch_size, n_tokens * sequence_length, self.hidden_size) 290 | stacked_inputs = self.embed_ln(stacked_inputs) 291 | 292 | # Attention mask 293 | stacked_attention_mask = 
attention_mask.view(batch_size, sequence_length, 1) 294 | if self.use_hand_rgb: 295 | stacked_attention_mask = stacked_attention_mask.repeat( 296 | 1, 1, n_lang_tokens + n_state_tokens + n_hand_patch_tokens + n_hand_obs_tokens + n_patch_tokens + n_obs_tokens) 297 | else: 298 | stacked_attention_mask = stacked_attention_mask.repeat( 299 | 1, 1, n_lang_tokens + n_state_tokens + n_patch_tokens + n_obs_tokens) 300 | if self.act_pred: 301 | act_query_attention_mask = torch.zeros((batch_size, sequence_length, n_act_pred_tokens), dtype=torch.long, device=stacked_inputs.device) 302 | stacked_attention_mask = torch.cat((stacked_attention_mask, act_query_attention_mask), dim=2) 303 | if self.fwd_pred: 304 | obs_query_attention_mask = torch.zeros((batch_size, sequence_length, n_patch_tokens + n_obs_tokens), dtype=torch.long, device=stacked_inputs.device) 305 | stacked_attention_mask = torch.cat((stacked_attention_mask, obs_query_attention_mask), dim=2) 306 | if self.fwd_pred_hand: 307 | obs_hand_query_attention_mask = torch.zeros((batch_size, sequence_length, n_patch_tokens + n_obs_tokens), dtype=torch.long, device=stacked_inputs.device) 308 | stacked_attention_mask = torch.cat((stacked_attention_mask, obs_hand_query_attention_mask), dim=2) 309 | stacked_attention_mask = stacked_attention_mask.reshape(batch_size, n_tokens * sequence_length) 310 | 311 | # GPT forward pass 312 | transformer_outputs = self.transformer( 313 | inputs_embeds=stacked_inputs, 314 | attention_mask=stacked_attention_mask, 315 | ) 316 | x = transformer_outputs['last_hidden_state'] 317 | x = x.reshape(batch_size, sequence_length, n_tokens, self.hidden_size) 318 | 319 | # Action prediction 320 | if self.act_pred: 321 | action_embedding = x[:, :, act_query_token_start_i:(act_query_token_start_i+self.chunk_size)] 322 | for pred_act_mlp in self.pred_act_mlps: 323 | action_embedding = pred_act_mlp(action_embedding) 324 | arm_action_preds = self.pred_arm_act(action_embedding) # (b, t, chunk_size, act_dim - 1) 325 | gripper_action_preds = self.pred_gripper_act(action_embedding) # (b, t, chunk_size, 1) 326 | 327 | # Forward prediction 328 | if self.fwd_pred: 329 | mask_token = self.mask_token # (1, 1, 1, h) 330 | mask_tokens = mask_token.repeat(batch_size, sequence_length, (self.image_size//self.patch_size)**2, 1) # (b, l, n_patches, h) 331 | mask_tokens = mask_tokens + self.decoder_pos_embed.unsqueeze(0).repeat(batch_size, sequence_length, 1, 1) # (b, l, n_patches, h) 332 | 333 | obs_pred = self.decoder_embed(x[:, :, obs_query_token_start_i:(obs_query_token_start_i + self.n_patch_latents + n_obs_tokens)]) # (b, l, n_patch_latents + 1, h) 334 | obs_pred_ = torch.cat([obs_pred, mask_tokens], dim=2) # (b, l, n_patches + n_patch_latens + 1, h) 335 | obs_pred_ = obs_pred_.reshape(-1, obs_pred_.shape[-2], obs_pred_.shape[-1]) # (b * l, n_patches + n_patch_latens + 1, h) 336 | for blk in self.decoder_blocks: 337 | obs_pred_ = blk(obs_pred_) 338 | obs_pred_ = self.decoder_norm(obs_pred_) 339 | obs_preds = self.decoder_pred(obs_pred_) # (b * l, n_patches + n_patch_latens + 1, h) 340 | obs_preds = obs_preds.reshape(batch_size, sequence_length, -1, obs_preds.shape[-1]) # (b, l, n_patches + n_patch_latens + 1, h) 341 | obs_preds = obs_preds[:, :, (self.n_patch_latents+n_obs_tokens):] # (b, l, n_patches, h) 342 | 343 | if self.fwd_pred_hand: 344 | obs_pred_hand = self.decoder_embed(x[:, :, obs_hand_query_token_start_i:(obs_hand_query_token_start_i + self.n_patch_latents + n_obs_tokens)]) 345 | obs_pred_hand_ = torch.cat([obs_pred_hand, 
mask_tokens], dim=2) 346 | obs_pred_hand_ = obs_pred_hand_.reshape(-1, obs_pred_hand_.shape[-2], obs_pred_hand_.shape[-1]) 347 | for blk in self.decoder_blocks: 348 | obs_pred_hand_ = blk(obs_pred_hand_) 349 | obs_pred_hand_ = self.decoder_norm(obs_pred_hand_) 350 | obs_hand_preds = self.decoder_pred(obs_pred_hand_) 351 | obs_hand_preds = obs_hand_preds.reshape(batch_size, sequence_length, -1, obs_hand_preds.shape[-1]) 352 | obs_hand_preds = obs_hand_preds[:, :, (self.n_patch_latents+n_obs_tokens):] 353 | 354 | prediction = { 355 | 'obs_preds': obs_preds, 356 | 'obs_targets': obs_targets, 357 | 'obs_hand_preds': obs_hand_preds, 358 | 'obs_hand_targets': obs_hand_targets, 359 | 'arm_action_preds': arm_action_preds, 360 | 'gripper_action_preds': gripper_action_preds, 361 | } 362 | return prediction -------------------------------------------------------------------------------- /models/transformer_utils.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved. 3 | # Copyright 2023 Bytedance Ltd. and/or its affiliate 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import numpy as np 18 | 19 | def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): 20 | """ 21 | embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D) 22 | """ 23 | if embed_dim % 2 != 0: 24 | raise ValueError("embed_dim must be even") 25 | 26 | omega = np.arange(embed_dim // 2, dtype=float) 27 | omega /= embed_dim / 2.0 28 | omega = 1.0 / 10000**omega # (D/2,) 29 | 30 | pos = pos.reshape(-1) # (M,) 31 | out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product 32 | 33 | emb_sin = np.sin(out) # (M, D/2) 34 | emb_cos = np.cos(out) # (M, D/2) 35 | 36 | emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) 37 | return emb 38 | 39 | 40 | def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): 41 | if embed_dim % 2 != 0: 42 | raise ValueError("embed_dim must be even") 43 | 44 | # use half of dimensions to encode grid_h 45 | emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) 46 | emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) 47 | 48 | emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) 49 | return emb 50 | 51 | 52 | def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False): 53 | """ 54 | Create 2D sin/cos positional embeddings. 55 | 56 | Args: 57 | embed_dim (`int`): 58 | Embedding dimension. 59 | grid_size (`int`): 60 | The grid height and width. 61 | add_cls_token (`bool`, *optional*, defaults to `False`): 62 | Whether or not to add a classification (CLS) token. 
63 | 64 | Returns: 65 | (`torch.FloatTensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the 66 | position embeddings (with or without classification token) 67 | """ 68 | grid_h = np.arange(grid_size, dtype=np.float32) 69 | grid_w = np.arange(grid_size, dtype=np.float32) 70 | grid = np.meshgrid(grid_w, grid_h) # here w goes first 71 | grid = np.stack(grid, axis=0) 72 | 73 | grid = grid.reshape([2, 1, grid_size, grid_size]) 74 | pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) 75 | if add_cls_token: 76 | pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) 77 | return pos_embed 78 | -------------------------------------------------------------------------------- /models/vision_transformer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # Copyright 2023 Bytedance Ltd. and/or its affiliate 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | """ 16 | Mostly copy-paste from timm library. 17 | https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py 18 | """ 19 | import math 20 | from functools import partial 21 | 22 | import torch 23 | import torch.nn as nn 24 | 25 | # from utils import trunc_normal_ 26 | 27 | def _no_grad_trunc_normal_(tensor, mean, std, a, b): 28 | # Cut & paste from PyTorch official master until it's in a few official releases - RW 29 | # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf 30 | def norm_cdf(x): 31 | # Computes standard normal cumulative distribution function 32 | return (1. + math.erf(x / math.sqrt(2.))) / 2. 33 | 34 | if (mean < a - 2 * std) or (mean > b + 2 * std): 35 | warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " 36 | "The distribution of values may be incorrect.", 37 | stacklevel=2) 38 | 39 | with torch.no_grad(): 40 | # Values are generated by using a truncated uniform distribution and 41 | # then using the inverse CDF for the normal distribution. 42 | # Get upper and lower cdf values 43 | l = norm_cdf((a - mean) / std) 44 | u = norm_cdf((b - mean) / std) 45 | 46 | # Uniformly fill tensor with values from [l, u], then translate to 47 | # [2l-1, 2u-1]. 48 | tensor.uniform_(2 * l - 1, 2 * u - 1) 49 | 50 | # Use inverse cdf transform for normal distribution to get truncated 51 | # standard normal 52 | tensor.erfinv_() 53 | 54 | # Transform to proper mean, std 55 | tensor.mul_(std * math.sqrt(2.)) 56 | tensor.add_(mean) 57 | 58 | # Clamp to ensure it's in the proper range 59 | tensor.clamp_(min=a, max=b) 60 | return tensor 61 | 62 | def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): 63 | # type: (Tensor, float, float, float, float) -> Tensor 64 | return _no_grad_trunc_normal_(tensor, mean, std, a, b) 65 | 66 | def drop_path(x, drop_prob: float = 0., training: bool = False): 67 | if drop_prob == 0. 
or not training: 68 | return x 69 | keep_prob = 1 - drop_prob 70 | shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets 71 | random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) 72 | random_tensor.floor_() # binarize 73 | output = x.div(keep_prob) * random_tensor 74 | return output 75 | 76 | 77 | class DropPath(nn.Module): 78 | """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 79 | """ 80 | def __init__(self, drop_prob=None): 81 | super(DropPath, self).__init__() 82 | self.drop_prob = drop_prob 83 | 84 | def forward(self, x): 85 | return drop_path(x, self.drop_prob, self.training) 86 | 87 | 88 | class Mlp(nn.Module): 89 | def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): 90 | super().__init__() 91 | out_features = out_features or in_features 92 | hidden_features = hidden_features or in_features 93 | self.fc1 = nn.Linear(in_features, hidden_features) 94 | self.act = act_layer() 95 | self.fc2 = nn.Linear(hidden_features, out_features) 96 | self.drop = nn.Dropout(drop) 97 | 98 | def forward(self, x): 99 | x = self.fc1(x) 100 | x = self.act(x) 101 | x = self.drop(x) 102 | x = self.fc2(x) 103 | x = self.drop(x) 104 | return x 105 | 106 | 107 | class Attention(nn.Module): 108 | def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): 109 | super().__init__() 110 | self.num_heads = num_heads 111 | head_dim = dim // num_heads 112 | self.scale = qk_scale or head_dim ** -0.5 113 | 114 | self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) 115 | self.attn_drop = nn.Dropout(attn_drop) 116 | self.proj = nn.Linear(dim, dim) 117 | self.proj_drop = nn.Dropout(proj_drop) 118 | 119 | def forward(self, x): 120 | B, N, C = x.shape 121 | qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) 122 | q, k, v = qkv[0], qkv[1], qkv[2] 123 | 124 | attn = (q @ k.transpose(-2, -1)) * self.scale 125 | attn = attn.softmax(dim=-1) 126 | attn = self.attn_drop(attn) 127 | 128 | x = (attn @ v).transpose(1, 2).reshape(B, N, C) 129 | x = self.proj(x) 130 | x = self.proj_drop(x) 131 | return x, attn 132 | 133 | 134 | class Block(nn.Module): 135 | def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., 136 | drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): 137 | super().__init__() 138 | self.norm1 = norm_layer(dim) 139 | self.attn = Attention( 140 | dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) 141 | self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() 142 | self.norm2 = norm_layer(dim) 143 | mlp_hidden_dim = int(dim * mlp_ratio) 144 | self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) 145 | 146 | def forward(self, x, return_attention=False): 147 | y, attn = self.attn(self.norm1(x)) 148 | if return_attention: 149 | return attn 150 | x = x + self.drop_path(y) 151 | x = x + self.drop_path(self.mlp(self.norm2(x))) 152 | return x 153 | 154 | 155 | class PatchEmbed(nn.Module): 156 | """ Image to Patch Embedding 157 | """ 158 | def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): 159 | super().__init__() 160 | num_patches = (img_size // patch_size) * (img_size // patch_size) 161 | self.img_size = img_size 162 | self.patch_size = patch_size 163 | self.num_patches = num_patches 164 | 165 | self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) 166 | 167 | def forward(self, x): 168 | B, C, H, W = x.shape 169 | x = self.proj(x).flatten(2).transpose(1, 2) 170 | return x 171 | 172 | 173 | class VisionTransformer(nn.Module): 174 | """ Vision Transformer """ 175 | def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12, 176 | num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., 177 | drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs): 178 | super().__init__() 179 | self.num_features = self.embed_dim = embed_dim 180 | 181 | self.patch_embed = PatchEmbed( 182 | img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) 183 | num_patches = self.patch_embed.num_patches 184 | 185 | self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) 186 | self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) 187 | self.pos_drop = nn.Dropout(p=drop_rate) 188 | 189 | dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule 190 | self.blocks = nn.ModuleList([ 191 | Block( 192 | dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, 193 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) 194 | for i in range(depth)]) 195 | self.norm = norm_layer(embed_dim) 196 | 197 | # Classifier head 198 | self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() 199 | 200 | trunc_normal_(self.pos_embed, std=.02) 201 | trunc_normal_(self.cls_token, std=.02) 202 | self.apply(self._init_weights) 203 | 204 | def _init_weights(self, m): 205 | if isinstance(m, nn.Linear): 206 | trunc_normal_(m.weight, std=.02) 207 | if isinstance(m, nn.Linear) and m.bias is not None: 208 | nn.init.constant_(m.bias, 0) 209 | elif isinstance(m, nn.LayerNorm): 210 | nn.init.constant_(m.bias, 0) 211 | nn.init.constant_(m.weight, 1.0) 212 | 213 | def interpolate_pos_encoding(self, x, w, h): 214 | npatch = x.shape[1] - 1 215 | N = self.pos_embed.shape[1] - 1 216 | if npatch == N and w == h: 217 | return self.pos_embed 218 | class_pos_embed = self.pos_embed[:, 0] 219 | patch_pos_embed = self.pos_embed[:, 1:] 220 | dim = x.shape[-1] 221 | w0 = w // self.patch_embed.patch_size 222 | h0 = h // self.patch_embed.patch_size 223 | # we add a small number to avoid floating point error in the interpolation 224 | # see discussion at https://github.com/facebookresearch/dino/issues/8 225 | w0, h0 = w0 + 0.1, h0 + 0.1 226 | patch_pos_embed = nn.functional.interpolate( 227 | patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), 
dim).permute(0, 3, 1, 2), 228 | scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), 229 | mode='bicubic', 230 | ) 231 | assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] 232 | patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) 233 | return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) 234 | 235 | def prepare_tokens(self, x): 236 | B, nc, w, h = x.shape 237 | x = self.patch_embed(x) # patch linear embedding 238 | 239 | # add the [CLS] token to the embed patch tokens 240 | cls_tokens = self.cls_token.expand(B, -1, -1) 241 | x = torch.cat((cls_tokens, x), dim=1) 242 | 243 | # add positional encoding to each token 244 | x = x + self.interpolate_pos_encoding(x, w, h) 245 | 246 | return self.pos_drop(x) 247 | 248 | def forward(self, x): 249 | x = self.prepare_tokens(x) 250 | for blk in self.blocks: 251 | x = blk(x) 252 | x = self.norm(x) 253 | return x[:, 0], x[:, 1:] 254 | 255 | def get_last_selfattention(self, x): 256 | x = self.prepare_tokens(x) 257 | for i, blk in enumerate(self.blocks): 258 | if i < len(self.blocks) - 1: 259 | x = blk(x) 260 | else: 261 | # return attention of the last block 262 | return blk(x, return_attention=True) 263 | 264 | def get_intermediate_layers(self, x, n=1): 265 | x = self.prepare_tokens(x) 266 | # we return the output tokens from the `n` last blocks 267 | output = [] 268 | for i, blk in enumerate(self.blocks): 269 | x = blk(x) 270 | if len(self.blocks) - i <= n: 271 | output.append(self.norm(x)) 272 | return output 273 | 274 | 275 | def vit_tiny(patch_size=16, **kwargs): 276 | model = VisionTransformer( 277 | patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, 278 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 279 | return model 280 | 281 | 282 | def vit_small(patch_size=16, **kwargs): 283 | model = VisionTransformer( 284 | patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, 285 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 286 | return model 287 | 288 | 289 | def vit_base(patch_size=16, **kwargs): 290 | model = VisionTransformer( 291 | patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, 292 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 293 | return model 294 | 295 | 296 | class DINOHead(nn.Module): 297 | def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): 298 | super().__init__() 299 | nlayers = max(nlayers, 1) 300 | if nlayers == 1: 301 | self.mlp = nn.Linear(in_dim, bottleneck_dim) 302 | else: 303 | layers = [nn.Linear(in_dim, hidden_dim)] 304 | if use_bn: 305 | layers.append(nn.BatchNorm1d(hidden_dim)) 306 | layers.append(nn.GELU()) 307 | for _ in range(nlayers - 2): 308 | layers.append(nn.Linear(hidden_dim, hidden_dim)) 309 | if use_bn: 310 | layers.append(nn.BatchNorm1d(hidden_dim)) 311 | layers.append(nn.GELU()) 312 | layers.append(nn.Linear(hidden_dim, bottleneck_dim)) 313 | self.mlp = nn.Sequential(*layers) 314 | self.apply(self._init_weights) 315 | self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) 316 | self.last_layer.weight_g.data.fill_(1) 317 | if norm_last_layer: 318 | self.last_layer.weight_g.requires_grad = False 319 | 320 | def _init_weights(self, m): 321 | if isinstance(m, nn.Linear): 322 | trunc_normal_(m.weight, std=.02) 323 | if isinstance(m, nn.Linear) and m.bias is not None: 324 | nn.init.constant_(m.bias, 0) 325 | 326 | 
def forward(self, x): 327 | x = self.mlp(x) 328 | x = nn.functional.normalize(x, dim=-1, p=2) 329 | x = self.last_layer(x) 330 | return x -------------------------------------------------------------------------------- /modify.md: -------------------------------------------------------------------------------- 1 | slightly modify the API 2 | use padding instead of cropping, use the official coefficients 3 | use action chunking, add a test-time chunk size 4 | make evaluation optional 5 | support the newest transformers 6 | run evaluation without the dataset 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch 2 | torchvision 3 | packaging==21.3 4 | setuptools==57.5.0 5 | flamingo_pytorch 6 | tensorboard 7 | matplotlib 8 | einops 9 | lmdb 10 | transformers 11 | accelerate 12 | git+https://github.com/openai/CLIP.git 13 | -------------------------------------------------------------------------------- /utils/calvin_utils.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2021 Oier Mees 4 | # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates 5 | 6 | # Permission is hereby granted, free of charge, to any person obtaining a copy 7 | # of this software and associated documentation files (the "Software"), to deal 8 | # in the Software without restriction, including without limitation the rights 9 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | # copies of the Software, and to permit persons to whom the Software is 11 | # furnished to do so, subject to the following conditions: 12 | 13 | # The above copyright notice and this permission notice shall be included in all 14 | # copies or substantial portions of the Software. 15 | 16 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | # SOFTWARE.
23 | 24 | from collections import Counter 25 | import json 26 | import numpy as np 27 | 28 | 29 | def count_success(results): 30 | count = Counter(results) 31 | step_success = [] 32 | for i in range(1, 6): 33 | n_success = sum(count[j] for j in reversed(range(i, 6))) 34 | sr = n_success / len(results) 35 | step_success.append(sr) 36 | return step_success 37 | 38 | 39 | def print_and_save(results, sequences, eval_result_path, epoch=None): 40 | current_data = {} 41 | print(f"Results for Epoch {epoch}:") 42 | avg_seq_len = np.mean(results) 43 | chain_sr = {i + 1: sr for i, sr in enumerate(count_success(results))} 44 | print(f"Average successful sequence length: {avg_seq_len}") 45 | print("Success rates for i instructions in a row:") 46 | for i, sr in chain_sr.items(): 47 | print(f"{i}: {sr * 100:.1f}%") 48 | 49 | cnt_success = Counter() 50 | cnt_fail = Counter() 51 | 52 | for result, (_, sequence) in zip(results, sequences): 53 | for successful_tasks in sequence[:result]: 54 | cnt_success[successful_tasks] += 1 55 | if result < len(sequence): 56 | failed_task = sequence[result] 57 | cnt_fail[failed_task] += 1 58 | 59 | total = cnt_success + cnt_fail 60 | task_info = {} 61 | for task in total: 62 | task_info[task] = {"success": cnt_success[task], "total": total[task]} 63 | print(f"{task}: {cnt_success[task]} / {total[task]} | SR: {cnt_success[task] / total[task] * 100:.1f}%") 64 | 65 | data = {"avg_seq_len": avg_seq_len, "chain_sr": chain_sr, "task_info": task_info} 66 | 67 | current_data[epoch] = data 68 | 69 | print() 70 | previous_data = {} 71 | json_data = {**previous_data, **current_data} 72 | with open(eval_result_path, "w") as file: 73 | json.dump(json_data, file) 74 | print( 75 | f"Best model: epoch {max(json_data, key=lambda x: json_data[x]['avg_seq_len'])} " 76 | f"with average sequence length of {max(map(lambda x: x['avg_seq_len'], json_data.values()))}" 77 | ) 78 | --------------------------------------------------------------------------------
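A minimal usage sketch for the evaluation utilities above (not a file in the repository; it assumes the repository root is on the Python path): each entry in results is the number of consecutive tasks a rollout completed in its 5-task chain, and count_success returns the success rate for completing at least 1, 2, 3, 4 and 5 tasks in a row.

from utils.calvin_utils import count_success

# Four hypothetical rollouts that completed 5, 3, 0 and 1 consecutive tasks.
results = [5, 3, 0, 1]

# Fraction of rollouts that completed at least 1, 2, 3, 4 and 5 tasks in a row.
print(count_success(results))
# [0.75, 0.5, 0.5, 0.25, 0.25]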