├── .gitignore ├── CEM ├── .gitignore ├── ManiSkill2-Learn │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── configs │ │ ├── brl │ │ │ └── bc │ │ │ │ ├── pointnet.py │ │ │ │ ├── pointnet_soft_body.py │ │ │ │ ├── rgbd.py │ │ │ │ └── rgbd_soft_body.py │ │ ├── mfrl │ │ │ ├── dapg │ │ │ │ ├── maniskill2_pn.py │ │ │ │ ├── maniskill2_pn_frameminers.py │ │ │ │ └── maniskill2_rgbd.py │ │ │ ├── gail │ │ │ │ ├── maniskill2_pn.py │ │ │ │ └── maniskill2_pn_frameminers.py │ │ │ ├── ppo │ │ │ │ ├── maniskill2_pn.py │ │ │ │ ├── maniskill2_pn_frameminers.py │ │ │ │ ├── maniskill2_rgbd.py │ │ │ │ └── maniskill2_sparseconv.py │ │ │ └── sac │ │ │ │ ├── maniskill2_pn.py │ │ │ │ ├── maniskill2_rgbd.py │ │ │ │ └── maniskill2_state.py │ │ └── mpc │ │ │ ├── _base_ │ │ │ └── cem.py │ │ │ └── cem │ │ │ ├── maniskill2_DigitalTwin.py │ │ │ ├── maniskill2_DigitalTwin_ablation.py │ │ │ └── maniskill2_DigitalTwin_tool.py │ ├── maniskill2_learn │ │ ├── __init__.py │ │ ├── apis │ │ │ ├── __init__.py │ │ │ ├── render_traj.py │ │ │ ├── run_rl.py │ │ │ └── train_rl.py │ │ ├── env │ │ │ ├── __init__.py │ │ │ ├── action_space_utils.py │ │ │ ├── builder.py │ │ │ ├── env_utils.py │ │ │ ├── evaluation.py │ │ │ ├── observation_process.py │ │ │ ├── replay_buffer.py │ │ │ ├── rollout.py │ │ │ ├── sampling_strategy.py │ │ │ ├── vec_env.py │ │ │ └── wrappers.py │ │ ├── methods │ │ │ ├── __init__.py │ │ │ ├── brl │ │ │ │ ├── __init__.py │ │ │ │ └── bc.py │ │ │ ├── builder.py │ │ │ ├── mfrl │ │ │ │ ├── __init__.py │ │ │ │ ├── gail.py │ │ │ │ ├── ppo.py │ │ │ │ └── sac.py │ │ │ └── mpc │ │ │ │ ├── __init__.py │ │ │ │ └── cem.py │ │ ├── networks │ │ │ ├── __init__.py │ │ │ ├── applications │ │ │ │ ├── __init__.py │ │ │ │ └── actor_critic.py │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── mlp.py │ │ │ │ ├── pointnet.py │ │ │ │ ├── resnet.py │ │ │ │ ├── rl_cnn.py │ │ │ │ ├── sp_resnet.py │ │ │ │ ├── transformer.py │ │ │ │ └── visuomotor.py │ │ │ ├── builder.py │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ 
│ │ ├── activation.py │ │ │ │ ├── attention.py │ │ │ │ ├── block_utils.py │ │ │ │ ├── cnn_modules │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── resnet_utils.py │ │ │ │ ├── conv.py │ │ │ │ ├── linear.py │ │ │ │ ├── norm.py │ │ │ │ ├── padding.py │ │ │ │ ├── pct_modules │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── pointnet_util.py │ │ │ │ ├── plugin.py │ │ │ │ ├── pooling.py │ │ │ │ ├── spconv_modules │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── resnet_utils.py │ │ │ │ │ └── spconv_utils.py │ │ │ │ ├── test_norm.py │ │ │ │ └── weight_init.py │ │ │ ├── ops │ │ │ │ ├── __init__.py │ │ │ │ └── ops_3d │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── ball_query │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── ball_query.py │ │ │ │ │ └── src │ │ │ │ │ │ ├── ball_query.cpp │ │ │ │ │ │ └── ball_query_cuda.cu │ │ │ │ │ └── pcd_process │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── pcd_process.cpp │ │ │ │ │ ├── pcd_process.py │ │ │ │ │ └── pcd_process_cuda.cu │ │ │ ├── regression_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── deterministic.py │ │ │ │ ├── gaussian.py │ │ │ │ └── regression_base.py │ │ │ └── utils.py │ │ ├── schedulers │ │ │ ├── __init__.py │ │ │ ├── custom_scheduler.py │ │ │ └── lr_scheduler.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── data │ │ │ │ ├── __init__.py │ │ │ │ ├── array_ops.py │ │ │ │ ├── compression.py │ │ │ │ ├── converter.py │ │ │ │ ├── dict_array.py │ │ │ │ ├── dict_utils.py │ │ │ │ ├── filtering.py │ │ │ │ ├── misc.py │ │ │ │ ├── seq_utils.py │ │ │ │ ├── string_utils.py │ │ │ │ ├── type_utils.py │ │ │ │ └── wrappers.py │ │ │ ├── file │ │ │ │ ├── __init__.py │ │ │ │ ├── cache_utils.py │ │ │ │ ├── file_client.py │ │ │ │ ├── hash_utils.py │ │ │ │ ├── hdf5_utils.py │ │ │ │ ├── lmdb_utils.py │ │ │ │ ├── pandas_utils.py │ │ │ │ ├── record_utils.py │ │ │ │ ├── serialization │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── handlers │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base.py │ │ │ │ │ │ ├── csv_handler.py │ │ │ │ │ │ ├── json_handler.py │ │ │ │ │ │ ├── pickle_handler.py │ │ │ │ │ │ ├── 
txt_handler.py │ │ │ │ │ │ └── yaml_handler.py │ │ │ │ │ ├── io.py │ │ │ │ │ └── utils.py │ │ │ │ └── zip_utils.py │ │ │ ├── image │ │ │ │ ├── __init__.py │ │ │ │ ├── colorspace.py │ │ │ │ ├── geometric.py │ │ │ │ ├── io.py │ │ │ │ ├── misc.py │ │ │ │ ├── photometric.py │ │ │ │ └── video_utils.py │ │ │ ├── lib3d │ │ │ │ ├── __init__.py │ │ │ │ ├── mani_skill2_contrib.py │ │ │ │ ├── o3d_utils.py │ │ │ │ ├── trimesh_utils.py │ │ │ │ └── utils.py │ │ │ ├── math │ │ │ │ ├── __init__.py │ │ │ │ ├── counting.py │ │ │ │ ├── running_stats.py │ │ │ │ ├── split_array.py │ │ │ │ └── trunc_normal.py │ │ │ ├── meta │ │ │ │ ├── __init__.py │ │ │ │ ├── collect_env.py │ │ │ │ ├── config.py │ │ │ │ ├── env_var.py │ │ │ │ ├── logger.py │ │ │ │ ├── magic_utils.py │ │ │ │ ├── module_utils.py │ │ │ │ ├── network.py │ │ │ │ ├── parallel_runner.py │ │ │ │ ├── path_utils.py │ │ │ │ ├── process_utils.py │ │ │ │ ├── progressbar.py │ │ │ │ ├── random_utils.py │ │ │ │ ├── registry.py │ │ │ │ ├── timer.py │ │ │ │ └── version_utils.py │ │ │ ├── torch │ │ │ │ ├── __init__.py │ │ │ │ ├── checkpoint_utils.py │ │ │ │ ├── cuda_utils.py │ │ │ │ ├── distributed_utils.py │ │ │ │ ├── distributions.py │ │ │ │ ├── flops_counter.py │ │ │ │ ├── freezer.py │ │ │ │ ├── logger │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── tensorboard_logger.py │ │ │ │ │ ├── tensorboard_utils.py │ │ │ │ │ └── wandb_logger.py │ │ │ │ ├── misc.py │ │ │ │ ├── module_utils.py │ │ │ │ ├── ops.py │ │ │ │ ├── optimizer_utils.py │ │ │ │ └── running_stats.py │ │ │ └── visualization │ │ │ │ ├── __init__.py │ │ │ │ └── o3d_utils.py │ │ └── version.py │ ├── requirements.txt │ ├── scripts │ │ ├── example_demo_conversion │ │ │ ├── general_rigid_body_multi_object_envs.sh │ │ │ ├── general_rigid_body_single_object_envs.sh │ │ │ ├── general_soft_body_envs.sh │ │ │ ├── maniskill1.sh │ │ │ ├── pick_place_multi_object_envs.sh │ │ │ └── pick_place_single_object_envs.sh │ │ └── example_training │ │ │ ├── pretrained_model │ │ │ ├── 
bc_soft_body_pointcloud.sh │ │ │ ├── bc_soft_body_rgbd.sh │ │ │ ├── dapg_pickcube_pointcloud.sh │ │ │ ├── dapg_pickcube_rgbd.sh │ │ │ ├── dapg_picksingleycb_pointcloud.sh │ │ │ ├── dapg_picksingleycb_rgbd.sh │ │ │ ├── dapg_stackcube_pointcloud.sh │ │ │ └── dapg_stackcube_rgbd.sh │ │ │ └── scratch_pointcloud_template │ │ │ ├── run_bc.sh │ │ │ ├── run_dapg.sh │ │ │ ├── run_gail.sh │ │ │ ├── run_ppo.sh │ │ │ └── run_sac.sh │ ├── setup.py │ ├── submission_example │ │ ├── Dockerfile │ │ └── user_solution.py │ └── tools │ │ ├── convert_state.py │ │ ├── merge_h5.py │ │ ├── merge_trajectory.py │ │ └── shuffle_demo.py ├── ManiSkill2 │ ├── mani_skill2 │ │ ├── __init__.py │ │ ├── agents │ │ │ ├── active_light_sensor.py │ │ │ ├── base_agent.py │ │ │ ├── camera.py │ │ │ ├── control_utils.py │ │ │ ├── controllers │ │ │ │ ├── __init__.py │ │ │ │ ├── arm_imp_ee_pos.py │ │ │ │ ├── arm_imp_ee_pos_vel.py │ │ │ │ ├── arm_imp_ee_vel.py │ │ │ │ ├── arm_imp_joint_pos.py │ │ │ │ ├── arm_imp_joint_pos_vel.py │ │ │ │ ├── arm_imp_joint_vel.py │ │ │ │ ├── arm_pd_ee_delta_position.py │ │ │ │ ├── base_controller.py │ │ │ │ ├── combined_controller.py │ │ │ │ ├── general_pd_ee_twist.py │ │ │ │ ├── general_pd_joint_pos.py │ │ │ │ ├── general_pd_joint_pos_vel.py │ │ │ │ ├── general_pd_joint_vel.py │ │ │ │ ├── gripper_pd_joint_pos_mimic.py │ │ │ │ ├── gripper_pd_joint_vel_mimic.py │ │ │ │ ├── mobile_pd_joint_vel_decoupled.py │ │ │ │ └── mobile_pd_joint_vel_diff.py │ │ │ ├── fixed_xmate3_robotiq.py │ │ │ └── fixed_xmate3_robotiq_with_tool.py │ │ ├── assets │ │ │ ├── config_files │ │ │ │ ├── .gitignore │ │ │ │ └── agents │ │ │ │ │ ├── fixed_xmate3_robotiq.yml │ │ │ │ │ ├── fixed_xmate3_robotiq_low_res.yml │ │ │ │ │ ├── fixed_xmate3_robotiq_sensors.yml │ │ │ │ │ ├── fixed_xmate3_robotiq_sensors_low_res.yml │ │ │ │ │ └── fixed_xmate3_robotiq_tool.yml │ │ │ ├── descriptions │ │ │ │ ├── .gitignore │ │ │ │ ├── fixed_xmate3_robotiq.srdf │ │ │ │ ├── fixed_xmate3_robotiq.urdf │ │ │ │ ├── 
fixed_xmate3_robotiq_with_tool.urdf │ │ │ │ ├── tool.STL │ │ │ │ ├── tool.obj │ │ │ │ ├── tool_decomp.obj │ │ │ │ └── xmate3_robot.py │ │ │ └── digital_twins │ │ │ │ └── .gitignore │ │ ├── envs │ │ │ ├── CEM_ablation_env.py │ │ │ ├── CEM_env.py │ │ │ ├── CEM_tool_env.py │ │ │ ├── fixed_xmate3_env.py │ │ │ ├── fixed_xmate3_tool_env.py │ │ │ └── sapien_env.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── articulation_utils.py │ │ │ ├── common.py │ │ │ ├── contrib.py │ │ │ ├── cv_utils.py │ │ │ ├── data_utils.py │ │ │ ├── geometry.py │ │ │ ├── io.py │ │ │ ├── misc.py │ │ │ ├── o3d_utils.py │ │ │ ├── sapien_utils.py │ │ │ ├── string_utils.py │ │ │ ├── tmu.py │ │ │ ├── trimesh_utils.py │ │ │ ├── urdf │ │ │ ├── __init__.py │ │ │ ├── urdf.py │ │ │ ├── urdfParser.py │ │ │ └── urdfpy_utils.py │ │ │ ├── visualization │ │ │ ├── __init__.py │ │ │ ├── cv2.py │ │ │ ├── jupyter.py │ │ │ └── misc.py │ │ │ └── wrappers.py │ └── setup.py ├── test_CEM.py └── trajectory_player.py ├── README.md ├── ditto ├── .gitignore ├── LICENSE ├── README.md ├── assets │ ├── pipeline.png │ └── stats │ │ ├── drawer_train.txt │ │ ├── drawer_val.txt │ │ ├── faucet_train.txt │ │ ├── faucet_val.txt │ │ ├── ins_cnt_drawer.txt │ │ ├── ins_cnt_faucet.txt │ │ ├── ins_cnt_laptop.txt │ │ ├── laptop_train.txt │ │ └── laptop_val.txt ├── conda_env_gpu.yaml ├── configs │ ├── callbacks │ │ ├── default.yaml │ │ ├── none.yaml │ │ └── wandb.yaml │ ├── config.yaml │ ├── datamodule │ │ ├── default_datamodule.yaml │ │ ├── sapien_datamodule.yaml │ │ ├── sapien_datamodule_ablation.yaml │ │ ├── sapien_different_angle_datamodule.yaml │ │ ├── sapien_different_angle_stereo_depth_datamodule.yaml │ │ ├── sapien_different_pose_angle_stereo_depth_datamodule.yaml │ │ ├── sapien_single_datamodule.yaml │ │ └── sapien_single_stereo_depth_datamodule.yaml │ ├── experiment │ │ ├── Ditto_s2m.yaml │ │ ├── Ditto_syn.yaml │ │ ├── Sapien_single_demo.yaml │ │ ├── all_stereo.yaml │ │ ├── all_stereo_ablation.yaml │ │ ├── drawer.yaml │ │ ├── 
drawer_stereo_ablation.yaml │ │ ├── faucet.yaml │ │ ├── faucet_stereo.yaml │ │ ├── faucet_stereo_ablation.yaml │ │ ├── laptop.yaml │ │ ├── laptop_stereo_ablation.yaml │ │ ├── sapien.yaml │ │ ├── sapien_different_angle.yaml │ │ ├── sapien_different_angle_stereo_depth.yaml │ │ ├── sapien_different_pose_angle_stereo_depth.yaml │ │ └── sapien_single_stereo_depth.yaml │ ├── hparams_search │ │ └── mnist_optuna.yaml │ ├── hydra │ │ └── default.yaml │ ├── logger │ │ ├── comet.yaml │ │ ├── csv.yaml │ │ ├── many_loggers.yaml │ │ ├── mlflow.yaml │ │ ├── neptune.yaml │ │ ├── tensorboard.yaml │ │ └── wandb.yaml │ ├── model │ │ ├── geo_art_model_v0.yaml │ │ └── network │ │ │ └── geo_art_net_v0.yaml │ └── trainer │ │ ├── ddp.yaml │ │ ├── debug.yaml │ │ ├── default.yaml │ │ └── minimal.yaml ├── data │ └── data_checker.py ├── data_generation │ ├── active_light_sensor.py │ ├── camera.py │ ├── collect_data.py │ ├── d415-pattern-sq.png │ ├── datagen.py │ ├── env.py │ ├── gen_Ditto_tsdf.py │ ├── gen_offline_data.py │ ├── gen_part_mesh.py │ ├── mesh_fix.py │ ├── sensor.py │ └── utils.py ├── real_experiment │ ├── .gitignore │ ├── demo_depth_map.ipynb │ ├── generate_digital_twin.py │ ├── real_cabinet_different_pose_angle.ipynb │ ├── real_test1.py │ └── test.py ├── requirements.txt ├── run.py ├── run_test.py ├── scripts │ ├── convonet_setup.py │ ├── run_collect_data.sh │ └── run_gen_offline_data.sh ├── src │ ├── __init__.py │ ├── callbacks │ │ ├── __init__.py │ │ ├── misc_callbacks.py │ │ └── wandb_callbacks.py │ ├── datamodules │ │ ├── __init__.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── geo_art_dataset_ablation.py │ │ │ ├── geo_art_dataset_ablation_test.py │ │ │ ├── geo_art_dataset_sapien.py │ │ │ ├── geo_art_dataset_sapien_test.py │ │ │ ├── geo_art_dataset_v0.py │ │ │ └── geo_art_dataset_v1.py │ │ └── default_datamodule.py │ ├── models │ │ ├── __init__.py │ │ ├── geo_art_model_v0.py │ │ ├── geo_art_model_v0_pri.py │ │ └── modules │ │ │ ├── Transformer.py │ │ │ ├── __init__.py │ 
│ │ ├── losses.py │ │ │ └── losses_dense_joint.py │ ├── test.py │ ├── third_party │ │ └── ConvONets │ │ │ ├── __init__.py │ │ │ ├── common.py │ │ │ ├── config.py │ │ │ ├── conv_onet │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── generation_two_stage.py │ │ │ └── models │ │ │ │ ├── __init__.py │ │ │ │ └── decoder.py │ │ │ ├── encoder │ │ │ ├── __init__.py │ │ │ ├── encoder.py │ │ │ ├── pointnetpp_attn.py │ │ │ ├── pointnetpp_corr.py │ │ │ ├── pointnetpp_utils.py │ │ │ ├── unet.py │ │ │ └── unet3d.py │ │ │ ├── eval.py │ │ │ ├── layers.py │ │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── binvox_rw.py │ │ │ ├── icp.py │ │ │ ├── io.py │ │ │ ├── libmcubes │ │ │ ├── .gitignore │ │ │ ├── LICENSE │ │ │ ├── README.rst │ │ │ ├── __init__.py │ │ │ ├── exporter.py │ │ │ ├── marchingcubes.cpp │ │ │ ├── marchingcubes.h │ │ │ ├── mcubes.cpp │ │ │ ├── mcubes.cpython-38-x86_64-linux-gnu.so │ │ │ ├── mcubes.pyx │ │ │ ├── pyarray_symbol.h │ │ │ ├── pyarraymodule.h │ │ │ ├── pywrapper.cpp │ │ │ └── pywrapper.h │ │ │ ├── libmesh │ │ │ ├── .gitignore │ │ │ ├── __init__.py │ │ │ ├── inside_mesh.py │ │ │ ├── triangle_hash.cpython-38-x86_64-linux-gnu.so │ │ │ └── triangle_hash.pyx │ │ │ ├── libmise │ │ │ ├── .gitignore │ │ │ ├── __init__.py │ │ │ ├── mise.cpython-38-x86_64-linux-gnu.so │ │ │ ├── mise.pyx │ │ │ └── test.py │ │ │ ├── libsimplify │ │ │ ├── Simplify.h │ │ │ ├── __init__.py │ │ │ ├── simplify_mesh.cpp │ │ │ ├── simplify_mesh.cpython-38-x86_64-linux-gnu.so │ │ │ ├── simplify_mesh.pyx │ │ │ └── test.py │ │ │ ├── libvoxelize │ │ │ ├── .gitignore │ │ │ ├── __init__.py │ │ │ ├── tribox2.h │ │ │ ├── voxelize.cpython-38-x86_64-linux-gnu.so │ │ │ └── voxelize.pyx │ │ │ ├── mesh.py │ │ │ ├── visualize.py │ │ │ └── voxels.py │ ├── train.py │ └── utils │ │ ├── __init__.py │ │ ├── chamfer.py │ │ ├── joint_estimation.py │ │ ├── misc.py │ │ ├── transform.py │ │ ├── utils.py │ │ └── visual.py ├── template.urdf ├── template_pri.urdf └── utils3d │ ├── .gitignore │ ├── .pre-commit-config.yaml 
│ ├── LICENSE │ ├── README.md │ ├── data │ ├── Squirrel.mtl │ ├── Squirrel_texture.png │ ├── Squirrel_visual.obj │ ├── pointcloud_color.pcd │ └── rgbd │ │ ├── r_0.png │ │ ├── r_0_depth_0019.png │ │ ├── r_1.png │ │ ├── r_1_depth_0019.png │ │ ├── r_2.png │ │ ├── r_2_depth_0019.png │ │ ├── r_3.png │ │ ├── r_3_depth_0019.png │ │ ├── r_4.png │ │ ├── r_4_depth_0019.png │ │ ├── r_5.png │ │ ├── r_5_depth_0019.png │ │ ├── r_6.png │ │ ├── r_6_depth_0019.png │ │ ├── r_7.png │ │ ├── r_7_depth_0019.png │ │ ├── r_8.png │ │ ├── r_8_depth_0019.png │ │ ├── r_9.png │ │ ├── r_9_depth_0019.png │ │ └── transforms.json │ ├── examples │ ├── multiview_rgbd_fusion.py │ ├── pointcloud_io.py │ ├── render_mesh_nvisii.py │ ├── render_mesh_pyrender.py │ └── visualize_3d_pointcloud.py │ ├── requirements-extra.txt │ ├── requirements.txt │ ├── setup.cfg │ ├── setup.py │ ├── tests │ └── unit │ │ └── test_io.py │ └── utils3d │ ├── __init__.py │ ├── mesh │ ├── __init__.py │ ├── io.py │ └── utils.py │ ├── pointcloud │ ├── __init__.py │ ├── io.py │ ├── utils.py │ └── visualization.py │ ├── render │ ├── __init__.py │ ├── nvisii.py │ └── pyrender.py │ ├── rgbd │ ├── __init__.py │ ├── fusion.py │ ├── io.py │ └── utils.py │ └── utils │ ├── __init__.py │ ├── transform.py │ └── utils.py ├── requirements.txt └── where2act ├── README.md ├── code ├── .gitignore ├── README.md ├── blender_utils │ ├── camera.blend │ ├── cube.obj │ ├── quaternion.py │ ├── render_blender.py │ └── render_using_blender.py ├── camera.py ├── checkcollect_data.py ├── collect_data.py ├── colors.py ├── data.py ├── datagen.py ├── env.py ├── gen_html_hierachy_local.py ├── gen_offline_data.py ├── logs │ ├── .gitignore │ └── README.md ├── models │ ├── model_3d.py │ ├── model_3d_critic.py │ ├── model_3d_critic_legacy.py │ └── model_3d_legacy.py ├── pyquaternion │ ├── .gitignore │ ├── README.md │ ├── __init__.py │ └── quaternion.py ├── recollect_data.py ├── replay_data.py ├── requirements.txt ├── results │ ├── .gitignore │ └── README.md ├── 
robots │ ├── franka_description │ │ └── meshes │ │ │ ├── collision │ │ │ ├── finger.stl │ │ │ ├── finger.stl.convex.stl │ │ │ ├── hand.stl │ │ │ ├── hand.stl.convex.stl │ │ │ ├── link0.stl │ │ │ ├── link0.stl.convex.stl │ │ │ ├── link1.stl │ │ │ ├── link1.stl.convex.stl │ │ │ ├── link2.stl │ │ │ ├── link2.stl.convex.stl │ │ │ ├── link3.stl │ │ │ ├── link3.stl.convex.stl │ │ │ ├── link4.stl │ │ │ ├── link4.stl.convex.stl │ │ │ ├── link5.stl │ │ │ ├── link5.stl.convex.stl │ │ │ ├── link6.stl │ │ │ ├── link6.stl.convex.stl │ │ │ ├── link7.stl │ │ │ └── link7.stl.convex.stl │ │ │ └── visual │ │ │ ├── finger.dae │ │ │ ├── hand.dae │ │ │ ├── link0.dae │ │ │ ├── link1.dae │ │ │ ├── link2.dae │ │ │ ├── link3.dae │ │ │ ├── link4.dae │ │ │ ├── link5.dae │ │ │ ├── link6.dae │ │ │ └── link7.dae │ ├── panda.urdf │ ├── panda_gripper.urdf │ ├── panda_robot.py │ ├── robotiq_description │ │ ├── collision │ │ │ ├── robotiq_arg2f_140_inner_finger.stl │ │ │ ├── robotiq_arg2f_140_inner_knuckle.stl │ │ │ ├── robotiq_arg2f_140_outer_finger.stl │ │ │ ├── robotiq_arg2f_140_outer_knuckle.stl │ │ │ ├── robotiq_arg2f_base_link.stl │ │ │ ├── robotiq_arg2f_base_link.stl.convex.stl │ │ │ └── robotiq_arg2f_coupling.stl │ │ └── visual │ │ │ ├── Robotiq Arg2F Coupling.dae │ │ │ ├── robotiq_arg2f_140_inner_finger.dae │ │ │ ├── robotiq_arg2f_140_inner_finger.stl │ │ │ ├── robotiq_arg2f_140_inner_knuckle.dae │ │ │ ├── robotiq_arg2f_140_inner_knuckle.stl │ │ │ ├── robotiq_arg2f_140_outer_finger.dae │ │ │ ├── robotiq_arg2f_140_outer_finger.stl │ │ │ ├── robotiq_arg2f_140_outer_knuckle.dae │ │ │ ├── robotiq_arg2f_140_outer_knuckle.stl │ │ │ ├── robotiq_arg2f_base_link.dae │ │ │ ├── robotiq_arg2f_base_link.stl │ │ │ └── robotiq_arg2f_coupling.stl │ ├── robotiq_gripper.urdf │ └── robotiq_robot.py ├── scripts │ ├── .gitignore │ ├── README.md │ ├── history │ │ ├── drawer_35 │ │ │ ├── run_gen_offline_data.sh │ │ │ ├── run_test_real_visu_action_heatmap.sh │ │ │ ├── run_testing.sh │ │ │ ├── run_train_3d.sh │ │ 
│ ├── run_train_3d_critic.sh │ │ │ ├── run_visu_action_heatmap_proposals.sh │ │ │ └── run_visu_critic_heatmap.sh │ │ ├── drawer_45677 │ │ │ ├── run_gen_offline_data.sh │ │ │ ├── run_train_3d.sh │ │ │ ├── run_train_3d_critic.sh │ │ │ ├── run_visu_action_heatmap_proposals.sh │ │ │ └── run_visu_critic_heatmap.sh │ │ ├── laptop_10211_pushing_1500&100 │ │ │ ├── run_gen_offline_data.sh │ │ │ ├── run_train_3d.sh │ │ │ ├── run_train_3d_critic.sh │ │ │ ├── run_visu_action_heatmap_proposals.sh │ │ │ └── run_visu_critic_heatmap.sh │ │ └── laptop_7 │ │ │ ├── run_gen_offline_data.sh │ │ │ ├── run_test_real_visu_action_heatmap.sh │ │ │ ├── run_testing.sh │ │ │ ├── run_train_3d.sh │ │ │ ├── run_train_3d_critic.sh │ │ │ ├── run_visu_action_heatmap_proposals.sh │ │ │ └── run_visu_critic_heatmap.sh │ ├── original │ │ ├── run_gen_offline_data.sh │ │ ├── run_train_3d.sh │ │ ├── run_train_3d_critic.sh │ │ ├── run_visu_action_heatmap_proposals.sh │ │ └── run_visu_critic_heatmap.sh │ ├── run_gen_offline_data.sh │ ├── run_test_real_visu_action_heatmap.sh │ ├── run_testing.sh │ ├── run_train_3d.sh │ ├── run_train_3d_critic.sh │ ├── run_visu_action_heatmap_proposals.sh │ └── run_visu_critic_heatmap.sh ├── test_real_visu_action_heatmap.py ├── testing.py ├── train_3d.py ├── train_3d_critic.py ├── training_tips.md ├── utils.py ├── visu_action_heatmap_proposals.py └── visu_critic_heatmap.py ├── data ├── .gitignore └── README.md ├── stats ├── README.md ├── drawer_35.txt ├── faucet_8.txt ├── ins_cnt_drawer_35.txt ├── ins_cnt_faucet_8.txt ├── ins_cnt_laptop_7.txt ├── laptop_7.txt └── original │ ├── all_15cats.txt │ ├── data_cabinet_41003.txt │ ├── ins_cnt_15cats.txt │ ├── ins_cnt_5cats.txt │ ├── test_5cats_data_list.txt │ ├── train_10cats_test_data_list.txt │ └── train_10cats_train_data_list.txt └── urdf ├── .gitignore └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | wheels 2 | __pycache__ 3 | .vscode 4 | imgui.ini 5 | *.egg-info 
-------------------------------------------------------------------------------- /CEM/.gitignore: -------------------------------------------------------------------------------- 1 | wheels 2 | __pycache__ 3 | .vscode 4 | build/ 5 | imgui.ini 6 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/configs/brl/bc/pointnet.py: -------------------------------------------------------------------------------- 1 | agent_cfg = dict( 2 | type="BC", 3 | batch_size=256, 4 | actor_cfg=dict( 5 | type="ContinuousActor", 6 | head_cfg=dict( 7 | type="TanhHead", 8 | noise_std=1e-5, 9 | ), 10 | nn_cfg=dict( 11 | type="Visuomotor", 12 | visual_nn_cfg=dict(type="PointNet", feat_dim="pcd_all_channel", mlp_spec=[64, 128, 512], feature_transform=[]), 13 | mlp_cfg=dict( 14 | type="LinearMLP", 15 | norm_cfg=None, 16 | mlp_spec=["512 + agent_shape", 256, "action_shape"], 17 | inactivated_output=True, 18 | zero_init_output=True, 19 | ), 20 | ), 21 | optim_cfg=dict(type="Adam", lr=3e-4), 22 | ), 23 | ) 24 | 25 | env_cfg = dict( 26 | type="gym", 27 | env_name="PickCube-v0", 28 | unwrapped=False, 29 | ) 30 | 31 | 32 | replay_cfg = dict( 33 | type="ReplayMemory", 34 | capacity=-1, 35 | num_samples=-1, 36 | keys=["obs", "actions", "dones", "episode_dones"], 37 | buffer_filenames=[ 38 | "SOME_DEMO_FILE", 39 | ], 40 | ) 41 | 42 | train_cfg = dict( 43 | on_policy=False, 44 | total_steps=50000, 45 | warm_steps=0, 46 | n_steps=0, 47 | n_updates=500, 48 | n_eval=50000, 49 | n_checkpoint=50000, 50 | ) 51 | 52 | eval_cfg = dict( 53 | type="Evaluation", 54 | num=10, 55 | num_procs=1, 56 | use_hidden_state=False, 57 | save_traj=False, 58 | save_video=True, 59 | use_log=False, 60 | ) 61 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/configs/brl/bc/rgbd.py: -------------------------------------------------------------------------------- 1 | agent_cfg = dict( 2 | type="BC", 3 | batch_size=256, 4 | 
actor_cfg=dict( 5 | type="ContinuousActor", 6 | head_cfg=dict( 7 | type="TanhHead", 8 | noise_std=1e-5, 9 | ), 10 | nn_cfg=dict( 11 | type="Visuomotor", 12 | visual_nn_cfg=dict(type="IMPALA", in_channel="image_channels", num_pixels="num_pixels", out_feature_size=384), 13 | mlp_cfg=dict( 14 | type="LinearMLP", norm_cfg=None, mlp_spec=["384 + agent_shape", 256, 128, "action_shape"], bias=True, inactivated_output=True 15 | ), 16 | ), 17 | optim_cfg=dict(type="Adam", lr=3e-4), 18 | ), 19 | ) 20 | 21 | env_cfg = dict( 22 | type="gym", 23 | env_name="PickCube-v0", 24 | unwrapped=False, 25 | ) 26 | 27 | 28 | replay_cfg = dict( 29 | type="ReplayMemory", 30 | capacity=-1, 31 | num_samples=-1, 32 | keys=["obs", "actions", "dones", "episode_dones"], 33 | buffer_filenames=[ 34 | "SOME_DEMO_FILE", 35 | ], 36 | ) 37 | 38 | train_cfg = dict( 39 | on_policy=False, 40 | total_steps=50000, 41 | warm_steps=0, 42 | n_steps=0, 43 | n_updates=500, 44 | n_eval=50000, 45 | n_checkpoint=50000, 46 | ) 47 | 48 | eval_cfg = dict( 49 | type="Evaluation", 50 | num=10, 51 | num_procs=1, 52 | use_hidden_state=False, 53 | save_traj=False, 54 | save_video=True, 55 | use_log=False, 56 | ) 57 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/configs/brl/bc/rgbd_soft_body.py: -------------------------------------------------------------------------------- 1 | agent_cfg = dict( 2 | type="BC", 3 | batch_size=256, 4 | actor_cfg=dict( 5 | type="ContinuousActor", 6 | head_cfg=dict( 7 | type="GaussianHead", 8 | init_log_std=-0.5, 9 | clip_return=True, 10 | predict_std=False 11 | ), 12 | nn_cfg=dict( 13 | type="Visuomotor", 14 | visual_nn_cfg=dict(type="IMPALA", in_channel="image_channels", num_pixels="num_pixels", out_feature_size=512), 15 | mlp_cfg=dict( 16 | type="LinearMLP", norm_cfg=None, mlp_spec=["512 + agent_shape", 256, 128, "action_shape"], bias=True, inactivated_output=True 17 | ), 18 | ), 19 | optim_cfg=dict(type="Adam", lr=3e-4), 20 | ), 21 
| ) 22 | 23 | env_cfg = dict( 24 | type="gym", 25 | env_name="Fill-v0", 26 | unwrapped=False, 27 | ) 28 | 29 | 30 | replay_cfg = dict( 31 | type="ReplayMemory", 32 | capacity=-1, 33 | num_samples=-1, 34 | keys=["obs", "actions", "dones", "episode_dones"], 35 | buffer_filenames=[ 36 | "SOME_DEMO_FILE", 37 | ], 38 | ) 39 | 40 | train_cfg = dict( 41 | on_policy=False, 42 | total_steps=50000, 43 | warm_steps=0, 44 | n_steps=0, 45 | n_updates=500, 46 | n_eval=50000, 47 | n_checkpoint=50000, 48 | ) 49 | 50 | eval_cfg = dict( 51 | type="Evaluation", 52 | num=10, 53 | num_procs=1, 54 | use_hidden_state=False, 55 | save_traj=False, 56 | save_video=True, 57 | use_log=False, 58 | ) 59 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/configs/mpc/_base_/cem.py: -------------------------------------------------------------------------------- 1 | agent_cfg = dict( 2 | type="CEM", 3 | cem_cfg=dict( 4 | n_iter=5, 5 | population=200, 6 | elite=10, 7 | lr=1.0, 8 | temperature=1.0, 9 | # use_trunc_normal=True, 10 | use_softmax=False, 11 | ), 12 | scheduler_config=dict(type="FixedScheduler"), 13 | horizon=6, 14 | add_actions=True, 15 | action_horizon=1, 16 | ) 17 | 18 | rollout_cfg = dict( 19 | type="Rollout", 20 | num_procs=20, 21 | ) 22 | 23 | log_level = "INFO" 24 | 25 | eval_cfg = dict( 26 | type="Evaluation", 27 | num_procs=1, 28 | use_hidden_state=True, 29 | start_state=None, 30 | save_traj=True, 31 | save_video=True, 32 | use_log=True, 33 | save_info=False, 34 | log_every_step=True, 35 | ) 36 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/configs/mpc/cem/maniskill2_DigitalTwin.py: -------------------------------------------------------------------------------- 1 | log_level = "INFO" 2 | 3 | agent_cfg = dict( 4 | type="CEM", 5 | cem_cfg=dict( 6 | n_iter=2, 7 | population=300, # laptop and faucet: 300, drawer: 300 or 600 8 | elite=20, 9 | lr=1.0, 10 | temperature=1.0, 
11 | use_softmax=False, 12 | add_histroy_elites=True 13 | ), 14 | add_actions=True, 15 | action_horizon=1, 16 | scheduler_config=dict( 17 | type="KeyStepScheduler", 18 | keys=["population", "n_iter"], 19 | gammas=1, 20 | steps=15, 21 | ), 22 | horizon=10, 23 | ) 24 | 25 | DIGITAL_TWIN_CONFIG_DIR = '~/Sim2Real2/where2act_ws/CEM/mani_skill2/assets/config_files/digital_twins/' 26 | env_cfg = dict( 27 | type="gym", 28 | env_name="CEM-v0", 29 | articulation_config_path=DIGITAL_TWIN_CONFIG_DIR+'faucet_video_2.yaml', 30 | unwrapped=False, 31 | obs_mode="state_dict", 32 | reward_mode="dense", 33 | # reward_scale=0.3, 34 | control_mode="pd_joint_delta_pos", 35 | # control_mode='pd_ee_twist', 36 | # control_mode = 'pd_ee_delta_pos', 37 | use_cost=False, 38 | # vhacd_mode="new", 39 | horizon=50, 40 | ) 41 | 42 | rollout_cfg = dict( 43 | type="Rollout", 44 | num_procs=20, 45 | shared_memory=True, 46 | ) 47 | 48 | 49 | eval_cfg = dict( 50 | type="Evaluation", 51 | num_procs=1, 52 | horizon=30, 53 | use_hidden_state=True, 54 | start_state=None, 55 | save_traj=True, 56 | save_video=True, 57 | use_log=True, 58 | # save_info=True, 59 | log_every_step=True, 60 | ) 61 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/configs/mpc/cem/maniskill2_DigitalTwin_ablation.py: -------------------------------------------------------------------------------- 1 | log_level = "INFO" 2 | 3 | agent_cfg = dict( 4 | type="CEM", 5 | cem_cfg=dict( 6 | n_iter=2, 7 | population=300, # laptop and faucet: 300, drawer: 300 or 600 8 | elite=20, 9 | lr=1.0, 10 | temperature=1.0, 11 | use_softmax=False, 12 | add_histroy_elites=True 13 | ), 14 | add_actions=True, 15 | action_horizon=1, 16 | scheduler_config=dict( 17 | type="KeyStepScheduler", 18 | keys=["population", "n_iter"], 19 | gammas=1, 20 | steps=15, 21 | ), 22 | horizon=10, 23 | ) 24 | 25 | DIGITAL_TWIN_CONFIG_DIR = '~/Sim2Real2/CEM/mani_skill2/assets/config_files/digital_twins/' 26 | env_cfg = 
dict( 27 | type="gym", 28 | env_name="CEM_ablation-v0", 29 | articulation_config_path=DIGITAL_TWIN_CONFIG_DIR+'faucet_exp_03.yaml', 30 | unwrapped=False, 31 | obs_mode="state_dict", 32 | reward_mode="dense", 33 | # reward_scale=0.3, 34 | control_mode="pd_joint_delta_pos", 35 | # control_mode='pd_ee_twist', 36 | # control_mode = 'pd_ee_delta_pos', 37 | use_cost=False, 38 | # vhacd_mode="new", 39 | horizon=50, 40 | ) 41 | 42 | rollout_cfg = dict( 43 | type="Rollout", 44 | num_procs=20, 45 | shared_memory=True, 46 | ) 47 | 48 | 49 | eval_cfg = dict( 50 | type="Evaluation", 51 | num_procs=1, 52 | horizon=50, 53 | use_hidden_state=True, 54 | start_state=None, 55 | save_traj=True, 56 | save_video=True, 57 | use_log=True, 58 | # save_info=True, 59 | log_every_step=True, 60 | ) 61 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/configs/mpc/cem/maniskill2_DigitalTwin_tool.py: -------------------------------------------------------------------------------- 1 | log_level = "INFO" 2 | 3 | agent_cfg = dict( 4 | type="CEM", 5 | cem_cfg=dict( 6 | n_iter=2, 7 | population=300, # laptop and faucet: 300, drawer: 300 or 600 8 | elite=20, 9 | lr=1.0, 10 | temperature=1.0, 11 | use_softmax=False, 12 | add_histroy_elites=True 13 | ), 14 | add_actions=True, 15 | action_horizon=1, 16 | scheduler_config=dict( 17 | type="KeyStepScheduler", 18 | keys=["population", "n_iter"], 19 | gammas=1, 20 | steps=15, 21 | ), 22 | horizon=10, 23 | ) 24 | 25 | DIGITAL_TWIN_CONFIG_DIR = '~/Sim2Real2/CEM/mani_skill2/assets/config_files/digital_twins/' 26 | env_cfg = dict( 27 | type="gym", 28 | env_name="CEM_tool-v0", 29 | articulation_config_path=DIGITAL_TWIN_CONFIG_DIR+'drawer_tool_1.yaml', 30 | unwrapped=False, 31 | obs_mode="state_dict", 32 | reward_mode="dense", 33 | # reward_scale=0.3, 34 | control_mode="pd_joint_delta_pos", 35 | # control_mode='pd_ee_twist', 36 | # control_mode = 'pd_ee_delta_pos', 37 | use_cost=False, 38 | # vhacd_mode="new", 
39 | horizon=50, 40 | ) 41 | 42 | rollout_cfg = dict( 43 | type="Rollout", 44 | num_procs=20, 45 | shared_memory=True, 46 | ) 47 | 48 | 49 | eval_cfg = dict( 50 | type="Evaluation", 51 | num_procs=1, 52 | horizon=30, 53 | use_hidden_state=True, 54 | start_state=None, 55 | save_traj=True, 56 | save_video=True, 57 | use_log=True, 58 | # save_info=True, 59 | log_every_step=True, 60 | ) 61 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import * 2 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/apis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/CEM/ManiSkill2-Learn/maniskill2_learn/apis/__init__.py -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/env/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_rollout, build_evaluation, build_replay 2 | from .rollout import Rollout 3 | from .replay_buffer import ReplayMemory 4 | from .sampling_strategy import OneStepTransition, TStepTransition 5 | from .evaluation import BatchEvaluation, Evaluation, save_eval_statistics 6 | from .observation_process import pcd_uniform_downsample 7 | from .env_utils import get_env_info, true_done, make_gym_env, build_vec_env, import_env, build_env 8 | from .vec_env import VectorEnv 9 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/env/builder.py: -------------------------------------------------------------------------------- 1 | from maniskill2_learn.utils.meta import Registry, 
def select_mask(obs, key, mask):
    """Apply a boolean/index mask to ``obs[key]`` in place, if the key exists."""
    if key in obs:
        obs[key] = obs[key][mask]


def pcd_filter_ground(pcd, eps=1e-3):
    """Return a mask selecting points strictly above the ground plane (z > eps)."""
    heights = pcd["xyz"][..., 2]
    return heights > eps


def pcd_filter_with_mask(obs, mask, env=None):
    """Apply ``mask`` to every per-point array stored in ``obs`` (mutates in place)."""
    assert isinstance(obs, dict), f"{type(obs)}"
    for point_key in ("xyz", "rgb", "seg", "visual_seg", "robot_seg"):
        select_mask(obs, point_key, mask)


def pcd_uniform_downsample(obs, env=None, ground_eps=1e-3, num=1200):
    """Drop ground points (optional) and uniformly sample/pad ``obs`` to ``num`` points.

    Mutates ``obs`` in place and returns it.  Only the "pointcloud" obs mode
    is supported.
    """
    assert env.obs_mode in ["pointcloud"]
    if ground_eps is not None:
        ground_mask = pcd_filter_ground(obs, eps=ground_eps)
        pcd_filter_with_mask(obs, ground_mask, env)
    # sample_and_pad chooses `num` indices (padding when fewer points remain)
    sample_mask = sample_and_pad(obs["xyz"].shape[0], num)
    pcd_filter_with_mask(obs, sample_mask, env)
    return obs
-------------------------------------------------------------------------------- 1 | from .cem import CEM 2 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/__init__.py: -------------------------------------------------------------------------------- 1 | from .modules import * 2 | from .backbones import * 3 | from .applications import * 4 | from .regression_heads import * 5 | 6 | from .builder import build_backbone, build_model, build_reg_head, build_actor_critic 7 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/applications/__init__.py: -------------------------------------------------------------------------------- 1 | from .actor_critic import ContinuousActor, ContinuousCritic, DiscreteActor, DiscreteCritic 2 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .mlp import LinearMLP, ConvMLP 2 | from .visuomotor import Visuomotor 3 | from .pointnet import PointNet 4 | 5 | from .transformer import TransformerEncoder 6 | from .resnet import ResNet, ResNetV1c, ResNetV1d 7 | from .visuomotor import Visuomotor 8 | from .rl_cnn import IMPALA, NatureCNN 9 | 10 | try: 11 | from .sp_resnet import SparseResNet10, SparseResNet18, SparseResNet34, SparseResNet50, SparseResNet101 12 | except ImportError as e: 13 | print("SparseConv is not supported", flush=True) 14 | print(e, flush=True) 15 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .conv import CONV_LAYERS, build_conv_layer 2 | from .linear import LINEAR_LAYERS, build_linear_layer 
import torch.nn as nn
from maniskill2_learn.utils.meta import Registry, build_from_cfg

# Registry of torch padding layers, keyed by class name.
PADDING_LAYERS = Registry("padding layer")

# Register the standard torch.nn padding layers under their class names.
for _padding_cls in (
    nn.ReflectionPad1d,
    nn.ReflectionPad2d,
    nn.ReplicationPad1d,
    nn.ReplicationPad2d,
    nn.ReplicationPad3d,
    nn.ZeroPad2d,
    nn.ConstantPad1d,
    nn.ConstantPad2d,
    nn.ConstantPad3d,
):
    PADDING_LAYERS.register_module(module=_padding_cls)


def build_padding_layer(cfg, default_args=None):
    """Build a padding layer instance from a config dict via PADDING_LAYERS."""
    return build_from_cfg(cfg, PADDING_LAYERS, default_args)
# NOTE(review): despite the filename (pooling.py), this module defines *conv*
# layer registries; `math` and `F` are imported but unused here — presumably
# leftovers from an earlier version. Verify before removing.
import math
import torch.nn as nn
import torch.nn.functional as F
from maniskill2_learn.utils.meta import Registry, build_from_cfg


# Registry of dense torch convolution-style layers, keyed by class name.
CONV_LAYERS = Registry("conv layer")
for module in [
    nn.Conv1d,
    nn.Conv2d,
    nn.Conv3d,
    nn.ConvTranspose1d,
    nn.ConvTranspose2d,
    nn.ConvTranspose3d,
    nn.LazyConv1d,
    nn.LazyConv2d,
    nn.LazyConv3d,
    nn.LazyConvTranspose1d,
    nn.LazyConvTranspose2d,
    nn.LazyConvTranspose3d,
    nn.Unfold,
    nn.Fold,
]:
    CONV_LAYERS.register_module(module=module)
# Short aliases so configs can say type="Conv" / type="Deconv" (2D variants).
CONV_LAYERS.register_module("Conv", module=nn.Conv2d)
CONV_LAYERS.register_module("Deconv", module=nn.ConvTranspose2d)

# SparseConv
# Separate registry for sparse convolution layers (populated elsewhere).
SPARSE_CONV_LAYERS = Registry("sparse conv layer")
/CEM/ManiSkill2-Learn/maniskill2_learn/networks/ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/CEM/ManiSkill2-Learn/maniskill2_learn/networks/ops/__init__.py -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/ops/ops_3d/__init__.py: -------------------------------------------------------------------------------- 1 | from .ball_query import ball_query 2 | from .pcd_process import downsample_pcd 3 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/ops/ops_3d/ball_query/__init__.py: -------------------------------------------------------------------------------- 1 | from .ball_query import ball_query 2 | 3 | __all__ = ['ball_query'] 4 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/ops/ops_3d/pcd_process/__init__.py: -------------------------------------------------------------------------------- 1 | from .pcd_process import downsample_pcd 2 | 3 | __all__ = ["downsample_pcd"] 4 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/ops/ops_3d/pcd_process/pcd_process.cpp: -------------------------------------------------------------------------------- 1 | /* Modified from [Use double in some computation to improve numerical stability 2 | * https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/ball_query.cpp 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | // at::Tensor VoxelDownSample(const at::Tensor &xyz, const float &z_min, const int &num); 13 | 14 | // at::Tensor ManiSkillDownSample(const at::Tensor &xyz, const float &min_z, const int &num); 
15 | 16 | std::tuple UniformDownSample(const at::Tensor &xyz, const float &min_z, const int &num); 17 | 18 | std::tuple ManiSkillDownSample(const at::Tensor &xyz, const at::Tensor &seg, const float &min_z, const int &num, const int &num_min, 19 | const int &num_fg); 20 | 21 | // at::Tensor cumsum(const at::Tensor &mask); 22 | 23 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 24 | // m.def("voxel_downsample", &VoxelDownsample, "VoxelDownsample"); 25 | m.def("uniform_downsample", &UniformDownSample, "UniformDownSample", py::arg("xyz"), py::arg("min_z"), py::arg("num")); 26 | m.def("maniskill_downsample", &ManiSkillDownSample, "ManiSkillDownSample", py::arg("xyz"), py::arg("seg"), py::arg("min_z"), py::arg("num"), 27 | py::arg("num_min"), py::arg("num_fg")); 28 | } 29 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/networks/regression_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .deterministic import TanhHead, BasicHead 2 | from .regression_base import DiscreteBaseHead 3 | from .gaussian import GaussianHead, TanhGaussianHead, SoftplusGaussianHead 4 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/schedulers/__init__.py: -------------------------------------------------------------------------------- 1 | from .custom_scheduler import build_scheduler, SCHEDULERS 2 | from .lr_scheduler import build_lr_scheduler, LRSCHEDULERS 3 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/schedulers/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | from maniskill2_learn.utils.meta import Registry, build_from_cfg 2 | import torch.optim.lr_scheduler as lr_scheduler 3 | 4 | LRSCHEDULERS = Registry("scheduler of pytorch learning rate") 5 | 6 | 7 | for scheduler 
from .string_utils import regex_match
from .type_utils import is_dict, is_tuple_of, is_list_of


def custom_filter(item, func, value=True):
    """
    Recursively filter all elements with function func.

    Lists/tuples/dicts are rebuilt with only the entries that survive the
    recursive filter; tuples come back as lists.  When ``value`` is True the
    (possibly rebuilt) item itself must also satisfy ``func`` to be kept;
    when False only the recursive pruning of containers applies.
    Assumptions:
        None means the item does not pass func.
    """
    if is_tuple_of(item):
        item = list(item)  # normalize tuples so the list branch below handles them
    if is_list_of(item):
        ret = []
        for i in range(len(item)):
            x = custom_filter(item[i], func, value)
            if x is not None:
                ret.append(x)
        item = ret
    elif is_dict(item):
        ret = {}
        for key in item:
            x = custom_filter(item[key], func, value)
            if x is not None:
                ret[key] = x
        item = ret
    # None signals "filtered out" to the recursive caller above.
    return item if not value or (item is not None and func(item)) else None


def filter_none(x):
    # Drop all None leaves (and None entries inside nested containers).
    func = lambda _: _ is not None
    return custom_filter(x, func, True)


def filter_with_regex(x, regex, value=True):
    # Keep only leaves matching `regex`; see custom_filter for the `value` flag.
    func = lambda _: _ is not None and regex_match(_, regex)
    return custom_filter(x, func, value)
import hashlib, numpy as np, struct


def md5sum(filename, block_size=None):
    """Compute the MD5 hex digest of a file, reading it in fixed-size chunks."""
    chunk_size = 65536 if block_size is None else block_size
    digest = hashlib.md5()
    with open(filename, "rb") as fobj:
        while True:
            data = fobj.read(chunk_size)
            if not data:
                break
            digest.update(data)
    return digest.hexdigest()


def check_md5sum(filename, md5, block_size=None):
    """Return True when the file's MD5 matches ``md5`` (a 32-char hex string).

    Raises ValueError if ``md5`` is not a 32-character string; prints a
    warning and returns False on mismatch.
    """
    if not (isinstance(md5, str) and len(md5) == 32):
        raise ValueError(f"MD5 must be 32 chars: {md5}")
    md5_actual = md5sum(filename, block_size=block_size)
    if md5_actual == md5:
        return True
    print(f"MD5 does not match!: (unknown) has md5 {md5_actual}, target md5 is {md5}")
    return False
def dict_to_csv_table(x):
    """Convert a dict into a list of [key, value] rows (insertion order)."""
    return [[key, value] for key, value in x.items()]


def csv_table_to_dict(x):
    """Convert a list of [key, value] rows back into a dict.

    Every row must have exactly two entries.
    """
    for row in x:
        assert len(row) == 2
    return {row[0]: row[1] for row in x}
-------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/lib3d/__init__.py: -------------------------------------------------------------------------------- 1 | from .o3d_utils import ( 2 | to_o3d, 3 | np2mesh, 4 | merge_mesh, 5 | np2pcd, 6 | one_point_vis, 7 | create_aabb, 8 | create_obb, 9 | create_aabb_from_pcd, 10 | create_obb_from_pcd, 11 | create_aabb_from_mesh, 12 | create_obb_from_mesh, 13 | ) 14 | from .trimesh_utils import to_trimesh 15 | from .utils import convex_hull, angle, check_coplanar, apply_pose, mesh_to_pcd 16 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/lib3d/trimesh_utils.py: -------------------------------------------------------------------------------- 1 | import trimesh, numpy as np, open3d as o3d 2 | from maniskill2_learn.utils.data import is_pcd 3 | 4 | 5 | def to_trimesh(x): 6 | if is_trimesh(x): 7 | return x 8 | elif isinstance(x, np.ndarray): 9 | assert is_pcd(x) 10 | return trimesh.points.PointCloud(x) 11 | elif isinstance(x, o3d.geometry.TriangleMesh): 12 | vertices = np.asarray(x.vertices) 13 | faces = np.asarray(x.triangles) 14 | return trimesh.Trimesh(vertices=vertices, faces=faces) 15 | elif isinstance(x, o3d.geometry.PointCloud): 16 | points = np.asarray(x.points) 17 | return trimesh.points.PointCloud(vertices=points) 18 | else: 19 | print(type(x)) 20 | raise NotImplementedError() 21 | 22 | 23 | def is_trimesh(x): 24 | return isinstance(x, (trimesh.Trimesh, trimesh.points.PointCloud)) 25 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/math/__init__.py: -------------------------------------------------------------------------------- 1 | from .running_stats import MovingAverage, RunningMeanStd 2 | from .trunc_normal import trunc_normal 3 | from .split_array import split_num 4 | from .counting import 
def split_num(num, n):
    """
    Divide num into m=min(n, num) elements x_1, ...., x_n, where x_1, ..., x_n >= 1 and max_{i,j} |x_i - x_j| <= 1
    """
    n = min(num, n)
    base, remainder = divmod(num, n)
    # The first `remainder` chunks each get one extra element.
    splits = [base + 1 if idx < remainder else base for idx in range(n)]
    assert sum(splits) == num
    return n, splits
normal distribution 9 | :return: samples 10 | Gaussian density of N(mu, sigma^2): exp(-((x - mu) / sigma)^2 / 2) / (sqrt(2 * pi) * sigma) 11 | """ 12 | a_cdf = norm.cdf(a) 13 | b_cdf = norm.cdf(b) 14 | p = a_cdf + (b_cdf - a_cdf) * np.random.rand(*shape) 15 | return np.clip(norm.ppf(p), a, b) 16 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/meta/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import ConfigDict, Config, DictAction, merge_a_to_b 2 | from .collect_env import collect_env, log_meta_info, get_meta_info 3 | from .logger import get_logger, print_log, flush_print, get_logger_name, TqdmToLogger, flush_logger 4 | from .magic_utils import * 5 | from .module_utils import import_modules_from_strings, check_prerequisites, requires_package, requires_executable, deprecated_api_warning 6 | from .path_utils import ( 7 | is_filepath, 8 | fopen, 9 | check_files_exist, 10 | mkdir_or_exist, 11 | parse_files, 12 | symlink, 13 | scandir, 14 | find_vcs_root, 15 | get_filename, 16 | get_filename_suffix, 17 | copy_folder, 18 | copy_folders, 19 | add_suffix_to_filename, 20 | get_dirname, 21 | to_abspath, 22 | replace_suffix, 23 | ) 24 | from .process_utils import get_total_memory, get_memory_list, get_subprocess_ids, get_memory_dict 25 | from .progressbar import ProgressBar, track_progress, track_iter_progress, track_parallel_progress 26 | from .random_utils import RandomWrapper, get_random_generator, set_random_seed, random_id_generator 27 | from .registry import Registry, build_from_cfg 28 | from .timer import Timer, TimerError, check_time, get_time_stamp, td_format, get_today 29 | from .version_utils import digit_version 30 | from .env_var import add_env_var, add_dist_var, get_world_rank, get_world_size, is_debug_mode, get_dist_info 31 | from .parallel_runner import Worker 32 | from .network import is_port_in_use 33 | 
import os


def add_env_var():
    """Populate common threading / rendering environment variables with defaults.

    Existing values are preserved; only unset keys receive the defaults.
    """
    default_values = {"NUMEXPR_MAX_THREADS": "1", "MKL_NUM_THREADS": "1", "OMP_NUM_THREADS": "1", "CUDA_DEVICE_ORDER": "PCI_BUS_ID", "DISPLAY": "0", "MUJOCO_GL": "egl"}
    for key, value in default_values.items():
        os.environ[key] = os.environ.get(key, value)


def add_dist_var(rank, world_size):
    """Export the distributed-run environment: rank, world size, and free ports."""
    os.environ["PYRL_RANK"] = f"{rank}"
    os.environ["PYRL_WORLD_SIZE"] = f"{world_size}"
    os.environ["MASTER_ADDR"] = "localhost"

    def find_free_port(port):
        from .network import is_port_in_use

        # Scan upwards from `port` until an unused one is found.
        while is_port_in_use(port):
            port += 1
        return port

    os.environ["MASTER_PORT"] = str(find_free_port(12355))
    os.environ["PYRL_TCP_PORT"] = str(find_free_port(15015))


def get_world_rank():
    """Return this process's rank (0 when not launched via add_dist_var)."""
    if "PYRL_RANK" not in os.environ:
        return 0
    # int(...) instead of eval(...): the value is always an integer string
    # (written by add_dist_var), and eval would execute arbitrary code taken
    # from the process environment.
    return int(os.environ["PYRL_RANK"])


def get_world_size():
    """Return the total number of distributed processes (1 when not distributed)."""
    if "PYRL_WORLD_SIZE" not in os.environ:
        return 1
    return int(os.environ["PYRL_WORLD_SIZE"])


def get_dist_info():
    """Return (rank, world_size)."""
    return get_world_rank(), get_world_size()


def is_debug_mode():
    """Return the integer PYRL_DEBUG flag (0 when unset)."""
    if "PYRL_DEBUG" not in os.environ:
        return 0
    return int(os.environ["PYRL_DEBUG"])
def empty_print(*args, **kwargs):
    """A no-op drop-in replacement for print."""
    pass


def custom_assert(pause, output_string, logger=None):
    """Soft assert: when ``pause`` is falsy, log the caller's location and message.

    Unlike a real assert this never raises; it only reports via ``logger.log``
    (when a logger is given) or ``print``.
    """
    if logger is not None:
        logger = logger.log
    else:
        logger = print
    import sys

    if not pause:
        from termcolor import colored

        # BUGFIX: report the *caller's* file, not this helper's own file.
        # Previously the filename came from custom_assert's frame while the
        # line number came from the caller's frame, so the pair never matched.
        caller = sys._getframe().f_back
        file_name = colored(caller.f_code.co_filename, "red")
        line_number = colored(caller.f_lineno, "cyan")
        output_string = colored(output_string, "red")
        logger(f"Assert Error at {file_name}, line {line_number}")
        logger(f"Output: {output_string}")


class SlicePrinter:
    """Debug helper: ``slice_printer[...]`` prints the index/slice it receives."""

    def __getitem__(self, index):
        print(index)


slice_printer = SlicePrinter()
+ "".join(random.choice(chars) for _ in range(size)) 7 | 8 | 9 | class RandomWrapper(object): 10 | def __init__(self, seed): 11 | self.seed = seed 12 | self.state = None 13 | 14 | def __enter__(self): 15 | self.state = np.random.get_state() 16 | np.random.seed(self.seed) 17 | return self.state 18 | 19 | def __exit__(self, exc_type, exc_val, exc_tb): 20 | np.random.set_state(self.state) 21 | 22 | 23 | def get_random_generator(seed): 24 | return np.random.RandomState(seed) 25 | 26 | 27 | def set_random_seed(seed): 28 | if seed is not None: 29 | random.seed(seed) 30 | np.random.seed(seed) 31 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/meta/version_utils.py: -------------------------------------------------------------------------------- 1 | def digit_version(version_str): 2 | ret = [] 3 | for x in version_str.split("."): 4 | if x.isdigit(): 5 | ret.append(int(x)) 6 | elif x.find("rc") != -1: 7 | ret = x.split("rc") 8 | ret.append(int(patch_version[0]) - 1) 9 | ret.append(int(patch_version[1])) 10 | return tuple(ret) 11 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/torch/__init__.py: -------------------------------------------------------------------------------- 1 | from .checkpoint_utils import load_checkpoint, save_checkpoint, load_state_dict, get_state_dict 2 | 3 | try: 4 | from .cuda_utils import ( 5 | get_cuda_info, 6 | get_gpu_utilization, 7 | get_gpu_memory_usage_by_process, 8 | get_gpu_memory_usage_by_current_program, 9 | get_device, 10 | get_one_device, 11 | ) 12 | except: 13 | print(f"Not support gpu usage printing") 14 | 15 | from .misc import no_grad, disable_gradients, run_with_mini_batch, mini_batch 16 | from .ops import ( 17 | set_flat_params, 18 | get_flat_params, 19 | get_flat_grads, 20 | set_flat_grads, 21 | batch_random_perm, 22 | masked_average, 23 | masked_max, 24 | 
smooth_cross_entropy, 25 | batch_rot_with_axis, 26 | soft_update, 27 | hard_update, 28 | avg_grad, 29 | ) 30 | from .logger import * 31 | from .running_stats import RunningMeanStdTorch, MovingMeanStdTorch, RunningSecondMomentumTorch 32 | from .module_utils import BaseAgent, ExtendedModule, ExtendedModuleList, ExtendedDDP, async_no_grad_pi, ExtendedSequential 33 | from .distributions import ScaledTanhNormal, CustomIndependent, ScaledNormal, CustomCategorical 34 | from .optimizer_utils import get_mean_lr, build_optimizer 35 | from .distributed_utils import init_dist, cleanup_dist, master_only, allreduce_params, allreduce_grads, barrier, build_dist_var, get_dist_info 36 | from .freezer import freeze_modules, freeze_params, freeze_bn, unfreeze_modules, unfreeze_params 37 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/torch/logger/__init__.py: -------------------------------------------------------------------------------- 1 | from .tensorboard_logger import TensorboardLogger 2 | from .tensorboard_utils import load_tb_summaries_as_df 3 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/torch/logger/wandb_logger.py: -------------------------------------------------------------------------------- 1 | import wandb 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .o3d_utils import visualize_3d, visualize_pcd 2 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/maniskill2_learn/utils/visualization/o3d_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np, open3d as o3d 2 | from ..lib3d import 
def parse_version_info(version_str):
    """Split a dotted version string into a tuple of ints and rc markers.

    Purely numeric components become ints; a component containing ``"rc"``
    becomes two entries, the leading int and the string ``"rcN"``. Any other
    component (e.g. ``"0b0"``) is silently dropped.
    """
    parts = []
    for token in version_str.split("."):
        if token.isdigit():
            parts.append(int(token))
        elif "rc" in token:
            pieces = token.split("rc")
            parts.append(int(pieces[0]))
            parts.append(f"rc{pieces[1]}")
    return tuple(parts)
7 | yapf 8 | ffmpeg-python 9 | pandas 10 | tables 11 | h5py 12 | coverage 13 | lmdb 14 | pytest 15 | PyTurboJPEG 16 | open3d 17 | trimesh 18 | gym==0.19.0 19 | shapely 20 | transforms3d 21 | sorcery 22 | psutil 23 | opencv-python 24 | tensorboard 25 | tabulate 26 | rtree 27 | GitPython 28 | pynvml 29 | kubernetes 30 | pynput 31 | sqlalchemy 32 | docker 33 | crc32c 34 | pypi-simple 35 | numpy-quaternion 36 | scikit-image==0.18.3 37 | termcolor 38 | pymeshlab 39 | plyfile 40 | einops 41 | ninja 42 | pytorch3d 43 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/pretrained_model/bc_soft_body_pointcloud.sh: -------------------------------------------------------------------------------- 1 | # Replace --work-dir, env_cfg.env_name, and replay_cfg.buffer_filenames when you run other environments 2 | 3 | python maniskill2_learn/apis/run_rl.py configs/brl/bc/pointnet_soft_body.py \ 4 | --work-dir ./logs/bc_excavate_pointcloud --gpu-ids 0 \ 5 | --cfg-options "env_cfg.env_name=Excavate-v0" "env_cfg.obs_mode=pointcloud" "env_cfg.n_points=1200" \ 6 | "env_cfg.control_mode=pd_joint_delta_pos" \ 7 | "replay_cfg.buffer_filenames=../ManiSkill2/demos/soft_body/Excavate-v0/trajectory.none.pd_ee_delta_pose_pointcloud.h5" \ 8 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 9 | "train_cfg.n_eval=50000" "train_cfg.total_steps=50000" "train_cfg.n_checkpoint=50000" "train_cfg.n_updates=500" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/pretrained_model/bc_soft_body_rgbd.sh: -------------------------------------------------------------------------------- 1 | # Replace --work-dir, env_cfg.env_name, and replay_cfg.buffer_filenames when you run other environments 2 | 3 | python maniskill2_learn/apis/run_rl.py configs/brl/bc/rgbd_soft_body.py \ 4 | --work-dir ./logs/bc_excavate_rgbd --gpu-ids 0 \ 5 | 
--cfg-options "env_cfg.env_name=Excavate-v0" "env_cfg.obs_mode=rgbd" "env_cfg.n_points=1200" \ 6 | "env_cfg.control_mode=pd_joint_delta_pos" \ 7 | "replay_cfg.buffer_filenames=../ManiSkill2/demos/soft_body/Excavate-v0/trajectory.none.pd_ee_delta_pose_rgbd.h5" \ 8 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 9 | "train_cfg.n_eval=50000" "train_cfg.total_steps=50000" "train_cfg.n_checkpoint=50000" "train_cfg.n_updates=500" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/pretrained_model/dapg_pickcube_pointcloud.sh: -------------------------------------------------------------------------------- 1 | # Assuming 2 gpus each with 12GB memory; 2 | # if you have a GPU with more memory (e.g. 24GB), you can set --gpu-ids and --sim-gpu-ids to be the same; 3 | # if you only have one GPU with small memory, then you can set a smaller rollout_cfg.num_procs (e.g. =5) 4 | 5 | python maniskill2_learn/apis/run_rl.py configs/mfrl/dapg/maniskill2_pn.py \ 6 | --work-dir ./logs/dapg_pickcube_pointcloud --gpu-ids 0 --sim-gpu-ids 1 \ 7 | --cfg-options "env_cfg.env_name=PickCube-v0" "env_cfg.obs_mode=pointcloud" "env_cfg.n_points=1200" \ 8 | "rollout_cfg.num_procs=16" "env_cfg.reward_mode=dense" \ 9 | "env_cfg.control_mode=pd_ee_delta_pose" "env_cfg.obs_frame=ee" "env_cfg.n_goal_points=50" \ 10 | "agent_cfg.demo_replay_cfg.buffer_filenames=../ManiSkill2/demos/rigid_body/PickCube-v0/trajectory.none.pd_ee_delta_pose_pointcloud.h5" \ 11 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 12 | "train_cfg.total_steps=25000000" "train_cfg.n_checkpoint=5000000" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/pretrained_model/dapg_pickcube_rgbd.sh: -------------------------------------------------------------------------------- 1 | # Assuming 2 gpus each with 12GB memory; 2 | # if you 
have a GPU with more memory (e.g. 24GB), you can set --gpu-ids and --sim-gpu-ids to be the same; 3 | # if you only have one GPU with small memory, then you can set a smaller rollout_cfg.num_procs (e.g. =5) 4 | 5 | python maniskill2_learn/apis/run_rl.py configs/mfrl/dapg/maniskill2_rgbd.py \ 6 | --work-dir ./logs/dapg_pickcube_rgbd --gpu-ids 0 --sim-gpu-ids 1 \ 7 | --cfg-options "env_cfg.env_name=PickCube-v0" "env_cfg.obs_mode=rgbd" \ 8 | "env_cfg.control_mode=pd_ee_delta_pose" \ 9 | "rollout_cfg.num_procs=16" "env_cfg.reward_mode=dense" \ 10 | "agent_cfg.demo_replay_cfg.buffer_filenames=../ManiSkill2/demos/rigid_body/PickCube-v0/trajectory.none.pd_ee_delta_pose_rgbd.h5" \ 11 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 12 | "train_cfg.total_steps=25000000" "train_cfg.n_checkpoint=5000000" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/pretrained_model/dapg_picksingleycb_pointcloud.sh: -------------------------------------------------------------------------------- 1 | # Assuming 2 gpus each with 12GB memory; 2 | # if you have a GPU with more memory (e.g. 24GB), you can set --gpu-ids and --sim-gpu-ids to be the same; 3 | # if you only have one GPU with small memory, then you can set a smaller rollout_cfg.num_procs (e.g. 
=5) 4 | 5 | # Since the demo file is very large, we use dynamic loading to save memory 6 | python maniskill2_learn/apis/run_rl.py configs/mfrl/dapg/maniskill2_pn.py \ 7 | --work-dir ./logs/dapg_picksingleycb_pointcloud --gpu-ids 0 --sim-gpu-ids 1 \ 8 | --cfg-options "env_cfg.env_name=PickSingleYCB-v0" "env_cfg.obs_mode=pointcloud" "env_cfg.n_points=1200" \ 9 | "rollout_cfg.num_procs=16" "env_cfg.reward_mode=dense" \ 10 | "env_cfg.control_mode=pd_ee_delta_pose" "env_cfg.obs_frame=ee" "env_cfg.n_goal_points=50" \ 11 | "agent_cfg.demo_replay_cfg.capacity=20000" "agent_cfg.demo_replay_cfg.cache_size=20000" \ 12 | "agent_cfg.demo_replay_cfg.dynamic_loading=True" "agent_cfg.demo_replay_cfg.num_samples=-1" \ 13 | "agent_cfg.demo_replay_cfg.buffer_filenames=../ManiSkill2/demos/rigid_body/PickSingleYCB-v0/trajectory_merged.none.pd_ee_delta_pose_pointcloud.h5" \ 14 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 15 | "train_cfg.total_steps=25000000" "train_cfg.n_checkpoint=5000000" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/pretrained_model/dapg_picksingleycb_rgbd.sh: -------------------------------------------------------------------------------- 1 | # Assuming 2 gpus each with 12GB memory; 2 | # if you have a GPU with more memory (e.g. 24GB), you can set --gpu-ids and --sim-gpu-ids to be the same; 3 | # if you only have one GPU with small memory, then you can set a smaller rollout_cfg.num_procs (e.g. 
=5) 4 | 5 | # Since the demo file is very large, we use dynamic loading to save memory 6 | python maniskill2_learn/apis/run_rl.py configs/mfrl/dapg/maniskill2_rgbd.py \ 7 | --work-dir ./logs/dapg_picksingleycb_rgbd --gpu-ids 0 --sim-gpu-ids 1 \ 8 | --cfg-options "env_cfg.env_name=PickSingleYCB-v0" "env_cfg.obs_mode=rgbd" \ 9 | "rollout_cfg.num_procs=16" "env_cfg.reward_mode=dense" \ 10 | "env_cfg.control_mode=pd_ee_delta_pose" \ 11 | "agent_cfg.demo_replay_cfg.capacity=20000" "agent_cfg.demo_replay_cfg.cache_size=20000" \ 12 | "agent_cfg.demo_replay_cfg.dynamic_loading=True" "agent_cfg.demo_replay_cfg.num_samples=-1" \ 13 | "agent_cfg.demo_replay_cfg.buffer_filenames=../ManiSkill2/demos/rigid_body/PickSingleYCB-v0/trajectory_merged.none.pd_ee_delta_pose_rgbd.h5" \ 14 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 15 | "train_cfg.total_steps=25000000" "train_cfg.n_checkpoint=5000000" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/pretrained_model/dapg_stackcube_pointcloud.sh: -------------------------------------------------------------------------------- 1 | # Assuming 2 gpus each with 12GB memory; 2 | # if you have a GPU with more memory (e.g. 24GB), you can set --gpu-ids and --sim-gpu-ids to be the same; 3 | # if you only have one GPU with small memory, then you can set a smaller rollout_cfg.num_procs (e.g. 
=5) 4 | 5 | python maniskill2_learn/apis/run_rl.py configs/mfrl/dapg/maniskill2_pn.py \ 6 | --work-dir ./logs/dapg_stackcube_pointcloud --gpu-ids 0 --sim-gpu-ids 1 \ 7 | --cfg-options "env_cfg.env_name=StackCube-v0" "env_cfg.obs_mode=pointcloud" "env_cfg.n_points=1200" \ 8 | "rollout_cfg.num_procs=16" "env_cfg.reward_mode=dense" \ 9 | "env_cfg.control_mode=pd_ee_delta_pose" "env_cfg.obs_frame=ee" \ 10 | "agent_cfg.demo_replay_cfg.buffer_filenames=../ManiSkill2/demos/rigid_body/StackCube-v0/trajectory.none.pd_ee_delta_pose_pointcloud.h5" \ 11 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 12 | "train_cfg.total_steps=25000000" "train_cfg.n_checkpoint=5000000" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/pretrained_model/dapg_stackcube_rgbd.sh: -------------------------------------------------------------------------------- 1 | # Assuming 2 gpus each with 12GB memory; 2 | # if you have a GPU with more memory (e.g. 24GB), you can set --gpu-ids and --sim-gpu-ids to be the same; 3 | # if you only have one GPU with small memory, then you can set a smaller rollout_cfg.num_procs (e.g. 
=5) 4 | 5 | python maniskill2_learn/apis/run_rl.py configs/mfrl/dapg/maniskill2_rgbd.py \ 6 | --work-dir ./logs/dapg_stackcube_rgbd --gpu-ids 0 --sim-gpu-ids 1 \ 7 | --cfg-options "env_cfg.env_name=StackCube-v0" "env_cfg.obs_mode=rgbd" \ 8 | "env_cfg.control_mode=pd_ee_delta_pose" \ 9 | "rollout_cfg.num_procs=16" "env_cfg.reward_mode=dense" \ 10 | "agent_cfg.demo_replay_cfg.buffer_filenames=../ManiSkill2/demos/rigid_body/StackCube-v0/trajectory.none.pd_ee_delta_pose_rgbd.h5" \ 11 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 12 | "train_cfg.total_steps=25000000" "train_cfg.n_checkpoint=5000000" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/scratch_pointcloud_template/run_bc.sh: -------------------------------------------------------------------------------- 1 | python maniskill2_learn/apis/run_rl.py configs/brl/bc/pointnet.py \ 2 | --work-dir YOUR_LOGGING_DIRECTORY --gpu-ids 0 \ 3 | --cfg-options "env_cfg.env_name=PickCube-v0" "env_cfg.obs_mode=pointcloud" "env_cfg.n_points=1200" \ 4 | "env_cfg.control_mode=pd_joint_delta_pos" \ 5 | "replay_cfg.buffer_filenames=PATH_TO_POINT_CLOUD_DEMO" \ 6 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" \ 7 | "train_cfg.n_eval=50000" "train_cfg.total_steps=50000" "train_cfg.n_checkpoint=50000" "train_cfg.n_updates=500" -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/scratch_pointcloud_template/run_dapg.sh: -------------------------------------------------------------------------------- 1 | python maniskill2_learn/apis/run_rl.py configs/mfrl/ppo/maniskill2_pn_dapg.py \ 2 | --work-dir YOUR_LOGGING_DIRECTORY --gpu-ids 0 \ 3 | --cfg-options "env_cfg.env_name=PegInsertionSide-v0" "env_cfg.obs_mode=pointcloud" "env_cfg.n_points=1200" \ 4 | "env_cfg.reward_mode=dense" "env_cfg.control_mode=pd_joint_delta_pos" \ 5 | 
"agent_cfg.demo_replay_cfg.buffer_filenames=PATH_TO_POINT_CLOUD_DEMO.h5" 6 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" 7 | # To manually evaluate the model, add --evaluation and --resume-from YOUR_LOGGING_DIRECTORY/models/SOME_CHECKPOINT.ckpt 8 | # to the above commands. 9 | 10 | # Using multiple GPUs will increase training speed; 11 | # Note that train_cfg.n_steps will also be multiplied by the number of gpus you use, so you may want to divide it by the number of gpus 12 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/scratch_pointcloud_template/run_gail.sh: -------------------------------------------------------------------------------- 1 | # Point Cloud-based GAIL 2 | 3 | # ** Before you run it, check that demonstrations are converted with "--with-next" and rewards behave as intended. ** 4 | 5 | python maniskill2_learn/apis/run_rl.py configs/mfrl/gail/maniskill2_pn.py \ 6 | --gpu-ids 0 --work-dir YOUR_LOGGING_DIRECTORY --print-steps 16 \ 7 | --cfg-options "env_cfg.env_name=PlugCharger-v0" "env_cfg.control_mode=pd_joint_delta_pos" \ 8 | "env_cfg.reward_mode=dense" "replay_cfg.buffer_filenames=[PATH_TO_DEMO.h5]" \ 9 | "expert_replay_cfg.buffer_filenames=[PATH_TO_DEMO.h5]" 10 | 11 | # Using multiple GPUs will increase training speed; 12 | # Note that the effective batch size is multiplied by the number of gpus; large batch can be crucial for stabilizing GAIL training 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/scratch_pointcloud_template/run_ppo.sh: -------------------------------------------------------------------------------- 1 | python maniskill2_learn/apis/run_rl.py configs/mfrl/ppo/maniskill2_pn.py \ 2 | --work-dir YOUR_LOGGING_DIRECTORY --gpu-ids 0 \ 3 | --cfg-options "env_cfg.env_name=PickCube-v0" "env_cfg.obs_mode=pointcloud" 
"env_cfg.n_points=1200" \ 4 | "env_cfg.reward_mode=dense" "env_cfg.control_mode=pd_joint_delta_pos" \ 5 | "eval_cfg.num=100" "eval_cfg.save_traj=False" "eval_cfg.save_video=True" 6 | 7 | # The above command does automatic evaluation after training. Alternatively, you can manually evaluate a model checkpoint 8 | # by appending --evaluation and --resume-from YOUR_LOGGING_DIRECTORY/models/SOME_CHECKPOINT.ckpt to the above commands. 9 | 10 | 11 | # Using multiple GPUs will increase training speed; 12 | # Note that train_cfg.n_steps will also be multiplied by the number of gpus you use, so you may want to divide it by the number of gpus -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/scripts/example_training/scratch_pointcloud_template/run_sac.sh: -------------------------------------------------------------------------------- 1 | # Point Cloud-based SAC 2 | python maniskill2_learn/apis/run_rl.py configs/mfrl/sac/maniskill2_pn.py \ 3 | --gpu-ids 0 --work-dir YOUR_LOGGING_DIRECTORY --print-steps 16 \ 4 | --cfg-options "env_cfg.env_name=PegInsertionSide-v0" "env_cfg.control_mode=pd_joint_delta_pos" \ 5 | "env_cfg.reward_mode=dense" 6 | 7 | # Using multiple GPUs will increase training speed; 8 | # Note that the effective batch size is multiplied by the number of gpus; large batch can be crucial for stabilizing SAC training 9 | 10 | 11 | # State-based SAC for debugging purposes 12 | """ 13 | python maniskill2_learn/apis/run_rl.py configs/mfrl/sac/maniskill2_state.py \ 14 | --work-dir YOUR_LOGGING_DIRECTORY --gpu-ids 0 \ 15 | --cfg-options 'env_cfg.env_name=PegInsertionSide-v0' 'env_cfg.control_mode=pd_joint_delta_pos' \ 16 | 'env_cfg.reward_mode=dense' 17 | """ 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /CEM/ManiSkill2-Learn/tools/merge_h5.py: -------------------------------------------------------------------------------- 1 | 2 | import argparse 3 | import os, 
def copy_group(from_file, to_file, key_name, new_key_name):
    """Recursively copy HDF5 group ``key_name`` into ``new_key_name``.

    Keys whose name contains ``"dist"`` or ``"str"`` are renamed by dropping
    their first two underscore-separated segments; everything else keeps its
    name. Sub-groups are copied recursively, datasets are copied verbatim.
    """
    if new_key_name not in to_file.keys():
        to_file.require_group(new_key_name)
    src = from_file[key_name]
    dst = to_file[new_key_name]
    for key in src.keys():
        renamed = key
        if "dist" in key or "str" in key:
            renamed = "_".join(key.split("_")[2:])
        child = src[key]
        if isinstance(child, h5py.Group):
            copy_group(src, dst, key, renamed)
        else:
            dst.create_dataset(renamed, data=child)
ArmImpEEVelConstController, ArmImpEEVelKdController 13 | from .arm_imp_joint_pos import ( 14 | ArmImpJointPosConstController, 15 | ArmImpJointPosKpController, 16 | ArmImpJointPosKpKdController, 17 | ) 18 | from .arm_imp_joint_pos_vel import ( 19 | ArmImpJointPosVelConstController, 20 | ArmImpJointPosVelKpController, 21 | ArmImpJointPosVelKpKdController, 22 | ) 23 | from .arm_imp_joint_vel import ArmImpJointVelConstController, ArmImpJointVelKdController 24 | from .arm_pd_ee_delta_position import ArmPDEEDeltaPositionController 25 | 26 | # general controllers 27 | from .general_pd_ee_twist import GeneralPDEETwistController 28 | from .general_pd_joint_pos import GeneralPDJointPosController 29 | from .general_pd_joint_pos_vel import GeneralPDJointPosVelController 30 | from .general_pd_joint_vel import GeneralPDJointVelController 31 | 32 | # gripper controllers 33 | from .gripper_pd_joint_pos_mimic import GripperPDJointPosMimicController 34 | from .gripper_pd_joint_vel_mimic import GripperPDJointVelMimicController 35 | 36 | # mobile platform controllers 37 | from .mobile_pd_joint_vel_decoupled import MobilePDJointVelDecoupledController 38 | from .mobile_pd_joint_vel_diff import MobilePDJointVelDiffController 39 | -------------------------------------------------------------------------------- /CEM/ManiSkill2/mani_skill2/assets/config_files/.gitignore: -------------------------------------------------------------------------------- 1 | digital_twins -------------------------------------------------------------------------------- /CEM/ManiSkill2/mani_skill2/assets/descriptions/.gitignore: -------------------------------------------------------------------------------- 1 | optical_table 2 | xmate3_description 3 | robotiq_description 4 | !.gitignore -------------------------------------------------------------------------------- /CEM/ManiSkill2/mani_skill2/assets/digital_twins/.gitignore: -------------------------------------------------------------------------------- 1 | * 
def get_pixel_grids_np(height: int, width: int):
    """Return homogeneous pixel-center coordinates for an image grid.

    Args:
        height: image height in pixels.
        width: image width in pixels.

    Returns:
        np.ndarray of shape (3, height * width): rows are x, y and ones,
        with coordinates at pixel centers (0.5 offset).
    """
    x_linspace = np.linspace(0.5, width - 0.5, width)
    y_linspace = np.linspace(0.5, height - 0.5, height)
    x_coordinates, y_coordinates = np.meshgrid(x_linspace, y_linspace)
    x_coordinates = np.reshape(x_coordinates, (1, -1))
    y_coordinates = np.reshape(y_coordinates, (1, -1))
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # it was always just an alias of the builtin float (i.e. float64).
    ones = np.ones_like(x_coordinates).astype(np.float64)
    grid = np.concatenate([x_coordinates, y_coordinates, ones], axis=0)

    return grid
def load_json(filename: Union[str, Path]):
    """Load a JSON object from a ``.json`` file or a gzipped ``.gz`` file.

    Args:
        filename: path to the file; the extension selects the opener.

    Returns:
        The deserialized JSON object.

    Raises:
        RuntimeError: if the extension is neither ``.gz`` nor ``.json``.
    """
    filename = str(filename)
    if filename.endswith(".gz"):
        opener = gzip.open
    elif filename.endswith(".json"):
        opener = open
    else:
        # Bug fix: the f-string had no placeholder, so the error never
        # reported which file was rejected.
        raise RuntimeError(f"Unsupported extension: {filename}")
    # `with` guarantees the handle is closed even if parsing raises.
    with opener(filename, "rt") as f:
        return json.loads(f.read())
def float_str(num, precision):
    """Render *num* as a fixed-point string with *precision* decimals."""
    return "%.{0}f".format(precision) % num


def num_to_str(num, unit=None, precision=2, number_only=False, auto_select_unit=False):
    """Scale *num* by a binary unit (K/M/G/T/P) and format it as a string.

    If ``unit`` is given, *num* is divided by ``1024 ** (index + 1)`` for that
    unit. With ``auto_select_unit`` and no explicit unit, the first unit whose
    capacity exceeds *num* is chosen (falling back to "P" for huge values).
    ``number_only`` returns the scaled number instead of a formatted string.
    """
    unit_list = ["K", "M", "G", "T", "P"]
    if auto_select_unit and unit is None:
        # NOTE(review): for num >= 1024 this picks one unit larger than the
        # conventional choice (e.g. 1536 -> "M"); preserved as-is.
        for i, candidate in enumerate(unit_list):
            if num < 1024 ** (i + 1):
                break
        unit = candidate
    if unit is None:
        unit = ""
    else:
        num = num * 1.0 / (1024 ** (unit_list.index(unit) + 1))
    if number_only:
        return num
    return float_str(num, precision) + unit
an animation in jupyter notebook. 11 | 12 | Args: 13 | images: images with equal shape. 14 | dpi: resolution (dots per inch). 15 | format (str): ("html5_video", "jshtml") 16 | 17 | References: 18 | https://gist.github.com/foolishflyfox/e30fd8bfbb6a9cee9b1a1fa6144b209c 19 | http://louistiao.me/posts/notebooks/embedding-matplotlib-animations-in-jupyter-as-interactive-javascript-widgets/ 20 | https://stackoverflow.com/questions/35532498/animation-in-ipython-notebook/46878531#46878531 21 | """ 22 | h, w = images[0].shape[:2] 23 | fig = plt.figure(figsize=(h / dpi, w / dpi), dpi=dpi) 24 | fig_im = plt.figimage(images[0]) 25 | 26 | def animate(image): 27 | fig_im.set_array(image) 28 | return (fig_im,) 29 | 30 | anim = animation.FuncAnimation(fig, animate, frames=images, **kwargs) 31 | if format == "html5_video": 32 | # NOTE(jigu): can not show in VSCode 33 | display(HTML(anim.to_html5_video())) 34 | elif format == "jshtml": 35 | display(HTML(anim.to_jshtml())) 36 | else: 37 | raise NotImplementedError(format) 38 | -------------------------------------------------------------------------------- /CEM/ManiSkill2/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup( 4 | name="mani_skill2", 5 | version="0.1.0", 6 | author="SU Lab at UC San Diego", 7 | zip_safe=False, 8 | py_modules=[] 9 | ) 10 | -------------------------------------------------------------------------------- /ditto/.gitignore: -------------------------------------------------------------------------------- 1 | # dirs 2 | build/ 3 | .stl 4 | -------------------------------------------------------------------------------- /ditto/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 UT Robot Perception and Learning Lab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation 
files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ditto/assets/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/assets/pipeline.png -------------------------------------------------------------------------------- /ditto/assets/stats/drawer_train.txt: -------------------------------------------------------------------------------- 1 | 0007 Drawer 2 | 0008 Drawer 3 | 0009 Drawer 4 | 0010 Drawer 5 | 0014 Drawer 6 | 0015 Drawer 7 | 0018 Drawer 8 | 0018 Drawer 9 | 0021 Drawer 10 | 0022 Drawer 11 | 0023 Drawer 12 | 0025 Drawer 13 | 0026 Drawer 14 | 0027 Drawer 15 | -------------------------------------------------------------------------------- /ditto/assets/stats/drawer_val.txt: -------------------------------------------------------------------------------- 1 | 0013 Drawer 2 | 0017 Drawer 3 | 0019 Drawer 4 | 0030 Drawer 5 | 
-------------------------------------------------------------------------------- /ditto/assets/stats/faucet_train.txt: -------------------------------------------------------------------------------- 1 | 0011 Faucet 2 | 0034 Faucet 3 | 0036 Faucet 4 | 0050 Faucet 5 | 0056 Faucet 6 | 0057 Faucet 7 | 0125 Faucet 8 | 0142 Faucet 9 | -------------------------------------------------------------------------------- /ditto/assets/stats/faucet_val.txt: -------------------------------------------------------------------------------- 1 | 0018 Faucet 2 | 0020 Faucet 3 | 0032 Faucet 4 | -------------------------------------------------------------------------------- /ditto/assets/stats/ins_cnt_drawer.txt: -------------------------------------------------------------------------------- 1 | Drawer 35 800 2 | -------------------------------------------------------------------------------- /ditto/assets/stats/ins_cnt_faucet.txt: -------------------------------------------------------------------------------- 1 | Faucet 8 1250 2 | -------------------------------------------------------------------------------- /ditto/assets/stats/ins_cnt_laptop.txt: -------------------------------------------------------------------------------- 1 | Laptop 7 800 2 | -------------------------------------------------------------------------------- /ditto/assets/stats/laptop_train.txt: -------------------------------------------------------------------------------- 1 | 10211 Laptop 2 | 10213 Laptop 3 | 10305 Laptop 4 | 10626 Laptop 5 | -------------------------------------------------------------------------------- /ditto/assets/stats/laptop_val.txt: -------------------------------------------------------------------------------- 1 | 9748 Laptop 2 | -------------------------------------------------------------------------------- /ditto/conda_env_gpu.yaml: -------------------------------------------------------------------------------- 1 | name: Ditto 2 | 3 | channels: 4 | - pytorch 5 | - conda-forge 6 
| - pyg 7 | 8 | dependencies: 9 | - python=3.8 10 | - pip 11 | - cudatoolkit=11.3 12 | - pytorch=1.10.2=py3.8_cuda11.3_cudnn8.2.0_0 13 | - torchvision 14 | - pytorch-scatter 15 | - pip: 16 | - -r requirements.txt 17 | -------------------------------------------------------------------------------- /ditto/configs/callbacks/default.yaml: -------------------------------------------------------------------------------- 1 | model_checkpoint: 2 | _target_: pytorch_lightning.callbacks.ModelCheckpoint 3 | monitor: "val/loss" # name of the logged metric which determines when model is improving 4 | save_top_k: 1 # save k best models (determined by above metric) 5 | save_last: True # additionaly always save model from last epoch 6 | mode: "min" # can be "max" or "min" 7 | verbose: False 8 | dirpath: "checkpoints/" 9 | filename: "{epoch:02d}" 10 | 11 | early_stopping: 12 | _target_: pytorch_lightning.callbacks.EarlyStopping 13 | monitor: "val/loss" # name of the logged metric which determines when model is improving 14 | patience: 100 # how many epochs of not improving until training stops 15 | mode: "min" # can be "max" or "min" 16 | min_delta: 0 # minimum change in the monitored metric needed to qualify as an improvement 17 | 18 | write_hparams: 19 | _target_: src.callbacks.misc_callbacks.OnCheckpointHparams 20 | -------------------------------------------------------------------------------- /ditto/configs/callbacks/none.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/configs/callbacks/none.yaml -------------------------------------------------------------------------------- /ditto/configs/callbacks/wandb.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - default.yaml 3 | 4 | watch_model: 5 | _target_: src.callbacks.wandb_callbacks.WatchModel 6 | log: "all" 7 | log_freq: 100 8 | 9 
| upload_code_as_artifact: 10 | _target_: src.callbacks.wandb_callbacks.UploadCodeAsArtifact 11 | code_dir: ${work_dir}/src 12 | 13 | upload_ckpts_as_artifact: 14 | _target_: src.callbacks.wandb_callbacks.UploadCheckpointsAsArtifact 15 | ckpt_dir: "checkpoints/" 16 | upload_best_only: True 17 | 18 | log_f1_precision_recall_heatmap: 19 | _target_: src.callbacks.wandb_callbacks.LogF1PrecRecHeatmap 20 | 21 | log_confusion_matrix: 22 | _target_: src.callbacks.wandb_callbacks.LogConfusionMatrix 23 | 24 | log_image_predictions: 25 | _target_: src.callbacks.wandb_callbacks.LogImagePredictions 26 | num_samples: 8 27 | -------------------------------------------------------------------------------- /ditto/configs/config.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # specify here default training configuration 4 | defaults: 5 | - trainer: minimal.yaml 6 | - model: screw_model.yaml 7 | - datamodule: screw_datamodule.yaml 8 | - callbacks: default.yaml # set this to null if you don't want to use callbacks 9 | - logger: csv # set logger here or use command line (e.g. `python run.py logger=wandb`) 10 | 11 | - experiment: null 12 | - hparams_search: null 13 | 14 | - hydra: default.yaml 15 | 16 | # enable color logging 17 | - override hydra/hydra_logging: colorlog 18 | - override hydra/job_logging: colorlog 19 | 20 | # path to original working directory 21 | # hydra hijacks working directory by changing it to the current log directory, 22 | # so it's useful to have this path as a special variable 23 | # learn more here: https://hydra.cc/docs/next/tutorials/basic/running_your_app/working_directory 24 | work_dir: ${hydra:runtime.cwd} 25 | 26 | # path to folder with data 27 | data_dir: ${work_dir}/data/ 28 | 29 | # use `python run.py debug=true` for easy debugging! 
30 | # this will run 1 train, val and test loop with only 1 batch 31 | # equivalent to running `python run.py trainer.fast_dev_run=true` 32 | # (this is placed here just for easier access from command line) 33 | debug: False 34 | 35 | # pretty print config at the start of the run using Rich library 36 | print_config: True 37 | 38 | # disable python warnings if they annoy you 39 | ignore_warnings: True 40 | -------------------------------------------------------------------------------- /ditto/configs/datamodule/sapien_datamodule.yaml: -------------------------------------------------------------------------------- 1 | _target_: src.datamodules.default_datamodule.DefaultDataModule 2 | 3 | opt: 4 | train: 5 | dataset_type: GeoArtDatasetSapien 6 | data_dir: ${data_dir} # data_dir is specified in config.yaml 7 | data_path: 8 | - sapien/train 9 | num_point: 8192 10 | num_point_occ: 2048 11 | num_point_seg: 512 12 | norm: True 13 | norm_padding: 0.1 14 | batch_size: 32 15 | num_workers: 4 16 | rand_rot: False # random rotation of input data 17 | weighted_occ_sample: False 18 | 19 | val: 20 | dataset_type: GeoArtDatasetSapienTest 21 | data_dir: ${data_dir} # data_dir is specified in config.yaml 22 | data_path: 23 | - sapien/val 24 | num_point: 8192 25 | #num_point_occ: 90000 26 | #num_point_seg: 5000 27 | norm: True 28 | norm_padding: 0.1 29 | batch_size: 1 30 | num_workers: 4 31 | rand_rot: False # random rotation of input data 32 | 33 | test: 34 | dataset_type: GeoArtDatasetSapienTest 35 | data_dir: ${data_dir} # data_dir is specified in config.yaml 36 | data_path: 37 | - sapien/test 38 | 39 | num_point: 8192 40 | #num_point_occ: 90000 41 | #num_point_seg: 5000 42 | norm: True 43 | norm_padding: 0.1 44 | batch_size: 1 45 | num_workers: 4 46 | rand_rot: False # random rotation of input data 47 | -------------------------------------------------------------------------------- /ditto/configs/datamodule/sapien_datamodule_ablation.yaml: 
-------------------------------------------------------------------------------- 1 | _target_: src.datamodules.default_datamodule.DefaultDataModule 2 | 3 | opt: 4 | train: 5 | dataset_type: GeoArtDatasetAblation 6 | data_dir: ${data_dir} # data_dir is specified in config.yaml 7 | data_path: 8 | - sapien/train 9 | num_point: 8192 10 | num_point_occ: 2048 11 | num_point_seg: 512 12 | norm: True 13 | norm_padding: 0.1 14 | batch_size: 32 15 | num_workers: 4 16 | rand_rot: False # random rotation of input data 17 | weighted_occ_sample: False 18 | 19 | val: 20 | dataset_type: GeoArtDatasetTestAblation 21 | data_dir: ${data_dir} # data_dir is specified in config.yaml 22 | data_path: 23 | - sapien/val 24 | num_point: 8192 25 | #num_point_occ: 90000 26 | #num_point_seg: 5000 27 | norm: True 28 | norm_padding: 0.1 29 | batch_size: 1 30 | num_workers: 4 31 | rand_rot: False # random rotation of input data 32 | 33 | test: 34 | dataset_type: GeoArtDatasetSapienTest 35 | data_dir: ${data_dir} # data_dir is specified in config.yaml 36 | data_path: 37 | - sapien/test 38 | 39 | num_point: 8192 40 | #num_point_occ: 90000 41 | #num_point_seg: 5000 42 | norm: True 43 | norm_padding: 0.1 44 | batch_size: 1 45 | num_workers: 4 46 | rand_rot: False # random rotation of input data 47 | -------------------------------------------------------------------------------- /ditto/configs/datamodule/sapien_different_angle_datamodule.yaml: -------------------------------------------------------------------------------- 1 | _target_: src.datamodules.default_datamodule.DefaultDataModule 2 | 3 | opt: 4 | train: 5 | dataset_type: GeoArtDatasetSapien 6 | data_dir: ${data_dir} # data_dir is specified in config.yaml 7 | data_path: 8 | - sapien_different_angle/train 9 | num_point: 8192 10 | num_point_occ: 2048 11 | num_point_seg: 512 12 | norm: True 13 | norm_padding: 0.1 14 | batch_size: 32 15 | num_workers: 4 16 | rand_rot: False # random rotation of input data 17 | weighted_occ_sample: False 18 | 19 | 
val: 20 | dataset_type: GeoArtDatasetSapienTest 21 | data_dir: ${data_dir} # data_dir is specified in config.yaml 22 | data_path: 23 | - sapien_different_angle/val 24 | num_point: 8192 25 | #num_point_occ: 90000 26 | #num_point_seg: 5000 27 | norm: True 28 | norm_padding: 0.1 29 | batch_size: 1 30 | num_workers: 4 31 | rand_rot: False # random rotation of input data 32 | 33 | test: 34 | dataset_type: GeoArtDatasetSapienTest 35 | data_dir: ${data_dir} # data_dir is specified in config.yaml 36 | data_path: 37 | - sapien_different_angle/test 38 | 39 | num_point: 8192 40 | #num_point_occ: 90000 41 | #num_point_seg: 5000 42 | norm: True 43 | norm_padding: 0.1 44 | batch_size: 1 45 | num_workers: 4 46 | rand_rot: False # random rotation of input data 47 | -------------------------------------------------------------------------------- /ditto/configs/datamodule/sapien_different_angle_stereo_depth_datamodule.yaml: -------------------------------------------------------------------------------- 1 | _target_: src.datamodules.default_datamodule.DefaultDataModule 2 | 3 | opt: 4 | train: 5 | dataset_type: GeoArtDatasetSapien 6 | data_dir: ${data_dir} # data_dir is specified in config.yaml 7 | data_path: 8 | - sapien_different_angle/train 9 | num_point: 8192 10 | num_point_occ: 2048 11 | num_point_seg: 512 12 | norm: True 13 | norm_padding: 0.1 14 | batch_size: 32 15 | num_workers: 4 16 | rand_rot: False # random rotation of input data 17 | weighted_occ_sample: False 18 | 19 | val: 20 | dataset_type: GeoArtDatasetSapienTest 21 | data_dir: ${data_dir} # data_dir is specified in config.yaml 22 | data_path: 23 | - sapien_different_angle/val 24 | num_point: 8192 25 | #num_point_occ: 90000 26 | #num_point_seg: 5000 27 | norm: True 28 | norm_padding: 0.1 29 | batch_size: 1 30 | num_workers: 4 31 | rand_rot: False # random rotation of input data 32 | 33 | test: 34 | dataset_type: GeoArtDatasetSapienTest 35 | data_dir: ${data_dir} # data_dir is specified in config.yaml 36 | data_path: 
37 | - sapien_different_angle/test 38 | 39 | num_point: 8192 40 | #num_point_occ: 90000 41 | #num_point_seg: 5000 42 | norm: True 43 | norm_padding: 0.1 44 | batch_size: 1 45 | num_workers: 4 46 | rand_rot: False # random rotation of input data 47 | -------------------------------------------------------------------------------- /ditto/configs/datamodule/sapien_different_pose_angle_stereo_depth_datamodule.yaml: -------------------------------------------------------------------------------- 1 | _target_: src.datamodules.default_datamodule.DefaultDataModule 2 | 3 | opt: 4 | train: 5 | dataset_type: GeoArtDatasetSapien 6 | data_dir: ${data_dir} # data_dir is specified in config.yaml 7 | data_path: 8 | - sapien_different_angle/train 9 | num_point: 8192 10 | num_point_occ: 2048 11 | num_point_seg: 512 12 | norm: True 13 | norm_padding: 0.1 14 | batch_size: 32 15 | num_workers: 4 16 | rand_rot: False # random rotation of input data 17 | weighted_occ_sample: False 18 | 19 | val: 20 | dataset_type: GeoArtDatasetSapienTest 21 | data_dir: ${data_dir} # data_dir is specified in config.yaml 22 | data_path: 23 | - sapien_different_angle/val 24 | num_point: 8192 25 | #num_point_occ: 90000 26 | #num_point_seg: 5000 27 | norm: True 28 | norm_padding: 0.1 29 | batch_size: 1 30 | num_workers: 4 31 | rand_rot: False # random rotation of input data 32 | 33 | test: 34 | dataset_type: GeoArtDatasetSapienTest 35 | data_dir: ${data_dir} # data_dir is specified in config.yaml 36 | data_path: 37 | - sapien_different_angle/test 38 | 39 | num_point: 8192 40 | #num_point_occ: 90000 41 | #num_point_seg: 5000 42 | norm: True 43 | norm_padding: 0.1 44 | batch_size: 1 45 | num_workers: 4 46 | rand_rot: False # random rotation of input data 47 | -------------------------------------------------------------------------------- /ditto/configs/datamodule/sapien_single_datamodule.yaml: -------------------------------------------------------------------------------- 1 | _target_: 
src.datamodules.default_datamodule.DefaultDataModule 2 | 3 | opt: 4 | train: 5 | dataset_type: GeoArtDatasetSapien 6 | data_dir: ${data_dir} # data_dir is specified in config.yaml 7 | data_path: 8 | - sapien_single_demo/train 9 | num_point: 8192 10 | num_point_occ: 2048 11 | num_point_seg: 512 12 | norm: True 13 | norm_padding: 0.1 14 | batch_size: 32 15 | num_workers: 4 16 | rand_rot: False # random rotation of input data 17 | weighted_occ_sample: False 18 | 19 | val: 20 | dataset_type: GeoArtDatasetSapienTest 21 | data_dir: ${data_dir} # data_dir is specified in config.yaml 22 | data_path: 23 | - sapien_single_demo/val 24 | num_point: 8192 25 | #num_point_occ: 90000 26 | #num_point_seg: 5000 27 | norm: True 28 | norm_padding: 0.1 29 | batch_size: 1 30 | num_workers: 4 31 | rand_rot: False # random rotation of input data 32 | 33 | test: 34 | dataset_type: GeoArtDatasetSapienTest 35 | data_dir: ${data_dir} # data_dir is specified in config.yaml 36 | data_path: 37 | - sapien_single_demo/test 38 | 39 | num_point: 8192 40 | #num_point_occ: 90000 41 | #num_point_seg: 5000 42 | norm: True 43 | norm_padding: 0.1 44 | batch_size: 1 45 | num_workers: 4 46 | rand_rot: False # random rotation of input data 47 | -------------------------------------------------------------------------------- /ditto/configs/datamodule/sapien_single_stereo_depth_datamodule.yaml: -------------------------------------------------------------------------------- 1 | _target_: src.datamodules.default_datamodule.DefaultDataModule 2 | 3 | opt: 4 | train: 5 | dataset_type: GeoArtDatasetSapien 6 | data_dir: ${data_dir} # data_dir is specified in config.yaml 7 | data_path: 8 | - sapien_single_stereo_depth/train 9 | num_point: 8192 10 | num_point_occ: 2048 11 | num_point_seg: 512 12 | norm: True 13 | norm_padding: 0.1 14 | batch_size: 32 15 | num_workers: 4 16 | rand_rot: False # random rotation of input data 17 | weighted_occ_sample: False 18 | 19 | val: 20 | dataset_type: GeoArtDatasetSapienTest 21 | 
data_dir: ${data_dir} # data_dir is specified in config.yaml 22 | data_path: 23 | - sapien_single_stereo_depth/val 24 | num_point: 8192 25 | #num_point_occ: 90000 26 | #num_point_seg: 5000 27 | norm: True 28 | norm_padding: 0.1 29 | batch_size: 1 30 | num_workers: 4 31 | rand_rot: False # random rotation of input data 32 | 33 | test: 34 | dataset_type: GeoArtDatasetSapienTest 35 | data_dir: ${data_dir} # data_dir is specified in config.yaml 36 | data_path: 37 | - sapien_single_stereo_depth/test 38 | 39 | num_point: 8192 40 | #num_point_occ: 90000 41 | #num_point_seg: 5000 42 | norm: True 43 | norm_padding: 0.1 44 | batch_size: 1 45 | num_workers: 4 46 | rand_rot: False # random rotation of input data 47 | -------------------------------------------------------------------------------- /ditto/configs/experiment/all_stereo.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | 37 | datamodule: 38 | opt: 39 | train: 40 | batch_size: 6 #8 41 | num_point: 8192 42 | data_path: 43 | - drawer_stereo/train 44 | - 
faucet_stereo/train 45 | - laptop_stereo/train 46 | val: 47 | num_point: 8192 48 | data_path: 49 | - laptop_stereo/val 50 | test: 51 | num_point: 8192 52 | data_path: 53 | - sapien/test 54 | -------------------------------------------------------------------------------- /ditto/configs/experiment/all_stereo_ablation.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule_ablation 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | r_use_state_loss: False 37 | p_use_state_loss: False 38 | 39 | datamodule: 40 | opt: 41 | train: 42 | batch_size: 6 #8 43 | num_point: 8192 44 | data_path: 45 | - laptop_stereo/train 46 | - faucet_stereo/train 47 | - drawer_stereo/train 48 | val: 49 | num_point: 8192 50 | data_path: 51 | - laptop_stereo/val 52 | test: 53 | num_point: 8192 54 | data_path: 55 | - laptop/test 56 | -------------------------------------------------------------------------------- /ditto/configs/experiment/drawer.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 
4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | 37 | datamodule: 38 | opt: 39 | train: 40 | batch_size: 6 #8 41 | num_point: 8192 42 | data_path: 43 | - drawer/train 44 | val: 45 | num_point: 8192 46 | data_path: 47 | - drawer/val 48 | test: 49 | num_point: 8192 50 | data_path: 51 | - sapien/test 52 | -------------------------------------------------------------------------------- /ditto/configs/experiment/drawer_stereo_ablation.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule_ablation 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only 
specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | r_use_state_loss: False 37 | p_use_state_loss: False 38 | 39 | datamodule: 40 | opt: 41 | train: 42 | batch_size: 6 #8 43 | num_point: 8192 44 | data_path: 45 | - drawer_stereo/train 46 | val: 47 | num_point: 8192 48 | data_path: 49 | - drawer_stereo/val 50 | test: 51 | num_point: 8192 52 | data_path: 53 | - laptop/test 54 | -------------------------------------------------------------------------------- /ditto/configs/experiment/faucet.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | 37 | datamodule: 38 | opt: 39 | train: 40 | batch_size: 6 #8 41 | num_point: 8192 42 | data_path: 43 | - faucet/train 44 | val: 45 | num_point: 8192 46 | data_path: 47 | - faucet/val 48 | test: 49 | num_point: 8192 50 | data_path: 51 | - 
laptop/test 52 | -------------------------------------------------------------------------------- /ditto/configs/experiment/faucet_stereo.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | 37 | datamodule: 38 | opt: 39 | train: 40 | batch_size: 6 #8 41 | num_point: 8192 42 | data_path: 43 | - faucet_stereo/train 44 | val: 45 | num_point: 8192 46 | data_path: 47 | - faucet_stereo/val 48 | test: 49 | num_point: 8192 50 | data_path: 51 | - laptop/test 52 | -------------------------------------------------------------------------------- /ditto/configs/experiment/faucet_stereo_ablation.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override 
/model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule_ablation 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | r_use_state_loss: False 37 | p_use_state_loss: False 38 | 39 | datamodule: 40 | opt: 41 | train: 42 | batch_size: 6 #8 43 | num_point: 8192 44 | data_path: 45 | - faucet_stereo/train 46 | val: 47 | num_point: 8192 48 | data_path: 49 | - faucet_stereo/val 50 | test: 51 | num_point: 8192 52 | data_path: 53 | - laptop/test 54 | -------------------------------------------------------------------------------- /ditto/configs/experiment/laptop.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 
1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | 37 | datamodule: 38 | opt: 39 | train: 40 | batch_size: 6 #8 41 | num_point: 8192 42 | data_path: 43 | - laptop/train 44 | val: 45 | num_point: 8192 46 | data_path: 47 | - laptop/val 48 | test: 49 | num_point: 8192 50 | data_path: 51 | - laptop/test 52 | -------------------------------------------------------------------------------- /ditto/configs/experiment/laptop_stereo_ablation.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule_ablation 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | r_use_state_loss: False 37 | p_use_state_loss: False 38 | 39 | datamodule: 40 | opt: 41 | train: 42 | batch_size: 6 #8 43 | num_point: 8192 44 | data_path: 45 | - laptop_stereo/train 46 | val: 47 | num_point: 8192 48 | data_path: 49 | - laptop_stereo/val 50 | test: 51 | num_point: 8192 52 | data_path: 53 | - laptop/test 54 | -------------------------------------------------------------------------------- /ditto/configs/experiment/sapien.yaml: 
-------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_datamodule 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 300 25 | 26 | callbacks: 27 | early_stopping: 28 | patience: 50 29 | 30 | model: 31 | opt: 32 | hparams: 33 | learning_rate: 1.0e-4 #1.0e-4 34 | weight_decay: 0.0001 35 | loss_weight_attn: 0.0 36 | 37 | datamodule: 38 | opt: 39 | train: 40 | batch_size: 8 #8 41 | num_point: 8192 42 | data_path: 43 | - sapien/train 44 | val: 45 | num_point: 8192 46 | data_path: 47 | - sapien/val 48 | test: 49 | num_point: 8192 50 | data_path: 51 | - sapien/test 52 | -------------------------------------------------------------------------------- /ditto/configs/experiment/sapien_different_angle_stereo_depth.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_different_angle_stereo_depth_datamodule 14 | - override /callbacks: default.yaml 
15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 2000 25 | # resume_from_checkpoint: ${work_dir}/logs/runs/2022-05-14/Sapien_single_demo-12-25-23/checkpoints/last.ckpt 26 | 27 | callbacks: 28 | early_stopping: 29 | patience: 200 30 | 31 | model: 32 | opt: 33 | hparams: 34 | learning_rate: 1.0e-4 35 | weight_decay: 0.0001 36 | loss_weight_attn: 0.0 37 | 38 | datamodule: 39 | opt: 40 | train: 41 | batch_size: 8 42 | num_point: 8192 43 | data_path: 44 | - sapien_different_angle/train 45 | val: 46 | num_point: 8192 47 | data_path: 48 | - sapien_different_angle/val 49 | test: 50 | num_point: 8192 51 | data_path: 52 | - sapien_different_angle/test 53 | -------------------------------------------------------------------------------- /ditto/configs/experiment/sapien_different_pose_angle_stereo_depth.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_different_pose_angle_stereo_depth_datamodule 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 2000 25 | # resume_from_checkpoint: 
${work_dir}/logs/runs/2022-05-14/Sapien_single_demo-12-25-23/checkpoints/last.ckpt 26 | 27 | callbacks: 28 | early_stopping: 29 | patience: 200 30 | 31 | model: 32 | opt: 33 | hparams: 34 | learning_rate: 1.0e-4 35 | weight_decay: 0.0001 36 | loss_weight_attn: 0.0 37 | 38 | datamodule: 39 | opt: 40 | train: 41 | batch_size: 8 42 | num_point: 8192 43 | data_path: 44 | - sapien_different_angle/train 45 | val: 46 | num_point: 8192 47 | data_path: 48 | - sapien_different_angle/val 49 | test: 50 | num_point: 8192 51 | data_path: 52 | - sapien_different_angle/test 53 | -------------------------------------------------------------------------------- /ditto/configs/experiment/sapien_single_stereo_depth.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | # to execute this experiment run: 4 | # python run.py experiment=example_simple.yaml 5 | 6 | # full model 7 | # separate ppp decoder 8 | # separate feature 9 | defaults: 10 | - override /trainer: minimal.yaml # choose trainer from 'configs/trainer/' 11 | - override /model: geo_art_model_v0.yaml 12 | - override /model/network: geo_art_net_v0.yaml 13 | - override /datamodule: sapien_single_stereo_depth_datamodule 14 | - override /callbacks: default.yaml 15 | - override /logger: null 16 | 17 | # all parameters below will be merged with parameters from default configurations set above 18 | # this allows you to overwrite only specified parameters 19 | 20 | seed: 12345 21 | 22 | trainer: 23 | min_epochs: 1 24 | max_epochs: 500 25 | # resume_from_checkpoint: ${work_dir}/logs/runs/2022-05-25/sapien_single_stereo_depth-18-39-31/checkpoints/last.ckpt 26 | 27 | callbacks: 28 | early_stopping: 29 | patience: 100 30 | 31 | model: 32 | opt: 33 | hparams: 34 | learning_rate: 1.0e-4 35 | weight_decay: 0.0001 36 | loss_weight_attn: 0.0 37 | 38 | datamodule: 39 | opt: 40 | train: 41 | batch_size: 1 42 | num_point: 8192 43 | data_path: 44 | - sapien_single_demo/train 45 | val: 
46 | num_point: 8192 47 | data_path: 48 | - sapien_single_demo/val 49 | test: 50 | num_point: 8192 51 | data_path: 52 | - sapien_single_demo/test 53 | -------------------------------------------------------------------------------- /ditto/configs/hydra/default.yaml: -------------------------------------------------------------------------------- 1 | # output paths for hydra logs 2 | run: 3 | dir: logs/runs/${now:%Y-%m-%d}/${hydra.runtime.choices.experiment}-${now:%H-%M-%S} 4 | sweep: 5 | dir: logs/multiruns/${now:%Y-%m-%d_%H-%M-%S} 6 | subdir: ${hydra.job.num} 7 | 8 | # you can set here environment variables that are universal for all users 9 | # for system specific variables (like data paths) it's better to use .env file! 10 | job: 11 | env_set: 12 | EXAMPLE_VAR: "example_value" 13 | -------------------------------------------------------------------------------- /ditto/configs/logger/comet.yaml: -------------------------------------------------------------------------------- 1 | # https://www.comet.ml 2 | 3 | comet: 4 | _target_: pytorch_lightning.loggers.comet.CometLogger 5 | api_key: ${oc.env:COMET_API_TOKEN} # api key is laoded from environment variable 6 | project_name: "template-tests" 7 | experiment_name: null 8 | -------------------------------------------------------------------------------- /ditto/configs/logger/csv.yaml: -------------------------------------------------------------------------------- 1 | # csv logger built in lightning 2 | 3 | csv: 4 | _target_: pytorch_lightning.loggers.csv_logs.CSVLogger 5 | save_dir: "." 
6 | name: "csv/" 7 | version: null 8 | prefix: "" 9 | -------------------------------------------------------------------------------- /ditto/configs/logger/many_loggers.yaml: -------------------------------------------------------------------------------- 1 | # train with many loggers at once 2 | 3 | defaults: 4 | # - aim.yaml 5 | # - comet.yaml 6 | - csv.yaml 7 | # - mlflow.yaml 8 | # - neptune.yaml 9 | # - tensorboard.yaml 10 | - wandb.yaml 11 | -------------------------------------------------------------------------------- /ditto/configs/logger/mlflow.yaml: -------------------------------------------------------------------------------- 1 | # https://mlflow.org 2 | 3 | mlflow: 4 | _target_: pytorch_lightning.loggers.mlflow.MLFlowLogger 5 | experiment_name: default 6 | tracking_uri: null 7 | tags: null 8 | save_dir: ./mlruns 9 | prefix: "" 10 | artifact_location: null 11 | -------------------------------------------------------------------------------- /ditto/configs/logger/neptune.yaml: -------------------------------------------------------------------------------- 1 | # https://neptune.ai 2 | 3 | neptune: 4 | _target_: pytorch_lightning.loggers.neptune.NeptuneLogger 5 | api_key: ${oc.env:NEPTUNE_API_TOKEN} # api key is laoded from environment variable 6 | project_name: your_name/template-tests 7 | close_after_fit: True 8 | offline_mode: False 9 | experiment_name: null 10 | experiment_id: null 11 | prefix: "" 12 | -------------------------------------------------------------------------------- /ditto/configs/logger/tensorboard.yaml: -------------------------------------------------------------------------------- 1 | # https://www.tensorflow.org/tensorboard/ 2 | 3 | tensorboard: 4 | _target_: pytorch_lightning.loggers.tensorboard.TensorBoardLogger 5 | save_dir: "tensorboard/" 6 | name: "default" 7 | version: null 8 | log_graph: False 9 | default_hp_metric: True 10 | prefix: "" 11 | 
-------------------------------------------------------------------------------- /ditto/configs/logger/wandb.yaml: -------------------------------------------------------------------------------- 1 | # https://wandb.ai 2 | 3 | wandb: 4 | _target_: pytorch_lightning.loggers.wandb.WandbLogger 5 | project: "Ditto" 6 | name: ${hydra:runtime.choices.experiment}-${now:%H-%M-%S} 7 | save_dir: "." 8 | offline: False # set True to store all logs only locally 9 | id: null # pass correct id to resume experiment! 10 | # entity: "" # set to name of your wandb team or just remove it 11 | log_model: False 12 | prefix: "" 13 | job_type: "train" 14 | group: "" 15 | tags: [] 16 | -------------------------------------------------------------------------------- /ditto/configs/model/geo_art_model_v0.yaml: -------------------------------------------------------------------------------- 1 | _target_: src.models.geo_art_model_v0.GeoArtModelV0 2 | 3 | defaults: 4 | - network: geo_art_net_v0.yaml 5 | 6 | opt: 7 | hparams: 8 | learning_rate: 1.0e-4 9 | weight_decay: 0 10 | lr_decay_gamma: 0.9 11 | lr_decay_freq: 200 12 | 13 | loss_weight_occ: 1.0 14 | loss_weight_seg: 1.0 15 | loss_weight_joint_type: 1.0 16 | loss_weight_joint_param: 1.0 17 | 18 | no_seg_mask: True 19 | 20 | p_ori_arccos: True 21 | p_ori_weight: 1.0 22 | p_offset_weight: 1.0 23 | p_use_state_loss: True 24 | p_cos_ambiguity: False 25 | 26 | r_ori_arccos: True 27 | r_ori_weight: 1.0 28 | r_p2l_ori_arccos: True 29 | r_p2l_ori_weight: 1.0 30 | r_p2l_dist_weight: 1.0 31 | r_use_state_loss: True 32 | r_cos_ambiguity: False 33 | r_state_weight: 1.0 34 | r_rot_weight: 1.0 35 | r_displacement_weight: 1.0 36 | 37 | test_occ_th: 0.5 38 | test_seg_th: 0.5 39 | test_res: 32 40 | -------------------------------------------------------------------------------- /ditto/configs/trainer/ddp.yaml: -------------------------------------------------------------------------------- 1 | _target_: pytorch_lightning.Trainer 2 | 3 | gpus: 4 4 | 
accelerator: ddp 5 | -------------------------------------------------------------------------------- /ditto/configs/trainer/debug.yaml: -------------------------------------------------------------------------------- 1 | _target_: pytorch_lightning.Trainer 2 | 3 | gpus: 0 4 | 5 | min_epochs: 1 6 | max_epochs: 2 7 | 8 | # prints 9 | progress_bar_refresh_rate: null 10 | weights_summary: null 11 | profiler: null 12 | 13 | # debugs 14 | num_sanity_val_steps: 2 15 | fast_dev_run: False 16 | overfit_batches: 0 17 | limit_train_batches: 1.0 18 | limit_val_batches: 1.0 19 | limit_test_batches: 1.0 20 | track_grad_norm: -1 21 | terminate_on_nan: False 22 | -------------------------------------------------------------------------------- /ditto/configs/trainer/default.yaml: -------------------------------------------------------------------------------- 1 | _target_: pytorch_lightning.Trainer 2 | 3 | # default values for all trainer parameters 4 | checkpoint_callback: True 5 | default_root_dir: null 6 | gradient_clip_val: 0.0 7 | process_position: 0 8 | num_nodes: 1 9 | num_processes: 1 10 | gpus: null 11 | auto_select_gpus: False 12 | tpu_cores: null 13 | log_gpu_memory: null 14 | progress_bar_refresh_rate: 1 15 | overfit_batches: 0.0 16 | track_grad_norm: -1 17 | check_val_every_n_epoch: 1 18 | fast_dev_run: False 19 | accumulate_grad_batches: 1 20 | max_epochs: 1 21 | min_epochs: 1 22 | max_steps: null 23 | min_steps: null 24 | limit_train_batches: 1.0 25 | limit_val_batches: 1.0 26 | limit_test_batches: 1.0 27 | val_check_interval: 1.0 28 | flush_logs_every_n_steps: 100 29 | log_every_n_steps: 50 30 | accelerator: null 31 | sync_batchnorm: False 32 | precision: 32 33 | weights_summary: "top" 34 | weights_save_path: null 35 | num_sanity_val_steps: 2 36 | truncated_bptt_steps: null 37 | resume_from_checkpoint: null 38 | profiler: null 39 | benchmark: False 40 | deterministic: False 41 | reload_dataloaders_every_epoch: False 42 | auto_lr_find: False 43 | 
replace_sampler_ddp: True 44 | terminate_on_nan: False 45 | auto_scale_batch_size: False 46 | prepare_data_per_node: True 47 | plugins: null 48 | amp_backend: "native" 49 | amp_level: "O2" 50 | move_metrics_to_cpu: False 51 | -------------------------------------------------------------------------------- /ditto/configs/trainer/minimal.yaml: -------------------------------------------------------------------------------- 1 | _target_: pytorch_lightning.Trainer 2 | 3 | # set `1` to train on GPU, `0` to train on CPU only 4 | gpus: 1 5 | 6 | min_epochs: 1 7 | max_epochs: 100 8 | 9 | weights_summary: null 10 | progress_bar_refresh_rate: 10 11 | resume_from_checkpoint: null 12 | -------------------------------------------------------------------------------- /ditto/data_generation/d415-pattern-sq.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/data_generation/d415-pattern-sq.png -------------------------------------------------------------------------------- /ditto/data_generation/mesh_fix.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from tqdm import tqdm 4 | from glob import glob 5 | 6 | #---------------------modify name/classes/sub_dataset---------------------- 7 | data_path = '~/Sim2Real2/ditto/assets/urdf/Laptop' 8 | 9 | def fixmesh(input_path,output_path): 10 | # fix mesh with Manifold Plus 11 | try: 12 | completed = subprocess.run(["~/Sim2Real2/ManifoldPlus/build/manifold", "--input", input_path, "--output", output_path, "--depth", '7'], timeout=60, check=True, capture_output=True) 13 | except: 14 | print("{} failed to run",format(input_path)) 15 | return False 16 | return True 17 | 18 | def fix_mesh_in_folder(data_path): 19 | for shape_id in tqdm(os.listdir(data_path)): 20 | ori_mesh_path = os.path.join(data_path, shape_id, 'part_mesh') 21 | 
out_mesh_path = os.path.join(data_path, shape_id, 'part_mesh_fixed') 22 | if not os.path.exists(out_mesh_path): 23 | os.makedirs(out_mesh_path) 24 | for mesh in os.listdir(ori_mesh_path): 25 | if mesh[-4:] == '.obj': 26 | input_path = os.path.join(ori_mesh_path,mesh) 27 | output_path = os.path.join(out_mesh_path,mesh) 28 | fixmesh(input_path,output_path) 29 | # print(mesh) 30 | 31 | fix_mesh_in_folder(data_path) 32 | -------------------------------------------------------------------------------- /ditto/real_experiment/.gitignore: -------------------------------------------------------------------------------- 1 | *.pcd 2 | TestVHACD 3 | 4 | -------------------------------------------------------------------------------- /ditto/requirements.txt: -------------------------------------------------------------------------------- 1 | # --------- pytorch --------- # 2 | pytorch-lightning==1.5.4 3 | torchmetrics==0.4.1 4 | setuptools 5 | 6 | # --------- hydra --------- # 7 | hydra-core==1.1.0.rc1 8 | hydra-colorlog==1.1.0.dev1 9 | hydra-optuna-sweeper==1.1.0.dev2 10 | # hydra-ax-sweeper==1.1.0 11 | # hydra-ray-launcher==0.1.2 12 | # hydra-submitit-launcher==1.1.0 13 | 14 | # --------- loggers --------- # 15 | wandb>=0.10.30 16 | # neptune-client 17 | # mlflow 18 | # comet-ml 19 | # torch_tb_profiler 20 | 21 | # --------- linters --------- # 22 | pre-commit 23 | black 24 | isort 25 | flake8 26 | 27 | # --------- others --------- # 28 | jupyterlab 29 | python-dotenv 30 | rich 31 | pytest 32 | sh 33 | scikit-learn 34 | seaborn 35 | pudb 36 | # dvc 37 | 38 | # --------- Ditto --------- # 39 | open3d 40 | trimesh 41 | cython 42 | pykdtree 43 | -------------------------------------------------------------------------------- /ditto/run.py: -------------------------------------------------------------------------------- 1 | import dotenv 2 | import hydra 3 | from omegaconf import DictConfig 4 | 5 | # load environment variables from `.env` file if it exists 6 | # recursively 
searches for `.env` in all folders starting from work dir 7 | dotenv.load_dotenv(override=True) 8 | 9 | 10 | @hydra.main(config_path="configs/", config_name="config.yaml") 11 | def main(config: DictConfig): 12 | 13 | # Imports should be nested inside @hydra.main to optimize tab completion 14 | # Read more here: https://github.com/facebookresearch/hydra/issues/934 15 | from src.train import train 16 | from src.utils import utils 17 | 18 | # A couple of optional utilities: 19 | # - disabling python warnings 20 | # - easier access to debug mode 21 | # - forcing debug friendly configuration 22 | # - forcing multi-gpu friendly configuration 23 | # You can safely get rid of this line if you don't want those 24 | utils.extras(config) 25 | utils.save_config(config) 26 | 27 | # Pretty print config using Rich library 28 | if config.get("print_config"): 29 | utils.print_config(config, resolve=True) 30 | 31 | # Train model 32 | return train(config) 33 | 34 | 35 | if __name__ == "__main__": 36 | main() 37 | -------------------------------------------------------------------------------- /ditto/run_test.py: -------------------------------------------------------------------------------- 1 | import dotenv 2 | import hydra 3 | from omegaconf import DictConfig 4 | 5 | # load environment variables from `.env` file if it exists 6 | # recursively searches for `.env` in all folders starting from work dir 7 | dotenv.load_dotenv(override=True) 8 | 9 | 10 | @hydra.main(config_path="configs/", config_name="config.yaml") 11 | def main(config: DictConfig): 12 | 13 | # Imports should be nested inside @hydra.main to optimize tab completion 14 | # Read more here: https://github.com/facebookresearch/hydra/issues/934 15 | from src.test import test 16 | from src.utils import utils 17 | 18 | # A couple of optional utilities: 19 | # - disabling python warnings 20 | # - easier access to debug mode 21 | # - forcing debug friendly configuration 22 | # - forcing multi-gpu friendly configuration 23 | # 
You can safely get rid of this line if you don't want those 24 | utils.extras(config) 25 | assert config.trainer.get("resume_from_checkpoint", None) 26 | 27 | utils.save_config(config) 28 | 29 | # Pretty print config using Rich library 30 | if config.get("print_config"): 31 | utils.print_config(config, resolve=True) 32 | 33 | # Train model 34 | return test(config) 35 | 36 | 37 | if __name__ == "__main__": 38 | main() 39 | -------------------------------------------------------------------------------- /ditto/scripts/run_collect_data.sh: -------------------------------------------------------------------------------- 1 | # python data_generation/collect_data.py \ 2 | # 0007 \ 3 | # Drawer \ 4 | # 1 \ 5 | # python data_generation/collect_data.py \ 6 | # 44817 \ 7 | # Drawer \ 8 | # 1 \ 9 | 10 | # python data_generation/collect_data.py \ 11 | # 857 \ 12 | # Faucet \ 13 | # 1 \ 14 | 15 | # python data_generation/collect_data.py \ 16 | # 9748 \ 17 | # Laptop \ 18 | # 1 \ 19 | 20 | # python data_generation/collect_data.py \ 21 | # 10211 \ 22 | # Laptop \ 23 | # 1 \ 24 | 25 | # python data_generation/collect_data.py 0008 Drawer 3 --out_dir data/laptop_train # > /dev/null 2>&1 26 | # python data_generation/collect_data.py 10626 Laptop 0 --out_dir data/laptop_train # > /dev/null 2>&1 27 | python data_generation/collect_data.py 0011 Faucet 0 --out_dir results --stereo_out_dir results/stereo # > /dev/null 2>&1 -------------------------------------------------------------------------------- /ditto/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/__init__.py -------------------------------------------------------------------------------- /ditto/src/callbacks/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/callbacks/__init__.py -------------------------------------------------------------------------------- /ditto/src/callbacks/misc_callbacks.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from pytorch_lightning.callbacks import Callback 4 | from pytorch_lightning.core.saving import save_hparams_to_yaml 5 | 6 | from src.utils import utils 7 | 8 | log = utils.get_logger(__name__) 9 | 10 | 11 | class OnCheckpointHparams(Callback): 12 | def on_save_checkpoint(self, trainer, pl_module, checkpoint): 13 | # only do this 1 time 14 | if trainer.current_epoch == 0: 15 | file_path = f"{os.getcwd()}/hparams.yaml" 16 | log.info(f"Saving hparams to file_path: {file_path}") 17 | save_hparams_to_yaml(config_yaml=file_path, hparams=pl_module.hparams) 18 | -------------------------------------------------------------------------------- /ditto/src/datamodules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/datamodules/__init__.py -------------------------------------------------------------------------------- /ditto/src/datamodules/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import DataLoader 2 | 3 | from src.datamodules.datasets.geo_art_dataset_v0 import GeoArtDatasetV0 4 | from src.datamodules.datasets.geo_art_dataset_v1 import GeoArtDatasetV1 5 | from src.datamodules.datasets.geo_art_dataset_sapien import GeoArtDatasetSapien 6 | from src.datamodules.datasets.geo_art_dataset_sapien_test import GeoArtDatasetSapienTest 7 | from src.datamodules.datasets.geo_art_dataset_ablation import GeoArtDatasetAblation 8 | from src.datamodules.datasets.geo_art_dataset_ablation_test import 
GeoArtDatasetTestAblation 9 | 10 | 11 | def create_dataset(dataset_opt): 12 | ds = eval(dataset_opt["dataset_type"])(dataset_opt) 13 | return ds 14 | 15 | 16 | def create_dataloader(dataset, dataset_opt, phase): 17 | if phase == "train": 18 | return DataLoader( 19 | dataset, 20 | batch_size=dataset_opt["batch_size"], 21 | shuffle=True, 22 | num_workers=dataset_opt["num_workers"], 23 | sampler=None, 24 | drop_last=True, 25 | pin_memory=True, 26 | ) 27 | else: 28 | return DataLoader( 29 | dataset, 30 | batch_size=1, 31 | shuffle=False, 32 | num_workers=4, 33 | sampler=None, 34 | pin_memory=True, 35 | ) 36 | -------------------------------------------------------------------------------- /ditto/src/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/models/__init__.py -------------------------------------------------------------------------------- /ditto/src/models/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from src.third_party.ConvONets.config import get_model as ConvONets 2 | 3 | 4 | def create_network(mode_opt): 5 | network = eval(mode_opt.network_type)(mode_opt) 6 | return network 7 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/third_party/ConvONets/__init__.py -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/conv_onet/__init__.py: -------------------------------------------------------------------------------- 1 | from src.third_party.ConvONets.conv_onet import config, generation_two_stage, models 2 | 3 | __all__ = 
[config, generation_two_stage, models] 4 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/encoder/__init__.py: -------------------------------------------------------------------------------- 1 | from src.third_party.ConvONets.encoder import encoder 2 | 3 | encoder_dict = { 4 | "pointnetpp": encoder.LocalPoolPointnetPPFusion, 5 | } 6 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/layers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | # Resnet Blocks 6 | class ResnetBlockFC(nn.Module): 7 | """Fully connected ResNet Block class. 8 | 9 | Args: 10 | size_in (int): input dimension 11 | size_out (int): output dimension 12 | size_h (int): hidden dimension 13 | """ 14 | 15 | def __init__(self, size_in, size_out=None, size_h=None): 16 | super().__init__() 17 | # Attributes 18 | if size_out is None: 19 | size_out = size_in 20 | 21 | if size_h is None: 22 | size_h = min(size_in, size_out) 23 | 24 | self.size_in = size_in 25 | self.size_h = size_h 26 | self.size_out = size_out 27 | # Submodules 28 | self.fc_0 = nn.Linear(size_in, size_h) 29 | self.fc_1 = nn.Linear(size_h, size_out) 30 | self.actvn = nn.ReLU() 31 | 32 | if size_in == size_out: 33 | self.shortcut = None 34 | else: 35 | self.shortcut = nn.Linear(size_in, size_out, bias=False) 36 | # Initialization 37 | nn.init.zeros_(self.fc_1.weight) 38 | 39 | def forward(self, x): 40 | net = self.fc_0(self.actvn(x)) 41 | dx = self.fc_1(self.actvn(net)) 42 | 43 | if self.shortcut is not None: 44 | x_s = self.shortcut(x) 45 | else: 46 | x_s = x 47 | 48 | return x_s + dx 49 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/third_party/ConvONets/utils/__init__.py -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmcubes/.gitignore: -------------------------------------------------------------------------------- 1 | PyMCubes.egg-info 2 | build 3 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmcubes/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012-2015, P. M. Neila 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of the copyright holder nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmcubes/__init__.py: -------------------------------------------------------------------------------- 1 | from src.third_party.ConvONets.utils.libmcubes.exporter import ( 2 | export_mesh, 3 | export_obj, 4 | export_off, 5 | ) 6 | from src.third_party.ConvONets.utils.libmcubes.mcubes import ( 7 | marching_cubes, 8 | marching_cubes_func, 9 | ) 10 | 11 | __all__ = [ 12 | marching_cubes, 13 | marching_cubes_func, 14 | export_mesh, 15 | export_obj, 16 | export_off, 17 | ] 18 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmcubes/mcubes.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/third_party/ConvONets/utils/libmcubes/mcubes.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmcubes/pyarray_symbol.h: -------------------------------------------------------------------------------- 1 | 2 | #define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API 3 | -------------------------------------------------------------------------------- 
/ditto/src/third_party/ConvONets/utils/libmcubes/pywrapper.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef _PYWRAPPER_H 3 | #define _PYWRAPPER_H 4 | 5 | #include 6 | #include "pyarraymodule.h" 7 | 8 | #include 9 | 10 | PyObject* marching_cubes(PyArrayObject* arr, double isovalue); 11 | PyObject* marching_cubes2(PyArrayObject* arr, double isovalue); 12 | PyObject* marching_cubes3(PyArrayObject* arr, double isovalue); 13 | PyObject* marching_cubes_func(PyObject* lower, PyObject* upper, 14 | int numx, int numy, int numz, PyObject* f, double isovalue); 15 | 16 | #endif // _PYWRAPPER_H 17 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmesh/.gitignore: -------------------------------------------------------------------------------- 1 | triangle_hash.cpp 2 | build 3 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmesh/__init__.py: -------------------------------------------------------------------------------- 1 | from .inside_mesh import MeshIntersector, TriangleIntersector2d, check_mesh_contains 2 | 3 | __all__ = [check_mesh_contains, MeshIntersector, TriangleIntersector2d] 4 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmesh/triangle_hash.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/third_party/ConvONets/utils/libmesh/triangle_hash.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmise/.gitignore: -------------------------------------------------------------------------------- 1 | mise.c 2 | mise.cpp 3 | mise.html 4 
| -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmise/__init__.py: -------------------------------------------------------------------------------- 1 | from .mise import MISE 2 | 3 | __all__ = [MISE] 4 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmise/mise.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/third_party/ConvONets/utils/libmise/mise.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libmise/test.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import numpy as np 4 | from mise import MISE 5 | 6 | t0 = time.time() 7 | extractor = MISE(1, 2, 0.0) 8 | 9 | p = extractor.query() 10 | i = 0 11 | 12 | while p.shape[0] != 0: 13 | print(i) 14 | print(p) 15 | v = 2 * (p.sum(axis=-1) > 2).astype(np.float64) - 1 16 | extractor.update(p, v) 17 | p = extractor.query() 18 | i += 1 19 | if i >= 8: 20 | break 21 | 22 | print(extractor.to_dense()) 23 | # p, v = extractor.get_points() 24 | # print(p) 25 | # print(v) 26 | print("Total time: %f" % (time.time() - t0)) 27 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libsimplify/__init__.py: -------------------------------------------------------------------------------- 1 | import trimesh 2 | 3 | from .simplify_mesh import mesh_simplify 4 | 5 | 6 | def simplify_mesh(mesh, f_target=10000, agressiveness=7.0): 7 | vertices = mesh.vertices 8 | faces = mesh.faces 9 | 10 | vertices, faces = mesh_simplify(vertices, faces, f_target, agressiveness) 11 | 12 | mesh_simplified = 
trimesh.Trimesh(vertices, faces, process=False) 13 | 14 | return mesh_simplified 15 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libsimplify/simplify_mesh.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/third_party/ConvONets/utils/libsimplify/simplify_mesh.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libsimplify/test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from simplify_mesh import mesh_simplify 3 | 4 | v = np.random.rand(100, 3) 5 | f = np.random.choice(range(100), (50, 3)) 6 | 7 | mesh_simplify(v, f, 50) 8 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libvoxelize/.gitignore: -------------------------------------------------------------------------------- 1 | voxelize.c 2 | voxelize.html 3 | build 4 | -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libvoxelize/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/third_party/ConvONets/utils/libvoxelize/__init__.py -------------------------------------------------------------------------------- /ditto/src/third_party/ConvONets/utils/libvoxelize/voxelize.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/src/third_party/ConvONets/utils/libvoxelize/voxelize.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /ditto/src/utils/__init__.py: -------------------------------------------------------------------------------- 1 | def workspace_lines(size): 2 | return [ 3 | [0.0, 0.0, 0.0], 4 | [size, 0.0, 0.0], 5 | [size, 0.0, 0.0], 6 | [size, size, 0.0], 7 | [size, size, 0.0], 8 | [0.0, size, 0.0], 9 | [0.0, size, 0.0], 10 | [0.0, 0.0, 0.0], 11 | [0.0, 0.0, size], 12 | [size, 0.0, size], 13 | [size, 0.0, size], 14 | [size, size, size], 15 | [size, size, size], 16 | [0.0, size, size], 17 | [0.0, size, size], 18 | [0.0, 0.0, size], 19 | [0.0, 0.0, 0.0], 20 | [0.0, 0.0, size], 21 | [size, 0.0, 0.0], 22 | [size, 0.0, size], 23 | [size, size, 0.0], 24 | [size, size, size], 25 | [0.0, size, 0.0], 26 | [0.0, size, size], 27 | ] 28 | -------------------------------------------------------------------------------- /ditto/src/utils/visual.py: -------------------------------------------------------------------------------- 1 | import trimesh 2 | 3 | ######### 4 | # Mesh 5 | ######### 6 | 7 | 8 | def as_mesh(scene_or_mesh): 9 | """ 10 | Convert a possible scene to a mesh. 11 | The returned mesh has only vertex and face data. 
12 | """ 13 | if isinstance(scene_or_mesh, trimesh.Scene): 14 | if len(scene_or_mesh.geometry) == 0: 15 | mesh = None # empty scene 16 | else: 17 | # we lose texture information here 18 | mesh = trimesh.util.concatenate( 19 | tuple( 20 | trimesh.Trimesh(vertices=g.vertices, faces=g.faces) 21 | for g in scene_or_mesh.geometry.values() 22 | ) 23 | ) 24 | else: 25 | assert isinstance(scene_or_mesh, trimesh.Trimesh) 26 | mesh = scene_or_mesh 27 | return mesh 28 | -------------------------------------------------------------------------------- /ditto/template.urdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /ditto/template_pri.urdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /ditto/utils3d/.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v3.4.0 4 | hooks: 5 | # list of supported hooks: https://pre-commit.com/hooks.html 6 | - id: trailing-whitespace 7 | - id: end-of-file-fixer 8 | - id: check-yaml 9 | # - id: check-added-large-files 10 | - id: debug-statements 11 | - id: detect-private-key 12 | 13 | # python code formatting 14 | - repo: https://github.com/psf/black 15 | rev: 20.8b1 16 | hooks: 17 | - id: black 18 | args: [--line-length, "99"] 19 | 20 | # python import sorting 21 | 
- repo: https://github.com/PyCQA/isort 22 | rev: 5.8.0 23 | hooks: 24 | - id: isort 25 | args: ["--profile", "black", "--filter-files"] 26 | 27 | # python code analysis 28 | - repo: https://github.com/PyCQA/flake8 29 | rev: 3.9.2 30 | hooks: 31 | - id: flake8 32 | -------------------------------------------------------------------------------- /ditto/utils3d/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Zhenyu Jiang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ditto/utils3d/README.md: -------------------------------------------------------------------------------- 1 | # utils3D 2 | A small library of 3D related utilities used in my research. 
3 | 4 | ## Installation 5 | 6 | ### Install via GitHub 7 | 8 | ```bash 9 | pip install git+https://github.com/Steve-Tod/utils3d --upgrade 10 | ``` 11 | 12 | ### Install locally 13 | 14 | ```bash 15 | git clone git@github.com:Steve-Tod/utils3d.git 16 | cd utils3d 17 | pip install -e . 18 | ``` 19 | 20 | ### Optional 21 | 22 | Install pre-commit to automatically format the code when commit. 23 | 24 | ``` 25 | pip install pre-commit 26 | pre-commit install 27 | ``` 28 | 29 | Install extra package for: 30 | 31 | - Photo realistic rendering (`utils3d.render.nvisii`) 32 | 33 | ``` 34 | pip install -r requirements-extra.txt 35 | 36 | ``` 37 | -------------------------------------------------------------------------------- /ditto/utils3d/data/Squirrel.mtl: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Google LLC. 2 | # 3 | # This work is licensed under the Creative Commons Attribution 4.0 4 | # International License. To view a copy of this license, visit 5 | # http://creativecommons.org/licenses/by/4.0/ or send a letter 6 | # to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 7 | newmtl material_0 8 | # shader_type beckmann 9 | map_Kd Squirrel_texture.png 10 | 11 | # Kd: Diffuse reflectivity. 
12 | Kd 1.000000 1.000000 1.000000 13 | -------------------------------------------------------------------------------- /ditto/utils3d/data/Squirrel_texture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/Squirrel_texture.png -------------------------------------------------------------------------------- /ditto/utils3d/data/pointcloud_color.pcd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/pointcloud_color.pcd -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_0.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_0_depth_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_0_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_1.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_1_depth_0019.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_1_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_2.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_2_depth_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_2_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_3.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_3_depth_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_3_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_4.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_4_depth_0019.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_4_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_5.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_5_depth_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_5_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_6.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_6_depth_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_6_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_7.png -------------------------------------------------------------------------------- 
/ditto/utils3d/data/rgbd/r_7_depth_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_7_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_8.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_8_depth_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_8_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_9.png -------------------------------------------------------------------------------- /ditto/utils3d/data/rgbd/r_9_depth_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/data/rgbd/r_9_depth_0019.png -------------------------------------------------------------------------------- /ditto/utils3d/examples/pointcloud_io.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import numpy as np 4 | 5 | from utils3d.pointcloud.io import read_pointcloud, write_pointcloud 6 | from utils3d.pointcloud.utils import sample_pointcloud 7 | 8 | 9 | 
def main(): 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument( 12 | "-i", 13 | "--input", 14 | type=str, 15 | default="../data/pointcloud_color.pcd", 16 | help="Input point cloud path.", 17 | ) 18 | parser.add_argument("-o", "--output", type=str, help="Output point cloud path.") 19 | args = parser.parse_args() 20 | 21 | xyz, color = read_pointcloud(args.input) 22 | xyz, color = sample_pointcloud(xyz, color, 2048) 23 | 24 | print(f"Saving point cloud of shape {xyz.shape} to {args.output}") 25 | write_pointcloud(xyz, args.output, color=color) 26 | 27 | 28 | if __name__ == "__main__": 29 | main() 30 | -------------------------------------------------------------------------------- /ditto/utils3d/examples/render_mesh_nvisii.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | 6 | from utils3d.mesh.io import read_mesh 7 | from utils3d.render.nvisii import NViSIIRenderer 8 | from utils3d.utils.utils import get_pose 9 | 10 | 11 | def main(): 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument( 14 | "-i", 15 | "--input", 16 | type=str, 17 | default="../data/Squirrel_visual.obj", 18 | help="Input mesh path.", 19 | ) 20 | args = parser.parse_args() 21 | 22 | NViSIIRenderer.init() 23 | renderer = NViSIIRenderer() 24 | 25 | mesh_pose_dict = {"mesh": (args.input, [1.0] * 3, np.eye(4))} 26 | 27 | mesh = read_mesh(args.input) 28 | center = mesh.bounds.mean(0) 29 | scale = np.sqrt(((mesh.bounds[1] - mesh.bounds[0]) ** 2).sum()) 30 | camera_pose = get_pose(scale * 1, center=center, ax=np.pi / 3, az=np.pi / 3) 31 | # camera_pose = get_pose(scale * 1, center=center, ax=0, az=np.pi) 32 | light_pose = get_pose(scale * 2, center=center, ax=np.pi / 3, az=np.pi / 3) 33 | 34 | renderer.reset(camera_pose, light_pose) 35 | renderer.update_objects(mesh_pose_dict) 36 | img = renderer.render() 37 | 38 | fig, ax = plt.subplots(1, 1, dpi=150) 39 | 
ax.imshow(img) 40 | plt.show() 41 | 42 | NViSIIRenderer.deinit() 43 | 44 | 45 | if __name__ == "__main__": 46 | main() 47 | -------------------------------------------------------------------------------- /ditto/utils3d/examples/render_mesh_pyrender.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | 6 | from utils3d.mesh.io import read_mesh 7 | from utils3d.render.pyrender import PyRenderer 8 | from utils3d.utils.utils import get_pose 9 | 10 | 11 | def main(): 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument( 14 | "-i", 15 | "--input", 16 | type=str, 17 | default="../data/Squirrel_visual.obj", 18 | help="Input mesh path.", 19 | ) 20 | args = parser.parse_args() 21 | 22 | renderer = PyRenderer() 23 | 24 | mesh = read_mesh(args.input) 25 | center = mesh.bounds.mean(0) 26 | scale = np.sqrt(((mesh.bounds[1] - mesh.bounds[0]) ** 2).sum()) 27 | 28 | surface_point_cloud = mesh.sample(2048) 29 | 30 | camera_pose = get_pose(scale * 1, center=center, ax=np.pi / 3, az=np.pi / 3) 31 | light_pose = get_pose(scale * 2, center=center, ax=np.pi / 3, az=np.pi / 3) 32 | img, depth = renderer.render_mesh(mesh, camera_pose=camera_pose, light_pose=light_pose) 33 | 34 | pc_light_pose = get_pose(scale * 4, center=center, ax=np.pi / 3, az=np.pi / 3) 35 | pc_img, _ = renderer.render_pointcloud( 36 | surface_point_cloud, 37 | camera_pose=camera_pose, 38 | light_pose=pc_light_pose, 39 | radius=scale * 0.01, 40 | colors=[102, 204, 102, 102], 41 | ) 42 | 43 | fig, axs = plt.subplots(1, 3, dpi=150) 44 | axs[0].imshow(img) 45 | axs[1].imshow(pc_img) 46 | axs[2].imshow(depth) 47 | plt.show() 48 | 49 | 50 | if __name__ == "__main__": 51 | main() 52 | -------------------------------------------------------------------------------- /ditto/utils3d/examples/visualize_3d_pointcloud.py: -------------------------------------------------------------------------------- 1 
| import argparse 2 | 3 | from utils3d.pointcloud.io import read_pointcloud 4 | from utils3d.pointcloud.utils import normalize_pointcloud 5 | from utils3d.pointcloud.visualization import ( 6 | visualize_3d_point_cloud_mpl, 7 | visualize_3d_point_cloud_o3d, 8 | ) 9 | 10 | 11 | def main(): 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument( 14 | "-t", "--tool", type=str, choices=["o3d", "mpl"], help="Which tool for visualization." 15 | ) 16 | parser.add_argument( 17 | "-i", 18 | "--input", 19 | type=str, 20 | default="../data/pointcloud_color.pcd", 21 | help="Input point cloud path.", 22 | ) 23 | parser.add_argument("--norm", action="store_true", help="Normalize data.") 24 | args = parser.parse_args() 25 | 26 | xyz, color = read_pointcloud(args.input) 27 | 28 | # normalize point cloud 29 | if args.norm: 30 | xyz, _, _ = normalize_pointcloud(xyz) 31 | 32 | if args.tool == "o3d": 33 | visualize_3d_point_cloud_o3d(xyz, color=color) 34 | else: 35 | visualize_3d_point_cloud_mpl(xyz, color=color) 36 | 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /ditto/utils3d/requirements-extra.txt: -------------------------------------------------------------------------------- 1 | nvisii 2 | -------------------------------------------------------------------------------- /ditto/utils3d/requirements.txt: -------------------------------------------------------------------------------- 1 | . 
2 | -------------------------------------------------------------------------------- /ditto/utils3d/setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length = 99 3 | profile = black 4 | filter_files = True 5 | 6 | 7 | [flake8] 8 | max_line_length = 99 9 | show_source = True 10 | format = pylint 11 | ignore = 12 | F401 # Module imported but unused 13 | W503 # line break before binary operator 14 | W504 # Line break occurred after a binary operator 15 | F841 # Local variable name is assigned to but never used 16 | F403 # from module import * 17 | E501 # Line too long 18 | E402 # module level import not at top of file 19 | exclude = 20 | .git 21 | __pycache__ 22 | data/* 23 | tests/* 24 | notebooks/* 25 | logs/* 26 | utils3d/utils/pyrender.py # have to change environ first 27 | 28 | 29 | [tool:pytest] 30 | python_files = tests/* 31 | log_cli = True 32 | markers = 33 | slow 34 | addopts = 35 | --durations=0 36 | --strict-markers 37 | --doctest-modules 38 | filterwarnings = 39 | ignore::DeprecationWarning 40 | ignore::UserWarning 41 | -------------------------------------------------------------------------------- /ditto/utils3d/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | 3 | setup( 4 | name="utils3d", # change "src" folder name to your project name 5 | version="0.0.0", 6 | description="some 3D related utilities", 7 | author="Zhenyu Jiang", 8 | author_email="stevetod98@gmail.com", 9 | url="https://github.com/Steve-Tod/utils3d", # replace with your own github project link 10 | install_requires=[ 11 | "numpy", 12 | "matplotlib", 13 | "pillow", 14 | "trimesh", 15 | "open3d", 16 | "scipy", 17 | "pyrender", 18 | "pytest", 19 | "numba", 20 | "scikit-image", 21 | ], 22 | packages=find_packages(), 23 | ) 24 | -------------------------------------------------------------------------------- 
/ditto/utils3d/tests/unit/test_io.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | 4 | import pytest 5 | import trimesh 6 | 7 | from utils3d.mesh.io import read_mesh, write_mesh 8 | from utils3d.pointcloud.io import read_pointcloud, write_pointcloud 9 | from utils3d.pointcloud.utils import sample_pointcloud 10 | 11 | 12 | @pytest.mark.parametrize("num_point", [1, 2048]) 13 | def test_pointcloud_io(num_point): 14 | xyz, color = read_pointcloud("data/pointcloud_color.pcd") 15 | xyz, color = sample_pointcloud(xyz, color, 2048) 16 | with tempfile.TemporaryDirectory() as tmpdirname: 17 | output = os.path.join(tmpdirname, "tmp.npy") 18 | write_pointcloud(xyz, output, color=color) 19 | 20 | output = os.path.join(tmpdirname, "tmp.pcd") 21 | write_pointcloud(xyz, output, color=color) 22 | 23 | output = os.path.join(tmpdirname, "tmp.txt") 24 | write_pointcloud(xyz, output, color=color) 25 | 26 | output = os.path.join(tmpdirname, "tmp.pts") 27 | write_pointcloud(xyz, output, color=color) 28 | 29 | 30 | def test_mesh_io(): 31 | mesh = read_mesh("data/Squirrel_visual.obj") 32 | with tempfile.TemporaryDirectory() as tmpdirname: 33 | output = os.path.join(tmpdirname, "tmp.stl") 34 | write_mesh(mesh, output) 35 | -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/utils3d/__init__.py -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/mesh/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/utils3d/mesh/__init__.py 
-------------------------------------------------------------------------------- /ditto/utils3d/utils3d/mesh/io.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author Zhenyu Jiang 3 | @email stevetod98@gmail.com 4 | @date 2022-01-12 5 | @desc 6 | """ 7 | 8 | import trimesh 9 | 10 | 11 | def read_mesh(path): 12 | return trimesh.load(path) 13 | 14 | 15 | def write_mesh(mesh, path): 16 | mesh.export(path) 17 | -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/mesh/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author Zhenyu Jiang 3 | @email stevetod98@gmail.com 4 | @date 2022-01-12 5 | @desc Mesh utils 6 | """ 7 | import trimesh 8 | 9 | 10 | def as_mesh(scene_or_mesh): 11 | """Convert a possible scene to mesh 12 | 13 | Args: 14 | scene_or_mesh (trimesh.Scene or trimesh.Trimesh): input scene or mesh. 15 | 16 | Returns: 17 | trimesh.Trimesh: Converted mesh with only vertex and face data. 
18 | """ 19 | if isinstance(scene_or_mesh, trimesh.Scene): 20 | if len(scene_or_mesh.geometry) == 0: 21 | mesh = None # empty scene 22 | else: 23 | # we lose texture information here 24 | mesh = trimesh.util.concatenate( 25 | tuple( 26 | trimesh.Trimesh(vertices=g.vertices, faces=g.faces, visual=g.visual) 27 | for g in scene_or_mesh.geometry.values() 28 | ) 29 | ) 30 | else: 31 | assert isinstance(scene_or_mesh, trimesh.Trimesh) 32 | mesh = scene_or_mesh 33 | return mesh 34 | -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/pointcloud/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/utils3d/pointcloud/__init__.py -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/render/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/utils3d/render/__init__.py -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/rgbd/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/utils3d/rgbd/__init__.py -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/rgbd/io.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author Zhenyu Jiang 3 | @email stevetod98@gmail.com 4 | @date 2022-03-03 5 | @desc RGBD IO 6 | """ 7 | 8 | import os 9 | 10 | import numpy as np 11 | import open3d as o3d 12 | from PIL import Image 13 | 14 | 15 | def read_rgbd(rgb_path, depth_path, depth_scale=1000, 
depth_trunc=1): 16 | """read rgbd images 17 | 18 | Args: 19 | rgb_path (str): path to rgb image 20 | depth_path (str): path to depth image 21 | depth_scale (float, optional): The scale of depth, multiplicative. Defaults to 0.001. 22 | depth_trunc (float, optional): The truncate scale of depth. Defaults to 0.7. 23 | 24 | Returns: 25 | o3d.geometry.RGBDImage: Merged RGBD Image 26 | """ 27 | depth_img = np.array(Image.open(depth_path)) 28 | depth_img[depth_img == depth_img.max()] == 0 29 | depth_img = depth_img.astype(np.float32) 30 | if len(depth_img.shape) == 3: 31 | depth_img = np.ascontiguousarray(depth_img[:, :, 0]) 32 | 33 | rgb_img = np.array(Image.open(rgb_path)) 34 | 35 | rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth( 36 | o3d.geometry.Image(rgb_img), 37 | o3d.geometry.Image(depth_img), 38 | depth_scale=depth_scale, 39 | depth_trunc=depth_trunc, 40 | convert_rgb_to_intensity=False, 41 | ) 42 | return rgbd 43 | -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/ditto/utils3d/utils3d/utils/__init__.py -------------------------------------------------------------------------------- /ditto/utils3d/utils3d/utils/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author Zhenyu Jiang 3 | @email stevetod98@gmail.com 4 | @date 2022-01-14 5 | @desc 6 | """ 7 | 8 | import numpy as np 9 | 10 | from utils3d.utils3d.utils.transform import Rotation, Transform 11 | 12 | 13 | def get_pose(distance, center=np.zeros(3), ax=0, ay=0, az=0): 14 | """generate camera pose from distance, center and euler angles 15 | 16 | Args: 17 | distance (float): distance from camera to center 18 | center (np.ndarray, optional): the look at center. Defaults to np.zeros(3). 
19 | ax (float, optional): euler angle x. Defaults to 0. 20 | ay (float, optional): euler angle around y axis. Defaults to 0. 21 | az (float, optional): euler angle around z axis. Defaults to 0. 22 | 23 | Returns: 24 | np.ndarray: camera pose of 4*4 numpy array 25 | """ 26 | rotation = Rotation.from_euler("xyz", (ax, ay, az)) 27 | vec = np.array([0, 0, distance]) 28 | translation = rotation.as_matrix().dot(vec) + center 29 | camera_pose = Transform(rotation, translation).as_matrix() 30 | return camera_pose 31 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | addict==2.4.0 2 | attr==0.3.2 3 | attrs==21.4.0 4 | bleach==5.0.0 5 | bpy==3.4.0 6 | crc32c==2.3 7 | cupy==11.5.0 8 | cupy_cuda11x==11.0.0 9 | Cython==0.29.32 10 | einops==0.4.1 11 | executing==1.2.0 12 | gym==0.19.0 13 | h5py==3.7.0 14 | imageio==2.19.3 15 | ipython==8.10.0 16 | lmdb==1.3.0 17 | lxml==4.9.2 18 | MarkupSafe==2.1.1 19 | mathutils==3.3.0 20 | matplotlib==3.5.2 21 | mc==0.0 22 | mise==1.0 23 | networkx==2.8.5 24 | numba==0.56.4 25 | numpy==1.22.3 26 | omegaconf==1.4.1 27 | open3d==0.12.0 28 | pandas==1.5.3 29 | Pillow==9.4.0 30 | plyfile==0.7.4 31 | psutil==5.9.1 32 | pycuda==2022.2.2 33 | pykdtree==1.3.5 34 | pynput==1.7.6 35 | pynvml==11.4.1 36 | pytest==7.1.2 37 | python-dotenv==0.21.1 38 | pytorch3d==0.3.0 39 | pytorch_lightning==0.7.1 40 | PyTurboJPEG==1.6.7 41 | PyYAML==6.0 42 | regex==2022.10.31 43 | requests==2.27.1 44 | rich==13.3.1 45 | rospy==1.15.14 46 | sapien==2.0.0.dev20220701 47 | scikit_image==0.18.3 48 | scikit_learn==1.2.1 49 | scipy==1.10.1 50 | seaborn==0.11.2 51 | setuptools==61.2.0 52 | Shapely==1.8.2 53 | six==1.16.0 54 | skimage==0.0 55 | sorcery==0.2.2 56 | tensorboard==2.9.1 57 | tensorboardX==2.6 58 | termcolor==1.1.0 59 | torch_scatter==2.1.0 60 | torchmetrics==0.4.1 61 | torchvision==0.12.0 62 | tqdm==4.64.1 63 | transforms3d==0.4.1 
64 | trimesh==3.13.0 65 | wandb==0.13.2 66 | yapf==0.32.0 67 | hydra-core==0.11.3 68 | -------------------------------------------------------------------------------- /where2act/code/.gitignore: -------------------------------------------------------------------------------- 1 | Pointnet2_PyTorch 2 | imgui.ini 3 | -------------------------------------------------------------------------------- /where2act/code/blender_utils/camera.blend: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/blender_utils/camera.blend -------------------------------------------------------------------------------- /where2act/code/blender_utils/cube.obj: -------------------------------------------------------------------------------- 1 | v -0.500000 -0.500000 0.500000 2 | v 0.500000 -0.500000 0.500000 3 | v -0.500000 0.500000 0.500000 4 | v 0.500000 0.500000 0.500000 5 | v -0.500000 0.500000 -0.500000 6 | v 0.500000 0.500000 -0.500000 7 | v -0.500000 -0.500000 -0.500000 8 | v 0.500000 -0.500000 -0.500000 9 | f 1 2 3 10 | f 3 2 4 11 | f 3 4 5 12 | f 5 4 6 13 | f 5 6 7 14 | f 7 6 8 15 | f 7 8 1 16 | f 1 8 2 17 | f 2 8 4 18 | f 4 8 6 19 | f 7 1 5 20 | f 5 1 3 21 | -------------------------------------------------------------------------------- /where2act/code/colors.py: -------------------------------------------------------------------------------- 1 | colors = [[0.5, 0.5, 0.5], [0.8, 0, 0], [0, 0.8, 0], [0, 0, 0.8], \ 2 | [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5], \ 3 | [0.3, 0.6, 0], [0.6, 0, 0.3], [0.3, 0, 0.6], \ 4 | [0.6, 0.3, 0], [0.3, 0, 0.6], [0.6, 0, 0.3], \ 5 | [0.8, 0.2, 0.5]] 6 | 7 | -------------------------------------------------------------------------------- /where2act/code/logs/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !README.md 3 | !.gitignore 4 | 
-------------------------------------------------------------------------------- /where2act/code/logs/README.md: -------------------------------------------------------------------------------- 1 | This folder stores the training logs. 2 | 3 | ~~Please download `final_logs.zip` and unzip the content here.~~ 4 | -------------------------------------------------------------------------------- /where2act/code/pyquaternion/.gitignore: -------------------------------------------------------------------------------- 1 | /__pycache__/ 2 | -------------------------------------------------------------------------------- /where2act/code/pyquaternion/README.md: -------------------------------------------------------------------------------- 1 | This tool is modified from https://github.com/KieranWynn/pyquaternion. 2 | -------------------------------------------------------------------------------- /where2act/code/pyquaternion/__init__.py: -------------------------------------------------------------------------------- 1 | from pyquaternion.quaternion import Quaternion 2 | -------------------------------------------------------------------------------- /where2act/code/requirements.txt: -------------------------------------------------------------------------------- 1 | h5py 2 | imageio 3 | matplotlib 4 | opencv-python 5 | Pillow 6 | progressbar2 7 | requests 8 | scipy 9 | transforms3d 10 | torch 11 | torchvision 12 | tensorboardX 13 | -------------------------------------------------------------------------------- /where2act/code/results/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !README.md 3 | !.gitignore 4 | -------------------------------------------------------------------------------- /where2act/code/results/README.md: -------------------------------------------------------------------------------- 1 | This folder stores any temporary results and is left empty intentionally. 
2 | -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/finger.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/finger.stl -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/hand.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/hand.stl -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/link0.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/link0.stl -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/link1.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/link1.stl -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/link2.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/link2.stl 
-------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/link3.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/link3.stl -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/link4.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/link4.stl -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/link5.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/link5.stl -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/link6.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/link6.stl -------------------------------------------------------------------------------- /where2act/code/robots/franka_description/meshes/collision/link7.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/franka_description/meshes/collision/link7.stl 
-------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/collision/robotiq_arg2f_140_inner_finger.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/collision/robotiq_arg2f_140_inner_finger.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/collision/robotiq_arg2f_140_inner_knuckle.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/collision/robotiq_arg2f_140_inner_knuckle.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/collision/robotiq_arg2f_140_outer_finger.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/collision/robotiq_arg2f_140_outer_finger.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/collision/robotiq_arg2f_140_outer_knuckle.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/collision/robotiq_arg2f_140_outer_knuckle.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/collision/robotiq_arg2f_base_link.stl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/collision/robotiq_arg2f_base_link.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/collision/robotiq_arg2f_coupling.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/collision/robotiq_arg2f_coupling.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/visual/robotiq_arg2f_140_inner_finger.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/visual/robotiq_arg2f_140_inner_finger.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/visual/robotiq_arg2f_140_inner_knuckle.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/visual/robotiq_arg2f_140_inner_knuckle.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/visual/robotiq_arg2f_140_outer_finger.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/visual/robotiq_arg2f_140_outer_finger.stl -------------------------------------------------------------------------------- 
/where2act/code/robots/robotiq_description/visual/robotiq_arg2f_140_outer_knuckle.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/visual/robotiq_arg2f_140_outer_knuckle.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/visual/robotiq_arg2f_base_link.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/visual/robotiq_arg2f_base_link.stl -------------------------------------------------------------------------------- /where2act/code/robots/robotiq_description/visual/robotiq_arg2f_coupling.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TTimelord/Sim2Real2/65cc27f80dda5720f7a1692395ad8eb0dd5103d3/where2act/code/robots/robotiq_description/visual/robotiq_arg2f_coupling.stl -------------------------------------------------------------------------------- /where2act/code/scripts/.gitignore: -------------------------------------------------------------------------------- 1 | *.out 2 | *.err -------------------------------------------------------------------------------- /where2act/code/scripts/README.md: -------------------------------------------------------------------------------- 1 | This folder stores the scripts used to generate data for where2act, and training/testing. 2 | 3 | Scripts used in Sim2Real$^2$ paper are in the folder "history". 
-------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_35/run_gen_offline_data.sh: -------------------------------------------------------------------------------- 1 | python gen_offline_data.py \ 2 | --data_dir ../data/drawer_35_pushing_train_70000 \ 3 | --data_fn ../stats/drawer_35.txt \ 4 | --category_types Drawer \ 5 | --primact_types pushing \ 6 | --num_processes 20 \ 7 | --num_epochs 200 \ 8 | --ins_cnt_fn ../stats/ins_cnt_drawer_35.txt \ 9 | 10 | python gen_offline_data.py \ 11 | --data_dir ../data/drawer_35_pushing_validation_14000 \ 12 | --data_fn ../stats/drawer_35.txt \ 13 | --category_types Drawer \ 14 | --primact_types pushing \ 15 | --num_processes 5 \ 16 | --num_epochs 40 \ 17 | --ins_cnt_fn ../stats/ins_cnt_drawer_35.txt \ 18 | 19 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_35/run_test_real_visu_action_heatmap.sh: -------------------------------------------------------------------------------- 1 | python test_real_visu_action_heatmap.py \ 2 | --exp_name exp-model_3d-pushing-Drawer-drawer_35_all_train-val=70000-14000_off-on=100-1 \ 3 | --model_epoch 99 \ 4 | --model_version model_3d \ 5 | --pointcloud_name $1 \ 6 | --overwrite 7 | 8 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_35/run_testing.sh: -------------------------------------------------------------------------------- 1 | python testing.py \ 2 | --exp_name exp-model_3d-pushing-Drawer-drawer_35_all_train-val=70000-14000_off-on=100-1 \ 3 | --model_epoch 99 \ 4 | --model_version model_3d \ 5 | --shape_id $1 \ 6 | --overwrite \ 7 | --primact_type pushing 8 | 9 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_35/run_train_3d.sh: -------------------------------------------------------------------------------- 1 
| python train_3d.py \ 2 | --exp_suffix drawer_35_all_train-val=70000-14000_off-on=100-1 \ 3 | --model_version model_3d \ 4 | --primact_type pushing \ 5 | --category_types Drawer \ 6 | --data_dir_prefix ../data/drawer_35 \ 7 | --offline_data_dir ../data/drawer_35_pushing_train_70000 \ 8 | --val_data_dir ../data/drawer_35_pushing_validation_14000 \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/drawer_35.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_drawer_35.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 10 \ 14 | --num_interaction_data_offline 100 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --pretrained_critic_ckpt ~/Sim2Real2/where2act/code/logs/exp-model_3d_critic-pushing-Drawer-drawer_35_critic_train-val=70000-14000_off-on=100-1/ckpts/30-network.pth \ 18 | --epochs 100 \ 19 | --overwrite \ 20 | --num_point_per_shape 2000 \ 21 | 22 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_35/run_train_3d_critic.sh: -------------------------------------------------------------------------------- 1 | python train_3d_critic.py \ 2 | --exp_suffix drawer_35_critic_train-val=70000-14000_off-on=100-1 \ 3 | --model_version model_3d_critic \ 4 | --primact_type pushing \ 5 | --category_types Drawer \ 6 | --data_dir_prefix ../data/drawer_35 \ 7 | --offline_data_dir ../data/drawer_35_pushing_train_70000 \ 8 | --val_data_dir ../data/drawer_35_pushing_validation_14000 \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/drawer_35.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_drawer_35.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 10 \ 14 | --num_interaction_data_offline 100 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --epochs 100 \ 18 | --overwrite \ 19 | --num_point_per_shape 2000 \ 20 | 21 | 22 | -------------------------------------------------------------------------------- 
/where2act/code/scripts/history/drawer_35/run_visu_action_heatmap_proposals.sh: -------------------------------------------------------------------------------- 1 | python visu_action_heatmap_proposals.py \ 2 | --exp_name exp-model_3d-pushing-Drawer-drawer_35_all_train-val=70000-14000_off-on=100-1 \ 3 | --model_epoch 99 \ 4 | --model_version model_3d \ 5 | --shape_id $1 \ 6 | --overwrite 7 | 8 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_35/run_visu_critic_heatmap.sh: -------------------------------------------------------------------------------- 1 | python visu_critic_heatmap.py \ 2 | --exp_name exp-model_3d_critic-pushing-Drawer-drawer_35_critic_train-val=70000-14000_off-on=100-1 \ 3 | --model_epoch 30 \ 4 | --model_version model_3d_critic \ 5 | --shape_id $1 \ 6 | --overwrite 7 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_45677/run_gen_offline_data.sh: -------------------------------------------------------------------------------- 1 | python gen_offline_data.py \ 2 | --data_dir ../data/drawer_45677_pulling_train_9000 \ 3 | --data_fn ../stats/drawer_45677.txt \ 4 | --category_types Drawer \ 5 | --primact_types pulling \ 6 | --num_processes 20 \ 7 | --num_epochs 900 \ 8 | --ins_cnt_fn ../stats/ins_cnt_drawer_45677.txt \ 9 | 10 | python gen_offline_data.py \ 11 | --data_dir ../data/drawer_45677_pulling_validation_1500 \ 12 | --data_fn ../stats/drawer_45677.txt \ 13 | --category_types Drawer \ 14 | --primact_types pulling \ 15 | --num_processes 20 \ 16 | --num_epochs 150 \ 17 | --ins_cnt_fn ../stats/ins_cnt_drawer_45677.txt \ 18 | 19 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_45677/run_train_3d.sh: -------------------------------------------------------------------------------- 1 | python train_3d.py \ 2 | --exp_suffix 
train_3d_45677_train-val=9000-1500_off-on=800-1 \ 3 | --model_version model_3d \ 4 | --primact_type pulling \ 5 | --category_types Drawer \ 6 | --data_dir_prefix ../data/drawer_45677 \ 7 | --offline_data_dir ../data/drawer_45677_pulling_train_9000 \ 8 | --val_data_dir ../data/drawer_45677_pulling_validation_1500 \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/drawer_45677.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_drawer_45677.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 20 \ 14 | --num_interaction_data_offline 800 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --pretrained_critic_ckpt ~/Sim2Real2/where2act/code/logs/exp-model_3d_critic-pulling-Drawer-train_3d_critic_45677_train-val=9000-1500_off-on=800-1/ckpts/16-network.pth \ 18 | --epochs 100 \ 19 | --overwrite 20 | 21 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_45677/run_train_3d_critic.sh: -------------------------------------------------------------------------------- 1 | python train_3d_critic.py \ 2 | --exp_suffix train_3d_critic_45677_train-val=9000-1500_off-on=800-1 \ 3 | --model_version model_3d_critic \ 4 | --primact_type pulling \ 5 | --category_types Drawer \ 6 | --data_dir_prefix ../data/drawer_45677 \ 7 | --offline_data_dir ../data/drawer_45677_pulling_train_9000 \ 8 | --val_data_dir ../data/drawer_45677_pulling_validation_1500 \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/drawer_45677.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_drawer_45677.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 20 \ 14 | --num_interaction_data_offline 800 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --epochs 100 \ 18 | --overwrite 19 | 20 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_45677/run_visu_action_heatmap_proposals.sh: 
-------------------------------------------------------------------------------- 1 | python visu_action_heatmap_proposals.py \ 2 | --exp_name exp-model_3d-pulling-Drawer-train_3d_45677_train-val=9000-1500_off-on=800-1 \ 3 | --model_epoch 99 \ 4 | --model_version model_3d \ 5 | --shape_id $1 \ 6 | --overwrite 7 | 8 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/drawer_45677/run_visu_critic_heatmap.sh: -------------------------------------------------------------------------------- 1 | python visu_critic_heatmap.py \ 2 | --exp_name exp-model_3d_critic-pulling-Drawer-train_3d_critic_45677_train-val=9000-1500_off-on=800-1 \ 3 | --model_epoch 31 \ 4 | --model_version model_3d_critic \ 5 | --shape_id $1 \ 6 | --overwrite 7 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_10211_pushing_1500&100/run_gen_offline_data.sh: -------------------------------------------------------------------------------- 1 | python gen_offline_data.py \ 2 | --data_dir ../data/laptop_10211_pushing_train \ 3 | --data_fn ../stats/laptop_10211.txt \ 4 | --category_types Laptop \ 5 | --primact_types pushing \ 6 | --num_processes 20 \ 7 | --num_epochs 150 \ 8 | --ins_cnt_fn ../stats/ins_cnt_laptop_10211.txt \ 9 | 10 | python gen_offline_data.py \ 11 | --data_dir ../data/laptop_10211_pushing_validation \ 12 | --data_fn ../stats/laptop_10211.txt \ 13 | --category_types Laptop \ 14 | --primact_types pushing \ 15 | --num_processes 20 \ 16 | --num_epochs 10 \ 17 | --ins_cnt_fn ../stats/ins_cnt_laptop_10211.txt \ 18 | 19 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_10211_pushing_1500&100/run_train_3d.sh: -------------------------------------------------------------------------------- 1 | python train_3d.py \ 2 | --exp_suffix train_3d_10211_train-val=1500-100_off-on=100-1 \ 3 | --model_version 
model_3d \ 4 | --primact_type pushing \ 5 | --category_types Laptop \ 6 | --data_dir_prefix ../data/laptop_10211 \ 7 | --offline_data_dir ../data/laptop_10211_pushing_train \ 8 | --val_data_dir ../data/laptop_10211_pushing_validation \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/laptop_10211.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_laptop_10211.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 20 \ 14 | --num_interaction_data_offline 100 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --pretrained_critic_ckpt ~/Sim2Real2/where2act/code/logs/exp-model_3d_critic-pushing-Laptop-train_3d_critic/ckpts/32-network.pth \ 18 | --epochs 200 \ 19 | --overwrite 20 | 21 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_10211_pushing_1500&100/run_train_3d_critic.sh: -------------------------------------------------------------------------------- 1 | python train_3d_critic.py \ 2 | --exp_suffix train_3d_critic_10211_train-val=1500-100_off-on=100-1 \ 3 | --model_version model_3d_critic \ 4 | --primact_type pushing \ 5 | --category_types Laptop \ 6 | --data_dir_prefix ../data/laptop_10211 \ 7 | --offline_data_dir ../data/laptop_10211_pushing_train \ 8 | --val_data_dir ../data/laptop_10211_pushing_validation \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/laptop_10211.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_laptop_10211.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 20 \ 14 | --num_interaction_data_offline 100 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --epochs 200 \ 18 | --overwrite 19 | 20 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_10211_pushing_1500&100/run_visu_action_heatmap_proposals.sh: -------------------------------------------------------------------------------- 1 | python 
visu_action_heatmap_proposals.py \ 2 | --exp_name exp-model_3d-pushing-Laptop-train_3d_10211 \ 3 | --model_epoch 73 \ 4 | --model_version model_3d \ 5 | --shape_id $1 \ 6 | --overwrite 7 | 8 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_10211_pushing_1500&100/run_visu_critic_heatmap.sh: -------------------------------------------------------------------------------- 1 | python visu_critic_heatmap.py \ 2 | --exp_name exp-model_3d_critic-pushing-Laptop-train_3d_critic \ 3 | --model_epoch 32 \ 4 | --model_version model_3d_critic \ 5 | --shape_id $1 \ 6 | --overwrite 7 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_7/run_gen_offline_data.sh: -------------------------------------------------------------------------------- 1 | python gen_offline_data.py \ 2 | --data_dir ../data/laptop_7_pushing_train_7000 \ 3 | --data_fn ../stats/laptop_7.txt \ 4 | --category_types Laptop \ 5 | --primact_types pushing \ 6 | --num_processes 18 \ 7 | --num_epochs 100 \ 8 | --ins_cnt_fn ../stats/ins_cnt_laptop_7.txt \ 9 | 10 | python gen_offline_data.py \ 11 | --data_dir ../data/laptop_7_pushing_validation_1400 \ 12 | --data_fn ../stats/laptop_7.txt \ 13 | --category_types Laptop \ 14 | --primact_types pushing \ 15 | --num_processes 18 \ 16 | --num_epochs 20 \ 17 | --ins_cnt_fn ../stats/ins_cnt_laptop_7.txt \ 18 | 19 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_7/run_test_real_visu_action_heatmap.sh: -------------------------------------------------------------------------------- 1 | python test_real_visu_action_heatmap.py \ 2 | --exp_name exp-model_3d-pushing-Laptop-laptop_7_all_train-val=7000-1400_off-on=90-1 \ 3 | --model_epoch 70 \ 4 | --model_version model_3d \ 5 | --pointcloud_name $1 \ 6 | --overwrite 7 | 8 | 
-------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_7/run_testing.sh: -------------------------------------------------------------------------------- 1 | python testing.py \ 2 | --exp_name exp-model_3d-pushing-Laptop-laptop_7_all_train-val=7000-1400_off-on=90-1 \ 3 | --model_epoch 70 \ 4 | --model_version model_3d \ 5 | --shape_id $1 \ 6 | --overwrite \ 7 | --primact_type pushing 8 | 9 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_7/run_train_3d.sh: -------------------------------------------------------------------------------- 1 | python train_3d.py \ 2 | --exp_suffix laptop_7_all_train-val=7000-1400_off-on=90-1 \ 3 | --model_version model_3d \ 4 | --primact_type pushing \ 5 | --category_types Laptop \ 6 | --data_dir_prefix ../data/laptop_7 \ 7 | --offline_data_dir ../data/laptop_7_pushing_train_7000 \ 8 | --val_data_dir ../data/laptop_7_pushing_validation_1400 \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/laptop_7.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_laptop_7.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 20 \ 14 | --num_interaction_data_offline 90 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --pretrained_critic_ckpt ~/Sim2Real2/where2act/code/logs/exp-model_3d_critic-pushing-Laptop-laptop_7_critic_train-val=7000-1400_off-on=90-1/ckpts/30-network.pth \ 18 | --epochs 100 \ 19 | --overwrite \ 20 | --num_point_per_shape 2000 \ 21 | 22 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_7/run_train_3d_critic.sh: -------------------------------------------------------------------------------- 1 | python train_3d_critic.py \ 2 | --exp_suffix laptop_7_critic_train-val=7000-1400_off-on=90-1 \ 3 | --model_version model_3d_critic \ 4 | --primact_type pushing \ 5 | --category_types Laptop \ 6 | 
--data_dir_prefix ../data/laptop_7 \ 7 | --offline_data_dir ../data/laptop_7_pushing_train_7000 \ 8 | --val_data_dir ../data/laptop_7_pushing_validation_1400 \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/laptop_7.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_laptop_7.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 20 \ 14 | --num_interaction_data_offline 90 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --epochs 100 \ 18 | --overwrite \ 19 | --num_point_per_shape 2000 \ 20 | 21 | 22 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_7/run_visu_action_heatmap_proposals.sh: -------------------------------------------------------------------------------- 1 | python visu_action_heatmap_proposals.py \ 2 | --exp_name exp-model_3d-pushing-Laptop-laptop_7_all_train-val=7000-1400_off-on=90-1 \ 3 | --model_epoch 70 \ 4 | --model_version model_3d \ 5 | --shape_id $1 \ 6 | --overwrite 7 | 8 | -------------------------------------------------------------------------------- /where2act/code/scripts/history/laptop_7/run_visu_critic_heatmap.sh: -------------------------------------------------------------------------------- 1 | python visu_critic_heatmap.py \ 2 | --exp_name exp-model_3d_critic-pushing-Laptop-laptop_7_critic_train-val=7000-1400_off-on=90-1 \ 3 | --model_epoch 30 \ 4 | --model_version model_3d_critic \ 5 | --shape_id $1 \ 6 | --overwrite 7 | -------------------------------------------------------------------------------- /where2act/code/scripts/original/run_gen_offline_data.sh: -------------------------------------------------------------------------------- 1 | python gen_offline_data.py \ 2 | --data_dir ../data/gt_data-train_10cats_train_data-pushing \ 3 | --data_fn ../stats/train_10cats_train_data_list.txt \ 4 | --primact_types pushing \ 5 | --num_processes [?] 
\ 6 | --num_epochs 150 \ 7 | --ins_cnt_fn ../stats/ins_cnt_15cats.txt 8 | 9 | python gen_offline_data.py \ 10 | --data_dir ../data/gt_data-train_10cats_test_data-pushing \ 11 | --data_fn ../stats/train_10cats_test_data_list.txt \ 12 | --primact_types pushing \ 13 | --num_processes [?] \ 14 | --num_epochs 10 \ 15 | --ins_cnt_fn ../stats/ins_cnt_15cats.txt 16 | 17 | python gen_offline_data.py \ 18 | --data_dir ../data/gt_data-test_5cats-pushing \ 19 | --data_fn ../stats/test_5cats_data_list.txt \ 20 | --primact_types pushing \ 21 | --num_processes [?] \ 22 | --num_epochs 10 \ 23 | --ins_cnt_fn ../stats/ins_cnt_5cats.txt 24 | 25 | -------------------------------------------------------------------------------- /where2act/code/scripts/original/run_train_3d.sh: -------------------------------------------------------------------------------- 1 | python train_3d.py \ 2 | --exp_suffix train_3d \ 3 | --model_version model_3d \ 4 | --primact_type pushing \ 5 | --data_dir_prefix ../data/gt_data \ 6 | --offline_data_dir ../data/gt_data-train_10cats_train_data-pushing \ 7 | --val_data_dir ../data/gt_data-train_10cats_test_data-pushing \ 8 | --val_data_fn data_tuple_list.txt \ 9 | --train_shape_fn ../stats/train_10cats_train_data_list.txt \ 10 | --ins_cnt_fn ../stats/ins_cnt_15cats.txt \ 11 | --buffer_max_num 10000 \ 12 | --num_processes_for_datagen [?] \ 13 | --num_interaction_data_offline 50 \ 14 | --num_interaction_data 1 \ 15 | --sample_succ \ 16 | --pretrained_critic_ckpt [?] 
\ 17 | --overwrite 18 | 19 | -------------------------------------------------------------------------------- /where2act/code/scripts/original/run_train_3d_critic.sh: -------------------------------------------------------------------------------- 1 | python train_3d_critic.py \ 2 | --exp_suffix train_3d_critic \ 3 | --model_version model_3d_critic \ 4 | --primact_type pushing \ 5 | --data_dir_prefix ../data/gt_data \ 6 | --offline_data_dir ../data/gt_data-train_10cats_train_data-pushing \ 7 | --val_data_dir ../data/gt_data-train_10cats_test_data-pushing \ 8 | --val_data_fn data_tuple_list.txt \ 9 | --train_shape_fn ../stats/train_10cats_train_data_list.txt \ 10 | --ins_cnt_fn ../stats/ins_cnt_15cats.txt \ 11 | --buffer_max_num 10000 \ 12 | --num_processes_for_datagen [?] \ 13 | --num_interaction_data_offline 50 \ 14 | --num_interaction_data 1 \ 15 | --sample_succ \ 16 | --overwrite 17 | 18 | -------------------------------------------------------------------------------- /where2act/code/scripts/original/run_visu_action_heatmap_proposals.sh: -------------------------------------------------------------------------------- 1 | python visu_action_heatmap_proposals.py \ 2 | --exp_name finalexp-model_all_final-pulling-None-train_all_v1 \ 3 | --model_epoch 81 \ 4 | --model_version model_3d_legacy \ 5 | --shape_id $1 \ 6 | --overwrite 7 | 8 | -------------------------------------------------------------------------------- /where2act/code/scripts/original/run_visu_critic_heatmap.sh: -------------------------------------------------------------------------------- 1 | python visu_critic_heatmap.py \ 2 | --exp_name finalexp-model_all_final-pulling-None-train_all_v1 \ 3 | --model_epoch 81 \ 4 | --model_version model_3d_critic_legacy \ 5 | --shape_id $1 \ 6 | --overwrite 7 | -------------------------------------------------------------------------------- /where2act/code/scripts/run_gen_offline_data.sh: 
-------------------------------------------------------------------------------- 1 | # python gen_offline_data.py \ 2 | # --data_dir ../data/faucet_8_pushing-left_train_48000 \ 3 | # --data_fn ../stats/faucet_8.txt \ 4 | # --category_types Faucet \ 5 | # --primact_types pushing-left \ 6 | # --num_processes 20 \ 7 | # --num_epochs 600 \ 8 | # --ins_cnt_fn ../stats/ins_cnt_faucet_8.txt \ 9 | 10 | # python gen_offline_data.py \ 11 | # --data_dir ../data/faucet_8_pushing-left_validation_9600 \ 12 | # --data_fn ../stats/faucet_8.txt \ 13 | # --category_types Faucet \ 14 | # --primact_types pushing-left \ 15 | # --num_processes 20 \ 16 | # --num_epochs 120 \ 17 | # --ins_cnt_fn ../stats/ins_cnt_faucet_8.txt \ 18 | 19 | -------------------------------------------------------------------------------- /where2act/code/scripts/run_test_real_visu_action_heatmap.sh: -------------------------------------------------------------------------------- 1 | python test_real_visu_action_heatmap.py \ 2 | --exp_name exp-model_3d-pushing-left-Faucet-faucet_8_all_train-val=96000-9600_off-on=500-1 \ 3 | --model_epoch 50 \ 4 | --model_version model_3d \ 5 | --pointcloud_name $1 \ 6 | --overwrite 7 | 8 | -------------------------------------------------------------------------------- /where2act/code/scripts/run_testing.sh: -------------------------------------------------------------------------------- 1 | python testing.py \ 2 | --exp_name exp-model_3d-pushing-left-Faucet-faucet_8_all_train-val=48000-9600_off-on=500-1 \ 3 | --model_epoch 40 \ 4 | --model_version model_3d \ 5 | --shape_id $1 \ 6 | --overwrite \ 7 | --primact_type pushing-left 8 | 9 | -------------------------------------------------------------------------------- /where2act/code/scripts/run_train_3d.sh: -------------------------------------------------------------------------------- 1 | python train_3d.py \ 2 | --exp_suffix faucet_8_all_train-val=96000-9600_off-on=500-1 \ 3 | --model_version model_3d \ 4 | --primact_type 
pushing-left \ 5 | --category_types Faucet \ 6 | --data_dir_prefix ../data/faucet_8 \ 7 | --offline_data_dir ../data/faucet_8_pushing-left_train_96000 \ 8 | --val_data_dir ../data/faucet_8_pushing-left_validation_9600 \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/faucet_8.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_faucet_8.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 10 \ 14 | --num_interaction_data_offline 1100 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --pretrained_critic_ckpt ~/Sim2Real2/where2act/code/logs/exp-model_3d_critic-pushing-left-Faucet-faucet_8_critic_train-val=48000-9600_off-on=500-1/ckpts/9-network.pth \ 18 | --epochs 100 \ 19 | --overwrite \ 20 | --num_point_per_shape 2000 \ 21 | --abs_thres 0.17 \ 22 | --rel_thres 0.1 \ 23 | # abs thres 10 degree 24 | 25 | -------------------------------------------------------------------------------- /where2act/code/scripts/run_train_3d_critic.sh: -------------------------------------------------------------------------------- 1 | python train_3d_critic.py \ 2 | --exp_suffix faucet_8_critic_train-val=48000-9600_off-on=500-1 \ 3 | --model_version model_3d_critic \ 4 | --primact_type pushing-left \ 5 | --category_types Faucet \ 6 | --data_dir_prefix ../data/faucet_8 \ 7 | --offline_data_dir ../data/faucet_8_pushing-left_train_48000 \ 8 | --val_data_dir ../data/faucet_8_pushing-left_validation_9600 \ 9 | --val_data_fn data_tuple_list.txt \ 10 | --train_shape_fn ../stats/faucet_8.txt \ 11 | --ins_cnt_fn ../stats/ins_cnt_faucet_8.txt \ 12 | --buffer_max_num 10000 \ 13 | --num_processes_for_datagen 20 \ 14 | --num_interaction_data_offline 500 \ 15 | --num_interaction_data 1 \ 16 | --sample_succ \ 17 | --epochs 100 \ 18 | --overwrite \ 19 | --num_point_per_shape 2000 \ 20 | --abs_thres 0.17 \ 21 | --rel_thres 0.1 \ 22 | # abs thres 10 degree 23 | 24 | 25 | -------------------------------------------------------------------------------- 
/where2act/code/scripts/run_visu_action_heatmap_proposals.sh: -------------------------------------------------------------------------------- 1 | python visu_action_heatmap_proposals.py \ 2 | --exp_name exp-model_3d-pushing-left-Faucet-faucet_8_all_train-val=96000-9600_off-on=500-1 \ 3 | --model_epoch 50 \ 4 | --model_version model_3d \ 5 | --shape_id $1 \ 6 | --overwrite 7 | 8 | -------------------------------------------------------------------------------- /where2act/code/scripts/run_visu_critic_heatmap.sh: -------------------------------------------------------------------------------- 1 | python visu_critic_heatmap.py \ 2 | --exp_name exp-model_3d_critic-pushing-left-Faucet-faucet_8_critic_train-val=48000-9600_off-on=500-1 \ 3 | --model_epoch 18 \ 4 | --model_version model_3d_critic \ 5 | --shape_id $1 \ 6 | --overwrite 7 | -------------------------------------------------------------------------------- /where2act/code/training_tips.md: -------------------------------------------------------------------------------- 1 | # Data generation 2 | 1. change urdf dir 3 | 2. change scale to the real scale, then check the scale_rand_ratio: 4 | 1. laptop*1/3.2 5 | 2. drawer 0.333 6 | 3. faucet 0.2 7 | 3. change initial joint state 8 | 4. check friction 9 | 5. check traj length 10 | 1. laptop 11 | 2. faucet 0.06 (pushing-left) 12 | 6. check camera distance 13 | 14 | # Train Critic 15 | 1. change data dir in .sh 16 | 2. change offline data number 17 | 18 | # testing 19 | 1. change urdf dir 20 | 2. change initial state 21 | 3. 
change scale -------------------------------------------------------------------------------- /where2act/data/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !README.md 3 | !.gitignore 4 | -------------------------------------------------------------------------------- /where2act/data/README.md: -------------------------------------------------------------------------------- 1 | This folder stores collected data. 2 | -------------------------------------------------------------------------------- /where2act/stats/README.md: -------------------------------------------------------------------------------- 1 | The files in this folder is used to determine what data will be used when training. The original "stats" files are in the "original" folder. -------------------------------------------------------------------------------- /where2act/stats/drawer_35.txt: -------------------------------------------------------------------------------- 1 | 44817 Drawer 2 | 44853 Drawer 3 | 44962 Drawer 4 | 45092 Drawer 5 | 45132 Drawer 6 | 45135 Drawer 7 | 45243 Drawer 8 | 45261 Drawer 9 | 45290 Drawer 10 | 45374 Drawer 11 | 45427 Drawer 12 | 45677 Drawer 13 | 45710 Drawer 14 | 45746 Drawer 15 | 45756 Drawer 16 | 45801 Drawer 17 | 46014 Drawer 18 | 46060 Drawer 19 | 46109 Drawer 20 | 46130 Drawer 21 | 46172 Drawer 22 | 46230 Drawer 23 | 46334 Drawer 24 | 46443 Drawer 25 | 46462 Drawer 26 | 46466 Drawer 27 | 46549 Drawer 28 | 46641 Drawer 29 | 46762 Drawer 30 | 47089 Drawer 31 | 47207 Drawer 32 | 48169 Drawer 33 | 48253 Drawer 34 | 48876 Drawer 35 | 49140 Drawer 36 | -------------------------------------------------------------------------------- /where2act/stats/faucet_8.txt: -------------------------------------------------------------------------------- 1 | 857 Faucet 2 | 1053 Faucet 3 | 1288 Faucet 4 | 1343 Faucet 5 | 1633 Faucet 6 | 1646 Faucet 7 | 1667 Faucet 8 | 1832 Faucet 9 | 
-------------------------------------------------------------------------------- /where2act/stats/ins_cnt_drawer_35.txt: -------------------------------------------------------------------------------- 1 | Drawer 35 10 2 | -------------------------------------------------------------------------------- /where2act/stats/ins_cnt_faucet_8.txt: -------------------------------------------------------------------------------- 1 | Faucet 8 10 2 | -------------------------------------------------------------------------------- /where2act/stats/ins_cnt_laptop_7.txt: -------------------------------------------------------------------------------- 1 | Laptop 7 10 2 | -------------------------------------------------------------------------------- /where2act/stats/laptop_7.txt: -------------------------------------------------------------------------------- 1 | 9748 Laptop 2 | 10211 Laptop 3 | 10213 Laptop 4 | 10239 Laptop 5 | 10269 Laptop 6 | 10305 Laptop 7 | 10626 Laptop 8 | -------------------------------------------------------------------------------- /where2act/stats/original/data_cabinet_41003.txt: -------------------------------------------------------------------------------- 1 | 41003 StorageFurniture 2 | -------------------------------------------------------------------------------- /where2act/stats/original/ins_cnt_15cats.txt: -------------------------------------------------------------------------------- 1 | Box 28 12 2 | Bucket 36 10 3 | Door 36 10 4 | Faucet 84 4 5 | Kettle 29 12 6 | KitchenPot 25 14 7 | Microwave 16 22 8 | Refrigerator 44 8 9 | Safe 30 12 10 | StorageFurniture 345 1 11 | Switch 70 5 12 | Table 101 3 13 | TrashCan 70 5 14 | WashingMachine 17 20 15 | Window 58 6 16 | -------------------------------------------------------------------------------- /where2act/stats/original/ins_cnt_5cats.txt: -------------------------------------------------------------------------------- 1 | Bucket 36 3 2 | KitchenPot 25 4 3 | Safe 30 3 4 | Table 101 1 5 | 
WashingMachine 17 6 6 | -------------------------------------------------------------------------------- /where2act/urdf/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | !README.md -------------------------------------------------------------------------------- /where2act/urdf/README.md: -------------------------------------------------------------------------------- 1 | Put data from partnet mobility dataset here. --------------------------------------------------------------------------------