├── data
│   └── .gitignore
├── .gitignore
├── .gitattributes
├── src
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── MLP.py
│   │   ├── resnet.py
│   │   └── pointnet.py
│   ├── mpc
│   │   ├── __init__.py
│   │   ├── robot_planner_task.py
│   │   ├── se3_optimization_task.py
│   │   ├── concept_cost.py
│   │   ├── se3_optimization_rollout.py
│   │   ├── robot_planner_rollout.py
│   │   ├── robot_concept_planner.py
│   │   ├── se3_concept_planner.py
│   │   └── robot_concept_reacher.py
│   ├── train
│   │   ├── __init__.py
│   │   ├── get_new_pose.py
│   │   ├── check_transform.py
│   │   ├── train_human_concept.py
│   │   ├── train_oracle_concept.py
│   │   └── evaluate_concept.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── visualize_pointcloud_samples.py
│   │   ├── visualize_AL_samples.py
│   │   ├── test_concept.py
│   │   ├── input_utils.py
│   │   ├── camera_utils.py
│   │   ├── gym_utils.py
│   │   ├── concept_utils.py
│   │   ├── train_utils.py
│   │   └── data_utils.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── create_urdf_objects.py
│   │   ├── multilabel_concept_data.py
│   │   ├── collect_human_data.py
│   │   ├── label_concept_data.py
│   │   ├── generate_concept_data.py
│   │   ├── split_data.py
│   │   ├── ycb_downloader.py
│   │   └── passive_querier.py
│   ├── notebooks
│   │   └── __init__.py
│   └── world
│       ├── basic_world.py
│       ├── object_world.py
│       └── robot_world.py
├── configs
│   ├── rgb.yaml
│   ├── rawstate_AL.yaml
│   ├── rawstate_oracle.yaml
│   ├── rawstate_human.yaml
│   ├── pointcloud_oracle.yaml
│   └── pointcloud_human.yaml
├── scripts
│   ├── ngc_unzip_data.sh
│   ├── ngc_multilabel_series.sh
│   ├── ngc_generate_oracle_data.sh
│   ├── ngc_sync_code.sh
│   ├── ngc_labelsplit_data.sh
│   ├── ngc_multilabel_parallel.sh
│   ├── ngc_train_pointnet_concept.sh
│   ├── ngc_evaluate_pointnet_concept.sh
│   ├── ngc_multi_learned_train.sh
│   ├── ngc_oracle_g_train.sh
│   ├── ngc_human_g_train.sh
│   ├── ngc_generate_AL_data.sh
│   ├── ngc_multi_baseline_evaluate.sh
│   ├── ngc_multi_learned_evaluate.sh
│   ├── ngc_multi_baseline_train.sh
│   ├── ngc_human_baseline_train.sh
│   ├── gen_copyright_headers.sh
│   └── scratch.txt
├── LICENSE
└── README.md
/data/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.csv
3 | *.ipynb_checkpoints
4 | *.DS_Store
5 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | data/shapenet_objects/** filter=lfs diff=lfs merge=lfs -text
2 | data/ycb_objects/** filter=lfs diff=lfs merge=lfs -text
3 | data/models/** filter=lfs diff=lfs merge=lfs -text
4 |
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
--------------------------------------------------------------------------------
/src/models/__init__.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
--------------------------------------------------------------------------------
/src/mpc/__init__.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
--------------------------------------------------------------------------------
/src/train/__init__.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
--------------------------------------------------------------------------------
/src/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
--------------------------------------------------------------------------------
/src/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
--------------------------------------------------------------------------------
/configs/rgb.yaml:
--------------------------------------------------------------------------------
1 | data:
2 | type: "rgb"
3 | data_dir: "/../../data/concept_shapenet/"
4 | save_dir: "/../../data/models/"
5 | seed: 0
6 | train:
7 | num_epochs: 50
8 | learning_rate: 0.001
9 | batch_size: 32
10 | num_workers: 16
11 | network:
12 | input_dim: 1
13 | frozen: True
--------------------------------------------------------------------------------
/configs/rawstate_AL.yaml:
--------------------------------------------------------------------------------
1 | data:
2 | type: "rawstate"
3 | data_dir: "/../../data/concept_shapenet/"
4 | save_dir: "/../../data/models/"
5 | results_dir: "/../../data/results/"
6 | seed: 0
7 | train:
8 | num_epochs: 100
9 | learning_rate: 0.01
10 | batch_size: 32
11 | num_workers: 16
12 | network:
13 | nb_layers: 3
14 | nb_units: 256
--------------------------------------------------------------------------------
/configs/rawstate_oracle.yaml:
--------------------------------------------------------------------------------
1 | data:
2 | type: "rawstate"
3 | data_dir: "/../../data/concept_shapenet/"
4 | save_dir: "/../../data/models/"
5 | results_dir: "/../../data/results/"
6 | seed: 0
7 | train:
8 | num_epochs: 100
9 | learning_rate: 0.01
10 | batch_size: 32
11 | num_workers: 16
12 | network:
13 | nb_layers: 3
14 | nb_units: 256
--------------------------------------------------------------------------------
/configs/rawstate_human.yaml:
--------------------------------------------------------------------------------
1 | data:
2 | type: "rawstate"
3 | train_dir: "/../../data/g_shapenet/"
4 | test_dir: "/../../data/concept_shapenet/"
5 | save_dir: "/../../data/models/"
6 | results_dir: "/../../data/results/"
7 | seed: 0
8 | train:
9 | num_epochs: 100
10 | learning_rate: 0.01
11 | batch_size: 32
12 | num_workers: 16
13 | network:
14 | nb_layers: 3
15 | nb_units: 256
--------------------------------------------------------------------------------
/configs/pointcloud_oracle.yaml:
--------------------------------------------------------------------------------
1 | data:
2 | type: "pointcloud"
3 | data_dir: "/../../data/concept_shapenet/"
4 | save_dir: "/../../data/models/"
5 | results_dir: "/../../data/results/"
6 | seed: 0
7 | train:
8 | num_epochs: 50
9 | learning_rate: 0.0003
10 | batch_size: 64
11 | num_workers: 16
12 | network:
13 | pointnet_radius: 0.5
14 | pointnet_nclusters: 512
15 | scale: 1
16 | in_features: 2
--------------------------------------------------------------------------------
/configs/pointcloud_human.yaml:
--------------------------------------------------------------------------------
1 | data:
2 | type: "pointcloud"
3 | train_dir: "/../../data/g_shapenet/"
4 | test_dir: "/../../data/concept_shapenet/"
5 | save_dir: "/../../data/models/"
6 | results_dir: "/../../data/results/"
7 | seed: 0
8 | train:
9 | num_epochs: 50
10 | learning_rate: 0.0003
11 | batch_size: 64
12 | num_workers: 16
13 | network:
14 | pointnet_radius: 0.5
15 | pointnet_nclusters: 512
16 | scale: 1
17 | in_features: 2
--------------------------------------------------------------------------------
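Note: the training entry points referenced by the NGC scripts below (train/train_oracle_concept.py, train/train_human_concept.py, train/evaluate_concept.py) are not reproduced in this dump, so the following is only a sketch of how one of these YAML files might be consumed, assuming they are passed via a --config flag as the scripts suggest. The load_config helper name is illustrative and not part of the repo; resolving the leading "/../../" entries against a source file mirrors the script-relative path building seen elsewhere (e.g. src/train/get_new_pose.py).

# Sketch only (assumed usage): load one of the configs above and seed all RNGs.
import os
import random
import yaml
import numpy as np
import torch

def load_config(config_path, anchor_file):
    """Read a YAML config and resolve its "/../../..." paths relative to a source file."""
    with open(config_path) as f:
        cfg = yaml.safe_load(f)
    here = os.path.dirname(os.path.abspath(anchor_file))
    for key in ("data_dir", "save_dir", "results_dir", "train_dir", "test_dir"):
        if key in cfg["data"]:
            cfg["data"][key] = os.path.abspath(here + cfg["data"][key])
    return cfg

cfg = load_config("configs/pointcloud_oracle.yaml", __file__)
torch.manual_seed(cfg["data"]["seed"])
random.seed(cfg["data"]["seed"])
np.random.seed(cfg["data"]["seed"])
print(cfg["train"]["num_epochs"], cfg["network"]["pointnet_nclusters"])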
/scripts/ngc_unzip_data.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | ngc batch run \
6 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
7 | --name $NAME \
8 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
9 | --workspace concept_learning:/concept_learning \
10 | --result /result \
11 | --port 6006 \
12 | --commandline "
13 | cd /concept_learning/concept_learning/data
14 | tar -xf concept_shapenet.tar.xz
15 | "
16 |
--------------------------------------------------------------------------------
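Usage note: every ngc_*.sh script in this directory takes the NGC job name as its first argument and the dgx1v GPU memory size (in GB) as its second, which together select the instance type. An invocation therefore looks roughly like "bash scripts/ngc_unzip_data.sh unzip-shapenet 32", which requests a dgx1v.32g.1.norm instance. 16 and 32 are the usual dgx1v memory options; the exact set depends on the NGC cluster.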
/scripts/ngc_multilabel_series.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | ngc batch run \
6 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
7 | --name $NAME--ml-model.pointnet \
8 | --image "nvcr.io/nvidian/robotics/storm_kit:cuda_10.2_cudnn_driver418.40.04_20210701-154151" \
9 | --workspace concept_learning:concept_learning \
10 | --result /result \
11 | --port 6006 \
12 | --commandline "
13 | export OMP_NUM_THREADS=1
14 | cd /concept_learning/concept_learning/src
15 | pip3 install shapely
16 | pip3 install h5py==2.10.0
17 | python3 datasets/multilabel_concept_data.py --concept 'above45'
18 | "
--------------------------------------------------------------------------------
/src/notebooks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 |
10 | # concept_learning
11 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
12 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
13 |
--------------------------------------------------------------------------------
/scripts/ngc_generate_oracle_data.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | ngc batch run \
6 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
7 | --name $NAME--model.nomodel \
8 | --image "nvcr.io/nvidian/robotics/storm_kit:cuda_10.2_cudnn_driver418.40.04_20210701-154151" \
9 | --workspace concept_learning:concept_learning \
10 | --result /result \
11 | --port 6006 \
12 | --commandline "
13 | export OMP_NUM_THREADS=1
14 | cd /concept_learning/concept_learning/src
15 | pip3 install shapely
16 | pip3 install open3d-python
17 | pip3 install h5py==2.10.0
18 | pip3 install moviepy
19 | python3 datasets/generate_concept_data.py --headless --cuda --envs 100 --samples 10000
20 | "
--------------------------------------------------------------------------------
/scripts/ngc_sync_code.sh:
--------------------------------------------------------------------------------
1 | WSNAME=concept_learning
2 | MOUNTDIR=/home/abobu/Project/ngc_ws/${WSNAME}
3 |
4 | # # mount workspace
5 | # ngc workspace unmount ${MOUNTDIR}
6 | # ngc workspace mount ${WSNAME} ${MOUNTDIR} --mode RW
7 |
8 | # sync files
9 | SCRIPTDIR=$(dirname $(readlink -f "$0"))
10 | PROJECTNAME=$(basename "$(dirname "$SCRIPTDIR")")
11 | echo ${SCRIPTDIR}
12 | echo ${MOUNTDIR}/${PROJECTNAME}
13 |
14 | rsync -rlvczP \
15 | --exclude '__pycache__' \
16 | --exclude '.git' \
17 | --exclude 'checkpoints' \
18 | --exclude '*.pyc' \
19 | --exclude 'graspnet.egg-info' \
20 | --exclude '.eggs' \
21 | --exclude 'data/concept_*/' \
22 | --exclude 'data/g_*/' \
23 | --exclude 'data/test_shapenet/' \
24 | --exclude 'scripts/' \
25 | --exclude 'models*/' \
26 | --exclude 'results/' \
27 | ${SCRIPTDIR}/.. ${MOUNTDIR}/${PROJECTNAME}/
28 |
--------------------------------------------------------------------------------
/src/mpc/robot_planner_task.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 |
7 | from storm_kit.mpc.task.reacher_task import ReacherTask
8 | from src.mpc.robot_planner_rollout import RobotPlannerRollout
9 |
10 | class RobotPlannerTask(ReacherTask):
11 | def __init__(self, task_file='franka.yml', robot_file='ur10_reacher.yml',
12 | world_file='collision_env.yml',
13 | tensor_args={'device':"cpu", 'dtype':torch.float32},
14 | spawn_process=True):
15 | super().__init__(task_file, robot_file, world_file, tensor_args, spawn_process)
16 |
17 | def get_rollout_fn(self, **kwargs):
18 | rollout_fn = RobotPlannerRollout(**kwargs)
19 | return rollout_fn
--------------------------------------------------------------------------------
/src/mpc/se3_optimization_task.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 |
7 | from storm_kit.mpc.task.se3_task import SE3Task
8 | from src.mpc.se3_optimization_rollout import SE3OptimizationRollout
9 |
10 |
11 | class SE3OptimizationTask(SE3Task):
12 | def __init__(self, task_file='franka.yml', robot_file='ur10_reacher.yml',
13 | world_file='collision_env.yml',
14 | tensor_args={'device':"cpu", 'dtype':torch.float32},
15 | spawn_process=True):
16 |
17 | super().__init__(task_file, robot_file, world_file, tensor_args, spawn_process)
18 |
19 | def get_rollout_fn(self, **kwargs):
20 | rollout_fn = SE3OptimizationRollout(**kwargs)
21 | return rollout_fn
--------------------------------------------------------------------------------
/scripts/ngc_labelsplit_data.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | ngc batch run \
9 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
10 | --name $NAME-$CONCEPT-ml-model.nomodel \
11 | --image "nvcr.io/nvidian/robotics/storm_kit:cuda_10.2_cudnn_driver418.40.04_20210701-154151" \
12 | --workspace concept_learning:concept_learning \
13 | --result /result \
14 | --port 6006 \
15 | --commandline "
16 | export OMP_NUM_THREADS=1
17 | cd /concept_learning/concept_learning/src
18 | pip3 install shapely
19 | pip3 install open3d-python
20 | pip3 install h5py==2.10.0
21 | pip3 install moviepy
22 | python3 datasets/label_concept_data.py --concept $CONCEPT
23 | python3 datasets/split_data.py --concept $CONCEPT
24 | "
25 | done
--------------------------------------------------------------------------------
/scripts/ngc_multilabel_parallel.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | for TRAIN_AMT in 100 200 300 400 500 600 700 800 900 1000
9 | do
10 | ngc batch run \
11 | --instance dgx1v.${MEMORY}g.${NGPU}.norm.beta \
12 | --name $NAME-$CONCEPT-ml-model.nomodel \
13 | --image "nvcr.io/nvidian/robotics/storm_kit:cuda_10.2_cudnn_driver418.40.04_20210701-154151" \
14 | --workspace concept_learning:concept_learning \
15 | --result /result \
16 | --port 6006 \
17 | --commandline "
18 | export OMP_NUM_THREADS=1
19 | cd /concept_learning/concept_learning/src
20 | pip3 install shapely
21 | pip3 install h5py==2.10.0
22 | pip3 install moviepy
23 | python3 datasets/label_concept_data.py \
24 | --concept $CONCEPT --concept_model 'confrandgt_rawstate_'$TRAIN_AMT'_0.pt'
25 | "
26 | done
27 | done
--------------------------------------------------------------------------------
/scripts/ngc_train_pointnet_concept.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | ngc batch run \
9 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
10 | --name $NAME-$CONCEPT-lr_0.001_bs_64_adam-model.pointnet \
11 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
12 | --workspace concept_learning:concept_learning \
13 | --result /result \
14 | --port 6006 \
15 | --commandline "
16 | export OMP_NUM_THREADS=1
17 | cd /concept_learning/concept_learning/src
18 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
19 | pip install h5py==2.10.0
20 | pip install moviepy
21 | pip install open3d --ignore-installed PyYAML
22 | pip install pytorch3d
23 | python train/train_oracle_concept.py --concept_dir $CONCEPT --config '/../../configs/pointcloud_oracle.yaml'
24 | "
25 | done
--------------------------------------------------------------------------------
/scripts/ngc_evaluate_pointnet_concept.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | ngc batch run \
9 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
10 |     --name $NAME-$CONCEPT-ml-model.pointnet \
11 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
12 | --workspace concept_learning:concept_learning \
13 | --result /result \
14 | --port 6006 \
15 | --commandline "
16 | export OMP_NUM_THREADS=1
17 | cd /concept_learning/concept_learning/src
18 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
19 | pip install h5py==2.10.0
20 | pip3 install moviepy
21 | pip install pytorch3d
22 | pip install shapely
23 | python train/evaluate_concept.py --concept_dir $CONCEPT --config '/../../configs/pointcloud_oracle.yaml' \
24 | --concept_model 'oracle_pointcloud_0.pt'
25 | "
26 | done
--------------------------------------------------------------------------------
/scripts/ngc_multi_learned_train.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | for TRAIN_AMT in 100 200 300 400 500 600 700 800 900 1000
9 | do
10 | ngc batch run \
11 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
12 | --name $NAME-$CONCEPT-$TRAIN_AMT-ml-model.pointnet \
13 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
14 | --workspace concept_learning:concept_learning \
15 | --result /result \
16 | --port 6006 \
17 | --commandline "
18 | cd /concept_learning/concept_learning/src
19 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
20 | pip install h5py==2.10.0
21 | pip3 install moviepy
22 | pip install pytorch3d
23 | python train/train_oracle_concept.py --concept_dir $CONCEPT'_randomgt'$TRAIN_AMT \
24 | --config '/../../configs/pointcloud_oracle.yaml'
25 | "
26 | done
27 | done
--------------------------------------------------------------------------------
/scripts/ngc_oracle_g_train.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop" "upright"
7 | do
8 | for TRAIN_AMT in 100 200 300 400 500 600 700 800 900 1000
9 | do
10 | ngc batch run \
11 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
12 | --name $NAME-$CONCEPT-$TRAIN_AMT-lr_0.01_bs_32_adam_model.MLP \
13 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
14 | --workspace concept_learning:concept_learning \
15 | --result /result \
16 | --port 6006 \
17 | --commandline "
18 | export OMP_NUM_THREADS=1
19 | cd /concept_learning/concept_learning/src
20 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
21 | pip install h5py
22 | pip install moviepy
23 | pip install pytorch3d
24 | python train/train_oracle_concept.py --concept_dir $CONCEPT --config '/../../configs/rawstate_oracle.yaml' \
25 | --train_amt $TRAIN_AMT
26 | "
27 | done
28 | done
--------------------------------------------------------------------------------
/scripts/ngc_human_g_train.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | for TRAIN_AMT in 100 200 300 400 500 600 700 800 900 1000
9 | do
10 | ngc batch run \
11 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
12 | --name $NAME-$CONCEPT-$TRAIN_AMT-lr_0.01_bs_32_adam_model.MLP \
13 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
14 | --workspace concept_learning:concept_learning \
15 | --result /result \
16 | --port 6006 \
17 | --commandline "
18 | export OMP_NUM_THREADS=1
19 | cd /concept_learning/concept_learning/src
20 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
21 | pip install h5py
22 | pip install moviepy
23 | pip install pytorch3d
24 | python train/train_human_concept.py --concept $CONCEPT --config '/../../configs/rawstate_human.yaml' \
25 | --train_amt $TRAIN_AMT --strategy 'randommine'
26 | "
27 | done
28 | done
--------------------------------------------------------------------------------
/scripts/ngc_generate_AL_data.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | ngc batch run \
9 | --instance dgx1v.${MEMORY}g.${NGPU}.norm.beta \
10 |     --name $NAME-$CONCEPT-lr_0.01_bs_32_adam_model.MLP \
11 | --image "nvcr.io/nvidian/robotics/storm_kit:cuda_10.2_cudnn_driver418.40.04_20210701-154151" \
12 | --workspace concept_learning:concept_learning \
13 | --result /result \
14 | --port 6006 \
15 | --commandline "
16 | export OMP_NUM_THREADS=1
17 | cd /concept_learning/concept_learning/src
18 | pip3 install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
19 | pip3 install shapely
20 | pip3 install open3d-python
21 | pip3 install h5py==2.10.0
22 | pip3 install moviepy
23 | pip3 install pytorch3d
24 | python3 datasets/collect_human_data.py --config '/../../configs/rawstate_AL.yaml' --concept $CONCEPT \
25 | --simulated --active_samples 1000 --passive_samples 0 --batch_size 100 --objective 'confusion' \
26 | --warmstart 0 --mining 0
27 | "
28 | done
--------------------------------------------------------------------------------
/scripts/ngc_multi_baseline_evaluate.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | for TRAIN_AMT in 100 200 300 400 500 600 700 800 900 1000
9 | do
10 | ngc batch run \
11 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
12 | --name $NAME-$CONCEPT-$TRAIN_AMT-ml-model.pointnet \
13 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
14 | --workspace concept_learning:concept_learning \
15 | --result /result \
16 | --port 6006 \
17 | --commandline "
18 | export OMP_NUM_THREADS=1
19 | cd /concept_learning/concept_learning/src
20 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
21 | pip install h5py==2.10.0
22 | pip3 install moviepy
23 | pip install pytorch3d
24 | pip install shapely
25 | python train/evaluate_concept.py --concept_dir $CONCEPT --config '/../../configs/pointcloud_oracle.yaml' \
26 | --concept_model 'randomgt_pointcloud_'$TRAIN_AMT'_0.pt'
27 | "
28 | done
29 | done
--------------------------------------------------------------------------------
/scripts/ngc_multi_learned_evaluate.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | for TRAIN_AMT in 100 200 300 400 500 600 700 800 900 1000
9 | do
10 | ngc batch run \
11 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
12 | --name $NAME-$CONCEPT-$TRAIN_AMT-ml-model.pointnet \
13 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
14 | --workspace concept_learning:concept_learning \
15 | --result /result \
16 | --port 6006 \
17 | --commandline "
18 | export OMP_NUM_THREADS=1
19 | cd /concept_learning/concept_learning/src
20 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
21 | pip install h5py==2.10.0
22 | pip3 install moviepy
23 | pip install pytorch3d
24 | pip install shapely
25 | python train/evaluate_concept.py --concept_dir $CONCEPT --config '/../../configs/pointcloud_oracle.yaml' \
26 | --concept_model 'oracle_pointcloud_grandomgt'$TRAIN_AMT'_0.pt'
27 | "
28 | done
29 | done
--------------------------------------------------------------------------------
/scripts/ngc_multi_baseline_train.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | for TRAIN_AMT in 100 200 300 400 500 600 700 800 900 1000
9 | do
10 | ngc batch run \
11 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
12 | --name $NAME-$CONCEPT-$TRAIN_AMT-lr_0.001_bs_64_adam-model.pointnet \
13 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
14 | --workspace concept_learning:concept_learning \
15 | --result /result \
16 | --port 6006 \
17 | --commandline "
18 | export OMP_NUM_THREADS=1
19 | cd /concept_learning/concept_learning/src
20 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
21 | pip install h5py==2.10.0
22 | pip install moviepy
23 | pip install open3d --ignore-installed PyYAML
24 | pip install pytorch3d
25 | python train/train_oracle_concept.py --concept_dir $CONCEPT \
26 | --config '/../../configs/pointcloud_oracle.yaml' --train_amt $TRAIN_AMT
27 | "
28 | done
29 | done
--------------------------------------------------------------------------------
/scripts/ngc_human_baseline_train.sh:
--------------------------------------------------------------------------------
1 | NAME=$1
2 | MEMORY=$2
3 | NGPU=1
4 |
5 | for CONCEPT in "above45" "abovebb" "near" "upright" "upright45" \
6 | "alignedvertical" "alignedhorizontal" "front" "front45" "ontop"
7 | do
8 | for TRAIN_AMT in 100 200 300 400 500 600 700 800 900 1000
9 | do
10 | ngc batch run \
11 | --instance dgx1v.${MEMORY}g.${NGPU}.norm \
12 | --name $NAME-$CONCEPT-$TRAIN_AMT-lr_0.001_bs_64_adam-model.pointnet \
13 | --image "nvidian/robotics/weiy_pytorch:1.7.0-py3.7-cuda11.1-cudnn7-devel-ubuntu18.04-egl" \
14 | --workspace concept_learning:concept_learning \
15 | --result /result \
16 | --port 6006 \
17 | --commandline "
18 | export OMP_NUM_THREADS=1
19 | cd /concept_learning/concept_learning/src
20 | pip install 'git+git://github.com/erikwijmans/Pointnet2_PyTorch.git#egg=pointnet2_ops&subdirectory=pointnet2_ops_lib'
21 | pip install h5py
22 | pip install moviepy
23 | pip install open3d --ignore-installed PyYAML
24 | pip install pytorch3d
25 | python train/train_human_concept.py --concept $CONCEPT --config '/../../configs/pointcloud_human.yaml' \
26 | --train_amt $TRAIN_AMT --strategy 'randomgt'
27 | "
28 | done
29 | done
--------------------------------------------------------------------------------
/src/models/MLP.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 |
9 | import sys
10 |
11 | sys.path.insert(1, '../')
12 |
13 | from src.utils.input_utils import transform_input
14 |
15 | class MLP(nn.Module):
16 | """
17 | Creates a NN with leaky ReLu non-linearity.
18 | ---
19 | input nb_layers, nb_units, input_dim
20 | output scalar
21 | """
22 | def __init__(self, nb_layers, nb_units, input_dim, concept):
23 | super(MLP, self).__init__()
24 | self.concept = concept
25 |
26 | layers = []
27 | dim_list = [input_dim] + [nb_units] * nb_layers + [1]
28 |
29 | for i in range(len(dim_list) - 1):
30 | layers.append(nn.Linear(dim_list[i], dim_list[i+1]))
31 |
32 | self.fc = nn.ModuleList(layers)
33 |
34 | # initialize weights
35 | def weights_init(m):
36 | if isinstance(m, nn.Linear):
37 | torch.nn.init.xavier_normal_(m.weight, gain=nn.init.calculate_gain('leaky_relu'))
38 | torch.nn.init.zeros_(m.bias)
39 |
40 | self.apply(weights_init)
41 |
42 | print("Initializing MLP with input dimensionality ", input_dim)
43 |
44 | def forward(self, x):
45 | x = self.input_torchify(x)
46 | x = transform_input(x, self.concept)
47 | for layer in self.fc[:-1]:
48 | x = F.leaky_relu(layer(x))
49 | return self.fc[-1](x)
50 |
51 | def input_torchify(self, x):
52 | """
53 | Transforms numpy input to torch tensors.
54 | """
55 | if not torch.is_tensor(x):
56 | x = torch.Tensor(x)
57 | if len(x.shape) == 1:
58 |             x = torch.unsqueeze(x, dim=0)
59 | return x
--------------------------------------------------------------------------------
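A minimal usage sketch for the MLP above, mirroring how datasets/multilabel_concept_data.py constructs and queries it. The raw-state width below is a placeholder, and transform_input (from src/utils/input_utils.py) is not included in this dump, so the derived input_dim is only illustrative.

# Sketch only: the raw-state width (14) is hypothetical; input_dim must match the
# feature size produced by transform_input for the chosen concept.
import torch
from src.models.MLP import MLP
from src.utils.input_utils import transform_input

concept = "above45"
raw_state = torch.rand(1, 14)                            # hypothetical raw-state vector
input_dim = transform_input(raw_state, concept).shape[1]

model = MLP(nb_layers=2, nb_units=64, input_dim=input_dim, concept=concept)
prob = torch.sigmoid(model(raw_state))                   # concept probability in (0, 1)
print(prob.item())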
/scripts/gen_copyright_headers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | header_c="/*
4 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
5 | *
6 | * NVIDIA CORPORATION and its licensors retain all intellectual property
7 | * and proprietary rights in and to this software, related documentation
8 | * and any modifications thereto. Any use, reproduction, disclosure or
9 | * distribution of this software and related documentation without an express
10 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
11 | */"
12 |
13 | header_s="# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
14 | #
15 | # NVIDIA CORPORATION and its licensors retain all intellectual property
16 | # and proprietary rights in and to this software, related documentation
17 | # and any modifications thereto. Any use, reproduction, disclosure or
18 | # distribution of this software and related documentation without an express
19 | # license agreement from NVIDIA CORPORATION is strictly prohibited."
20 |
21 | SRC_DIR=../
22 | if [ -n "$1" ]
23 | then
24 | SRC_DIR=$1
25 | fi
26 |
27 | for f in $(find "${SRC_DIR}" -name '*.cpp' -or -name '*.h'); do
28 | echo -n $f;
29 | if grep -q "Copyright (c)" $f; then
30 | echo " Copyright not needed"
31 | else
32 | echo " Copyright needed"
33 | echo -e "$header_c\n\n" > $f.new
34 | cat $f >> $f.new
35 | mv $f.new $f
36 | fi
37 | done
38 |
39 | #for f in $(find . -name 'Makefile*'); do
40 | # for f in Makefile; do
41 | for f in $(find "${SRC_DIR}" -name '*.py'); do
42 | echo -n $f;
43 | if grep -q "Copyright (c)" $f; then
44 | echo " Copyright not needed"
45 | else
46 | echo " Copyright needed"
47 | echo -e "$header_s\n\n" > $f.new
48 | cat $f >> $f.new
49 | mv $f.new $f
50 | fi
51 | done
52 |
53 |
--------------------------------------------------------------------------------
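Usage note: gen_copyright_headers.sh walks a source tree (its parent directory by default, or the directory passed as a single argument) and prepends the C-style header to .cpp/.h files and the hash-comment header to .py files, skipping any file that already contains a "Copyright (c)" line, so re-running it does not duplicate headers.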
/src/datasets/create_urdf_objects.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import os
6 | import glob
7 |
8 | urdf_template = """
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 | """
28 |
29 |
30 | def obj_to_urdf(obj_dir):
31 | """
32 | Takes a directory of object directories containing meshes and textures and converts them to urdf.
33 | """
34 | obj_asset_dirs = sorted(glob.glob("{}/**".format(obj_dir)))
35 |
36 | for obj in obj_asset_dirs:
37 | name = os.path.relpath(obj, obj_dir)
38 | obj_filename = obj + "/textured.obj"
39 | mtl_filename = obj + "/textured.mtl"
40 | urdf_filename = obj + "/textured.urdf"
41 | urdf = urdf_template.format(name=name,
42 | obj_path=obj_filename,
43 | mtl_path=mtl_filename,
44 | scale=1.0)
45 | with open(urdf_filename, 'w') as f:
46 | f.write(urdf + "\n")
47 |
48 | if __name__ == '__main__':
49 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/..")
50 | obj_asset_root = os.path.abspath(parent_dir + "/data/ycb_objects/")
51 | obj_to_urdf(obj_asset_root)
--------------------------------------------------------------------------------
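The body of urdf_template above renders as blank lines in this dump; the XML markup appears to have been stripped during export and the original text is not recoverable here. Given the format keys used in obj_to_urdf (name, obj_path, mtl_path, scale), it was presumably a single-link URDF roughly along these lines. This is a hypothetical reconstruction, not the original template; the mtl_path placeholder may have appeared in a comment or material reference as it does here.

# Hypothetical reconstruction only; the real template's XML was lost in this export.
urdf_template = """<?xml version="1.0"?>
<robot name="{name}">
  <!-- mesh: {obj_path}, material: {mtl_path} -->
  <link name="{name}">
    <visual>
      <geometry>
        <mesh filename="{obj_path}" scale="{scale} {scale} {scale}"/>
      </geometry>
    </visual>
    <collision>
      <geometry>
        <mesh filename="{obj_path}" scale="{scale} {scale} {scale}"/>
      </geometry>
    </collision>
  </link>
</robot>
"""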
/src/mpc/concept_cost.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | import os
7 | import torch.nn as nn
8 |
9 | from storm_kit.util_file import get_assets_path, join_path, get_weights_path
10 | from src.models.pointnet import PointNetEncoder
11 |
12 | class ConceptCost(nn.Module):
13 | def __init__(self, weight=None, nn_weight_file='',
14 | tensor_args={'device':torch.device('cpu'), 'dtype':torch.float32}):
15 | super(ConceptCost, self).__init__()
16 | self.tensor_args = tensor_args
17 | self.weight = torch.as_tensor(weight, **self.tensor_args)
18 |
19 | self.load_nn(nn_weight_file)
20 |
21 | def load_nn(self, weight_file_name):
22 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
23 | checkpoints_dir = os.path.abspath(parent_dir + "/data/models/")
24 | model_data = torch.load(join_path(checkpoints_dir, weight_file_name))
25 | pointnet_radius = 0.5
26 | pointnet_nclusters = 512
27 | scale = 1
28 | in_features = 2
29 |
30 | model = PointNetEncoder(pointnet_radius, pointnet_nclusters, scale, in_features)
31 | model = model.to(**self.tensor_args)
32 | model.load_state_dict(model_data)
33 | self.model = model.to(**self.tensor_args)
34 | self.model.eval()
35 |
36 | def forward(self, points):
37 | def l2(x, y):
38 | return (x-y)**2
39 | outputs = torch.sigmoid(self.model(points)).squeeze()
40 | labels = torch.full((outputs.shape[0], ), 0.0, requires_grad=True, device=self.tensor_args['device'])
41 |         # cost = self.weight * self.model(points).squeeze()  # unused duplicate forward pass
42 | #return cost
43 | return self.weight * l2(outputs, labels)
44 |
45 |
--------------------------------------------------------------------------------
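A usage sketch for ConceptCost, assuming a trained PointNet checkpoint exists under data/models/ (the file name below is a placeholder; real names follow the patterns in the NGC scripts, e.g. 'oracle_pointcloud_0.pt'). Each point carries xyz plus the two segmentation channels (moving, anchor) expected by PointNetEncoder with in_features=2.

# Sketch only: the checkpoint name is a placeholder and must exist under data/models/.
import torch
from src.mpc.concept_cost import ConceptCost

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tensor_args = {'device': device, 'dtype': torch.float32}
cost_fn = ConceptCost(weight=100.0,
                      nn_weight_file='above45/oracle_pointcloud_0.pt',
                      tensor_args=tensor_args)

# Batch of 4 point clouds, 1024 points each: (x, y, z, seg_moving, seg_anchor).
points = torch.rand(4, 1024, 5, **tensor_args)
cost = cost_fn(points)      # weight * (sigmoid(logit) - 0)^2 per point cloud
print(cost.shape)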
/src/utils/visualize_pointcloud_samples.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import os, sys
6 | import yaml
7 | import argparse
8 | import numpy as np
9 | import random
10 |
11 | sys.path.insert(1, '../')
12 |
13 | import torch
14 | from torch import nn
15 | from torch.utils.data import DataLoader, Subset
16 |
17 | from src.utils.data_utils import PointDataset
18 | from src.utils.geom_utils import show_pcs_with_frame
19 |
20 | if __name__ == "__main__":
21 | # Parse args.
22 | parser = argparse.ArgumentParser(description='pass args')
23 | parser.add_argument('--concept', type=str, default='above180', help='data directory')
24 | args = parser.parse_args()
25 |
26 | # Load in data folder and labels.
27 | here = os.path.dirname(os.path.abspath(__file__))
28 |
29 | data_filename = here+"/../../data/concept_shapenet/data.hdf5"
30 | label_filename = here+"/../../data/concept_shapenet/{}/label.hdf5".format(args.concept)
31 | split_filename = here+"/../../data/concept_shapenet/{}/test.txt".format(args.concept)
32 |
33 | torch.manual_seed(0)
34 | random.seed(0)
35 | np.random.seed(0)
36 |
37 | samples = 500
38 | train_set = PointDataset(data_filename, label_filename, split_filename)
39 | indices = random.sample(range(len(train_set)), samples)
40 | train_set = Subset(train_set, indices)
41 |
42 | # Sort the training set based on uid.
43 | idxes = []
44 | for i in range(len(train_set)):
45 | data, label = train_set[i]
46 |         idx = train_set.dataset.examples[train_set.indices[i]].split("_")[0]
47 | idxes.append(int(idx))
48 | idxes = np.argsort(idxes)
49 |
50 | pts = []
51 | # Shift all points so that they're anchor-centered
52 | for i in range(samples):
53 | idx = idxes[i]
54 | data, label = train_set[idx]
55 | pt = (data[:3] - data[7:10])
56 | color = np.zeros((3))
57 | color[0] = label.item()
58 | pts.append(np.hstack((pt, color)))
59 | pts = np.array(pts)
60 |
61 | # Plot the pcs with label as color.
62 | show_pcs_with_frame(pts, [0,0,0])
--------------------------------------------------------------------------------
/src/models/resnet.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | from torch import nn
7 | from torchvision import models
8 | import ssl
9 | ssl._create_default_https_context = ssl._create_unverified_context
10 |
11 | from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
12 | """
13 | import torchvision.models
14 | from torchvision.models.vgg import model_urls
15 |
16 | model_urls['vgg16'] = model_urls['vgg16'].replace('https://', 'http://')
17 | vgg16 = torchvision.models.vgg16(pretrained=True)
18 | """
19 |
20 | class PretrainedImageEncoder(nn.Module):
21 | """
22 | Compute resnet features for the image
23 | maxpool over object mask or bounding box
24 | """
25 |
26 | def __init__(self, input_dim, frozen=True):
27 | super(PretrainedImageEncoder, self).__init__()
28 | self.dim = input_dim
29 | model_conv = models.resnet18(pretrained=True)
30 | if frozen:
31 | for param in model_conv.parameters():
32 | param.requires_grad = False
33 | num_features = model_conv.fc.in_features
34 | last_layer = "fc"
35 | modules = []
36 | print("--- LOADING WEIGHTS ---")
37 | for n, c in model_conv.named_children():
38 | print(n, last_layer, n == last_layer)
39 | if n == last_layer:
40 | print("--- END MODEL HERE ---")
41 | break
42 | modules.append(c)
43 | print("NUM FEATURES =", num_features)
44 | self.num_features = num_features
45 | self.extractor = nn.Sequential(*modules)
46 | self.linear = nn.Linear(num_features, self.dim)
47 | self.norm = nn.LayerNorm(self.dim)
48 |
49 | def encode(self, inp):
50 | return self.forward(inp)
51 |
52 | def forward(self, inp):
53 | x = self.extractor(inp)
54 | x = x.view(inp.shape[0], self.num_features)
55 | x = self.linear(x)
56 | return self.norm(x)
57 |
--------------------------------------------------------------------------------
/src/utils/visualize_AL_samples.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import os, sys
6 | import yaml
7 | import argparse
8 | import numpy as np
9 | import random
10 |
11 | sys.path.insert(1, '../')
12 |
13 | import torch
14 | from torch import nn
15 | from torch.utils.data import DataLoader, Subset
16 |
17 | from src.utils.data_utils import RawStateDataset
18 | from src.utils.geom_utils import show_pcs_with_frame
19 |
20 | if __name__ == "__main__":
21 | # Parse args.
22 | parser = argparse.ArgumentParser(description='pass args')
23 | parser.add_argument('--concept', type=str, default='above180', help='data directory')
24 | parser.add_argument('--strategy', type=str, default='random', help='model file')
25 | args = parser.parse_args()
26 |
27 | # Load in data folder and labels.
28 | here = os.path.dirname(os.path.abspath(__file__))
29 | data_filename = here+"/../../data/g_shapenet/{}/{}gt_data.hdf5".format(args.concept, args.strategy)
30 | label_filename = here+"/../../data/g_shapenet/{}/{}gt_label.hdf5".format(args.concept, args.strategy)
31 | split_filename = None
32 |
33 | #data_filename = here+"/../../data/concept_shapenet/data.hdf5"
34 | #label_filename = here+"/../../data/concept_shapenet/{}/label.hdf5".format(args.concept)
35 | #split_filename = here+"/../../data/concept_shapenet/{}/train.txt".format(args.concept)
36 |
37 | torch.manual_seed(0)
38 | random.seed(0)
39 | np.random.seed(0)
40 |
41 | samples = 500
42 | train_set = RawStateDataset(data_filename, label_filename, split_filename)
43 | indices = random.sample(range(len(train_set)), samples)
44 | #train_set = Subset(train_set, indices)
45 |
46 | # Sort the training set based on uid.
47 | idxes = []
48 | for i in range(len(train_set)):
49 | data, label = train_set[i]
50 | idx = train_set.examples[i].split("_")[0]
51 | idxes.append(int(idx))
52 | idxes = np.argsort(idxes)
53 |
54 | pts = []
55 | # Shift all points so that they're anchor-centered
56 | for i in range(samples):
57 | idx = idxes[i]
58 | data, label = train_set[idx]
59 | pt = (data[:3] - data[7:10])
60 | color = np.zeros((3))
61 | color[0] = label.item()
62 | pts.append(np.hstack((pt, color)))
63 | pts = np.array(pts)
64 |
65 | # Plot the pcs with label as color.
66 | show_pcs_with_frame(pts, [0,0,0])
--------------------------------------------------------------------------------
/src/train/get_new_pose.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | import sys, os
7 | import numpy as np
8 |
9 | sys.path.insert(1, '../')
10 |
11 | from src.utils.train_utils import evaluate_concept
12 | from src.models.pointnet import PointNetEncoder
13 |
14 | def get_new_pose(input_moving_object_pointcloud, input_static_object_pointcloud):
15 | """
16 | input_moving_object_pointcloud is N_pts1 x 3
17 | input_static_object_pointcloud is N_pts2 x 3
18 | """
19 |
20 | # Set up data and model parameters.
21 | device = ("cuda" if torch.cuda.is_available() else "cpu")
22 |     model_path = os.path.dirname(os.path.abspath(__file__)) + "/../../data/models/above180/pointcloud_classification.pt"
23 |
24 | model_params = {}
25 | model_params["pointnet_radius"] = 0.5
26 | model_params["pointnet_nclusters"] = 512
27 | model_params["scale"] = 1
28 | model_params["in_features"] = 2
29 |
30 | # Define model, optimization, and loss.
31 | model = PointNetEncoder(**model_params).to(device)
32 | model.load_state_dict(torch.load(model_path))
33 | model.eval()
34 |
35 | # Process input.
36 | pc = np.vstack((input_moving_object_pointcloud, input_static_object_pointcloud))
37 | anchor_center = np.mean(input_static_object_pointcloud, axis=0)
38 | pc -= anchor_center
39 | seg1 = np.vstack((np.ones((input_moving_object_pointcloud.shape[0],1)), np.zeros((input_static_object_pointcloud.shape[0],1))))
40 | seg2 = np.vstack((np.zeros((input_moving_object_pointcloud.shape[0],1)), np.ones((input_static_object_pointcloud.shape[0],1))))
41 | old_state = torch.tensor(np.hstack((pc, seg1, seg2))).float()
42 | T, _ = evaluate_concept(model, (old_state, [], 1.0), dtype="pointcloud", \
43 | opt="CEM", batch_size=100, epochs=10, device=device)
44 | return T # this is xyz, wijk
45 |
46 | if __name__ == '__main__':
47 | # Test the function.
48 | from src.utils.data_utils import OptimizationDataset
49 | here = os.path.dirname(os.path.abspath(__file__))
50 | data_path = here + "/../../data/test_shapenet/"
51 | concept = "above180"
52 | dataset = OptimizationDataset(data_path+"/data.hdf5", split_path=data_path+concept+'/test.txt', sample=False)
53 |
54 | idx = np.random.choice(range(len(dataset)))
55 | sample = dataset[idx]
56 | state = sample[0]
57 |
58 | moving_pts = state[state[:, 3]==1,:3]
59 | anchor_pts = state[state[:, 4]==1,:3]
60 |
61 | T = get_new_pose(moving_pts.cpu().detach().numpy(), anchor_pts.cpu().detach().numpy())
62 | print(T)
--------------------------------------------------------------------------------
/src/datasets/multilabel_concept_data.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Generate concept data on the cpu or the gpu.
7 | """
8 |
9 | import numpy as np
10 | import random
11 | import argparse
12 | import os, sys
13 |
14 | sys.path.insert(1, '../')
15 |
16 | from isaacgym import gymtorch
17 | import torch
18 |
19 | from src.utils.concept_utils import *
20 | from src.utils.input_utils import Hdf5Cacher, transform_input
21 |
22 | from src.models.MLP import MLP
23 |
24 | torch.set_default_tensor_type(torch.FloatTensor)
25 | np.set_printoptions(precision=2)
26 |
27 |
28 | class MultiConceptLabeler(object):
29 | def __init__(self, args):
30 | path = args.data_dir + "/data.hdf5"
31 | self.concept = args.concept
32 | self.hdf5cacher = Hdf5Cacher(path, "a")
33 | self.examples = list(self.hdf5cacher._hdf5().keys())
34 | print("Loading dataset with {} examples".format(len(self.examples)))
35 |
36 | def label_data(self):
37 | device = ("cuda" if torch.cuda.is_available() else "cpu")
38 | sample = self.hdf5cacher.__getitem__(self.examples[0])
39 | raw_state = sample["raw_state"].astype(np.float32)
40 | input_dim = transform_input(torch.tensor(raw_state).unsqueeze(0), self.concept).shape[1]
41 | train_amts = [50, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
42 |
43 | for train_amt in train_amts:
44 |             model_path = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../../data/models/{}/rawstate_classification_{}.pt".format(self.concept, train_amt))
45 | model = MLP(2, 64, input_dim, self.concept).to(device)
46 | model.load_state_dict(torch.load(model_path))
47 | model.eval()
48 | label_type = "label_{}".format(train_amt)
49 | for example in self.examples:
50 | sample = self.hdf5cacher.__getitem__(example)
51 | raw_state = sample["raw_state"]
52 | raw_state = torch.tensor(raw_state, device=device, dtype=torch.float32)
53 | label = torch.sigmoid(model(raw_state)).cpu().detach().numpy()
54 | sample[label_type] = label
55 | self.hdf5cacher.__setitem__(example, sample)
56 |
57 | if __name__ == '__main__':
58 | # instantiate empty gym:
59 | parser = argparse.ArgumentParser(description='pass args')
60 | parser.add_argument('--concept', type=str, default='above180', help='concept')
61 |
62 | args = parser.parse_args()
63 |
64 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
65 | args.data_dir = os.path.abspath(parent_dir + "/data/concept_shapenet/")
66 |
67 | generator = MultiConceptLabeler(args)
68 | generator.label_data()
69 | generator.hdf5cacher.close()
--------------------------------------------------------------------------------
/src/datasets/collect_human_data.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Generate concept data on the cpu or the gpu.
7 | """
8 |
9 | import numpy as np
10 | import argparse
11 | import os, sys
12 |
13 | sys.path.insert(1, '../')
14 |
15 | from src.datasets.passive_querier import PassiveQuerier
16 | from src.datasets.active_querier import ActiveQuerier
17 |
18 | import torch
19 |
20 | torch.set_default_tensor_type(torch.FloatTensor)
21 | np.set_printoptions(precision=2)
22 |
23 |
24 | if __name__ == '__main__':
25 | # instantiate empty gym:
26 | parser = argparse.ArgumentParser(description='pass args')
27 | parser.add_argument('--config', type=str, default="/../../configs/rawstate.yaml", help='config file')
28 | parser.add_argument('--concept', type=str, default='above180', help='concept')
29 | parser.add_argument('--simulated', action='store_true', default=False, help='cuda')
30 | parser.add_argument('--active_samples', type=int, default=100, help='samples')
31 | parser.add_argument('--passive_samples', type=int, default=10, help='samples')
32 | parser.add_argument('--batch_size', type=int, default=10, help='batch for active learning')
33 | parser.add_argument('--objective', type=str, default="random", help='type of AL strategy')
34 | parser.add_argument('--warmstart', type=int, default=0, help='first epochs only random')
35 | parser.add_argument('--mining', type=int, default=500, help='first epochs mine for positives')
36 | args = parser.parse_args()
37 |
38 | args.cuda = False
39 | args.envs = 1
40 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
41 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
42 |
43 | # First collect some demonstration data if needed.
44 | if args.passive_samples > 0:
45 | args.headless = False
46 | passive_generator = PassiveQuerier(args, asset_root)
47 | data_filename, label_filename = passive_generator.collect_data(args.concept, N_queries=args.passive_samples, query_type="demo")
48 | passive_generator.kill_instance()
49 |
50 | save_dir = parent_dir + "/data/g_shapenet/" + "{}/".format(args.concept)
51 | data_filename = save_dir+"/demo_gt_data.hdf5"
52 | label_filename = save_dir+"/demo_gt_label.hdf5"
53 |
54 | if args.active_samples > 0:
55 | args.headless = True if args.simulated else False
56 |
57 | active_generator = ActiveQuerier(args, asset_root)
58 |
59 | # Collect data.
60 | active_generator.reset_model()
61 |
62 | if args.passive_samples > 0:
63 | # Warmstart the model.
64 | active_generator.retrain(data_filename, label_filename)
65 |
66 | errors = active_generator.collect_data(args.concept, N_queries=args.active_samples, objective=args.objective,\
67 | warmstart=args.warmstart, mining=args.mining)
68 | print(errors)
69 | active_generator.kill_instance()
--------------------------------------------------------------------------------
/src/world/basic_world.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Empty world on the cpu or the gpu.
7 | """
8 | from isaacgym import gymapi
9 |
10 | import numpy as np
11 | import yaml
12 | import argparse
13 | import glob
14 | import os, sys
15 |
16 | sys.path.insert(1, '../')
17 |
18 | import trimesh
19 |
20 | from storm_kit.gym.core import Gym
21 | from storm_kit.util_file import get_gym_configs_path, join_path, load_yaml, get_assets_path
22 |
23 | np.set_printoptions(precision=3)
24 |
25 |
26 | class BasicWorld(object):
27 | def __init__(self, args, asset_root):
28 | # Create gym instance.
29 | self.cuda = args.cuda
30 | sim_params = load_yaml(join_path(get_gym_configs_path(), 'physx.yml'))
31 | sim_params['headless'] = args.headless
32 | sim_params['sim_params']['use_gpu_pipeline'] = True if self.cuda else False
33 |
34 | self.gym_instance = Gym(**sim_params)
35 | self.gym = self.gym_instance.gym
36 | self.sim = self.gym_instance.sim
37 |
38 | # Object folders and options.
39 | asset_options = gymapi.AssetOptions()
40 | asset_options.armature = 0.001
41 | asset_options.thickness = 0.002
42 | asset_options.fix_base_link = True
43 | asset_options.flip_visual_attachments = False
44 |
45 | if "ycb" in asset_root:
46 | obj_urdf_files = sorted(glob.glob("{}/**/textured.urdf".format(asset_root)))
47 | obj_mesh_files = sorted(glob.glob("{}/**/textured.obj".format(asset_root)))
48 | elif "shapenet" in asset_root:
49 | obj_urdf_files = sorted(glob.glob("{}/urdf/*.urdf".format(asset_root)))
50 | obj_mesh_files = sorted(glob.glob("{}/meshes/*.obj".format(asset_root)))
51 | obj_urdf_files = [os.path.relpath(i, asset_root) for i in obj_urdf_files]
52 | self.obj_assets = [self.gym.load_asset(self.sim, asset_root, urdf, asset_options) for urdf in obj_urdf_files]
53 | self.obj_meshes= [trimesh.load(obj_mesh_file) for obj_mesh_file in obj_mesh_files]
54 | self.num_objs = len(obj_urdf_files)
55 | print("Loaded object assets.")
56 |
57 | # Some world parameters.
58 | world_file = 'collision_table.yml'
59 | world_yml = join_path(get_gym_configs_path(), world_file)
60 | with open(world_yml) as file:
61 | self.world_params = yaml.safe_load(file)
62 |
63 | self.num_envs = args.envs
64 |
65 |
66 | if __name__ == '__main__':
67 | # instantiate empty gym:
68 | parser = argparse.ArgumentParser(description='pass args')
69 | parser.add_argument('--headless', action='store_true', default=False, help='headless gym')
70 | parser.add_argument('--cuda', action='store_true', default=False, help='cuda')
71 | parser.add_argument('--envs', type=int, default=1, help='number of parallel environments')
72 | args = parser.parse_args()
73 |
74 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
75 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
76 | object_world = BasicWorld(args, asset_root)
--------------------------------------------------------------------------------
/src/models/pointnet.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | import torch.optim.lr_scheduler as lr_sched
9 | from pointnet2_ops.pointnet2_modules import PointnetSAModule
10 |
11 |
12 | class PointNetEncoder(nn.Module):
13 | def __init__(self, pointnet_radius, pointnet_nclusters, scale, in_features):
14 | super().__init__()
15 |
16 | self._build_model(pointnet_radius, pointnet_nclusters, scale, in_features)
17 |
18 | def _build_model(self, pointnet_radius, pointnet_nclusters, scale, in_features):
19 | # The number of input features is 3+additional input info where 3
20 | # represents the x, y, z position of the point-cloud
21 |
22 | self.SA_modules = nn.ModuleList()
23 | self.SA_modules.append(
24 | PointnetSAModule(
25 | npoint=pointnet_nclusters,
26 | radius=pointnet_radius,
27 | nsample=64,
28 | mlp=[in_features, 64 * scale, 64 * scale, 128 * scale]
29 | )
30 | )
31 | self.SA_modules.append(
32 | PointnetSAModule(
33 | npoint=128,
34 | radius=0.4,
35 | nsample=64,
36 | mlp=[128 * scale, 128 * scale, 128 * scale, 256 * scale]
37 | )
38 | )
39 | self.SA_modules.append(
40 | PointnetSAModule(
41 | mlp=[256 * scale, 256 * scale, 512 * scale, 1024 * scale]
42 | )
43 | )
44 |
45 | self.fc_layer = nn.Sequential(
46 | nn.Linear(1024 * scale, 512 * scale),
47 | nn.BatchNorm1d(512 * scale),
48 | nn.ReLU(True),
49 | nn.Linear(512 * scale, 256 * scale),
50 | nn.BatchNorm1d(256 * scale),
51 | nn.ReLU(True),
52 | nn.Dropout(0.5),
53 |             nn.Linear(256 * scale, 1)
54 | )
55 |
56 | def _break_up_pc(self, pc):
57 | xyz = pc[..., 0:3].contiguous()
58 | features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
59 |
60 | return xyz, features
61 |
62 | def forward(self, pointcloud):
63 | """
64 | Forward pass of the network
65 | Parameters
66 | ----------
67 | pointcloud: Variable(torch.cuda.FloatTensor)
68 | (B, N, 3 + input_channels) tensor
69 |                 Point cloud to run predictions on
70 | Each point in the point-cloud MUST
71 |                 be formatted as (x, y, z, features...)
72 | """
73 | xyz, features = self._break_up_pc(pointcloud)
74 |
75 | for module in self.SA_modules:
76 | xyz, features = module(xyz, features)
77 |
78 | return self.fc_layer(features.squeeze(-1))
79 |
80 |
81 | if __name__ == '__main__':
82 | device = ("cuda" if torch.cuda.is_available() else "cpu")
83 | pointnet_radius = 0.02
84 | pointnet_nclusters = 128
85 | scale = 1
86 | in_features = 1
87 |
88 | points = torch.rand(2, 1024, 3 + in_features).to(device)
89 | model = PointNetEncoder(pointnet_radius, pointnet_nclusters, scale, in_features).to(device)
90 |
91 | output = model(points)
92 | print("Output: ", output.shape)
93 | print(output)
94 |
--------------------------------------------------------------------------------
/src/utils/test_concept.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Visualize the ground-truth value of a concept for a spawned moving and anchor object.
7 | """
8 |
9 | import numpy as np
10 | import random
11 | import argparse
12 | import os, sys
13 |
14 | sys.path.insert(1, '../')
15 |
16 | from src.utils.gym_utils import *
17 | from src.world.object_world import ObjectWorld
18 | from src.utils.concept_utils import *
19 |
20 | np.set_printoptions(precision=2)
21 |
22 |
23 | class ConceptTester(object):
24 | def __init__(self, args, asset_root):
25 | self.object_world = ObjectWorld(args, asset_root)
26 |
27 | def test_concept(self, concept):
28 | from storm_kit.util_file import get_assets_path
29 |
30 | gym = self.object_world.gym
31 | sim = self.object_world.sim
32 | gym_instance = self.object_world.gym_instance
33 | world = self.object_world.worlds[0]
34 | num_actors = gym.get_sim_actor_count(sim)
35 |
36 | obj_asset_file = "urdf/mug/moving.urdf"
37 | obj_asset_root = get_assets_path()
38 | moving_color = gymapi.Vec3(0.8, 0.1, 0.1)
39 | anchor_color = gymapi.Vec3(0.1, 0.8, 0.1)
40 | # Spawn moving.
41 | moving_object = world.spawn_object(obj_asset_file, obj_asset_root, gymapi.Transform(), name='moving_object')
42 | moving_body_handle = gym.get_actor_rigid_body_handle(world.env_ptr, moving_object, 6)
43 | gym.set_rigid_body_color(world.env_ptr, moving_object, 0, gymapi.MESH_VISUAL_AND_COLLISION, moving_color)
44 | gym.set_rigid_body_color(world.env_ptr, moving_object, 6, gymapi.MESH_VISUAL_AND_COLLISION, moving_color)
45 | # Spawn anchor.
46 | #obj_asset_root = asset_root
47 | #obj_asset_file = "urdf/Teapot_7c381f85d3b6e8f46a47bc678e9c8907_L.urdf"
48 | #obj_asset_file = "urdf/Candle_bf7150430f5b5aa12f841233486fac2b_L.urdf"
49 | anchor_object = world.spawn_object(obj_asset_file, obj_asset_root, gymapi.Transform(), name='anchor_object')
50 | anchor_body_handle = gym.get_actor_rigid_body_handle(world.env_ptr, anchor_object, 6)
51 | gym.set_rigid_body_color(world.env_ptr, anchor_object, 0, gymapi.MESH_VISUAL_AND_COLLISION, anchor_color)
52 | gym.set_rigid_body_color(world.env_ptr, anchor_object, 6, gymapi.MESH_VISUAL_AND_COLLISION, anchor_color)
53 | world.anchor = world.anchors[0]
54 | world.anchor.agent_handle = anchor_body_handle
55 | world.moving = world.movings[0]
56 | world.moving.agent_handle = moving_body_handle
57 |
58 | while True:
59 | try:
60 | raw_state = get_raw_state(self.object_world, world)
61 | print(concept_value(raw_state, concept))
62 | gym_instance.step()
63 | except KeyboardInterrupt:
64 | print('Closing')
65 | done = True
66 | break
67 | return
68 |
69 |
70 | if __name__ == '__main__':
71 | parser = argparse.ArgumentParser(description='pass args')
72 | parser.add_argument('--concept', type=str, default='above180', help='concept')
73 | args = parser.parse_args()
74 |
75 | args.headless = False
76 | args.cuda = False
77 | args.envs = 1
78 |
79 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
80 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
81 |
82 | tester = ConceptTester(args, asset_root)
83 | tester.test_concept(args.concept)
--------------------------------------------------------------------------------
/src/world/object_world.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Object world containing anchor and moving objects, used for concept data generation on the cpu or the gpu.
7 | """
8 | from isaacgym import gymapi
9 |
10 | import numpy as np
11 | import yaml
12 | import argparse
13 | import glob
14 | import os, sys
15 |
16 | sys.path.insert(1, '../')
17 |
18 | from storm_kit.gym.core import World
19 |
20 | from src.utils.camera_utils import *
21 | from src.world.basic_world import BasicWorld
22 | from src.utils.geom_utils import SegLabel
23 | from src.utils.gym_utils import WorldObject
24 |
25 | np.set_printoptions(precision=3)
26 |
27 |
28 | class ObjectWorld(BasicWorld):
29 | def __init__(self, args, asset_root):
30 | super(ObjectWorld, self).__init__(args, asset_root)
31 |
32 | # Create gym environments one by one.
33 | self.worlds = []
34 | self.storage_pose = gymapi.Transform(gymapi.Vec3(100, 100, 100), gymapi.Quat(0, 0, 0, 1))
35 | spacing = 5.0
36 | lower = gymapi.Vec3(-spacing, 0.0, -spacing)
37 | upper = gymapi.Vec3(spacing, spacing, spacing)
38 |
39 | for env_idx in range(self.num_envs):
40 | env_ptr = self.gym.create_env(self.sim, lower, upper, int(np.sqrt(self.num_envs)))
41 | self.gym_instance.env_list.append(env_ptr)
42 | world = World(self.gym, self.sim, env_ptr, self.world_params, w_T_r=gymapi.Transform())
43 | world.anchors = []
44 | world.movings = []
45 |
46 | for obj_idx in range(self.num_objs):
47 | # Anchor.
48 | obj_handle = self.gym.create_actor(env_ptr, self.obj_assets[obj_idx], self.storage_pose,
49 | 'anc{}_env{}'.format(obj_idx, env_idx), 2, 2, SegLabel.ANCHOR.value)
50 | obj_body_handle = self.gym.get_actor_rigid_body_handle(env_ptr, obj_handle, 0)
51 | world.anchors.append(WorldObject(obj_handle, obj_body_handle, obj_idx))
52 | # Moving.
53 | obj_handle = self.gym.create_actor(env_ptr, self.obj_assets[obj_idx], self.storage_pose,
54 | 'mov{}_env{}'.format(obj_idx, env_idx), 2, 2, SegLabel.MOVING.value)
55 | obj_body_handle = self.gym.get_actor_rigid_body_handle(env_ptr, obj_handle, 0)
56 | world.movings.append(WorldObject(obj_handle, obj_body_handle, obj_idx))
57 |
58 | # Spawn camera.
59 | spawn_camera(world, 60, 640, 480, cuda=self.cuda)
60 |
61 | # Save world and mesh files.
62 | self.worlds.append(world)
63 |
64 | if self.cuda:
65 | self.gym.prepare_sim(self.sim)
66 | self._root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
67 | else:
68 | self._root_tensor = None
69 | print("Worlds initialized.")
70 |
71 | if __name__ == '__main__':
72 | # instantiate empty gym:
73 | parser = argparse.ArgumentParser(description='pass args')
74 | parser.add_argument('--headless', action='store_true', default=False, help='headless gym')
75 | parser.add_argument('--cuda', action='store_true', default=False, help='cuda')
76 | parser.add_argument('--envs', type=int, default=1, help='number of parallel environments')
77 | args = parser.parse_args()
78 |
79 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
80 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
81 | object_world = ObjectWorld(args, asset_root)
--------------------------------------------------------------------------------
/src/datasets/label_concept_data.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Label previously generated concept data with ground-truth or learned concept values.
7 | """
8 |
9 | import numpy as np
10 | import random
11 | import argparse
12 | import os, sys
13 |
14 | sys.path.insert(1, '../')
15 |
16 | from isaacgym import gymtorch
17 | import torch
18 |
19 | from src.utils.concept_utils import *
20 | from src.utils.input_utils import Hdf5Cacher, transform_input
21 |
22 | from src.models.MLP import MLP
23 |
24 | torch.set_default_tensor_type(torch.FloatTensor)
25 | np.set_printoptions(precision=2)
26 |
27 |
28 | class ConceptLabeler(object):
29 | def __init__(self, args):
30 | # Open data hdf5 cacher for read.
31 | data_path = args.data_dir + "/data.hdf5"
32 | self.hdf5cacher_read = Hdf5Cacher(data_path, "r")
33 | self.examples = list(self.hdf5cacher_read._hdf5().keys())
34 |
35 | print("Loading dataset with {} examples".format(len(self.examples)))
36 | self.concept = args.concept
37 | self.model = None
38 | self.label_type = "label"
39 | if args.concept_model is not None:
40 | device = ("cuda" if torch.cuda.is_available() else "cpu")
41 | sample = self.hdf5cacher_read.__getitem__(self.examples[0])
42 | raw_state = sample["raw_state"].astype(np.float32)
43 | input_dim = transform_input(torch.tensor(raw_state).unsqueeze(0), self.concept).shape[1]
44 | self.model = MLP(3, 256, input_dim, self.concept).to(device)
45 | self.model.load_state_dict(torch.load(args.concept_model))
46 | self.model.eval()
47 | model_name = args.concept_model.split("/")[-1]
48 | model_params = model_name.split("_")
49 | strat = model_params[0]
50 | strat_str = "" if strat == "oracle" else strat
51 | self.label_type = "label_{}{}".format(strat_str, model_params[-2])
52 |
53 | # Open label hdf5 cacher for write.
54 | label_dir = args.data_dir + "/" + args.concept
55 | if not os.path.isdir(label_dir):
56 | os.mkdir(label_dir)
57 | label_path = label_dir + "/{}.hdf5".format(self.label_type)
58 | self.hdf5cacher_write = Hdf5Cacher(label_path, "w")
59 |
60 | def label_data(self):
61 | for example in self.examples:
62 | sample = self.hdf5cacher_read.__getitem__(example)
63 | label_dict = {}
64 |
65 | raw_state = sample["raw_state"]
66 |
67 | if self.model is None:
68 | label = concept_value(raw_state, self.concept)
69 | else:
70 | device = ("cuda" if torch.cuda.is_available() else "cpu")
71 | raw_state = torch.tensor(raw_state, device=device, dtype=torch.float32)
72 | label = torch.sigmoid(self.model(raw_state)).cpu().detach().numpy()
73 | label_dict["label"] = label
74 | self.hdf5cacher_write.__setitem__(example, label_dict)
75 |
76 |
77 | if __name__ == '__main__':
78 |     # Parse args.
79 | parser = argparse.ArgumentParser(description='pass args')
80 | parser.add_argument('--concept', type=str, default='above180', help='concept')
81 | parser.add_argument('--concept_model', type=str, default=None, help='concept model')
82 |
83 | args = parser.parse_args()
84 |
85 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
86 | args.data_dir = os.path.abspath(parent_dir + "/data/concept_shapenet/")
87 | if args.concept_model is not None:
88 | args.concept_model = os.path.abspath(parent_dir + "/data/models/{}/".format(args.concept) + args.concept_model)
89 |
90 | generator = ConceptLabeler(args)
91 | generator.label_data()
92 | generator.hdf5cacher_read.close()
93 | generator.hdf5cacher_write.close()
--------------------------------------------------------------------------------
/src/datasets/generate_concept_data.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Generate data on the cpu or the gpu.
7 | """
8 |
9 | import numpy as np
10 | import random
11 | import copy
12 | import argparse
13 | import os, sys
14 |
15 | sys.path.insert(1, '../')
16 |
17 | from src.utils.camera_utils import *
18 | from src.utils.gym_utils import *
19 | from src.utils.input_utils import *
20 | from src.world.object_world import ObjectWorld
21 |
22 | from src.models.MLP import MLP
23 |
24 | np.set_printoptions(precision=2)
25 |
26 |
27 | class DataGenerator(object):
28 | def __init__(self, args, asset_root, save_dir):
29 | self.object_world = ObjectWorld(args, asset_root)
30 | self.save_dir = save_dir
31 |
32 | def generate_data(self, samples_per_env=1000):
33 | # Create dataset file to save data to.
34 | save_filename = self.save_dir+"/data.hdf5"
35 | hdf5cacher = Hdf5Cacher(save_filename, "w")
36 |
37 | gym = self.object_world.gym
38 | sim = self.object_world.sim
39 | gym_instance = self.object_world.gym_instance
40 | num_actors = gym.get_sim_actor_count(sim)
41 |
42 | for i in range(samples_per_env):
43 | try:
44 | # Move all objects randomly.
45 | anchor_idxes, moving_idxes = np.arange(self.object_world.num_objs), np.arange(self.object_world.num_objs)
46 | random.shuffle(anchor_idxes)
47 | random.shuffle(moving_idxes)
48 | for idx, world in enumerate(self.object_world.worlds):
49 | # Pick anchor and moving.
50 | world.anchor = world.anchors[anchor_idxes[idx]]
51 | world.moving = world.movings[moving_idxes[idx]]
52 | move_all_active_agents_to_random_pose(self.object_world)
53 | gym_instance.step()
54 |
55 | if self.object_world.cuda:
56 | gym.refresh_actor_root_state_tensor(sim)
57 | gym.render_all_camera_sensors(sim)
58 | gym.start_access_image_tensors(sim)
59 |
60 | # Save camera data.
61 | for world in self.object_world.worlds:
62 | camera_data = observe_camera(world, cuda=self.object_world.cuda)
63 | save_dict = get_camera_dict(camera_data)
64 | save_dict["raw_state"] = get_raw_state(self.object_world, world)
65 | uid = "{}_{}_{}".format(world.moving.idx, world.anchor.idx, i)
66 | hdf5cacher.__setitem__(uid, save_dict)
67 |
68 | # Move anchor and moving back to original position.
69 | move_all_active_agents_to_pose(self.object_world, self.object_world.storage_pose)
70 | gym_instance.step()
71 |
72 | if self.object_world.cuda:
73 | gym.refresh_actor_root_state_tensor(sim)
74 | gym.end_access_image_tensors(sim)
75 | print("Finished {}".format(i))
76 | except KeyboardInterrupt:
77 | print('Closing')
78 | done = True
79 | break
80 | hdf5cacher.close()
81 | return
82 |
83 |
84 | if __name__ == '__main__':
85 | parser = argparse.ArgumentParser(description='pass args')
86 | parser.add_argument('--headless', action='store_true', default=False, help='headless gym')
87 | parser.add_argument('--cuda', action='store_true', default=False, help='cuda')
88 | parser.add_argument('--samples', type=int, default=1000, help='samples')
89 | parser.add_argument('--envs', type=int, default=1, help='number of parallel environments')
90 |
91 | args = parser.parse_args()
92 |
93 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
94 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
95 | save_dir = os.path.abspath(parent_dir + "/data/concept_shapenet/")
96 |
97 | generator = DataGenerator(args, asset_root, save_dir)
98 | generator.generate_data(samples_per_env=args.samples)
--------------------------------------------------------------------------------
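Note: each sample above is cached under a key of the form "{moving.idx}_{anchor.idx}_{sample index}". split_data.py later recovers the moving and anchor object indices by splitting this key on underscores, so the two conventions must stay in sync. A tiny illustrative sketch of that parsing (the concrete key below is made up):

uid = "42_6_17"  # hypothetical key in the "{moving_idx}_{anchor_idx}_{sample_idx}" format
moving_idx, anchor_idx, sample_idx = (int(s) for s in uid.split("_"))
assert (moving_idx, anchor_idx, sample_idx) == (42, 6, 17)
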
/LICENSE:
--------------------------------------------------------------------------------
1 | NVIDIA Source Code License for concept_learning
2 |
3 | 1. Definitions
4 |
5 | "Licensor" means any person or entity that distributes its Work.
6 |
7 | "Software" means the original work of authorship made available under this
8 | License.
9 |
10 | "Work" means the Software and any additions to or derivative works of the
11 | Software that are made available under this License.
12 |
13 | The terms "reproduce," "reproduction," "derivative works," and "distribution"
14 | have the meaning as provided under U.S. copyright law; provided, however, that
15 | for the purposes of this License, derivative works shall not include works that
16 | remain separable from, or merely link (or bind by name) to the interfaces of,
17 | the Work.
18 |
19 | Works, including the Software, are "made available" under this License by
20 | including in or with the Work either (a) a copyright notice referencing the
21 | applicability of this License to the Work, or (b) a copy of this License.
22 |
23 | 2. License Grant
24 |
25 | 2.1 Copyright Grant. Subject to the terms and conditions of this License, each
26 | Licensor grants to you a perpetual, worldwide, non-exclusive, royalty-free,
27 | copyright license to reproduce, prepare derivative works of, publicly display,
28 | publicly perform, sublicense and distribute its Work and any resulting
29 | derivative works in any form.
30 |
31 | 3. Limitations
32 |
33 | 3.1 Redistribution. You may reproduce or distribute the Work only if (a) you do
34 | so under this License, (b) you include a complete copy of this License with your
35 | distribution, and (c) you retain without modification any copyright, patent,
36 | trademark, or attribution notices that are present in the Work.
37 |
38 | 3.2 Derivative Works. You may specify that additional or different terms apply
39 | to the use, reproduction, and distribution of your derivative works of the Work
40 | ("Your Terms") only if (a) Your Terms provide that the use limitation in Section
41 | 3.3 applies to your derivative works, and (b) you identify the specific
42 | derivative works that are subject to Your Terms. Notwithstanding Your Terms,
43 | this License (including the redistribution requirements in Section 3.1) will
44 | continue to apply to the Work itself.
45 |
46 | 3.3 Use Limitation. The Work and any derivative works thereof only may be used
47 | or intended for use non-commercially. Notwithstanding the foregoing, NVIDIA and
48 | its affiliates may use the Work and any derivative works commercially. As used
49 | herein, "non-commercially" means for research or evaluation purposes only.
50 |
51 | 3.4 Patent Claims. If you bring or threaten to bring a patent claim against any
52 | Licensor (including any claim, cross-claim or counterclaim in a lawsuit) to
53 | enforce any patents that you allege are infringed by any Work, then your rights
54 | under this License from such Licensor (including the grant in Section 2.1) will
55 | terminate immediately.
56 |
57 | 3.5 Trademarks. This License does not grant any rights to use any Licensor’s or
58 | its affiliates’ names, logos, or trademarks, except as necessary to reproduce
59 | the notices described in this License.
60 |
61 | 3.6 Termination. If you violate any term of this License, then your rights under
62 | this License (including the grant in Section 2.1) will terminate immediately.
63 |
64 | 4. Disclaimer of Warranty.
65 |
66 | THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
67 | EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
68 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT.
69 | YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE.
70 |
71 | 5. Limitation of Liability.
72 |
73 | EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY,
74 | WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE SHALL ANY
75 | LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL,
76 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATED TO THIS LICENSE,
77 | THE USE OR INABILITY TO USE THE WORK (INCLUDING BUT NOT LIMITED TO LOSS OF
78 | GOODWILL, BUSINESS INTERRUPTION, LOST PROFITS OR DATA, COMPUTER FAILURE OR
79 | MALFUNCTION, OR ANY OTHER COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR
80 | HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
81 |
--------------------------------------------------------------------------------
/src/train/check_transform.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import os, sys
6 | import yaml
7 | import argparse
8 | import numpy as np
9 | import random
10 |
11 | sys.path.insert(1, '../')
12 |
13 | from src.utils.geom_utils import *
14 | from src.utils.data_utils import OptimizationDataset as ObjectDataset
15 |
16 | import torch
17 | from torch import nn
18 | from torch.utils.data import DataLoader
19 |
20 |
21 | def sanity_check(old_state, old_rawstate):
22 | # Define input points.
23 | moving_idx = np.where(old_state[:,3]==1)[0]
24 | anchor_idx = np.where(old_state[:,4]==1)[0]
25 | notmoving_idx = np.where(old_state[:,3]==0)[0]
26 |
27 | # Extract the relevant points.
28 | idxes = np.hstack((moving_idx, anchor_idx))
29 | pc_old = old_state[idxes].unsqueeze(0)
30 |
31 | # Initialize transform around anchor.
32 | init_anchor_pts = pc_old[0, pc_old[0, :, 4]==1,:3].unsqueeze(0).detach().numpy()
33 | init_moving_pts = pc_old[0, pc_old[0, :, 3]==1,:3].unsqueeze(0).detach().numpy()
34 | #T = torch.tensor(initialize_T_around_pts(init_moving_pts, init_anchor_pts), dtype=torch.float32)
35 |
36 | xyz = np.random.randn(1,3) * 0.1
37 | quat = np.random.randn(1, 4)
38 | quat = quat / np.linalg.norm(quat, axis=1)[:, None]
39 | T = torch.tensor(np.hstack((xyz, quat)), dtype=torch.float32)
40 |
41 | # Move points.
42 | pc_old_shifted = copy.deepcopy(pc_old)
43 | moving_center = torch.mean(pc_old[:,pc_old[0, :, 3]==1,:3],axis=1).unsqueeze(1)
44 | pc_old_shifted[:,pc_old[0, :, 3]==1,:3] -= moving_center
45 | pc_new = move_points(pc_old_shifted, T)
46 | pc_new[:,pc_new[0, :, 3]==1,:3] += moving_center
47 | movingpc_new = pc_new[:, pc_new[0, :, 3]==1, :]
48 | new_state = torch.cat((movingpc_new[0], old_state[notmoving_idx]), dim=0)
49 |
50 | # Move rawstate.
51 | moving_center = torch.mean(pc_old[0,pc_old[0, :, 3]==1,:3],axis=0)
52 | rawstate = copy.deepcopy(old_rawstate).unsqueeze(0)
53 | rawstate[:,:3] -= moving_center
54 | new_rawstate = transform_rawstate(rawstate, T).float()
55 | new_rawstate[:,:3] += moving_center
56 | new_rawstate = new_rawstate.squeeze().detach().numpy()
57 |
58 | # Show old state and new state.
59 | show_pcs_with_frame(old_state.cpu().detach().numpy(), [0,0,0]) # center at anchor in pre-moving state
60 | show_pcs_with_frame(old_state.cpu().detach().numpy(), old_rawstate[:3]) # center at moving object
61 | show_pcs_with_frame(new_state.cpu().detach().numpy(), [0,0,0]) # center at anchor in post-moving state
62 | show_pcs_with_frame(new_state.cpu().detach().numpy(), new_rawstate[:3]) # center at moved object
63 |
64 | if __name__ == "__main__":
65 | # Parse args.
66 | parser = argparse.ArgumentParser(description='pass args')
67 | parser.add_argument('--config', type=str, default="/../../configs/rawstate.yaml", help='config file')
68 |     parser.add_argument('--concept', type=str, default='above180', help='concept')
69 | args = parser.parse_args()
70 |
71 | # Load yaml parameters.
72 | here = os.path.dirname(os.path.abspath(__file__))
73 | with open(here+args.config, 'r') as stream:
74 | params = yaml.safe_load(stream)
75 |
76 | # Set random seed if it exists.
77 | if "seed" in params["data"].keys():
78 | torch.manual_seed(params["data"]["seed"])
79 | random.seed(params["data"]["seed"])
80 | np.random.seed(params["data"]["seed"])
81 |
82 | # Set up data and model parameters.
83 | data_path = here + params["data"]["data_dir"]
84 | concept = args.concept
85 |
86 | dataset = ObjectDataset(data_path+"/data.hdf5",\
87 | split_path=data_path+concept+'/test.txt',
88 | sample=False)
89 |
90 | # Sanity check transform.
91 | tests = 1000
92 | i=0
93 | while i < tests:
94 | idx = np.random.choice(range(len(dataset)))
95 | sample = dataset[idx]
96 | old_state = torch.cat((sample[0], torch.zeros((sample[0].shape[0],1))), dim=1)
97 | old_rawstate = sample[1]
98 |
99 | # Recenter around anchor.
100 | anchor_center = torch.mean(old_state[old_state[:, 4]==1,:3], axis=0)
101 | old_state[:, :3] -= anchor_center
102 | old_rawstate[:3] -= anchor_center # center around anchor
103 | old_rawstate[7:10] -= anchor_center # center around anchor
104 |
105 |         sanity_check(old_state, old_rawstate)
106 |         i += 1
--------------------------------------------------------------------------------
/src/datasets/split_data.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Split labeled concept data into balanced train and test sets.
7 | """
8 |
9 | import numpy as np
10 | import random
11 | import argparse
12 | import os, sys
13 | import glob
14 |
15 | sys.path.insert(1, '../')
16 |
17 | from src.utils.input_utils import Hdf5Cacher
18 |
19 | np.set_printoptions(precision=2)
20 |
21 |
22 | if __name__ == '__main__':
23 |     # Parse args.
24 | parser = argparse.ArgumentParser(description='pass args')
25 | parser.add_argument('--concept', type=str, default='above180', help='concept')
26 |
27 | args = parser.parse_args()
28 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
29 | args.data_dir = os.path.abspath(parent_dir + "/data/concept_shapenet/" + args.concept)
30 |
31 | # Take all examples and separate into 0 and 1.
32 | hdf5cacher = Hdf5Cacher(args.data_dir + "/label.hdf5", "r")
33 | examples = list(hdf5cacher._hdf5().keys())
34 |
35 | # Check if concept is upright.
36 | obj_idxes = None
37 | if args.concept in ["upright", "upright45", "ontop"]:
38 | obj_idxes = [0, 1, 2, 3, 4, 5, 132, 133, 134, 135, 136, 137, 12, 13, 14, 15, 16, 17, \
39 | 18, 19, 20, 21, 22, 23, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 52, 53, \
40 | 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, \
41 | 96, 97, 98, 99, 100, 101, 126, 127, 128, 129, 130, 131]
42 | elif args.concept in ["alignedvertical"]:
43 | obj_idxes = [0, 1, 2, 3, 4, 5, 132, 133, 134, 135, 136, 137, 12, 13, 14, 15, 16, 17, \
44 | 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 52, 53, 108, 109, 110, 111, 112, 113]
45 | elif args.concept in ["alignedhorizontal"]:
46 | obj_idxes = [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 54, 55, 56, 57, 58, 59, \
47 | 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 90, 91, 92, \
48 | 102, 103, 104, 105, 106, 107, 114, 115, 116, 117, 118, 119]
49 | elif args.concept in ["front", "front45"]:
50 | obj_idxes = [66, 67, 68, 69, 70, 71, 90, 91, 92, 126, 127, 128, 129, 130, 131]
51 | elif args.concept in ["left", "left45", "right", "right45"]:
52 | obj_idxes = [90, 91, 92]
53 |
54 | if obj_idxes is not None:
55 | restricted = []
56 | for example in examples:
57 | objs = example.split("_")
58 | if args.concept in ["alignedvertical", "alignedhorizontal"]:
59 | if int(objs[0]) in obj_idxes and int(objs[1]) in obj_idxes:
60 | restricted.append(example)
61 | elif args.concept in ["upright", "upright45"]:
62 | if int(objs[0]) in obj_idxes:
63 | restricted.append(example)
64 | else:
65 | if int(objs[1]) in obj_idxes:
66 | restricted.append(example)
67 | examples = restricted
68 |
69 | zero_examples = []
70 | one_examples = []
71 | for example in examples:
72 | sample = hdf5cacher.__getitem__(example)
73 | label = sample["label"].astype(np.float32)
74 | if np.round(label) == 1:
75 | one_examples.append(example)
76 | else:
77 | zero_examples.append(example)
78 | hdf5cacher.close()
79 |
80 | # Split into train, test.
81 | n_train, n_test = 80000, 20000
82 | random.shuffle(zero_examples)
83 | random.shuffle(one_examples)
84 | zero_train, zero_test = zero_examples[:int(0.8*len(zero_examples))], zero_examples[int(0.8*len(zero_examples)):]
85 | one_train, one_test = one_examples[:int(0.8*len(one_examples))], one_examples[int(0.8*len(one_examples)):]
86 | train = random.choices(zero_train, k=int(n_train/2)) + random.choices(one_train, k=int(n_train/2))
87 | test = random.choices(zero_test, k=int(n_test/2)) + random.choices(one_test, k=int(n_test/2))
88 | random.shuffle(train)
89 | random.shuffle(test)
90 |
91 | # Save the train and test files.
92 | train_split = args.data_dir + "/train.txt"
93 | with open(train_split, 'w') as f:
94 | for item in train:
95 | f.write("%s\n" % item)
96 | test_split = args.data_dir + "/test.txt"
97 | with open(test_split, 'w') as f:
98 | for item in test:
99 | f.write("%s\n" % item)
--------------------------------------------------------------------------------
/src/mpc/se3_optimization_rollout.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | import copy
7 | from storm_kit.mpc.rollout.se3_rollout import SE3Rollout
8 | from storm_kit.differentiable_robot_model.coordinate_transform import transform_point, CoordinateTransform
9 |
10 | from src.mpc.concept_cost import ConceptCost
11 | from src.utils.geom_utils import SegLabel
12 |
13 | class SE3OptimizationRollout(SE3Rollout):
14 | def __init__(self, exp_params, world_params=None,
15 | tensor_args={'device':'cpu','dtype':torch.float32}):
16 | super().__init__(exp_params, world_params, tensor_args)
17 |
18 | if(exp_params['cost']['concept']['weight'] > 0.0):
19 | self.concept_cost = ConceptCost(**exp_params['cost']['concept'], tensor_args=self.tensor_args)
20 |
21 | def cost_fn(self, state_dict, action_batch, no_coll=False, horizon_cost=True, return_dist=False):
22 | if return_dist:
23 | cost, rot_err_norm, goal_dist = super(SE3OptimizationRollout, self).cost_fn(state_dict, action_batch, no_coll, horizon_cost, return_dist)
24 | else:
25 | cost = super(SE3OptimizationRollout, self).cost_fn(state_dict, action_batch, no_coll, horizon_cost, return_dist)
26 |
27 | if(self.exp_params['cost']['concept']['weight'] > 0.0):
28 | # Get transform from state_batch
29 | state_batch = state_dict['state_seq']
30 | ee_pos_batch, ee_rot_batch = state_dict['ee_pos_seq'], state_dict['ee_rot_seq']
31 |
32 | # Separate points based on segmentation.
33 | pc = copy.deepcopy(self.pc)
34 | pc_seg = copy.deepcopy(self.pc_seg)
35 | moving_pc = pc[self.pc_seg==SegLabel.MOVING.value]
36 |
37 | # Get transform.
38 | pc_pose = CoordinateTransform(trans=self.pc_pose[0:3,3].unsqueeze(0),
39 | rot=self.pc_pose[0:3,0:3].unsqueeze(0),
40 | tensor_args=self.tensor_args)
41 | pc_pose_inv = pc_pose.inverse()
42 | batch_pose = CoordinateTransform(trans=ee_pos_batch, rot=ee_rot_batch, tensor_args=self.tensor_args)
43 | old_T_new = batch_pose.multiply_transform(pc_pose_inv)
44 |
45 | # Apply transform on the pointcloud to get it in the world state?
46 | new_moving_pc = torch.stack([transform_point(moving_pc[i], old_T_new._rot, old_T_new._trans) for i in range(moving_pc.shape[0])],dim=2)
47 |
48 | # Concatenate with the non-object points and pass to the network.
49 | pc = pc.unsqueeze(0).unsqueeze(0).repeat(new_moving_pc.shape[0], new_moving_pc.shape[1], 1, 1)
50 | pc[:,:,self.pc_seg==SegLabel.MOVING.value,:] = new_moving_pc
51 |
52 | # Add one-hot segmentation.
53 | anchor_hot = (pc_seg==SegLabel.ANCHOR.value).int().unsqueeze(0).unsqueeze(0).repeat(new_moving_pc.shape[0], new_moving_pc.shape[1], 1)
54 | moving_hot = (pc_seg==SegLabel.MOVING.value).int().unsqueeze(0).unsqueeze(0).repeat(new_moving_pc.shape[0], new_moving_pc.shape[1], 1)
55 | points = torch.cat((pc, moving_hot.unsqueeze(-1), anchor_hot.unsqueeze(-1)),dim=-1).float()
56 | points_reshaped = points.reshape((points.shape[0]*points.shape[1],points.shape[2],points.shape[3]))
57 | with torch.cuda.amp.autocast(enabled=False):
58 | c_cost = self.concept_cost.forward(points_reshaped)
59 | c_cost = c_cost.reshape((points.shape[0], points.shape[1]))
60 | cost += c_cost
61 | if(return_dist):
62 | return cost, rot_err_norm, goal_dist
63 | return cost
64 |
65 | def update_params(self, goal_state=None, goal_ee_pos=None, goal_ee_rot=None, goal_ee_quat=None,
66 | pc=None, pc_seg=None, pc_pose=None):
67 | """
68 | Update params for the cost terms and dynamics model.
69 | goal_state: n_dofs
70 | goal_ee_pos: 3
71 | goal_ee_rot: 3,3
72 | goal_ee_quat: 4
73 | pc: N_points, 3
74 | pc_seg: N_points
75 | """
76 | super(SE3OptimizationRollout, self).update_params(goal_state, goal_ee_pos, goal_ee_rot, goal_ee_quat)
77 | if (pc is not None):
78 | self.pc = torch.as_tensor(pc, **self.tensor_args)
79 | if (pc_seg is not None):
80 | self.pc_seg = torch.as_tensor(pc_seg, **self.tensor_args)
81 | if (pc_pose is not None):
82 | self.pc_pose = torch.as_tensor(pc_pose, **self.tensor_args)
83 | return True
84 |
--------------------------------------------------------------------------------
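For reference, here is a small standalone PyTorch sketch (toy sizes and placeholder segmentation values; not repo code) of the tensor bookkeeping in cost_fn above and in its twin in robot_planner_rollout.py: the scene point cloud is tiled over the rollout batch and horizon, one-hot moving/anchor channels are appended, and the result is flattened into the (x, y, z, moving, anchor) layout the concept network consumes.

import torch

B, H, N = 4, 10, 2048             # rollout batch, horizon length, points in the scene (toy sizes)
MOVING, ANCHOR = 2, 1             # stand-ins for SegLabel.MOVING.value / SegLabel.ANCHOR.value
pc = torch.rand(N, 3)             # xyz of every scene point
pc_seg = torch.randint(0, 3, (N,))

pc_batch = pc.unsqueeze(0).unsqueeze(0).repeat(B, H, 1, 1)                        # (B, H, N, 3)
moving_hot = (pc_seg == MOVING).int().unsqueeze(0).unsqueeze(0).repeat(B, H, 1)   # (B, H, N)
anchor_hot = (pc_seg == ANCHOR).int().unsqueeze(0).unsqueeze(0).repeat(B, H, 1)   # (B, H, N)
points = torch.cat((pc_batch, moving_hot.unsqueeze(-1), anchor_hot.unsqueeze(-1)), dim=-1).float()
points_reshaped = points.reshape(B * H, N, 5)   # flattened (B*H, N, 5) batch for the concept network
print(points.shape, points_reshaped.shape)      # torch.Size([4, 10, 2048, 5]) torch.Size([40, 2048, 5])
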
/src/world/robot_world.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Robot world with a simulated robot arm plus moving and anchor objects, on the cpu or the gpu.
7 | """
8 | from isaacgym import gymapi
9 |
10 | import numpy as np
11 | import random
12 | import yaml
13 | import argparse
14 | import glob
15 | import os, sys
16 | import copy
17 |
18 | sys.path.insert(1, '../')
19 |
20 | from storm_kit.gym.core import World
21 | from storm_kit.gym.sim_robot import RobotSim
22 | from storm_kit.util_file import get_configs_path, get_gym_configs_path, join_path, load_yaml, get_assets_path
23 | from quaternion import from_euler_angles, as_float_array
24 |
25 | from src.utils.camera_utils import spawn_camera
26 | from src.utils.geom_utils import SegLabel
27 | from src.utils.gym_utils import WorldObject
28 | from src.world.basic_world import BasicWorld
29 |
30 | np.set_printoptions(precision=3)
31 |
32 |
33 | class RobotWorld(BasicWorld):
34 | def __init__(self, args, asset_root):
35 | super(RobotWorld, self).__init__(args, asset_root)
36 |
37 | robot_file = args.robot + '.yml'
38 | robot_yml = join_path(get_gym_configs_path(), robot_file)
39 | with open(robot_yml) as file:
40 | robot_params = yaml.safe_load(file)
41 |
42 | # Define robot parameters and create robot simulation.
43 | sim_params = robot_params['sim_params']
44 | sim_params['asset_root'] = get_assets_path()
45 | sim_params['collision_model'] = None
46 | #sim_params["init_state"] = [0.8, 0.3, 0.0, -1.57, 0.0, 1.86, 0.]
47 | device = 'cuda' if self.cuda else 'cpu'
48 |
49 | # Create gym environment.
50 | spacing = 5.0
51 | lower = gymapi.Vec3(-spacing, 0.0, -spacing)
52 | upper = gymapi.Vec3(spacing, spacing, spacing)
53 | self.env_ptr = self.gym.create_env(self.sim, lower, upper, 1)
54 | self.gym_instance.env_list.append(self.env_ptr)
55 |
56 | self.robot_sim = RobotSim(gym_instance=self.gym, sim_instance=self.sim, **sim_params, device=device)
57 | self.robot_ptr = self.robot_sim.spawn_robot(self.env_ptr, sim_params['robot_pose'], coll_id=2)
58 |
59 | # Create world.
60 | self.w_T_r = copy.deepcopy(self.robot_sim.spawn_robot_pose)
61 | self.world = World(self.gym, self.sim, self.env_ptr, self.world_params, w_T_r=self.w_T_r)
62 |
63 | # Add objects.
64 | obj1_idx, obj2_idx = random.sample(range(self.num_objs), 2)
65 | obj1_idx = 42 #18
66 | obj2_idx = 6
67 | obj_handle = self.gym.create_actor(self.env_ptr, self.obj_assets[obj1_idx], gymapi.Transform() * self.w_T_r,
68 | 'moving', 2, 2, SegLabel.MOVING.value)
69 | obj_body_handle = self.gym.get_actor_rigid_body_handle(self.env_ptr, obj_handle, 0)
70 | self.gym.set_rigid_body_color(self.env_ptr, obj_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0.1, 0.8, 0.1))
71 | self.world.moving = WorldObject(obj_handle, obj_body_handle, obj1_idx)
72 |
73 | anc_pose = gymapi.Transform(gymapi.Vec3(-0.9, 0.1, -0.0), gymapi.Quat(0, 0, 0, 1)) * self.w_T_r
74 | obj_handle = self.gym.create_actor(self.env_ptr, self.obj_assets[obj2_idx], anc_pose,
75 | 'anchor', 2, 2, SegLabel.ANCHOR.value)
76 | obj_body_handle = self.gym.get_actor_rigid_body_handle(self.env_ptr, obj_handle, 0)
77 | self.gym.set_rigid_body_color(self.env_ptr, obj_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0.8, 0.1, 0.1))
78 | self.world.anchor = WorldObject(obj_handle, obj_body_handle, obj2_idx)
79 |
80 | # Spawn camera.
81 | camera_pose = np.array([0.0,-2.8, 0.3,0.707,0.0,0.0,0.707])
82 | q = as_float_array(from_euler_angles(-90.0 * 0.01745, 90.0 * 0.01745, 90 * 0.01745))
83 | camera_pose[3:] = np.array([q[1], q[2], q[3], q[0]])
84 | spawn_camera(self.world, 60, 640, 480, cuda=self.cuda, camera_pose=camera_pose)
85 |
86 | if self.cuda:
87 | self.gym.prepare_sim(self.sim)
88 | self._root_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
89 | else:
90 | self._root_tensor = None
91 | print("Worlds initialized.")
92 |
93 | if __name__ == '__main__':
94 | # instantiate empty gym:
95 | parser = argparse.ArgumentParser(description='pass args')
96 | parser.add_argument('--headless', action='store_true', default=False, help='headless gym')
97 | parser.add_argument('--cuda', action='store_true', default=False, help='cuda')
98 | args = parser.parse_args()
99 | args.envs = 1
100 |
101 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
102 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
103 | object_world = RobotWorld(args, asset_root)
--------------------------------------------------------------------------------
/src/mpc/robot_planner_rollout.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | import copy
7 | from storm_kit.mpc.rollout.arm_reacher import ArmReacher
8 | from storm_kit.differentiable_robot_model.coordinate_transform import transform_point, CoordinateTransform
9 |
10 | from src.mpc.concept_cost import ConceptCost
11 | from src.utils.geom_utils import SegLabel
12 |
13 | class RobotPlannerRollout(ArmReacher):
14 | def __init__(self, exp_params, world_params=None,
15 | tensor_args={'device':'cpu','dtype':torch.float32}):
16 | super().__init__(exp_params=exp_params, world_params=world_params, tensor_args=tensor_args)
17 |
18 | if(exp_params['cost']['concept']['weight'] > 0.0):
19 | self.concept_cost = ConceptCost(**exp_params['cost']['concept'], tensor_args=self.tensor_args)
20 |
21 | def cost_fn(self, state_dict, action_batch, no_coll=False, horizon_cost=True, return_dist=False):
22 |         if return_dist:
23 |             cost, rot_err_norm, goal_dist = super(RobotPlannerRollout, self).cost_fn(state_dict, action_batch, no_coll, horizon_cost, return_dist)
24 |         else:
25 |             cost = super(RobotPlannerRollout, self).cost_fn(state_dict, action_batch, no_coll, horizon_cost, return_dist)
26 |
24 | if(self.exp_params['cost']['concept']['weight'] > 0.0):
25 | # Get transform from state_batch
26 | state_batch = state_dict['state_seq']
27 | ee_pos_batch, ee_rot_batch = state_dict['ee_pos_seq'], state_dict['ee_rot_seq']
28 |
29 | # Separate points based on segmentation.
30 | pc = copy.deepcopy(self.pc)
31 | pc_seg = copy.deepcopy(self.pc_seg)
32 | moving_pc = pc[self.pc_seg==SegLabel.MOVING.value]
33 |
34 | # Get transform.
35 | pc_pose = CoordinateTransform(trans=self.pc_pose[0:3,3].unsqueeze(0),
36 | rot=self.pc_pose[0:3,0:3].unsqueeze(0),
37 | tensor_args=self.tensor_args)
38 | pc_pose_inv = pc_pose.inverse()
39 | batch_pose = CoordinateTransform(trans=ee_pos_batch, rot=ee_rot_batch, tensor_args=self.tensor_args)
40 | old_T_new = batch_pose.multiply_transform(pc_pose_inv)
41 |
42 | # Apply transform on the pointcloud to get it in the world state?
43 | new_moving_pc = torch.stack([transform_point(moving_pc[i], old_T_new._rot, old_T_new._trans) for i in range(moving_pc.shape[0])],dim=2)
44 |
45 | # Concatenate with the non-object points and pass to the network.
46 | pc = pc.unsqueeze(0).unsqueeze(0).repeat(new_moving_pc.shape[0], new_moving_pc.shape[1], 1, 1)
47 | pc[:,:,self.pc_seg==SegLabel.MOVING.value,:] = new_moving_pc
48 |
49 | # Add one-hot segmentation.
50 | moving_hot = (pc_seg==SegLabel.MOVING.value).int().unsqueeze(0).unsqueeze(0).repeat(new_moving_pc.shape[0], new_moving_pc.shape[1], 1)
51 | anchor_hot = (pc_seg==SegLabel.ANCHOR.value).int().unsqueeze(0).unsqueeze(0).repeat(new_moving_pc.shape[0], new_moving_pc.shape[1], 1)
52 | points = torch.cat((pc, moving_hot.unsqueeze(-1), anchor_hot.unsqueeze(-1)),dim=-1).float()
53 | points_reshaped = points.reshape((points.shape[0]*points.shape[1],points.shape[2],points.shape[3]))
54 |
55 | with torch.cuda.amp.autocast(enabled=False):
56 | #c_cost = self.concept_cost.forward(points_reshaped)
57 | #c_cost = self.concept_cost.forward(points_reshaped[:int(points_reshaped.shape[0]/2)])
58 | #c_cost = torch.cat((c_cost,self.concept_cost.forward(points_reshaped[int(points_reshaped.shape[0]/2):])))
59 | c_cost = torch.stack([self.concept_cost.forward(points[:,i,:,:]) for i in range(points.shape[1])], dim=1)
60 | #c_cost = c_cost.reshape((points.shape[0], points.shape[1]))
61 | print(torch.sum(c_cost, axis=1))
62 | cost += c_cost
63 | if(return_dist):
64 | return cost, rot_err_norm, goal_dist
65 | return cost
66 |
67 |
68 | def update_params(self, goal_state=None, goal_ee_pos=None, goal_ee_rot=None, goal_ee_quat=None,
69 | pc=None, pc_seg=None, pc_pose=None):
70 | """
71 | Update params for the cost terms and dynamics model.
72 | goal_state: n_dofs
73 | goal_ee_pos: 3
74 | goal_ee_rot: 3,3
75 | goal_ee_quat: 4
76 | pc: N_points, 3
77 | pc_seg: N_points
78 |
79 | """
80 | super(RobotPlannerRollout, self).update_params(goal_state=goal_state, goal_ee_pos=goal_ee_pos,
81 | goal_ee_rot=goal_ee_rot, goal_ee_quat=goal_ee_quat)
82 | if (pc is not None):
83 | self.pc = torch.as_tensor(pc, **self.tensor_args)
84 | if (pc_seg is not None):
85 | self.pc_seg = torch.as_tensor(pc_seg, **self.tensor_args)
86 | if (pc_pose is not None):
87 | self.pc_pose = torch.as_tensor(pc_pose, **self.tensor_args)
88 | return True
89 |
--------------------------------------------------------------------------------
/src/utils/input_utils.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import numpy as np
6 | import torch
7 |
8 | from PIL import Image
9 | import io
10 | import os
11 | import sys
12 | import h5py
13 | import errno
14 |
15 | sys.path.insert(1, '../')
16 |
17 | from src.utils.geom_utils import *
18 |
19 | from PIL import ImageFile
20 | ImageFile.LOAD_TRUNCATED_IMAGES = True
21 |
22 |
23 | class Hdf5Cacher:
24 | r"""**Save and load data from disk using** `h5py` **module.**
25 |     Data will be saved as an HDF5 file at the specified path. If the directory does not exist,
26 | it will be created.
27 | Tips for using HDF5 with multiprocessing (e.g. pytorch)
28 | https://github.com/pytorch/pytorch/issues/11929
29 | Attributes
30 | ----------
31 | path: str
32 | Path to the file where samples will be saved and loaded from.
33 | """
34 |
35 | def __init__(self, path: str, access='a'):
36 | self.path = path
37 | try:
38 | os.makedirs(os.path.dirname(self.path))
39 | except OSError as e:
40 | if e.errno != errno.EEXIST:
41 | raise
42 | self.hdf5 = h5py.File(self.path, access)
43 |
44 | def _hdf5(self):
45 | return self.hdf5
46 |
47 | def __contains__(self, key: str) -> bool:
48 |         """**Check whether** `key` **exists in the HDF5 file.**
49 |         A key that is already present is considered cached, so its data can be
50 |         reused between multiple runs (if you ensure repeatable sampling).
51 | """
52 | contains = key in self._hdf5()
53 | return contains
54 |
55 | def __setitem__(self, key: str, data: dict):
56 |         """**Save** `data` **under** `key`.
57 |         Each entry of `data` is stored as a dataset inside an HDF5 group named `key`.
58 | """
59 | if key in self._hdf5().keys():
60 | del self._hdf5()[key]
61 | grp = self._hdf5().create_group(key)
62 | if data is not None:
63 |             for k, val in data.items():
64 |                 if val is not None:
65 |                     grp.create_dataset(k, data=val)
66 |
67 | def __getitem__(self, key: str):
68 | """**Retrieve** `data` **specified by** `key`.
69 | """
70 | data = dict()
71 | grp = self._hdf5()[key]
72 |         for k in grp.keys():
73 |             data[k] = grp[k][()]
74 | return data
75 |
76 | def clean(self) -> None:
77 | """**Remove file** `self.path`.
78 | Behaves just like `os.remove`, but won't act if file does not exist.
79 | """
80 | if os.path.isfile(self.path):
81 | os.remove(self.path)
82 |
83 | def close(self) -> None:
84 | """
85 | Close file
86 | """
87 | self.hdf5.close()
88 |
89 | # Data saving and loading utils.
90 | def get_png(img):
91 | '''
92 | Save a numpy array as a PNG, then get it out as a binary blob
93 | '''
94 | im = Image.fromarray(img)
95 | output = io.BytesIO()
96 | im.save(output, format="PNG")
97 | return output.getvalue()
98 |
99 | def png_to_numpy(png):
100 | stream = io.BytesIO(png)
101 | im = Image.open(stream)
102 | return np.array(im)
103 |
104 | def transform_input(x, concept):
105 | # Ask the person questions about whether the concept cares about the moving/anchor absolute pose.
106 | single_object_matters = False
107 | absolute_poses_matter = False
108 | obj_bbs_matter = False
109 | if concept in ["above180", "above45", "abovebb"]:
110 | absolute_poses_matter = True
111 | if concept in ["upright", "upright45"]:
112 | single_object_matters = True
113 | if concept in ["abovebb"]:
114 | obj_bbs_matter = True
115 | rel_pose = relative_pose(x[:, :7], x[:, 7:14])
116 | rel_diff = relative_difference(x[:, :7], x[:, 7:14])
117 |
118 | # Based on the answer to the questions, construct input space.
119 | if single_object_matters:
120 | # Concept doesn't have an anchor.
121 | x_transform = x[:, :7]
122 | else:
123 | if absolute_poses_matter:
124 | # Concept is not relative, rather it is wrt the world frame.
125 | x_transform = rel_diff
126 | else:
127 | # Concept is relative: consider moving wrt to anchor.
128 | x_transform = rel_pose
129 | if obj_bbs_matter:
130 | # Concept cares about the bounding boxes of the objects, not just the poses.
131 | x_transform = torch.cat((x_transform, x[:,17:20] - x[:,14:17], x[:,23:] - x[:,20:23]), axis=1)
132 | return x_transform
133 |
134 | def make_weights_for_balanced_classes(inputs, nclasses):
135 | count = [0] * nclasses
136 | for item in inputs:
137 | count[int(item[1].item())] += 1
138 | weight_per_class = [0.] * nclasses
139 | N = float(sum(count))
140 | for i in range(nclasses):
141 | weight_per_class[i] = N/float(count[i])
142 | weight = [0] * len(inputs)
143 | for idx, val in enumerate(inputs):
144 | weight[idx] = weight_per_class[int(val[1].item())]
145 | return weight
--------------------------------------------------------------------------------
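A minimal usage sketch for the cacher above (file path, key, and pose values are illustrative only), together with the raw-state layout that transform_input slices into. The layout follows get_raw_state in src/utils/gym_utils.py: moving pose, anchor pose, then the two flattened bounding boxes.

import numpy as np

from src.utils.input_utils import Hdf5Cacher

# 26-dim raw state written by the dataset scripts:
#   [0:7]   moving pose  (x, y, z, qx, qy, qz, qw)
#   [7:14]  anchor pose  (x, y, z, qx, qy, qz, qw)
#   [14:20] moving bounding box (min xyz, max xyz)
#   [20:26] anchor bounding box (min xyz, max xyz)
# transform_input indexes exactly these ranges when building concept features.
raw_state = np.zeros(26, dtype=np.float32)
raw_state[6] = 1.0    # identity quaternion (w) for the moving object
raw_state[13] = 1.0   # identity quaternion (w) for the anchor object

cacher = Hdf5Cacher("/tmp/example_data.hdf5", "w")   # hypothetical path
cacher["42_6_0"] = {"raw_state": raw_state}          # dict of arrays stored under one key
sample = cacher["42_6_0"]                            # -> {"raw_state": np.ndarray of shape (26,)}
assert sample["raw_state"].shape == (26,)
cacher.close()
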
/src/datasets/ycb_downloader.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | #Copyright 2015 Yale University - Grablab
6 | #Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\
7 | #The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 | #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9 |
10 | import os
11 | import sys
12 | import json
13 | import urllib.request
14 |
15 | output_directory = "/home/abobu/Project/ycb_data" #"./ycb"
16 |
17 | # You can either set this to "all" or a list of the objects that you'd like to
18 | # download.
19 | objects_to_download = "all"
20 | #objects_to_download = ["002_master_chef_can", "003_cracker_box"]
21 | #objects_to_download = ["002_master_chef_can"]
22 |
23 | # You can edit this list to only download certain kinds of files.
24 | # 'berkeley_rgbd' contains all of the depth maps and images from the Carmines.
25 | # 'berkeley_rgb_highres' contains all of the high-res images from the Canon cameras.
26 | # 'berkeley_processed' contains all of the segmented point clouds and textured meshes.
27 | # 'google_16k' contains google meshes with 16k vertices.
28 | # 'google_64k' contains google meshes with 64k vertices.
29 | # 'google_512k' contains google meshes with 512k vertices.
30 | # See the website for more details.
31 | files_to_download = ["berkeley_rgbd", "berkeley_rgb_highres", "berkeley_processed", "google_16k", "google_64k", "google_512k"]
32 | #files_to_download = ["berkeley_processed", "berkeley_rgbd"]
33 |
34 | # Extract all files from the downloaded .tgz, and remove .tgz files.
35 | # If false, will just download all .tgz files to output_directory
36 | extract = True
37 |
38 | base_url = "http://ycb-benchmarks.s3-website-us-east-1.amazonaws.com/data/"
39 | objects_url = base_url + "objects.json"
40 |
41 | if not os.path.exists(output_directory):
42 | os.makedirs(output_directory)
43 |
44 | def fetch_objects(url):
45 | response = urllib.request.urlopen(url)
46 | html = response.read()
47 | objects = json.loads(html)
48 | return objects["objects"]
49 |
50 | def download_file(url, filename):
51 | u = urllib.request.urlopen(url)
52 | f = open(filename, 'wb')
53 | meta = u.info()
54 |     file_size = int(meta.get("Content-Length"))
55 | print("Downloading: %s (%s MB)" % (filename, file_size/1000000.0))
56 |
57 | file_size_dl = 0
58 | block_sz = 65536
59 | while True:
60 | buffer = u.read(block_sz)
61 | if not buffer:
62 | break
63 |
64 | file_size_dl += len(buffer)
65 | f.write(buffer)
66 | status = r"%10d [%3.2f%%]" % (file_size_dl/1000000.0, file_size_dl * 100. / file_size)
67 | status = status + chr(8)*(len(status)+1)
68 | print(status)
69 | f.close()
70 |
71 | def tgz_url(object, type):
72 | if type in ["berkeley_rgbd", "berkeley_rgb_highres"]:
73 | return base_url + "berkeley/{object}/{object}_{type}.tgz".format(object=object,type=type)
74 | elif type in ["berkeley_processed"]:
75 | return base_url + "berkeley/{object}/{object}_berkeley_meshes.tgz".format(object=object,type=type)
76 | else:
77 | return base_url + "google/{object}_{type}.tgz".format(object=object,type=type)
78 |
79 | def extract_tgz(filename, dir):
80 | tar_command = "tar -xzf {filename} -C {dir}".format(filename=filename,dir=dir)
81 | os.system(tar_command)
82 | os.remove(filename)
83 |
84 | def check_url(url):
85 | try:
86 | request = urllib.request.Request(url)
87 | request.get_method = lambda : 'HEAD'
88 | response = urllib.request.urlopen(request)
89 | return True
90 | except Exception as e:
91 | return False
92 |
93 |
94 | if __name__ == "__main__":
95 |
96 | objects = fetch_objects(objects_url)
97 |
98 | for object in objects:
99 | if objects_to_download == "all" or object in objects_to_download:
100 | for file_type in files_to_download:
101 | url = tgz_url(object, file_type)
102 | if not check_url(url):
103 | continue
104 | filename = "{path}/{object}_{file_type}.tgz".format(path=output_directory,
105 | object=object,
106 | file_type=file_type)
107 | download_file(url, filename)
108 | if extract:
109 | extract_tgz(filename, output_directory)
--------------------------------------------------------------------------------
/src/utils/camera_utils.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Code adapted from https://gitlab-master.nvidia.com/srl/stochastic-control/-/tree/main
7 |
8 | """
9 | from isaacgym import gymapi
10 | from isaacgym import gymutil
11 | from isaacgym import gymtorch
12 |
13 | import numpy as np
14 | import os
15 | import random
16 |
17 | from quaternion import *
18 | from src.utils.input_utils import get_png
19 | from src.utils.gym_utils import gymT_to_quatT
20 |
21 | def move_camera(world, camera_pose=None):
22 | """
23 |     Move the camera to a random horizontal view.
24 | """
25 | gym = world.gym
26 | sim = world.sim
27 | ch = world.camera_handle
28 | ep = world.env_ptr
29 |
30 | if camera_pose is None:
31 | # Pick random location.
32 | while True:
33 | p = np.array([random.uniform(-3.5, 3.5), random.uniform(-3.5, 3.5), 0.3])
34 | if np.linalg.norm(p) < 3.5 and np.linalg.norm(p) > 2.7:
35 | break
36 |
37 | # Look at target.
38 | cam_loc = gymapi.Vec3(p[0], p[1], 0.3)
39 | target_loc = gymapi.Vec3(0, 0, 0)
40 | gym.set_camera_location(ch, ep, cam_loc, target_loc)
41 |
42 | camera_pose = gym.get_camera_transform(sim, ep, ch)
43 | e = as_euler_angles(quaternion(*[getattr(camera_pose.r, k) for k in 'wxyz']))
44 | q = as_float_array(from_euler_angles(e[0], e[1], 90 * 0.01745))
45 | camera_pose = np.hstack((p, np.array([q[1], q[2], q[3], q[0]])))
46 |
47 | camera_pose = gymapi.Transform(
48 | gymapi.Vec3(camera_pose[0], camera_pose[1], camera_pose[2]),
49 | gymapi.Quat(camera_pose[3], camera_pose[4], camera_pose[5], camera_pose[6])
50 | )
51 | world_camera_pose = world.robot_pose * camera_pose
52 |
53 | gym.set_camera_transform(ch, ep, world_camera_pose)
54 |
55 |
56 | def spawn_camera(world, fov, width, height, camera_pose=None, cuda=False):
57 | """
58 | Spawn a camera in the environment.
59 | Args:
60 | fov, width, height: camera params
61 |         camera_pose: Camera pose w.r.t. the robot pose [x, y, z, qx, qy, qz, qw]
62 | """
63 | camera_props = gymapi.CameraProperties()
64 | camera_props.horizontal_fov = fov
65 | camera_props.height = height
66 | camera_props.width = width
67 | camera_props.use_collision_geometry = False
68 | if cuda:
69 | camera_props.enable_tensors = True
70 |
71 | camera_handle = world.gym.create_camera_sensor(world.env_ptr, camera_props)
72 | world.camera_handle = camera_handle
73 | move_camera(world, camera_pose)
74 |
75 | if cuda:
76 | world.cam_tensors = get_camera_tensors(world)
77 |
78 | def get_camera_tensors(world):
79 | gym = world.gym
80 | sim = world.sim
81 | ch = world.camera_handle
82 | ep = world.env_ptr
83 |
84 | color_image = gym.get_camera_image_gpu_tensor(sim, ep, ch, gymapi.IMAGE_COLOR)
85 | depth_image = gym.get_camera_image_gpu_tensor(sim, ep, ch, gymapi.IMAGE_DEPTH)
86 | segmentation = gym.get_camera_image_gpu_tensor(sim, ep, ch, gymapi.IMAGE_SEGMENTATION)
87 |
88 | return (gymtorch.wrap_tensor(color_image), gymtorch.wrap_tensor(depth_image), gymtorch.wrap_tensor(segmentation))
89 |
90 | def observe_camera(world, cuda=False):
91 | gym = world.gym
92 | sim = world.sim
93 | ch = world.camera_handle
94 | ep = world.env_ptr
95 |
96 | if not cuda:
97 | gym.render_all_camera_sensors(sim)
98 |
99 | proj_matrix = gym.get_camera_proj_matrix(sim, ep, ch)
100 | camera_pose = gym.get_camera_transform(sim, ep, ch)
101 | view_matrix = gymT_to_quatT(camera_pose).cpu().numpy()
102 | if cuda:
103 | color_image = world.cam_tensors[0].cpu().numpy()
104 | depth_image = world.cam_tensors[1].cpu().numpy()
105 | segmentation = world.cam_tensors[2].cpu().numpy()
106 | else:
107 | color_image = gym.get_camera_image(sim, ep, ch, gymapi.IMAGE_COLOR)
108 | depth_image = gym.get_camera_image(sim, ep, ch, gymapi.IMAGE_DEPTH)
109 | segmentation = gym.get_camera_image(sim, ep, ch, gymapi.IMAGE_SEGMENTATION)
110 |
111 | color_image = np.reshape(color_image, [480, 640, 4])[:, :, :3]
112 | depth_image[np.abs(depth_image) == np.inf] = 0
113 | depth_min = depth_image.min()
114 | depth_max = depth_image.max()
115 | depth_image = (depth_image - depth_min) * 65535.0 / (depth_max - depth_min)
116 |
117 | camera_data = {'color':color_image, 'depth':depth_image, 'mask':segmentation, 'proj_matrix':proj_matrix,
118 | 'view_matrix':view_matrix, "depth_min": depth_min, "depth_max":depth_max}
119 | return camera_data
120 |
121 | def get_camera_dict(camera_data):
122 | camera_dict = {}
123 | camera_dict["rgb"] = [get_png(camera_data["color"].astype(np.uint8))]
124 | camera_dict["depth"] = [get_png(camera_data["depth"].astype(np.uint16))]
125 | camera_dict["mask"] = [get_png(camera_data["mask"].astype(np.uint16))]
126 | camera_dict["proj_matrix"] = camera_data["proj_matrix"]
127 | camera_dict["view_matrix"] = camera_data["view_matrix"]
128 | camera_dict["depth_min"] = camera_data["depth_min"]
129 | camera_dict["depth_max"] = camera_data["depth_max"]
130 | return camera_dict
--------------------------------------------------------------------------------
/src/utils/gym_utils.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import numpy as np
6 | import open3d
7 | import random
8 |
9 | from isaacgym import gymapi
10 | from isaacgym import gymtorch
11 |
12 | import torch
13 |
14 | from storm_kit.differentiable_robot_model.coordinate_transform import quaternion_to_matrix
15 | from quaternion import from_euler_angles, as_float_array
16 |
17 |
18 | class WorldObject(object):
19 | def __init__(self, agent_handle, body_handle, idx):
20 | self.agent_handle = agent_handle
21 | self.body_handle = body_handle
22 | self.idx = idx
23 |
24 |
25 | def gymT_to_quatT(gymT):
26 | rotT = torch.eye(4)
27 | quat = torch.tensor([gymT.r.w, gymT.r.x, gymT.r.y, gymT.r.z]).unsqueeze(0)
28 | rot = quaternion_to_matrix(quat)
29 | rotT[0,3] = gymT.p.x
30 | rotT[1,3] = gymT.p.y
31 | rotT[2,3] = gymT.p.z
32 | rotT[:3,:3] = rot[0]
33 | return rotT
34 |
35 | def get_raw_state(object_world, world):
36 | if object_world._root_tensor is not None:
37 | root_tensor = gymtorch.wrap_tensor(object_world._root_tensor)
38 | root_positions = root_tensor[:, 0:3]
39 | root_orientations = root_tensor[:, 3:7]
40 | anchor_idx = world.gym.get_actor_index(world.env_ptr, world.anchor.agent_handle, gymapi.DOMAIN_SIM)
41 | moving_idx = world.gym.get_actor_index(world.env_ptr, world.moving.agent_handle, gymapi.DOMAIN_SIM)
42 |
43 | ap = root_positions[anchor_idx].cpu().numpy()
44 | ao = root_orientations[anchor_idx].cpu().numpy()
45 | mp = root_positions[moving_idx].cpu().numpy()
46 | mo = root_orientations[moving_idx].cpu().numpy()
47 | poses = np.hstack((mp, mo, ap, ao))
48 | else:
49 | ap = world.get_pose(world.anchor.agent_handle)
50 | mp = world.get_pose(world.moving.agent_handle)
51 | poses = np.array([mp.p.x, mp.p.y, mp.p.z, mp.r.x, mp.r.y, mp.r.z, mp.r.w,
52 | ap.p.x, ap.p.y, ap.p.z, ap.r.x, ap.r.y, ap.r.z, ap.r.w])
53 |
54 | obj1_mesh, obj2_mesh = object_world.obj_meshes[world.moving.idx], object_world.obj_meshes[world.anchor.idx]
55 | obj1_bb, obj2_bb = obj1_mesh.bounds, obj2_mesh.bounds
56 | return np.hstack((poses, np.ravel(obj1_bb), np.ravel(obj2_bb)))
57 |
58 | def move_agent_to_random_pose(world, agent_handle, _root_tensor=None):
59 | # Set data generation boundary.
60 | limits = [(-0.15, 0.15), (-0.15, 0.15), (0.2, 0.5)]
61 |
62 | pos = [random.uniform(l[0], l[1]) for l in limits]
63 | ang = [random.uniform(-np.pi, np.pi) for l in range(3)]
64 | q = as_float_array(from_euler_angles(ang[0], ang[1], ang[2]))
65 | target_pose = gymapi.Transform(gymapi.Vec3(pos[0], pos[1], pos[2]),
66 | gymapi.Quat(q[1], q[2], q[3], q[0]))
67 | move_agent_to_pose(world, agent_handle, target_pose, _root_tensor=_root_tensor)
68 |
69 | def move_all_active_agents_to_random_pose(object_world):
70 | actor_idxes = []
71 | for idx, world in enumerate(object_world.worlds):
72 | move_agent_to_random_pose(world, world.anchor.agent_handle, object_world._root_tensor)
73 | move_agent_to_random_pose(world, world.moving.agent_handle, object_world._root_tensor)
74 | actor_idxes.append(world.gym.get_actor_index(world.env_ptr, world.anchor.agent_handle, gymapi.DOMAIN_SIM))
75 | actor_idxes.append(world.gym.get_actor_index(world.env_ptr, world.moving.agent_handle, gymapi.DOMAIN_SIM))
76 | if object_world._root_tensor is not None:
77 | actor_idxes = torch.tensor(actor_idxes, dtype=torch.int32, device="cuda:0")
78 | object_world.gym.set_actor_root_state_tensor_indexed(object_world.sim, object_world._root_tensor,
79 | gymtorch.unwrap_tensor(actor_idxes), len(actor_idxes))
80 |
81 | def move_agent_to_pose(world, agent_handle, target_pose, _root_tensor=None):
82 | if _root_tensor is not None:
83 | obj_idx = world.gym.get_actor_index(world.env_ptr, agent_handle, gymapi.DOMAIN_SIM)
84 |
85 | root_tensor = gymtorch.wrap_tensor(_root_tensor)
86 | root_positions = root_tensor[:, 0:3]
87 | root_orientations = root_tensor[:, 3:7]
88 |
89 | root_positions[obj_idx] = torch.tensor([target_pose.p.x, target_pose.p.y, target_pose.p.z])
90 | root_orientations[obj_idx] = torch.tensor([target_pose.r.x, target_pose.r.y, target_pose.r.z, target_pose.r.w])
91 | else:
92 | world.gym.set_rigid_transform(world.env_ptr, agent_handle, world.robot_pose * target_pose)
93 |
94 | def move_all_active_agents_to_pose(object_world, target_pose):
95 | actor_idxes = []
96 | for idx, world in enumerate(object_world.worlds):
97 | move_agent_to_pose(world, world.anchor.agent_handle, target_pose, _root_tensor=object_world._root_tensor)
98 | move_agent_to_pose(world, world.moving.agent_handle, target_pose, _root_tensor=object_world._root_tensor)
99 | actor_idxes.append(world.gym.get_actor_index(world.env_ptr, world.anchor.agent_handle, gymapi.DOMAIN_SIM))
100 | actor_idxes.append(world.gym.get_actor_index(world.env_ptr, world.moving.agent_handle, gymapi.DOMAIN_SIM))
101 | if object_world._root_tensor is not None:
102 | actor_idxes = torch.tensor(actor_idxes, dtype=torch.int32, device="cuda:0")
103 | object_world.gym.set_actor_root_state_tensor_indexed(object_world.sim, object_world._root_tensor,
104 | gymtorch.unwrap_tensor(actor_idxes), len(actor_idxes))
--------------------------------------------------------------------------------
/src/train/train_human_concept.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | from torch import nn
7 | from torch.utils.data import DataLoader, Subset
8 |
9 | import os, sys
10 | import glob
11 | import yaml
12 | import random
13 | import argparse
14 | import numpy as np
15 |
16 | sys.path.insert(1, '../')
17 |
18 | from src.utils.train_utils import *
19 | from src.utils.input_utils import transform_input, make_weights_for_balanced_classes
20 |
21 | if __name__ == "__main__":
22 | # Parse args.
23 | parser = argparse.ArgumentParser(description='pass args')
24 | parser.add_argument('--config', type=str, default="/../../configs/rawstate_human.yaml", help='config file')
25 | parser.add_argument('--concept', type=str, default='above180', help='concept name')
26 | parser.add_argument('--test', action='store_true', default=False, help='training mode')
27 | parser.add_argument('--train_amt', type=int, default=None, help='data amount used for training')
28 | parser.add_argument('--strategy', type=str, default="oracle", help='query collection strategy')
29 | args = parser.parse_args()
30 |
31 | # Load yaml parameters.
32 | here = os.path.dirname(os.path.abspath(__file__))
33 | with open(here+args.config, 'r') as stream:
34 | params = yaml.safe_load(stream)
35 |
36 | # Set random seed if it exists.
37 | if "seed" in params["data"].keys():
38 | torch.manual_seed(params["data"]["seed"])
39 | random.seed(params["data"]["seed"])
40 | np.random.seed(params["data"]["seed"])
41 |
42 | # Set up data and model parameters.
43 | device = ("cuda" if torch.cuda.is_available() else "cpu")
44 | concept = args.concept
45 |
46 | model_params = {}
47 | if params["data"]["type"] == "rawstate":
48 | from src.utils.data_utils import RawStateDataset as ObjectDataset
49 | from src.models.MLP import MLP as Network
50 | model_params["nb_layers"] = params["train"]["network"]["nb_layers"]
51 | model_params["nb_units"] = params["train"]["network"]["nb_units"]
52 | model_params["concept"] = concept
53 | elif params["data"]["type"] == "pointcloud":
54 | from src.utils.data_utils import PointDataset as ObjectDataset
55 | from src.models.pointnet import PointNetEncoder as Network
56 | model_params["pointnet_radius"] = params["train"]["network"]["pointnet_radius"]
57 | model_params["pointnet_nclusters"] = params["train"]["network"]["pointnet_nclusters"]
58 | model_params["scale"] = params["train"]["network"]["scale"]
59 | model_params["in_features"] = params["train"]["network"]["in_features"]
60 |
61 | training_params = {}
62 | training_params["num_epochs"] = params["train"]["num_epochs"]
63 | training_params["learning_rate"] = params["train"]["learning_rate"]
64 | training_params["batch_size"] = params["train"]["batch_size"]
65 | training_params["num_workers"] = params["train"]["num_workers"]
66 |
67 | # Get dataset and dataloaders.
68 | train_path = here + params["data"]["train_dir"]
69 | test_path = here + params["data"]["test_dir"]
70 | train_set = ObjectDataset(train_path+concept+"/{}_data.hdf5".format(args.strategy),\
71 | train_path+concept+"/{}_label.hdf5".format(args.strategy),\
72 | None)
73 | test_set = ObjectDataset(test_path+"/data.hdf5",\
74 | test_path+concept+"/{}.hdf5".format("label"),\
75 | test_path+concept+'/test.txt')
76 |
77 | # Select only first train_amt datapoints.
78 | if args.train_amt is not None:
79 | idxes = []
80 | for i in range(len(train_set)):
81 | # Sort examples by the numeric prefix of their ids; the samples themselves are not needed here.
82 | idx = train_set.examples[i].split("_")[0]
83 | idxes.append(int(idx))
84 | idxes = np.argsort(idxes)
85 | train_set = Subset(train_set, idxes[:args.train_amt])
86 |
87 | weights = make_weights_for_balanced_classes(train_set, 2)
88 | weights = torch.DoubleTensor(weights)
89 | sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
90 |
91 | train_loader = DataLoader(dataset=train_set, batch_size=training_params["batch_size"], sampler=sampler,
92 | num_workers=training_params["num_workers"], pin_memory=True)
93 | test_loader = DataLoader(dataset=test_set, shuffle=False, batch_size=training_params["batch_size"],
94 | num_workers=training_params["num_workers"], pin_memory=True)
95 |
96 | if params["data"]["type"] == "rawstate":
97 | input_dim = transform_input(train_set[0][0].unsqueeze(0), concept).shape[1]
98 | model_params["input_dim"] = input_dim
99 |
100 | # Define model, optimization, and loss.
101 | model = Network(**model_params).to(device)
102 |
103 | # Train and evaluate.
104 | if args.train_amt is not None:
105 | query_str = "_{}".format(args.train_amt)
106 | else:
107 | query_str = ""
108 | model_str = "/{}_{}{}_{}.pt".format(args.strategy, params["data"]["type"], query_str, params["data"]["seed"])
109 | save_dir = here + params["data"]["save_dir"] + concept
110 | if not os.path.isdir(save_dir):
111 | os.mkdir(save_dir)
112 | model_path = save_dir + model_str
113 |
114 | if args.test:
115 | model.load_state_dict(torch.load(model_path))
116 | else:
117 | train(model, train_loader, test_loader, epochs=training_params["num_epochs"],
118 | lr=training_params["learning_rate"], device=device)
119 | torch.save(model.state_dict(), model_path)
120 | print("Saved in ", model_path)
121 | test_acc = check_accuracy(model, test_loader, device=device)
122 |
123 | # Save test error.
124 | results_str = "/{}_{}{}_{}.txt".format(args.strategy, params["data"]["type"], query_str, params["data"]["seed"])
125 | results_dir = here + params["data"]["results_dir"] + concept
126 | if not os.path.isdir(results_dir):
127 | os.mkdir(results_dir)
128 | results_path = results_dir + results_str
129 | with open(results_path, 'w') as f:
130 | f.write('%.5f' % test_acc)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # concept_learning
2 |
3 | **Downloading Object Assets and Pre-trained Models:**
4 |
5 | Download [data.tar.gz](https://drive.google.com/file/d/1h-jcEI-SArBFR4FO4uL09jUYle6zRPjQ) and place it under the `data` folder. Unzip it with the following command:
6 |
7 | ```Shell
8 | tar zxvf data.tar.gz
9 | ```
10 |
11 | **Functionality:**
12 |
13 | 1. a) Generate passive data using Isaac Gym:
14 |
15 | `python datasets/generate_concept_data.py --headless --cuda --envs <num_envs> --samples <num_samples>`
16 |
17 | Options:
18 |
19 | - `--headless`: starts data generation without simulator visualizer;
20 | - `--cuda`: activates GPU computation;
21 | - `<num_envs>`: number of Gym environments used to generate data in parallel;
22 | - `<num_samples>`: number of samples to collect per environment.
23 |
24 | 1. b) Generate active data:
25 |
26 | `python datasets/collect_human_data.py --config <config> --concept <concept> --simulated --active_samples <num_active_samples> --passive_samples <num_passive_samples> --batch_size <batch_size> --objective <objective> --warmstart <num_warmstart> --mining <num_mining>`
27 |
28 | Options:
29 |
30 | - `<concept>`: concept to ask queries about ("above180", "above45", "abovebb", "near", "upright", "upright45", "alignedhorizontal", "alignedvertical", "front", "front45", "ontop");
31 | - `<config>`: path to the configuration file in the `/configs` folder that specifies training parameters;
32 | - `--simulated`: uses a simulated oracle to label the queries; otherwise, real human input is collected via the interface;
33 | - `<num_active_samples>`: number of samples to collect from the human actively (label queries);
34 | - `<num_passive_samples>`: number of samples to collect from the human passively (demo queries);
35 | - `<batch_size>`: how often (in number of queries) to retrain the active learning model;
36 | - `<objective>`: active learning objective ("random", "min", "max", "confusion", "allrand", "confrand");
37 | - `--warmstart <num_warmstart>`: number of demonstration queries to use for warm-starting the model;
38 | - `--mining <num_mining>`: number of queries that use mining.
39 |
40 | 2. Label data using ground truth or learned concept:
41 |
42 | `python datasets/label_concept_data.py --concept <concept> --concept_model <concept_model>`
43 |
44 | Options:
45 |
46 | - `<concept>`: name of the desired concept ("above180", "above45", "abovebb", "near", "upright", "upright45", "alignedhorizontal", "alignedvertical", "front", "front45", "ontop");
47 | - `<concept_model>`: name of the ".pt" model weights file (optional: if left out, the ground-truth concept is used).
48 |
49 | 3. Create test-train split files for oracle demonstration data:
50 |
51 | `python datasets/split_data.py --concept <concept>`
52 |
53 | Options:
54 |
55 | - `<concept>`: name of the desired concept ("above180", "above45", "abovebb", "near", "upright", "upright45", "alignedhorizontal", "alignedvertical", "front", "front45", "ontop").
56 |
57 | 4. Train concept:
58 |
59 | `python train/train_oracle_concept.py --concept_dir <concept_dir> --config <config> --train_amt <train_amt>`
60 |
61 | Options:
62 |
63 | - `<concept_dir>`: concept's label directory;
64 | - `<config>`: path to the configuration file in the `/configs` folder that specifies training parameters (a sketch of the expected keys appears at the end of this section);
65 | - `<train_amt>`: amount of data used for training (optional: if left out, all data is used).
66 |
67 | 5. Evaluate concept:
68 |
69 | `python train/evaluate_concept.py --concept_dir <concept_dir> --config <config> --concept_model <concept_model>`
70 |
71 | Options:
72 |
73 | - `<concept_dir>`: concept's label directory;
74 | - `<config>`: path to the configuration file in the `/configs` folder that specifies evaluation parameters;
75 | - `<concept_model>`: model to use for evaluation.
76 |
77 | Note: The above functionalities are replicated for NGC job scheduling in the `/scripts` folder.
78 |
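The configuration files passed via `<config>` are plain YAML read with `yaml.safe_load`. Below is a minimal sketch of the keys that `train_oracle_concept.py` reads for a raw-state config; all values and paths are placeholders, and the shipped files under `/configs` may differ. Pointcloud configs replace the MLP entries under `network` with `pointnet_radius`, `pointnet_nclusters`, `scale`, and `in_features`, and the human/active-learning configs use `train_dir`/`test_dir` instead of `data_dir` (see `train_human_concept.py`).

```yaml
# Hypothetical sketch of a rawstate training config (placeholder values only).
data:
  type: rawstate                          # one of: rawstate, pointcloud, rgb
  seed: 0                                 # optional; seeds torch/random/numpy
  data_dir: "/../../data/<dataset_dir>/"  # placeholder; resolved relative to src/train/
  save_dir: "/../../data/models/"         # where trained ".pt" weights are written
  results_dir: "/../../data/results/"     # where test-accuracy ".txt" files are written
train:
  num_epochs: 50
  learning_rate: 0.001
  batch_size: 32
  num_workers: 4
  network:
    nb_layers: 3                          # MLP depth for rawstate models
    nb_units: 128                         # MLP width for rawstate models
```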
79 | **Passive Example (Demo Queries):**
80 |
81 | 1. Generate Isaac Gym data: `python datasets/generate_concept_data.py --headless --cuda --envs 100 --samples 10000`
82 | 2. Label data with ground truth: `python datasets/label_concept_data.py --concept "above45"` (the ground-truth scoring is sketched after this list)
83 | 3. Split data in a balanced way (demo queries): `python datasets/split_data.py --concept "above45"`
84 | 4. Train low-dimensional concept: `python train/train_oracle_concept.py --concept_dir "above45" --config '/../../configs/rawstate_oracle.yaml' --train_amt 500`
85 | 5. Train baseline: `python train/train_oracle_concept.py --concept_dir "above45" --config '/../../configs/pointcloud_oracle.yaml' --train_amt 500`
86 | 6. Train handcrafted: `python train/train_oracle_concept.py --concept_dir "above45" --config '/../../configs/pointcloud_oracle.yaml'`
87 | 7. Label using the low-dimensional concept: `python datasets/label_concept_data.py --concept "above45" --concept_model 'oracle_rawstate_500_0.pt'`
88 | 8. Train ours: `python train/train_oracle_concept.py --concept_dir above45_500 --config '/../../configs/pointcloud_oracle.yaml'`
89 | 9. Evaluate model: `python train/evaluate_concept.py --concept_dir "above45" --config '/../../configs/pointcloud_oracle.yaml' --concept_model 'oracle_pointcloud_g500_0.pt'`
90 |
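For reference, the ground-truth labeling in step 2 is a continuous score computed by `src/utils/concept_utils.concept_value` on the 26-value raw state produced by `src/utils/gym_utils.get_raw_state` (moving-object position and xyzw quaternion, anchor position and xyzw quaternion, then the two raveled bounding boxes). A minimal sketch with made-up values, assuming the repository root is on `PYTHONPATH`:

```python
import numpy as np

from src.utils.concept_utils import concept_value

# Made-up raw state mirroring get_raw_state's layout:
# [mov xyz, mov quat xyzw, anc xyz, anc quat xyzw, mov bbox (6), anc bbox (6)]
moving = [0.0, 0.0, 0.4, 0.0, 0.0, 0.0, 1.0]    # moving object straight above the anchor
anchor = [0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 1.0]
bbox = [-0.05, -0.05, -0.05, 0.05, 0.05, 0.05]  # placeholder 10 cm cube bounds

raw_state = np.array(moving + anchor + bbox + bbox)
print(concept_value(raw_state, "above45"))  # 1.0: directly above, zero angular offset
print(concept_value(raw_state, "near"))     # 0.0: the 0.3 m offset equals the `near` threshold
```

The same scoring stands in for the human when `--simulated` is passed (see `passive_querier.py`).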
91 | **Active Example (Label Queries):**
92 |
93 | 1. Generate active learning data: `python datasets/collect_human_data.py --config '/../../configs/rawstate_AL.yaml' --concept "above45" --simulated --active_samples 1000 --passive_samples 0 --batch_size 100 --objective 'confrand' --warmstart 0 --mining 1000`
94 | 2. Train low-dimensional concept: `python train/train_human_concept.py --concept "above45" --config '/../../configs/rawstate_human.yaml' --train_amt 1000 --strategy 'confrandmine'`
95 | 3. Train baseline: `python train/train_human_concept.py --concept "above45" --config '/../../configs/pointcloud_human.yaml' --train_amt 1000 --strategy 'randomgt'`
96 | 4. Label using the low-dimensional concept: `python datasets/label_concept_data.py --concept "above45" --concept_model 'confrandgt_rawstate_1000_0.pt'`
97 | 5. Train ours: `python train/train_oracle_concept.py --concept_dir above45_confrandgtmine500 --config '/../../configs/pointcloud_oracle.yaml'`
98 | 6. Evaluate model: `python train/evaluate_concept.py --concept_dir "above45" --config '/../../configs/pointcloud_oracle.yaml' --concept_model 'confrandgtmine_pointcloud_g500_0.pt'`
99 |
--------------------------------------------------------------------------------
/src/train/train_oracle_concept.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | from torch import nn
7 | from torch.utils.data import DataLoader, Subset
8 |
9 | import os, sys
10 | import glob
11 | import yaml
12 | import random
13 | import argparse
14 | import numpy as np
15 |
16 | sys.path.insert(1, '../')
17 |
18 | from src.utils.train_utils import *
19 | from src.utils.input_utils import transform_input
20 |
21 | torch.backends.cudnn.benchmark = True
22 |
23 | if __name__ == "__main__":
24 | # Parse args.
25 | parser = argparse.ArgumentParser(description='pass args')
26 | parser.add_argument('--config', type=str, default="/../../configs/rawstate_oracle.yaml", help='config file')
27 | parser.add_argument('--concept_dir', type=str, default='above180', help='data directory')
28 | parser.add_argument('--test', action='store_true', default=False, help='training mode')
29 | parser.add_argument('--train_amt', type=int, default=None, help='data amount used for training')
30 | args = parser.parse_args()
31 |
32 | # Load yaml parameters.
33 | here = os.path.dirname(os.path.abspath(__file__))
34 | with open(here+args.config, 'r') as stream:
35 | params = yaml.safe_load(stream)
36 |
37 | # Set random seed if it exists.
38 | if "seed" in params["data"].keys():
39 | torch.manual_seed(params["data"]["seed"])
40 | random.seed(params["data"]["seed"])
41 | np.random.seed(params["data"]["seed"])
42 |
43 | # Set up data and model parameters.
44 | device = ("cuda" if torch.cuda.is_available() else "cpu")
45 | concept = args.concept_dir.split("_")[0]
46 |
47 | dataset_params = {}
48 | model_params = {}
49 | if params["data"]["type"] == "rawstate":
50 | from src.utils.data_utils import RawStateDataset as ObjectDataset
51 | from src.models.MLP import MLP as Network
52 | model_params["nb_layers"] = params["train"]["network"]["nb_layers"]
53 | model_params["nb_units"] = params["train"]["network"]["nb_units"]
54 | model_params["concept"] = concept
55 | elif params["data"]["type"] == "pointcloud":
56 | from src.utils.data_utils import PointDataset as ObjectDataset
57 | from src.models.pointnet import PointNetEncoder as Network
58 | model_params["pointnet_radius"] = params["train"]["network"]["pointnet_radius"]
59 | model_params["pointnet_nclusters"] = params["train"]["network"]["pointnet_nclusters"]
60 | model_params["scale"] = params["train"]["network"]["scale"]
61 | model_params["in_features"] = params["train"]["network"]["in_features"]
62 | elif params["data"]["type"] == "rgb":
63 | from torchvision import transforms  # torchvision transforms are used below
64 | from src.utils.data_utils import RGBDataset as ObjectDataset
65 | from src.models.pointnet import PretrainedImageEncoder as Network
66 | transform = transforms.Compose([
67 | transforms.Resize((224, 224)),
68 | transforms.ToTensor(),
69 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
70 | ])
71 |
72 | dataset_params["transform"] = transform
73 | model_params["input_dim"] = params["train"]["network"]["input_dim"]
74 | model_params["frozen"] = params["train"]["network"]["frozen"]
75 |
76 | training_params = {}
77 | training_params["num_epochs"] = params["train"]["num_epochs"]
78 | training_params["learning_rate"] = params["train"]["learning_rate"]
79 | training_params["batch_size"] = params["train"]["batch_size"]
80 | training_params["num_workers"] = params["train"]["num_workers"]
81 |
82 | # Get dataset and dataloaders.
83 | data_path = here + params["data"]["data_dir"]
84 | label_folder = "label" if len(args.concept_dir.split("_")) == 1 else "label_{}".format(args.concept_dir.split("_")[1])
85 | train_set = ObjectDataset(data_path+"/data.hdf5", \
86 | data_path+concept+"/{}.hdf5".format(label_folder), \
87 | data_path+concept+'/train.txt', **dataset_params)
88 | test_set = ObjectDataset(data_path+"/data.hdf5",\
89 | data_path+concept+"/{}.hdf5".format("label"),\
90 | data_path+concept+'/test.txt', **dataset_params)
91 |
92 | if args.train_amt is not None:
93 | indices = random.sample(range(len(train_set)), args.train_amt)
94 | train_set = Subset(train_set, indices)
95 |
96 | train_loader = DataLoader(dataset=train_set, shuffle=True, batch_size=training_params["batch_size"],
97 | num_workers=training_params["num_workers"], pin_memory=True)
98 | test_loader = DataLoader(dataset=test_set, shuffle=False, batch_size=training_params["batch_size"],
99 | num_workers=training_params["num_workers"], pin_memory=True)
100 |
101 | if params["data"]["type"] == "rawstate":
102 | input_dim = transform_input(train_set[0][0].unsqueeze(0), concept).shape[1]
103 | model_params["input_dim"] = input_dim
104 |
105 | # Define model, optimization, and loss.
106 | model = Network(**model_params).to(device)
107 |
108 | # Train and evaluate.
109 | if args.train_amt is not None:
110 | query_str = "_{}".format(args.train_amt)
111 | elif len(args.concept_dir.split("_")) > 1:
112 | query_str = "_g{}".format(args.concept_dir.split("_")[1])
113 | else:
114 | query_str = ""
115 | model_str = "/oracle_{}{}_{}.pt".format(params["data"]["type"], query_str, params["data"]["seed"])
116 | save_dir = here + params["data"]["save_dir"] + concept
117 | if not os.path.isdir(save_dir):
118 | os.mkdir(save_dir)
119 | model_path = save_dir + model_str
120 |
121 | if args.test:
122 | model.load_state_dict(torch.load(model_path))
123 | else:
124 | train(model, train_loader, test_loader, epochs=training_params["num_epochs"],
125 | lr=training_params["learning_rate"], device=device)
126 | torch.save(model.state_dict(), model_path)
127 | print("Saved in ", model_path)
128 | test_acc = check_accuracy(model, test_loader, device=device)
129 |
130 | # Save test error.
131 | results_str = "/oracle_{}{}_{}.txt".format(params["data"]["type"], query_str, params["data"]["seed"])
132 | results_dir = here + params["data"]["results_dir"] + concept
133 | if not os.path.isdir(results_dir):
134 | os.mkdir(results_dir)
135 | results_path = results_dir + results_str
136 | with open(results_path, 'w') as f:
137 | f.write('%.3f' % test_acc)
--------------------------------------------------------------------------------
/src/train/evaluate_concept.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import os, sys, copy
6 | import yaml
7 | import argparse
8 | import numpy as np
9 | import random
10 |
11 | sys.path.insert(1, '../')
12 |
13 | from src.utils.concept_utils import concept_value
14 | from src.utils.train_utils import *
15 | from src.utils.input_utils import transform_input
16 | from src.utils.geom_utils import *
17 |
18 | import torch
19 | from torch import nn
20 | from torch.utils.data import DataLoader
21 |
22 |
23 | if __name__ == "__main__":
24 | # Parse args.
25 | parser = argparse.ArgumentParser(description='pass args')
26 | parser.add_argument('--config', type=str, default="/../../configs/rawstate.yaml", help='config file')
27 | parser.add_argument('--concept_dir', type=str, default='above180', help='data directory')
28 | parser.add_argument('--concept_model', type=str, default=None, help='model file')
29 | args = parser.parse_args()
30 |
31 | # Load yaml parameters.
32 | here = os.path.dirname(os.path.abspath(__file__))
33 | with open(here+args.config, 'r') as stream:
34 | params = yaml.safe_load(stream)
35 |
36 | # Set random seed if it exists.
37 | if "seed" in params["data"].keys():
38 | torch.manual_seed(params["data"]["seed"])
39 | random.seed(params["data"]["seed"])
40 | np.random.seed(params["data"]["seed"])
41 |
42 | # Set up data and model parameters.
43 | device = ("cuda" if torch.cuda.is_available() else "cpu")
44 | data_path = here + params["data"]["data_dir"]
45 | model_path = here + params["data"]["save_dir"] + args.concept_dir + "/" + args.concept_model
46 | concept = args.concept_dir.split("_")[0]
47 |
48 | dataset_params = {}
49 | model_params = {}
50 | if params["data"]["type"] == "rawstate":
51 | from src.utils.data_utils import OptimizationDataset as ObjectDataset
52 | from src.models.MLP import MLP as Network
53 | model_params["nb_layers"] = params["train"]["network"]["nb_layers"]
54 | model_params["nb_units"] = params["train"]["network"]["nb_units"]
55 | model_params["concept"] = args.concept_dir.split("_")[0]
56 | elif params["data"]["type"] == "pointcloud":
57 | from src.utils.data_utils import OptimizationDataset as ObjectDataset
58 | from src.models.pointnet import PointNetEncoder as Network
59 | model_params["pointnet_radius"] = params["train"]["network"]["pointnet_radius"]
60 | model_params["pointnet_nclusters"] = params["train"]["network"]["pointnet_nclusters"]
61 | model_params["scale"] = params["train"]["network"]["scale"]
62 | model_params["in_features"] = params["train"]["network"]["in_features"]
63 | elif params["data"]["type"] == "rgb":
64 | from torchvision import transforms  # torchvision transforms are used below
65 | from src.utils.data_utils import RGBDataset as ObjectDataset
66 | from src.models.pointnet import PretrainedImageEncoder as Network
67 | transform = transforms.Compose([
68 | transforms.Resize((224, 224)),
69 | transforms.ToTensor(),
70 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
71 | ])
72 |
73 | dataset_params["transform"] = transform
74 | model_params["input_dim"] = params["train"]["network"]["input_dim"]
75 | model_params["frozen"] = params["train"]["network"]["frozen"]
76 |
77 | dataset = ObjectDataset(data_path+"/data.hdf5",\
78 | split_path=data_path+concept+'/test.txt',
79 | sample=False, **dataset_params)
80 | if params["data"]["type"] == "rawstate":
81 | input_dim = transform_input(dataset[0][1].unsqueeze(0), concept).shape[1]
82 | model_params["input_dim"] = input_dim
83 |
84 | # Define model, optimization, and loss.
85 | model = Network(**model_params).to(device)
86 | model.load_state_dict(torch.load(model_path))
87 | model.eval()
88 |
89 | # Evaluate concept network.
90 | visualize = True
91 | results = []
92 | tests = 1000
93 | i=0
94 | while i < tests:
95 | idx = np.random.choice(range(len(dataset)))
96 | sample = dataset[idx]
97 | old_state = torch.cat((sample[0], torch.zeros((sample[0].shape[0],1))), dim=1)
98 | old_rawstate = sample[1]
99 |
100 | # Skip if either object isn't here.
101 | old_concept_val = concept_value(old_rawstate.cpu().detach().numpy(), concept)
102 | if sum(old_state[:, 3]) == 0 or sum(old_state[:, 4]) == 0 or old_concept_val > 0.5:
103 | continue
104 |
105 | # Recenter around anchor.
106 | anchor_center = torch.mean(old_state[old_state[:, 4]==1,:3], axis=0)
107 | old_state[:, :3] -= anchor_center
108 | old_rawstate[:3] -= anchor_center # center around anchor
109 | old_rawstate[7:10] -= anchor_center # center around anchor
110 |
111 | # Get new pose.
112 | T, new_state = evaluate_concept(model, (old_state[:, :5], old_rawstate, 1.0), dtype=params["data"]["type"], \
113 | opt="CEM", batch_size=100, epochs=10, lr=1e-2, device=device, visualize=visualize)
114 | if params["data"]["type"] == "pointcloud":
115 | # Apply transform T to raw state pose.
116 | moving_center = torch.mean(old_state[old_state[:, 3]==1,:3],axis=0).unsqueeze(0).to(device)
117 | rawstate = copy.deepcopy(old_rawstate).unsqueeze(0).to(device)
118 | rawstate[:,:3] -= moving_center
119 | new_rawstate = transform_rawstate(rawstate, T.unsqueeze(0)).float()
120 | new_rawstate[:,:3] += moving_center
121 | new_rawstate = new_rawstate.squeeze()
122 | else:
123 | new_rawstate = new_state
124 | new_rawstate = new_rawstate.cpu().detach().numpy()
125 | new_state = new_state.cpu().detach().numpy()
126 | new_concept_val = concept_value(new_rawstate, concept)
127 | print("old concept: ", old_concept_val)
128 | print("new concept: ", new_concept_val)
129 |
130 | # Record result.
131 | results.append(new_concept_val)
132 | print("Finished test {}.".format(i))
133 | i += 1
134 | print("Got {}/{}".format(sum(np.round(results)), tests))
135 |
136 | # Save test error.
137 | results_str = "/opt_{}.txt".format(args.concept_model[:-3])
138 | results_dir = here + params["data"]["results_dir"] + args.concept_dir.split("_")[0]
139 | if not os.path.isdir(results_dir):
140 | os.mkdir(results_dir)
141 | results_path = results_dir + results_str
142 | with open(results_path, 'w') as f:
143 | f.write('%.3f' % (sum(np.round(results))/tests))
--------------------------------------------------------------------------------
/src/utils/concept_utils.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import numpy as np
6 |
7 | import trimesh
8 | from shapely.geometry import Polygon
9 | from scipy.spatial import ConvexHull
10 |
11 | import pytorch3d.transforms as tra3d
12 | import torch
13 |
14 |
15 | def concept_value(raw_state, concept):
16 | # Returns the concept value for the current world_instance
17 | if concept == "above180":
18 | return above_angle(raw_state)
19 | elif concept == "above45":
20 | return above_angle(raw_state, thresh=45*np.pi/180.0)
21 | elif concept == "abovebb":
22 | return above_bb(raw_state)
23 | elif concept == "upright":
24 | return upright(raw_state)
25 | elif concept == "upright45":
26 | return upright(raw_state, thresh=45*np.pi/180.0)
27 | elif concept == "near":
28 | return near(raw_state, thresh=0.3)
29 | elif concept == "alignedvertical":
30 | return aligned_vertical(raw_state, thresh=45*np.pi/180.0)
31 | elif concept == "alignedhorizontal":
32 | return aligned_horizontal(raw_state, thresh=45*np.pi/180.0)
33 | elif concept == "front":
34 | return front(raw_state)
35 | elif concept == "left":
36 | return left(raw_state)
37 | elif concept == "right":
38 | return right(raw_state)
39 | elif concept == "front45":
40 | return front(raw_state, thresh=45*np.pi/180.0)
41 | elif concept == "left45":
42 | return left(raw_state, thresh=45*np.pi/180.0)
43 | elif concept == "right45":
44 | return right(raw_state, thresh=45*np.pi/180.0)
45 | elif concept == "ontop":
46 | return ontop(raw_state, thresh=45*np.pi/180.0)
47 | else:
48 | raise NotImplementedError
49 |
50 | def above_angle(raw_state, thresh=np.pi):
51 | # Return how much obj1 is above obj2.
52 | rel_pos = raw_state[:3] - raw_state[7:10]
53 | rel_pos /= np.linalg.norm(rel_pos)
54 | angle = np.arccos(rel_pos[2])
55 | return max(0.0, 1.0 - angle / thresh) # Normalize to be between 0 and 1.
56 |
57 | def above_bb(raw_state):
58 | # Return how much obj1 is above obj2.
59 | if raw_state[2] < raw_state[9]:
60 | return 0
61 |
62 | # Unpack raw state.
63 | obj1_quat = torch.tensor([raw_state[6], raw_state[3], raw_state[4], raw_state[5]]).unsqueeze(0)
64 | obj2_quat = torch.tensor([raw_state[13], raw_state[10], raw_state[11], raw_state[12]]).unsqueeze(0)
65 | obj1_bb, obj2_bb = raw_state[14:20].reshape((2,3)), raw_state[20:].reshape((2,3))
66 | obj1_corners, obj2_corners = trimesh.bounds.corners(obj1_bb), trimesh.bounds.corners(obj2_bb)
67 |
68 | obj1_corners = tra3d.quaternion_apply(obj1_quat, torch.tensor(obj1_corners))
69 | obj1_corners += raw_state[:3]
70 | obj2_corners = tra3d.quaternion_apply(obj2_quat, torch.tensor(obj2_corners))
71 | obj2_corners += raw_state[7:10]
72 |
73 | obj1_corners = obj1_corners[:, :2].detach().numpy()
74 | obj2_corners = obj2_corners[:, :2].detach().numpy()
75 |
76 | obj1_hull = ConvexHull(obj1_corners)
77 | obj2_hull = ConvexHull(obj2_corners)
78 |
79 | obj1_poly = Polygon(obj1_hull.points[obj1_hull.vertices])
80 | obj2_poly = Polygon(obj2_hull.points[obj2_hull.vertices])
81 | return obj1_poly.intersection(obj2_poly).area / min(obj1_poly.area, obj2_poly.area)
82 |
83 | def near(raw_state, thresh=0.3):
84 | # Return how much obj1 is near obj2.
85 | length = np.linalg.norm(raw_state[:3] - raw_state[7:10])
86 | return max(0.0, 1.0 - length / thresh) # Normalize to be between 0 and 1.
87 |
88 | def upright(raw_state, thresh=np.pi):
89 | # Return how much obj1 is upright
90 | q = [raw_state[6], raw_state[3], raw_state[4], raw_state[5]]
91 | R = tra3d.quaternion_to_matrix(torch.tensor(q)).detach().numpy()
92 | angle = np.arccos(R[2, 2])
93 | return max(0.0, 1.0 - angle / thresh) # Normalize to be between 0 and 1.
94 |
95 | def aligned_vertical(raw_state, thresh=90*np.pi/180.0):
96 | q1 = [raw_state[6], raw_state[3], raw_state[4], raw_state[5]]
97 | q2 = [raw_state[13], raw_state[10], raw_state[11], raw_state[12]]
98 | R1 = tra3d.quaternion_to_matrix(torch.tensor(q1)).detach().numpy()
99 | R2 = tra3d.quaternion_to_matrix(torch.tensor(q2)).detach().numpy()
100 | v1 = R1[:, 2]
101 | v2 = R2[:, 2]
102 | dot_product1 = np.dot(v1, v2)
103 | dot_product2 = np.dot(v1, -v2)
104 | angle1 = np.arccos(dot_product1)
105 | angle2 = np.arccos(dot_product2)
106 | return max(0.0, 1.0 - min(angle1, angle2) / thresh) # Normalize to be between 0 and 1.
107 |
108 | def aligned_horizontal(raw_state, thresh=90*np.pi/180.0):
109 | q1 = [raw_state[6], raw_state[3], raw_state[4], raw_state[5]]
110 | q2 = [raw_state[13], raw_state[10], raw_state[11], raw_state[12]]
111 | R1 = tra3d.quaternion_to_matrix(torch.tensor(q1)).detach().numpy()
112 | R2 = tra3d.quaternion_to_matrix(torch.tensor(q2)).detach().numpy()
113 | v1 = R1[:, 0]
114 | v2 = R2[:, 0]
115 | dot_product1 = np.dot(v1, v2)
116 | dot_product2 = np.dot(v1, -v2)
117 | angle1 = np.arccos(dot_product1)
118 | angle2 = np.arccos(dot_product2)
119 | return max(0.0, 1.0 - min(angle1, angle2) / thresh) # Normalize to be between 0 and 1.
120 |
121 | def front(raw_state, thresh=np.pi):
122 | rel_pos = raw_state[:3] - raw_state[7:10]
123 | rel_pos /= np.linalg.norm(rel_pos)
124 | q = [raw_state[13], raw_state[10], raw_state[11], raw_state[12]]
125 | R = tra3d.quaternion_to_matrix(torch.tensor(q))
126 | v = R[:, 0]
127 | dot_product = np.dot(v, rel_pos)
128 | angle = np.arccos(dot_product)
129 | return max(0.0, 1.0 - angle / thresh) # Normalize to be between 0 and 1.
130 |
131 | def right(raw_state, thresh=np.pi):
132 | rel_pos = raw_state[7:10] - raw_state[:3]
133 | rel_pos /= np.linalg.norm(rel_pos)
134 | q = [raw_state[13], raw_state[10], raw_state[11], raw_state[12]]
135 | R = tra3d.quaternion_to_matrix(torch.tensor(q))
136 | v = R[:, 1]
137 | dot_product = np.dot(v, rel_pos)
138 | angle = np.arccos(dot_product)
139 | return max(0.0, 1.0 - angle / thresh) # Normalize to be between 0 and 1.
140 |
141 | def left(raw_state, thresh=np.pi):
142 | rel_pos = raw_state[:3] - raw_state[7:10]
143 | rel_pos /= np.linalg.norm(rel_pos)
144 | q = [raw_state[13], raw_state[10], raw_state[11], raw_state[12]]
145 | R = tra3d.quaternion_to_matrix(torch.tensor(q))
146 | v = R[:, 1]
147 | dot_product = np.dot(v, rel_pos)
148 | angle = np.arccos(dot_product)
149 | return max(0.0, 1.0 - angle / thresh) # Normalize to be between 0 and 1.
150 |
151 | def ontop(raw_state, thresh=np.pi):
152 | rel_pos = raw_state[:3] - raw_state[7:10]
153 | rel_pos /= np.linalg.norm(rel_pos)
154 | q = [raw_state[13], raw_state[10], raw_state[11], raw_state[12]]
155 | R = tra3d.quaternion_to_matrix(torch.tensor(q))
156 | v = R[:, 2]
157 | dot_product = np.dot(v, rel_pos)
158 | angle = np.arccos(dot_product)
159 | return max(0.0, 1.0 - angle / thresh) # Normalize to be between 0 and 1.
--------------------------------------------------------------------------------
/src/mpc/robot_concept_planner.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """ Example spawning a robot in gym
6 |
7 | """
8 | import copy
9 | from isaacgym import gymapi
10 | from isaacgym import gymutil
11 |
12 | import torch
13 |
14 | import yaml
15 | import argparse
16 | import numpy as np
17 | import os, glob
18 | from quaternion import quaternion, from_rotation_vector, from_rotation_matrix
19 |
20 | from storm_kit.mpc.control import MPPI
21 | from storm_kit.mpc.utils.state_filter import JointStateFilter, RobotStateFilter
22 | from storm_kit.mpc.utils.mpc_process_wrapper import ControlProcess
23 |
24 | from storm_kit.differentiable_robot_model.coordinate_transform import CoordinateTransform
25 |
26 | import sys
27 | sys.path.insert(1, '../')
28 |
29 | from src.utils.gym_utils import *
30 | from src.utils.concept_utils import *
31 | from src.utils.camera_utils import *
32 | from src.utils.geom_utils import *
33 | from src.world.robot_world import RobotWorld
34 | from src.mpc.robot_planner_task import RobotPlannerTask
35 |
36 |
37 | class RobotConceptPlanner(object):
38 | def __init__(self, args, asset_root):
39 | # Create robot world.
40 | self.robot_world = RobotWorld(args, asset_root)
41 |
42 | # Create control task.
43 | task_file = args.robot + '_planner.yml'
44 | opt_file = 'optimize_concept.yml'
45 | robot_file = args.robot + '.yml'
46 | world_file = 'collision_table.yml'
47 | self.tensor_args = {'device':torch.device('cuda', 0), 'dtype':torch.float32}
48 | self.mpc_control = RobotPlannerTask(task_file, robot_file, world_file, self.tensor_args, spawn_process=False)
49 |
50 | def state_to_EEpose(self, state):
51 | filtered_state_mpc = state
52 | curr_state = np.hstack((filtered_state_mpc['position'], filtered_state_mpc['velocity'], filtered_state_mpc['acceleration']))
53 | curr_state_tensor = torch.as_tensor(curr_state, **self.tensor_args).unsqueeze(0)
54 | pose_state = self.mpc_control.controller.rollout_fn.get_ee_pose(curr_state_tensor)
55 |
56 | # get current pose:
57 | e_pos = np.ravel(pose_state['ee_pos_seq'].cpu().numpy())
58 | e_quat = np.ravel(pose_state['ee_quat_seq'].cpu().numpy())
59 | ee_pose = gymapi.Transform(copy.deepcopy(gymapi.Vec3(e_pos[0], e_pos[1], e_pos[2])),
60 | gymapi.Quat(e_quat[1], e_quat[2], e_quat[3], e_quat[0]))
61 | ee_pose = copy.deepcopy(self.robot_world.w_T_r) * copy.deepcopy(ee_pose)
62 | return ee_pose
63 |
64 | def reach(self, target_pose):
65 | # Define some initial parameters.
66 | sim_dt = self.mpc_control.exp_params['control_dt']
67 | gym_instance = self.robot_world.gym_instance
68 | gym = self.robot_world.gym
69 | sim = self.robot_world.sim
70 | robot_sim = self.robot_world.robot_sim
71 | world = self.robot_world.world
72 | env_ptr = self.robot_world.env_ptr
73 | robot_ptr = self.robot_world.robot_ptr
74 | w_T_r = self.robot_world.w_T_r
75 | t_step = gym_instance.get_sim_time()
76 | current_robot_state = copy.deepcopy(robot_sim.get_state(env_ptr, robot_ptr))
77 | w_robot_coord = CoordinateTransform(trans=gymT_to_quatT(w_T_r)[0:3,3].unsqueeze(0),
78 | rot=gymT_to_quatT(w_T_r)[0:3,0:3].unsqueeze(0))
79 |
80 | # Move moving where the robot is.
81 | gym.set_rigid_transform(env_ptr, world.moving.body_handle, self.state_to_EEpose(current_robot_state))
82 | gym_instance.step()
83 | moving_pose = world.get_pose(world.moving.body_handle)
84 |
85 | # Set goal state and pointcloud information.
86 | camera_data = observe_camera(world)
87 | depth = camera_data["depth"] * (camera_data["depth_max"] - camera_data["depth_min"]) / 65535.0 + camera_data["depth_min"]
88 | camera_data['depth'] = depth
89 | camera_data = get_pointcloud_from_depth(camera_data)
90 |
91 | samples = 1024
92 | anchor_idx = np.where(camera_data["pc_seg"]==SegLabel.ANCHOR.value)[0]
93 | moving_idx = np.where(camera_data["pc_seg"]==SegLabel.MOVING.value)[0]
94 | idxes = np.random.choice(np.hstack((anchor_idx, moving_idx)), size=samples, replace=True)
95 | np.random.shuffle(idxes)
96 | camera_data["pc"] -= np.mean(camera_data["pc"][anchor_idx], axis=0)
97 | self.mpc_control.update_params(goal_ee_pos=target_pose[:3], goal_ee_quat=target_pose[3:],
98 | pc=camera_data["pc"][idxes],
99 | pc_seg=np.uint8(camera_data["pc_seg"][idxes]),
100 | pc_pose=gymT_to_quatT(moving_pose))
101 | steps = 0
102 | while(True):
103 | try:
104 | gym_instance.step()
105 | steps += 1
106 | #if steps < 1000:
107 | # continue
108 | t_step += sim_dt
109 | command = self.mpc_control.get_command(t_step, current_robot_state, control_dt=sim_dt, WAIT=True)
110 | q_des = copy.deepcopy(command['position'])
111 | qd_des = copy.deepcopy(command['velocity'])
112 |
113 | ee_pose = self.state_to_EEpose(current_robot_state)
114 | print("Position: ",ee_pose.p)
115 | print("Joint: ", q_des)
116 | gym.set_rigid_transform(env_ptr, world.moving.body_handle, copy.deepcopy(ee_pose))
117 |
118 | # Print current concept value
119 | raw_state = get_raw_state(self.robot_world, world)
120 | print("Raw state: ", raw_state)
121 | print("Concept: ", concept_value(raw_state, "abovebb"))
122 |
123 | # Plot MPPI trajectories.
124 | gym_instance.clear_lines()
125 | top_trajs = self.mpc_control.top_trajs.cpu().float()
126 | n_p, n_t = top_trajs.shape[0], top_trajs.shape[1]
127 | w_pts = w_robot_coord.transform_point(top_trajs.view(n_p * n_t, 3)).view(n_p, n_t, 3)
128 |
129 | top_trajs = w_pts.cpu().numpy()
130 | color = np.array([0.0, 1.0, 0.0])
131 | for k in range(top_trajs.shape[0]):
132 | pts = top_trajs[k,:,:]
133 | color[0] = float(k) / float(top_trajs.shape[0])
134 | color[1] = 1.0 - float(k) / float(top_trajs.shape[0])
135 | gym_instance.draw_lines(pts, color=color)
136 |
137 | robot_sim.set_robot_state(q_des, qd_des, env_ptr, robot_ptr)
138 | current_robot_state = command
139 |
140 | except KeyboardInterrupt:
141 | print('Closing')
142 | done = True
143 | break
144 |
145 | self.mpc_control.close()
146 | return
147 |
148 | if __name__ == '__main__':
149 |
150 | # instantiate empty gym:
151 | parser = argparse.ArgumentParser(description='pass args')
152 | parser.add_argument('--robot', type=str, default='franka', help='Robot to spawn')
153 | parser.add_argument('--cuda', action='store_true', default=False, help='use cuda')
154 | parser.add_argument('--headless', action='store_true', default=False, help='headless gym')
155 | args = parser.parse_args()
156 | args.envs = 1
157 |
158 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
159 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
160 |
161 | planner = RobotConceptPlanner(args, asset_root)
162 | planner.reach(np.array([0.0, 1.0, 0.5, 1, 0, 0, 0]))
--------------------------------------------------------------------------------
/src/mpc/se3_concept_planner.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """ Example spawning a robot in gym
6 |
7 | """
8 | import copy
9 | from isaacgym import gymapi
10 | from isaacgym import gymutil
11 |
12 | import torch
13 | torch.multiprocessing.set_start_method('spawn',force=True)
14 | torch.set_num_threads(8)
15 | torch.backends.cudnn.benchmark = False
16 | torch.backends.cuda.matmul.allow_tf32 = True
17 | torch.backends.cudnn.allow_tf32 = True
18 |
19 | import time
20 | import yaml
21 | import argparse
22 | import numpy as np
23 | import random
24 | import os, glob
25 | from quaternion import quaternion, from_rotation_vector, from_rotation_matrix
26 |
27 | from quaternion import from_euler_angles, as_float_array, as_rotation_matrix, from_float_array, as_quat_array
28 |
29 | from storm_kit.gym.core import Gym, World
30 | from storm_kit.util_file import get_configs_path, get_gym_configs_path, join_path, load_yaml, get_assets_path
31 |
32 | from storm_kit.mpc.control import MPPI
33 |
34 | from storm_kit.differentiable_robot_model.coordinate_transform import quaternion_to_matrix, CoordinateTransform, transform_point
35 |
36 | from storm_kit.geom.utils import get_pointcloud_from_depth
37 |
38 | from mpc.concept_task import ConceptTask
39 | from src.utils.gym_utils import *
40 | from src.utils.camera_utils import *
41 | from src.utils.geom_utils import *
42 |
43 |
44 | class SE3ConceptPlanner(object):
45 | def __init__(self, args, asset_root):
46 | # Create gym instance.
47 | sim_params = load_yaml(join_path(get_gym_configs_path(), 'physx.yml'))
48 | sim_params['headless'] = args.headless
49 | sim_params['sim_params']['use_gpu_pipeline'] = False
50 | self.gym_instance = Gym(**sim_params)
51 | self.gym_instance._create_envs(1)
52 |
53 | self.gym = self.gym_instance.gym
54 | self.sim = self.gym_instance.sim
55 | self.env_ptr = self.gym_instance.env_list[0]
56 |
57 | # Some environment files.
58 | world_file = 'collision_table.yml'
59 | task_file = 'concept_mbrl.yml'
60 | robot_file = args.robot + '.yml'
61 |
62 | world_yml = join_path(get_gym_configs_path(), world_file)
63 | with open(world_yml) as file:
64 | world_params = yaml.safe_load(file)
65 |
66 | # Create world.
67 | self.w_T_r = gymapi.Transform()
68 | self.world_instance = World(self.gym, self.sim, self.env_ptr, world_params, w_T_r=self.w_T_r)
69 |
70 | # Object folders and options.
71 | if "ycb" in asset_root:
72 | obj_urdf_files = sorted(glob.glob("{}/**/textured.urdf".format(asset_root)))
73 | elif "shapenet" in asset_root:
74 | obj_urdf_files = sorted(glob.glob("{}/urdf/*.urdf".format(asset_root)))
75 | obj_urdf_files = [os.path.relpath(i, asset_root) for i in obj_urdf_files]
76 | obj1_urdf, obj2_urdf = random.sample(obj_urdf_files, 2)
77 |
78 | # Spawn objects and camera in the world instance.
79 | moving_pose = self.w_T_r * gymapi.Transform(gymapi.Vec3(0, -1.0, 0.5), gymapi.Quat(0, 0, 0, 1))
80 | obj1_urdf="urdf/CerealBox_a15f43d04b3d5256c9ea91c70932c737_S.urdf"
81 | obj2_urdf="urdf/Book_59fd296e42d9d65cd889106d819b8d66_L.urdf"
82 | self.world_instance.moving = self.world_instance.spawn_object(obj1_urdf, asset_root, moving_pose,
83 | seg_label=SegLabel.MOVING.value, name='mov_obj')
84 | anchor_pose = self.w_T_r * gymapi.Transform(gymapi.Vec3(0, 0.0, 0.1), gymapi.Quat(0, 0, 0, 1))
85 | self.world_instance.anchor = self.world_instance.spawn_object(obj2_urdf, asset_root, anchor_pose,
86 | seg_label=SegLabel.ANCHOR.value, name='anc_obj')
87 | spawn_camera(self.world_instance, 60, 640, 480)
88 |
89 | # Create control task.
90 | self.tensor_args = {'device':torch.device('cuda', 0), 'dtype':torch.float32}
91 | self.mpc_control = ConceptTask(task_file, robot_file, world_file, self.tensor_args, spawn_process=False)
92 | print("World initialized.")
93 |
94 | def reach(self, target_pose):
95 | # Define some initial parameters.
96 | sim_dt = self.mpc_control.exp_params['control_dt']
97 | t_step = self.gym_instance.get_sim_time()
98 | moving_body_handle = self.gym.get_actor_rigid_body_handle(self.env_ptr, self.world_instance.moving, 0)
99 | moving_pose = self.world_instance.get_pose(moving_body_handle)
100 | moving_state = self.mpc_control.controller.rollout_fn.get_state(gymT_to_quatT(moving_pose))
101 | current_robot_state = {'position':moving_state.cpu().numpy(),
102 | 'velocity':np.zeros(6),
103 | 'acceleration':np.zeros(6)}
104 |
105 | # Set goal state and pointcloud information.
106 | camera_data = observe_camera(self.world_instance)
107 | camera_data = get_pointcloud_from_depth(camera_data)
108 |
109 | samples = 1024
110 | anchor_idx = np.where(camera_data["pc_seg"]==SegLabel.ANCHOR.value)[0]
111 | moving_idx = np.where(camera_data["pc_seg"]==SegLabel.MOVING.value)[0]
112 | idxes = np.random.choice(np.hstack((anchor_idx, moving_idx)), size=samples, replace=True)
113 | np.random.shuffle(idxes)
114 | camera_data["pc"] -= np.mean(camera_data["pc"][anchor_idx], axis=0)
115 |
116 | self.mpc_control.update_params(goal_ee_pos=target_pose[:3], goal_ee_quat=target_pose[3:],
117 | pc=camera_data["pc"][idxes],
118 | pc_seg=np.uint8(camera_data["pc_seg"][idxes]),
119 | pc_pose=gymT_to_quatT(moving_pose))
120 |
121 |
122 | w_robot_coord = CoordinateTransform(trans=torch.tensor([0,0,0]).unsqueeze(0),
123 | rot=quaternion_to_matrix(torch.tensor([1,0,0,0])).unsqueeze(0))
124 |
125 |
126 | while(True):
127 | try:
128 | self.gym_instance.step()
129 |
130 | t_step += sim_dt
131 | command = self.mpc_control.get_command(t_step, current_robot_state, control_dt=sim_dt, WAIT=True)
132 |
133 | filtered_state_mpc = current_robot_state
134 | curr_state = np.hstack((filtered_state_mpc['position'], filtered_state_mpc['velocity'], filtered_state_mpc['acceleration']))
135 | curr_state_tensor = torch.as_tensor(curr_state, **self.tensor_args).unsqueeze(0)
136 | pose_state = self.mpc_control.controller.rollout_fn.get_ee_pose(curr_state_tensor)
137 |
138 | # get current pose:
139 | e_pos = np.ravel(pose_state['ee_pos_seq'].cpu().numpy())
140 | e_quat = np.ravel(pose_state['ee_quat_seq'].cpu().numpy())
141 | ee_pose = gymapi.Transform(copy.deepcopy(gymapi.Vec3(e_pos[0], e_pos[1], e_pos[2])),
142 | gymapi.Quat(e_quat[1], e_quat[2], e_quat[3], e_quat[0]))
143 | ee_pose = copy.deepcopy(self.w_T_r) * copy.deepcopy(ee_pose)
144 |
145 | self.gym.set_rigid_transform(self.env_ptr, moving_body_handle, copy.deepcopy(ee_pose))
146 |
147 | print("Position: ",ee_pose.p)
148 | self.gym_instance.clear_lines()
149 | top_trajs = self.mpc_control.top_trajs.cpu().float()
150 | n_p, n_t = top_trajs.shape[0], top_trajs.shape[1]
151 | w_pts = w_robot_coord.transform_point(top_trajs.view(n_p * n_t, 3)).view(n_p, n_t, 3)
152 |
153 | top_trajs = w_pts.cpu().numpy()
154 | color = np.array([0.0, 1.0, 0.0])
155 | for k in range(top_trajs.shape[0]):
156 | pts = top_trajs[k,:,:]
157 | color[0] = float(k) / float(top_trajs.shape[0])
158 | color[1] = 1.0 - float(k) / float(top_trajs.shape[0])
159 | self.gym_instance.draw_lines(pts, color=color)
160 |
161 | current_robot_state = command
162 |
163 | except KeyboardInterrupt:
164 | print('Closing')
165 | done = True
166 | break
167 |
168 | self.mpc_control.close()
169 | return
170 |
171 | if __name__ == '__main__':
172 |
173 | # instantiate empty gym:
174 | parser = argparse.ArgumentParser(description='pass args')
175 | parser.add_argument('--robot', type=str, default='franka', help='Robot to spawn')
176 | parser.add_argument('--cuda', action='store_true', default=True, help='use cuda')
177 | parser.add_argument('--headless', action='store_true', default=False, help='headless gym')
178 | parser.add_argument('--control_space', type=str, default='acc', help='Robot to spawn')
179 | args = parser.parse_args()
180 |
181 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
182 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
183 | planner = SE3ConceptPlanner(args, asset_root)
184 | planner.reach(np.array([0.0, 1.0, 0.5, 1, 0, 0, 0]))
--------------------------------------------------------------------------------
/src/datasets/passive_querier.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """
6 | Generate concept data on the cpu or the gpu.
7 | """
8 | import copy
9 | from isaacgym import gymapi
10 |
11 | import numpy as np
12 | import random
13 | import argparse
14 | import os, sys
15 | import h5py
16 |
17 | sys.path.insert(1, '../')
18 |
19 | from quaternion import from_euler_angles, as_float_array, as_euler_angles, as_quat_array
20 |
21 | from src.utils.gym_utils import *
22 | from src.utils.camera_utils import *
23 | from src.utils.concept_utils import *
24 | from src.utils.input_utils import Hdf5Cacher
25 | from src.world.object_world import ObjectWorld
26 |
27 | np.set_printoptions(precision=2)
28 |
29 |
30 | class PassiveQuerier(object):
31 | def __init__(self, args, asset_root):
32 | self.object_world = ObjectWorld(args, asset_root)
33 |
34 | color = gymapi.Vec3(0.8, 0.1, 0.1)
35 | for moving in self.object_world.worlds[0].movings:
36 | self.object_world.worlds[0].gym.set_rigid_body_color(self.object_world.worlds[0].env_ptr,
37 | moving.agent_handle, 0,
38 | gymapi.MESH_VISUAL_AND_COLLISION, color)
39 |
40 | self.simulated = args.simulated
41 |
42 | def collect_data(self, concept, N_queries=100, query_type="label"):
43 | pos_step = 0.05
44 | rot_step = 0.05
45 |
46 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
47 | save_dir = parent_dir + "/data/g_shapenet/" + "{}/".format(concept)
48 | if not os.path.isdir(save_dir):
49 | os.mkdir(save_dir)
50 | labeler_str = "gt" if self.simulated else "human"
51 |
52 | data_filename = save_dir+"/{}_{}_data.hdf5".format(query_type, labeler_str)
53 | hdf5cacher_data = Hdf5Cacher(data_filename, "w")
54 | label_filename = save_dir+"/{}_{}_label.hdf5".format(query_type, labeler_str)
55 | hdf5cacher_label = Hdf5Cacher(label_filename, "w")
56 |
57 | gym = self.object_world.gym
58 | gym_instance = self.object_world.gym_instance
59 | world = self.object_world.worlds[0]
60 |
61 | if query_type == "demo":
62 | # Subscribe to input events.
63 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_LEFT, "left")
64 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_RIGHT, "right")
65 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_UP, "forward")
66 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_DOWN, "backward")
67 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_U, "up")
68 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_J, "down")
69 |
70 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_A, "roll_cc")
71 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_D, "roll_ccw")
72 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_W, "pitch_cc")
73 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_S, "pitch_ccw")
74 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_Q, "yaw_cc")
75 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_E, "yaw_ccw")
76 |
77 | gym.subscribe_viewer_keyboard_event(gym_instance.viewer, gymapi.KEY_ENTER, "save")
78 |
79 | for i in range(N_queries):
80 | try:
81 | # Pick anchor and moving.
82 | world.anchor = random.choice(world.anchors)
83 | world.moving = random.choice(world.movings)
84 |
85 | # Move anchor and moving on the table.
86 | move_agent_to_random_pose(world, world.anchor.agent_handle)
87 | move_agent_to_random_pose(world, world.moving.agent_handle)
88 |
89 | gym_instance.step()
90 |
91 | # Query human until keyboard button is pressed.
92 | if query_type == "demo":
93 | saved = False
94 | while not saved:
95 | new_pose = copy.deepcopy(world.get_pose(world.moving.agent_handle))
96 | new_rot = as_euler_angles(as_quat_array((new_pose.r.w, new_pose.r.x, new_pose.r.y, new_pose.r.z)))
97 |
98 | for evt in gym.query_viewer_action_events(gym_instance.viewer):
99 | if evt.action == "left" and evt.value > 0:
100 | new_pose.p.x += pos_step
101 | if evt.action == "right" and evt.value > 0:
102 | new_pose.p.x -= pos_step
103 | if evt.action == "forward" and evt.value > 0:
104 | new_pose.p.y += pos_step
105 | if evt.action == "backward" and evt.value > 0:
106 | new_pose.p.y -= pos_step
107 | if evt.action == "up" and evt.value > 0:
108 | new_pose.p.z += pos_step
109 | if evt.action == "down" and evt.value > 0:
110 | new_pose.p.z -= pos_step
111 |
112 | if evt.action == "roll_cc" and evt.value > 0:
113 | new_rot[0] += rot_step
114 | if evt.action == "roll_ccw" and evt.value > 0:
115 | new_rot[0] -= rot_step
116 | if evt.action == "pitch_cc" and evt.value > 0:
117 | new_rot[1] += rot_step
118 | if evt.action == "pitch_ccw" and evt.value > 0:
119 | new_rot[1] -= rot_step
120 | if evt.action == "yaw_cc" and evt.value > 0:
121 | new_rot[2] += rot_step
122 | if evt.action == "yaw_ccw" and evt.value > 0:
123 | new_rot[2] -= rot_step
124 |
125 | if evt.action == "save" and evt.value > 0:
126 | saved = True
127 |
128 | if not saved:
129 | q = as_float_array(from_euler_angles(new_rot))
130 | new_pose.r = gymapi.Quat(q[1], q[2], q[3], q[0])
131 | move_agent_to_pose(world, world.moving.agent_handle, new_pose)
132 | gym_instance.step()
133 |
134 | raw_state = get_raw_state(self.object_world, world)
135 | if self.simulated:
136 | label = concept_value(raw_state, concept)
137 | else:
138 | # Ask human for label.
139 | answer = input("Is this {}? (yes/Y/y)\n".format(concept))
140 | label = 1.0 if answer in ["yes", "Y", "y"] else 0.0
141 |
142 | # Save camera data.
143 | camera_data = observe_camera(world, cuda=self.object_world.cuda)
144 | data_dict = get_camera_dict(camera_data)
145 | data_dict["raw_state"] = raw_state
146 | label_dict = {"label": label}
147 | uid = "{}_{}_{}".format(world.moving.idx, world.anchor.idx, i)
148 | hdf5cacher_data.__setitem__(uid, data_dict)
149 | hdf5cacher_label.__setitem__(uid, label_dict)
150 |
151 | # Move anchor and moving back to original position.
152 | move_agent_to_pose(world, world.anchor.agent_handle, self.object_world.storage_pose)
153 | move_agent_to_pose(world, world.moving.agent_handle, self.object_world.storage_pose)
154 |
155 | print("Collected query {}.".format(i))
156 | except KeyboardInterrupt:
157 | print('Closing')
158 | done = True
159 | break
160 | hdf5cacher_data.close()
161 | hdf5cacher_label.close()
162 | return data_filename, label_filename
163 |
164 | def kill_instance(self):
165 | self.object_world.gym.destroy_viewer(self.object_world.gym_instance.viewer)
166 |
167 |
168 | if __name__ == '__main__':
169 | # instantiate empty gym:
170 | parser = argparse.ArgumentParser(description='pass args')
171 | parser.add_argument('--concept', type=str, default='above180', help='concept')
172 | parser.add_argument('--query', type=str, default='label', help='query type')
173 |     parser.add_argument('--simulated', action='store_true', default=False, help='use a simulated oracle to label the queries')
174 | parser.add_argument('--samples', type=int, default=100, help='samples')
175 | args = parser.parse_args()
176 |
177 | args.headless = False
178 | args.cuda = False
179 | args.envs = 1
180 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
181 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
182 | generator = PassiveQuerier(args, asset_root)
183 | generator.collect_data(args.concept, N_queries=args.samples, query_type=args.query)
--------------------------------------------------------------------------------
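
A note on the output of `collect_data` above: it writes a paired data/label HDF5 file per concept, keyed by `"{moving}_{anchor}_{query}"` strings. The sketch below shows one way to read them back; the file paths are hypothetical, and the dict-style access mirrors how `Hdf5Cacher.__getitem__` is used in `src/utils/data_utils.py`, so treat it as an illustration rather than a documented API.

import h5py
from src.utils.input_utils import Hdf5Cacher

# Hypothetical paths; collect_data() returns the actual filenames it wrote.
data_path = "data/g_shapenet/above180/label_gt_data.hdf5"
label_path = "data/g_shapenet/above180/label_gt_label.hdf5"

# List the example keys the same way the dataset classes in data_utils.py do.
with h5py.File(data_path, "r") as f:
    uids = list(f.keys())

data_cache = Hdf5Cacher(data_path, "r")
label_cache = Hdf5Cacher(label_path, "r")
for uid in uids:
    raw_state = data_cache.__getitem__(uid)["raw_state"]   # per-query raw state
    label = label_cache.__getitem__(uid)["label"]          # 0.0 / 1.0 concept label
    print(uid, label)
data_cache.close()
label_cache.close()
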
/src/mpc/robot_concept_reacher.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | """ Example that optimizes a concept-satisfying goal pose and commands a robot to reach it in gym.
6 | 
7 | """
8 | import copy
9 | from isaacgym import gymapi
10 | from isaacgym import gymutil
11 |
12 | import torch
13 |
14 | import yaml
15 | import argparse
16 | import numpy as np
17 | import os, glob
18 | from quaternion import quaternion, from_rotation_vector, from_rotation_matrix
19 |
20 | from storm_kit.mpc.control import MPPI
21 | from storm_kit.mpc.utils.state_filter import JointStateFilter, RobotStateFilter
22 | from storm_kit.mpc.utils.mpc_process_wrapper import ControlProcess
23 |
24 | from storm_kit.differentiable_robot_model.coordinate_transform import CoordinateTransform
25 |
26 | import sys
27 | sys.path.insert(1, '../')
28 |
29 | from src.utils.gym_utils import *
30 | from src.utils.camera_utils import *
31 | from src.utils.geom_utils import *
32 | from src.world.robot_world import RobotWorld
33 | from storm_kit.mpc.task.reacher_task import ReacherTask
34 | from src.mpc.se3_optimization_task import SE3OptimizationTask
35 |
36 |
37 | class RobotConceptReacher(object):
38 | def __init__(self, args, asset_root):
39 | # Create robot world.
40 | self.robot_world = RobotWorld(args, asset_root)
41 |
42 | # Create control task.
43 | task_file = args.robot + '_reacher_srl.yml'
44 | opt_file = 'optimize_concept.yml'
45 | robot_file = args.robot + '.yml'
46 | world_file = 'collision_table.yml'
47 | self.tensor_args = {'device':torch.device('cuda', 0), 'dtype':torch.float32}
48 | self.mpc_control = ReacherTask(task_file, robot_file, world_file, self.tensor_args, spawn_process=False)
49 | self.concept_optimization = SE3OptimizationTask(opt_file, robot_file, world_file, self.tensor_args, spawn_process=False)
50 |
51 | def state_to_EEpose(self, state):
52 | filtered_state_mpc = state
53 | curr_state = np.hstack((filtered_state_mpc['position'], filtered_state_mpc['velocity'], filtered_state_mpc['acceleration']))
54 | curr_state_tensor = torch.as_tensor(curr_state, **self.tensor_args).unsqueeze(0)
55 | pose_state = self.mpc_control.controller.rollout_fn.get_ee_pose(curr_state_tensor)
56 |
57 | # get current pose:
58 | e_pos = np.ravel(pose_state['ee_pos_seq'].cpu().numpy())
59 | e_quat = np.ravel(pose_state['ee_quat_seq'].cpu().numpy())
60 | ee_pose = gymapi.Transform(copy.deepcopy(gymapi.Vec3(e_pos[0], e_pos[1], e_pos[2])),
61 | gymapi.Quat(e_quat[1], e_quat[2], e_quat[3], e_quat[0]))
62 | ee_pose = copy.deepcopy(self.robot_world.w_T_r) * copy.deepcopy(ee_pose)
63 | return ee_pose
64 |
65 | def compute_goal(self):
66 | # Define some useful variables.
67 | sim_dt = self.concept_optimization.exp_params['control_dt']
68 | t_step = self.robot_world.gym_instance.get_sim_time()
69 | world = self.robot_world.world
70 | env_ptr = self.robot_world.env_ptr
71 |
72 | # Move moving where the robot is.
73 | current_robot_state = copy.deepcopy(self.robot_world.robot_sim.get_state(env_ptr, self.robot_world.robot_ptr))
74 | self.robot_world.gym.set_rigid_transform(env_ptr, world.moving.body_handle, self.state_to_EEpose(current_robot_state))
75 | self.robot_world.gym_instance.step()
76 | moving_pose = world.get_pose(world.moving.body_handle)
77 | moving_state = self.concept_optimization.controller.rollout_fn.get_state(gymT_to_quatT(moving_pose))
78 | current_state = {'position':moving_state.cpu().numpy(),
79 | 'velocity':np.zeros(6),
80 | 'acceleration':np.zeros(6)}
81 | print(current_state)
82 |
83 | # Set goal state and pointcloud information.
84 | goal_state = torch.as_tensor([0,0,0,0,0,0], **self.tensor_args)
85 |
86 | camera_data = observe_camera(world)
87 | camera_data = get_pointcloud_from_depth(camera_data)
88 |
89 | samples = 1024
90 | anchor_idx = np.where(camera_data["pc_seg"]==SegLabel.ANCHOR.value)[0]
91 | moving_idx = np.where(camera_data["pc_seg"]==SegLabel.MOVING.value)[0]
92 | idxes = np.random.choice(np.hstack((anchor_idx, moving_idx)), size=samples, replace=True)
93 | np.random.shuffle(idxes)
94 | camera_data["pc"] -= np.mean(camera_data["pc"][anchor_idx], axis=0)
95 |
96 | self.concept_optimization.update_params(goal_state=goal_state,
97 | pc=camera_data["pc"][idxes],
98 | pc_seg=np.uint8(camera_data["pc_seg"][idxes]),
99 | pc_pose=gymT_to_quatT(moving_pose))
100 |
101 | # Optimize concept.
102 | prev_error = 0.0
103 | while(True):
104 | try:
105 | t_step += sim_dt
106 | command = self.concept_optimization.get_command(t_step, current_state, control_dt=sim_dt, WAIT=True)
107 | ee_error = self.concept_optimization.get_current_error(current_state)
108 | current_state = command
109 | print(["{:.3f}".format(x) for x in ee_error])
110 | print(current_state)
111 |                 if abs(ee_error[0] - prev_error) < 0.00001:
112 | break
113 | prev_error = ee_error[0]
114 | except KeyboardInterrupt:
115 | print('Closing')
116 | done = True
117 | break
118 | self.concept_optimization.close()
119 |
120 | # Convert SO3 state into pose.
121 | curr_state = np.hstack((current_state['position'], current_state['velocity'], current_state['acceleration']))
122 | curr_state_tensor = torch.as_tensor(curr_state, **self.tensor_args).unsqueeze(0)
123 | return self.concept_optimization.controller.rollout_fn.get_ee_pose(curr_state_tensor)
124 |
125 | def reach(self, goal_pose):
126 | # Set goal state.
127 | self.mpc_control.update_params(goal_ee_pos=goal_pose['ee_pos_seq'], goal_ee_rot=goal_pose['ee_rot_seq'])
128 |
129 | # Define some initial parameters.
130 | sim_dt = self.mpc_control.exp_params['control_dt']
131 | gym_instance = self.robot_world.gym_instance
132 | gym = self.robot_world.gym
133 | sim = self.robot_world.sim
134 | robot_sim = self.robot_world.robot_sim
135 | world = self.robot_world.world
136 | env_ptr = self.robot_world.env_ptr
137 | robot_ptr = self.robot_world.robot_ptr
138 | w_T_r = self.robot_world.w_T_r
139 | t_step = gym_instance.get_sim_time()
140 | current_robot_state = copy.deepcopy(robot_sim.get_state(env_ptr, robot_ptr))
141 | w_robot_coord = CoordinateTransform(trans=gymT_to_quatT(w_T_r)[0:3,3].unsqueeze(0),
142 | rot=gymT_to_quatT(w_T_r)[0:3,0:3].unsqueeze(0))
143 |
144 |
145 | while(True):
146 | try:
147 | gym_instance.step()
148 |
149 | t_step += sim_dt
150 | command = self.mpc_control.get_command(t_step, current_robot_state, control_dt=sim_dt, WAIT=True)
151 | q_des = copy.deepcopy(command['position'])
152 | qd_des = copy.deepcopy(command['velocity'])
153 |
154 | ee_pose = self.state_to_EEpose(current_robot_state)
155 |
156 | gym.set_rigid_transform(env_ptr, world.moving.body_handle, copy.deepcopy(ee_pose))
157 |
158 | gym_instance.clear_lines()
159 | top_trajs = self.mpc_control.top_trajs.cpu().float()
160 | n_p, n_t = top_trajs.shape[0], top_trajs.shape[1]
161 | w_pts = w_robot_coord.transform_point(top_trajs.view(n_p * n_t, 3)).view(n_p, n_t, 3)
162 |
163 | top_trajs = w_pts.cpu().numpy()
164 | color = np.array([0.0, 1.0, 0.0])
165 | for k in range(top_trajs.shape[0]):
166 | pts = top_trajs[k,:,:]
167 | color[0] = float(k) / float(top_trajs.shape[0])
168 | color[1] = 1.0 - float(k) / float(top_trajs.shape[0])
169 | gym_instance.draw_lines(pts, color=color)
170 |
171 | robot_sim.set_robot_state(q_des, qd_des, env_ptr, robot_ptr)
172 | current_robot_state = command
173 |
174 | except KeyboardInterrupt:
175 | print('Closing')
176 | done = True
177 | break
178 |
179 | self.mpc_control.close()
180 | return
181 |
182 | if __name__ == '__main__':
183 |
184 | # instantiate empty gym:
185 | parser = argparse.ArgumentParser(description='pass args')
186 | parser.add_argument('--robot', type=str, default='franka', help='Robot to spawn')
187 | parser.add_argument('--cuda', action='store_true', default=False, help='use cuda')
188 | parser.add_argument('--headless', action='store_true', default=False, help='headless gym')
189 | args = parser.parse_args()
190 | args.envs = 1
191 |
192 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
193 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
194 |
195 | reacher = RobotConceptReacher(args, asset_root)
196 | goal_pose = reacher.compute_goal()
197 | reacher.reach(goal_pose)
--------------------------------------------------------------------------------
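
A recurring detail in `state_to_EEpose` above (and in `passive_querier.py`) is quaternion ordering: the numpy-quaternion arrays are (w, x, y, z), while `gymapi.Quat` is constructed as (x, y, z, w). The helpers below are only a sketch to make that convention explicit; they are not part of the repository.

import numpy as np
from isaacgym import gymapi

def wxyz_to_gym_quat(q_wxyz):
    """(w, x, y, z) array -> gymapi.Quat(x, y, z, w), as done inline in state_to_EEpose."""
    w, x, y, z = np.ravel(q_wxyz)
    return gymapi.Quat(x, y, z, w)

def gym_quat_to_wxyz(quat):
    """gymapi.Quat -> (w, x, y, z) array, matching the ordering expected by numpy-quaternion."""
    return np.array([quat.w, quat.x, quat.y, quat.z])
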
/src/utils/train_utils.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | from torch import nn
7 | from tqdm import tqdm
8 | import copy
9 |
10 | from src.utils.geom_utils import *
11 |
12 |
13 | # Train and validate concept network.
14 |
15 | def check_accuracy(model, loader, device="cpu"):
16 |     accuracy = 0
17 | num_samples = 0
18 | model.eval()
19 |
20 | num_wrong_zeros = 0
21 | num_wrong_ones = 0
22 | num_correct_zeros = 0
23 | num_correct_ones = 0
24 | num_zeros = 0
25 | num_nonzeros = 0
26 | num_correct = 0
27 | num_samples = 0
28 |
29 | with torch.no_grad():
30 | for x, y in loader:
31 | x = x.to(device=device)
32 | y = y.to(device=device)
33 | predictions = model(x)
34 | scores = torch.sigmoid(predictions)
35 | predictions = torch.tensor([1.0 if i >= 0.5 else 0.0 for i in scores]).to(device)
36 |
37 | # Compute accuracy, precision, recall.
38 | num_wrong_zeros += predictions[y.squeeze()==0].sum()
39 | num_wrong_ones += (y[y==1].squeeze() - predictions[y.squeeze()==1]).sum()
40 | num_correct_zeros += (y==0.0).sum() - predictions[y.squeeze()==0].sum()
41 | num_correct_ones += predictions[y.squeeze()==1].sum()
42 | num_zeros += (y==0.0).sum()
43 | num_nonzeros += (y>0.0).sum()
44 |
45 |         accuracy = float(num_correct_zeros + num_correct_ones) / float(num_zeros + num_nonzeros)
46 | precision = num_correct_ones / (num_correct_ones + num_wrong_zeros)
47 | recall = num_correct_ones / (num_correct_ones + num_wrong_ones)
48 | f1 = 2 * (precision * recall) / (precision + recall)
49 |         print(f"Got {num_correct_zeros + num_correct_ones} / {num_zeros + num_nonzeros} \
50 |               with accuracy {accuracy*100:.2f}, precision {precision*100:.2f}, recall {recall*100:.2f}, f1 {f1:.2f}.")
51 |
52 | zero_label_error = num_wrong_zeros / num_zeros
53 | print("Zero Error {} for {} samples.".format(zero_label_error, num_zeros))
54 | nonzero_label_error = num_wrong_ones / num_nonzeros
55 | print("Nonzero Error {} for {} samples.".format(nonzero_label_error, num_nonzeros))
56 |     return accuracy
57 |
58 | def train(model, train_loader, validation_loader, epochs=10, lr=1e-4, device="cpu"):
59 | criterion = nn.BCEWithLogitsLoss()
60 | optimizer = torch.optim.Adam(model.parameters(), lr=lr)
61 | model.train()
62 |
63 | for epoch in range(epochs):
64 | loop = tqdm(train_loader, total = len(train_loader), leave = True)
65 | if epoch % 5 == 0:
66 | loop.set_postfix(val_acc = check_accuracy(model, validation_loader, device))
67 | model.train()
68 | epoch_loss = []
69 | for pts, labels in loop:
70 | pts = pts.to(device)
71 | labels = labels.to(device)
72 | outputs = model(pts)
73 | loss = criterion(outputs, labels)
74 | optimizer.zero_grad()
75 | loss.backward()
76 | optimizer.step()
77 | epoch_loss.append(loss.item())
78 | loop.set_description(f"Epoch [{epoch}/{epochs}]")
79 | loop.set_postfix(loss = np.mean(epoch_loss))
80 |
81 | # Optimize concept network.
82 | def evaluate_concept(model, sample, dtype="pointcloud", opt="sanity", batch_size=100, epochs=10, lr=1e-2, device="cpu", visualize=False):
83 | # For CEM if necessary.
84 | def sample_dx(mu, sigma):
85 | mu = mu[None].repeat(batch_size, 1)
86 | sigma = sigma[None].repeat(batch_size, 1)
87 | eps = torch.randn(batch_size, mu.shape[1]).to(device)
88 | dx = mu + (eps * sigma)
89 | dx[:, 3:] = dx[:, 3:] / torch.norm(dx[:, 3:], dim=1)[:, None]
90 | return dx.to(device)
91 |
92 | def l2(x, y):
93 | return torch.mean((x-y)**2, dim=-1)
94 |
95 | def l2_pose_dist(pose_old, pose_new):
96 | return torch.norm(pose_old[:,:3] - pose_new[:,:3], dim=1)
97 |
98 | def anchor_pose_dist(pose):
99 | dists = torch.norm(pose[:,7:10] - pose[:,:3], dim=1) - 0.3
100 | return dists * (dists > 0)
101 |
102 | def l2_pc_dist(pc_old, pc_new):
103 | moving_center = torch.mean(pc_old[:,pc_old[0, :, 3]==1,:3], axis=1)
104 | moved_center = torch.mean(pc_new[:,pc_new[0, :, 3]==1,:3], axis=1)
105 |         dists = torch.norm(moving_center - moved_center, dim=1) - 0.3
106 | return dists * (dists > 0)
107 |
108 | def anchor_pc_dist(pc):
109 | moving_center = torch.mean(pc[:,pc[0, :, 3]==1,:3],axis=1)
110 | anchor_center = torch.mean(pc[:,pc[0, :, 4]==1,:3],axis=1)
111 | dists = torch.norm(moving_center - anchor_center, dim=1) - 0.3
112 | return dists * (dists > 0)
113 |
114 | # Set optimizer variables.
115 | requires_grad = True if opt=="gradient" else False
116 |
117 | # Unpack input.
118 | state = sample[0]
119 | rawstate = sample[1]
120 | labels = torch.full((batch_size, 1), sample[2], requires_grad=requires_grad, device=device)
121 |
122 | # Define input points.
123 | moving_idx = np.where(state[:,3]==1)[0]
124 | anchor_idx = np.where(state[:,4]==1)[0]
125 | notmoving_idx = np.where(state[:,3]==0)[0]
126 | if dtype == "pointcloud":
127 | # Extract the relevant points.
128 | idxes = np.hstack((moving_idx, anchor_idx))
129 | pts = state[idxes].to(device).repeat(batch_size, 1, 1)
130 | else:
131 | pts = rawstate.to(device).repeat(batch_size, 1)
132 | pts.requires_grad = requires_grad
133 |
134 | # Initialize transform.
135 | if dtype == "pointcloud":
136 | # Initialize pose around anchor.
137 | init_anchor_pts = pts[:,pts[0, :, 4]==1,:3].cpu().detach().numpy()
138 | init_moving_pts = pts[:,pts[0, :, 3]==1,:3].cpu().detach().numpy()
139 |
140 | # Sample to fit in the network.
141 | samples = 1024
142 | idxes = np.random.choice(np.arange(pts.shape[1]), size=samples, replace=True)
143 | else:
144 | init_moving_pts = pts[:,:3].unsqueeze(1).cpu().detach().numpy()
145 | init_anchor_pts = pts[:,7:10].unsqueeze(1).cpu().detach().numpy()
146 | T = torch.tensor(initialize_T_around_pts(init_moving_pts, init_anchor_pts),\
147 | requires_grad=requires_grad, device=device, dtype=torch.float32)
148 |
149 | if opt == "sanity":
150 | epochs = 1
151 | elif opt == "gradient":
152 | optimizer = torch.optim.Adam([T], lr=lr)
153 |
154 | if visualize:
155 | # Save gif pcs.
156 | pc_obj_gif = [[torch.cat((state, torch.zeros((state.shape[0],1))),dim=1).cpu().numpy()]*batch_size]
157 | pc_gif = []
158 |
159 | for epoch in range(epochs):
160 | if dtype == "pointcloud":
161 | # Transform points (NOT in place) then pass them to the concept network.
162 | pc_old = copy.deepcopy(pts)
163 | pc_old_shifted = copy.deepcopy(pc_old)
164 | moving_center = torch.mean(pc_old[:,pc_old[0, :, 3]==1,:3],axis=1).unsqueeze(1)
165 | pc_old_shifted[:,pc_old[0, :, 3]==1,:3] -= moving_center
166 | pc_new = move_points(pc_old_shifted, T)
167 | pc_new[:,pc_new[0, :, 3]==1,:3] += moving_center
168 | net_input = pc_new[:,idxes,:]
169 | else:
170 | net_input = transform_rawstate(pts, T).float()
171 |
172 | # Use the pointcloud too.
173 | idxes = np.hstack((moving_idx, anchor_idx))
174 | pc_old = copy.deepcopy(state[idxes].to(device).repeat(batch_size, 1, 1))
175 | pc_new = move_points(pc_old, T)
176 | outputs = model(net_input)
177 | outputs = torch.sigmoid(outputs)
178 |
179 | if dtype == "pointcloud":
180 | res1 = l2(outputs, labels)
181 | res2 = anchor_pc_dist(pc_new)
182 | res = res1 + res2
183 | else:
184 | res1 = l2(outputs, labels)
185 | res2 = anchor_pose_dist(net_input)
186 | res = res1 + res2
187 |
188 | if opt == "gradient":
189 | loss = torch.mean(res)
190 | optimizer.zero_grad()
191 | loss.backward()
192 | optimizer.step()
193 | #print("LOSS: ", loss.item())
194 | elif opt == "CEM":
195 | # Score and pick top 4.
196 | _, best_idxes = torch.topk(res, 4, largest=False)
197 | mu = torch.mean(T[best_idxes], dim=0)
198 | sigma = torch.std(T[best_idxes], dim=0) + (0.01 * torch.ones(T.shape[1], device=device))
199 | if epoch < epochs-1:
200 | T = sample_dx(mu, sigma)
201 | #print(torch.norm(sigma))
202 |
203 | if visualize:
204 | # Add gif frames.
205 | movingpc_new = pc_new[:, pc_new[0, :, 3]==1, :]
206 | pc_obj_frame = []
207 | for idx in range(batch_size):
208 | new_pc = torch.cat((movingpc_new[idx], state[notmoving_idx].to(device)), dim=0)
209 | new_pc = torch.cat((new_pc, torch.zeros((new_pc.shape[0],1)).to(device)), dim=1)
210 | pc_obj_frame.append(new_pc.cpu().detach().numpy())
211 | pc_obj_gif.append(pc_obj_frame)
212 |
213 | # Show objects as dots.
214 | background_idx = np.where((state[:,3]==0)*(state[:,4]==0)==1)[0]
215 | background_pts = state[background_idx].to(device)
216 | background_pts = torch.cat((background_pts, torch.zeros((background_pts.shape[0],1)).to(device)), dim=1)
217 | cloud = torch.mean(pc_old[0,pc_old[0, :, 4]==1,:], axis=0).unsqueeze(0)
218 | cloud = torch.cat((cloud, torch.zeros((cloud.shape[0],1)).to(device)), dim=1)
219 | for idx in range(batch_size):
220 | moved_pts = movingpc_new[idx]
221 | moving_center = torch.mean(moved_pts, axis=0).unsqueeze(0)
222 | moving_center[0,3] *= outputs[idx,0]
223 | if opt == "CEM" and idx in best_idxes:
224 | moving_center = torch.cat((moving_center, torch.ones((moving_center.shape[0],1)).to(device)), dim=1)
225 | else:
226 | moving_center = torch.cat((moving_center, torch.zeros((moving_center.shape[0],1)).to(device)), dim=1)
227 | cloud = torch.cat((cloud, moving_center), dim=0)
228 | cloud = torch.cat((cloud, background_pts))
229 | pc_gif.append(cloud.cpu().detach().numpy())
230 |
231 | # Save results.
232 | best_idx = torch.argmin(res1)
233 | T_final = T[best_idx]
234 |
235 | if dtype == "pointcloud":
236 | best_pts = torch.cat((pc_new[:, pc_new[0, :, 3]==1, :][best_idx], state[notmoving_idx].to(device)), dim=0)
237 | best_pts = torch.cat((best_pts, torch.zeros((best_pts.shape[0],1)).to(device)),dim=1)
238 | else:
239 | best_pts = net_input[best_idx]
240 |
241 | # Visualize if necessary.
242 | if visualize:
243 | # View object center batches optimization evolution.
244 | pc_gif_final = [pc_obj_gif[i][best_idx] for i in range(len(pc_obj_gif))]
245 | show_pcs_gif(pc_gif, shadow=True)
246 |
247 | # View first and last state.
248 | viz_old = torch.cat((state, torch.zeros((state.shape[0],1))),dim=1)
249 | show_pcs(viz_old.cpu().detach().numpy())
250 | show_pcs(pc_gif_final[-1])
251 |
252 | # View object pc optimization evolution.
253 | show_pcs_gif(pc_gif_final)
254 |
255 | return T_final, best_pts
--------------------------------------------------------------------------------
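
For orientation, here is a minimal sketch of how `train` and `check_accuracy` could be wired to one of the dataset classes defined in `data_utils.py` below. The HDF5 paths are hypothetical, and the stand-in model assumes a 14-dimensional raw state (two 7-D object poses); in the repository the actual models live under `src/models`.

import torch
from torch.utils.data import DataLoader

from src.utils.data_utils import RawStateDataset
from src.utils.train_utils import train, check_accuracy

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical HDF5 files produced by the data-generation scripts.
train_set = RawStateDataset("train_data.hdf5", "train_label.hdf5", balanced=True)
val_set = RawStateDataset("val_data.hdf5", "val_label.hdf5")
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
val_loader = DataLoader(val_set, batch_size=64)

# Stand-in binary classifier over a 14-D raw state; replace with a model from src/models.
model = torch.nn.Sequential(
    torch.nn.Linear(14, 64), torch.nn.ReLU(),
    torch.nn.Linear(64, 1),
).to(device)

train(model, train_loader, val_loader, epochs=20, lr=1e-4, device=device)
val_accuracy = check_accuracy(model, val_loader, device=device)
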
/src/utils/data_utils.py:
--------------------------------------------------------------------------------
1 | # concept_learning
2 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | # Licensed under the NVIDIA Source Code License [see LICENSE for details]
4 |
5 | import torch
6 | from torch.utils.data import Dataset
7 | import numpy as np
8 | import glob
9 | import os
10 | import random
11 | import h5py
12 | from PIL import Image
13 | 
14 | from src.utils.geom_utils import *
15 | from src.utils.input_utils import png_to_numpy, Hdf5Cacher
16 | # Dataset utils.
17 | class RGBDataset(Dataset):
18 | """
19 | Define a dataset class that reads in data that is organized by image view:
20 | {
21 | 'color':color_image,
22 | 'depth':depth_image,
23 | 'segmentation':segmentation,
24 | 'concept':label
25 | }
26 |     Params:
27 |     data_path, label_path -- HDF5 files with the input data and the labels.
28 |     """
29 |     def __init__(self, data_path, label_path, split_path=None, transform=None):
30 |         self.data_path = data_path
31 |         self.label_path = label_path
32 |         self.hdf5cacher_data = None
33 |         self.hdf5cacher_label = None
34 |         self.transform = transform
35 |
36 | if split_path is None:
37 | with h5py.File(self.data_path, 'r') as file:
38 | examples = list(file.keys())
39 | else:
40 | examples = []
41 | with open(split_path, "r") as f:
42 | while True:
43 | line = f.readline()
44 | if not line:
45 | break
46 | examples.append(line[:-1])
47 |
48 | self.examples = examples
49 | print("Loading dataset with {} examples".format(len(self.examples)))
50 |
51 | def __len__(self):
52 | return len(self.examples)
53 |
54 | def __getitem__(self, index):
55 | if self.hdf5cacher_data is None:
56 | self.hdf5cacher_data = Hdf5Cacher(self.data_path, 'r')
57 | self.hdf5cacher_label = Hdf5Cacher(self.label_path, 'r')
58 | example = self.examples[index]
59 | data = self.hdf5cacher_data.__getitem__(example)
60 | label = self.hdf5cacher_label.__getitem__(example)
61 | img = png_to_numpy(data["rgb"]).astype(np.uint8)
62 | label = label["label"]
63 |
64 | img = Image.fromarray(img)
65 | if self.transform is not None:
66 | img = self.transform(img)
67 |
68 | y_label = torch.tensor(float(label)).unsqueeze(-1)
69 |
70 | return (img, y_label)
71 |
72 |
73 | class PointDataset(Dataset):
74 | """
75 | Define a dataset class that reads in data that is organized by image view:
76 | {
77 | 'color':color_image,
78 | 'depth':depth_image,
79 | 'segmentation':segmentation,
80 | 'concept':label
81 | }
82 | Params:
83 |     data_path, label_path -- HDF5 files with the input data and the labels.
84 | """
85 | def __init__(self, data_path, label_path, split_path=None):
86 | self.data_path = data_path
87 | self.label_path = label_path
88 | self.hdf5cacher_data = None
89 | self.hdf5cacher_label = None
90 |
91 | if split_path is None:
92 | with h5py.File(self.data_path, 'r') as file:
93 | examples = list(file.keys())
94 | else:
95 | examples = []
96 | with open(split_path, "r") as f:
97 | while True:
98 | line = f.readline()
99 | if not line:
100 | break
101 | examples.append(line[:-1])
102 |
103 | self.examples = examples
104 | print("Loading dataset with {} examples".format(len(self.examples)))
105 |
106 | def __len__(self):
107 | return len(self.examples)
108 |
109 | def __getitem__(self, index):
110 | if self.hdf5cacher_data is None:
111 | self.hdf5cacher_data = Hdf5Cacher(self.data_path, 'r')
112 | self.hdf5cacher_label = Hdf5Cacher(self.label_path, 'r')
113 | example = self.examples[index]
114 | data = self.hdf5cacher_data.__getitem__(example)
115 | label = self.hdf5cacher_label.__getitem__(example)
116 | depth = png_to_numpy(data["depth"]).astype(np.float32)
117 | mask = png_to_numpy(data["mask"]).astype(np.uint16)
118 | proj_matrix = data["proj_matrix"]
119 | view_matrix = data["view_matrix"]
120 | depth_max = data["depth_max"]
121 | depth_min = data["depth_min"]
122 | label = label["label"]
123 |
124 | depth = depth * (depth_max - depth_min) / 65535.0 + depth_min
125 | camera_data = {'depth':depth, 'mask':mask, 'proj_matrix':proj_matrix, 'view_matrix':view_matrix}
126 | camera_data = get_pointcloud_from_depth(camera_data)
127 |
128 | # Center around the anchor.
129 | anchor_pts = camera_data["pc"][camera_data["pc_seg"]==SegLabel.ANCHOR.value]
130 | if anchor_pts.shape[0] > 0:
131 | camera_data["pc"] -= np.mean(anchor_pts, axis=0)
132 |
133 | # Add one-hot segmentation.
134 | anchor_hot = (camera_data["pc_seg"]==SegLabel.ANCHOR.value).astype(int).reshape((-1,1))
135 | moving_hot = (camera_data["pc_seg"]==SegLabel.MOVING.value).astype(int).reshape((-1,1))
136 | points = np.hstack((camera_data["pc"], moving_hot, anchor_hot))
137 |
138 | # Sample to fit in the network.
139 | samples = 1024
140 | anchor_idx = np.where(camera_data["pc_seg"]==SegLabel.ANCHOR.value)[0]
141 | moving_idx = np.where(camera_data["pc_seg"]==SegLabel.MOVING.value)[0]
142 | idxes = np.random.choice(np.hstack((anchor_idx, moving_idx)), size=samples, replace=True)
143 | np.random.shuffle(idxes)
144 | points = points[idxes]
145 | points = torch.tensor(points).float()
146 |
147 | y_label = torch.tensor(float(label)).unsqueeze(-1)
148 | y_label = torch.round(y_label)
149 | return (points, y_label)
150 |
151 |
152 | class RawStateDataset(Dataset):
153 | """
154 | Define a dataset class that reads in raw state data and concept labels.
155 | Params:
156 |     data_path, label_path -- HDF5 files with the raw states and the labels.
157 | """
158 |
159 | def __init__(self, data_path, label_path, split_path=None, balanced=False, noise=0.0):
160 | self.data_path = data_path
161 | self.label_path = label_path
162 | self.hdf5cacher_data = None
163 | self.hdf5cacher_label = None
164 |
165 | if split_path is None:
166 | with h5py.File(self.data_path, 'r') as file1:
167 | with h5py.File(self.label_path, 'r') as file2:
168 | examples = list(file1.keys())
169 | # Balance the dataset if necessary.
170 | if balanced:
171 | zero_examples = []
172 | one_examples = []
173 | for example in examples:
174 | data = file1.__getitem__(example)
175 | label = file2.__getitem__(example)
176 | raw_state = data["raw_state"]
177 | label = label["label"]
178 | if label == 1.0:
179 | one_examples.append(example)
180 | else:
181 | zero_examples.append(example)
182 | if min(len(zero_examples), len(one_examples)) > 0:
183 | num_per_class = max(len(zero_examples), len(one_examples))
184 | examples = random.choices(zero_examples, k=num_per_class) + random.choices(one_examples, k=num_per_class)
185 | else:
186 | examples = []
187 | with open(split_path, "r") as f:
188 | while True:
189 | line = f.readline()
190 | if not line:
191 | break
192 | examples.append(line[:-1])
193 |
194 | if noise > 0.0:
195 | epsilon = np.random.uniform(0, 1, size=len(examples))
196 | self.flip = epsilon < noise
197 |
198 | self.examples = examples
199 | print("Loading dataset with {} examples".format(len(self.examples)))
200 |
201 | def __len__(self):
202 | return len(self.examples)
203 |
204 | def __getitem__(self, index):
205 | if self.hdf5cacher_data is None:
206 | self.hdf5cacher_data = Hdf5Cacher(self.data_path, 'r')
207 | self.hdf5cacher_label = Hdf5Cacher(self.label_path, 'r')
208 | example = self.examples[index]
209 | data = self.hdf5cacher_data.__getitem__(example)
210 | label = self.hdf5cacher_label.__getitem__(example)
211 | raw_state = data["raw_state"]
212 | label = label["label"]
213 |
214 | if hasattr(self, 'flip'):
215 | if self.flip[index]:
216 | label = 1.0 - label
217 |
218 | raw_state = torch.tensor(raw_state).float()
219 | y_label = torch.tensor(float(label)).unsqueeze(-1)
220 | y_label = torch.round(y_label)
221 |
222 | return (raw_state, y_label)
223 |
224 | class OptimizationDataset(Dataset):
225 | """
226 | Define a dataset class that reads in data that is organized by image view:
227 | {
228 | 'color':color_image,
229 | 'depth':depth_image,
230 | 'segmentation':segmentation,
231 | 'concept':label
232 | }
233 | Params:
234 |     data_path -- HDF5 file with the input data.
235 | """
236 | def __init__(self, data_path, split_path=None, sample=True):
237 | self.data_path = data_path
238 | self.hdf5cacher_data = None
239 | self.sample = sample
240 |
241 | if split_path is None:
242 | with h5py.File(self.data_path, 'r') as file:
243 | examples = list(file.keys())
244 | else:
245 | examples = []
246 | with open(split_path, "r") as f:
247 | while True:
248 | line = f.readline()
249 | if not line:
250 | break
251 | examples.append(line[:-1])
252 |
253 | self.examples = examples
254 | print("Loading dataset with {} examples".format(len(self.examples)))
255 |
256 | def __len__(self):
257 | return len(self.examples)
258 |
259 | def __getitem__(self, index):
260 | if self.hdf5cacher_data is None:
261 | self.hdf5cacher_data = Hdf5Cacher(self.data_path, 'r')
262 | example = self.examples[index]
263 | data = self.hdf5cacher_data.__getitem__(example)
264 | depth = png_to_numpy(data["depth"]).astype(np.float32)
265 | mask = png_to_numpy(data["mask"]).astype(np.uint16)
266 | proj_matrix = data["proj_matrix"]
267 | view_matrix = data["view_matrix"]
268 | depth_max = data["depth_max"]
269 | depth_min = data["depth_min"]
270 | raw_state = data["raw_state"]
271 |
272 | depth = depth * (depth_max - depth_min) / 65535.0 + depth_min
273 | camera_data = {'depth':depth, 'mask':mask, 'proj_matrix':proj_matrix, 'view_matrix':view_matrix}
274 | camera_data = get_pointcloud_from_depth(camera_data)
275 |
276 | # Add one-hot segmentation.
277 | anchor_hot = (camera_data["pc_seg"]==SegLabel.ANCHOR.value).astype(int).reshape((-1,1))
278 | moving_hot = (camera_data["pc_seg"]==SegLabel.MOVING.value).astype(int).reshape((-1,1))
279 | points = np.hstack((camera_data["pc"], moving_hot, anchor_hot))
280 |
281 | if self.sample:
282 | # Sample to fit in the network.
283 | samples = 1024
284 | anchor_idx = np.where(camera_data["pc_seg"]==SegLabel.ANCHOR.value)[0]
285 | moving_idx = np.where(camera_data["pc_seg"]==SegLabel.MOVING.value)[0]
286 | idxes = np.random.choice(np.hstack((anchor_idx, moving_idx)), size=samples, replace=True)
287 | np.random.shuffle(idxes)
288 | points = points[idxes]
289 | points = torch.tensor(points).float()
290 | raw_state = torch.tensor(raw_state).float()
291 |
292 | return (points, raw_state)
--------------------------------------------------------------------------------
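
All three point-cloud datasets above decode depth from 16-bit PNGs with the same affine rescaling before back-projecting to a point cloud. Below is a self-contained sketch of just that decoding step, with made-up `depth_min`/`depth_max` values; the real ones are stored per example in the HDF5 files.

import numpy as np

# Depth was stored as uint16 after mapping [depth_min, depth_max] -> [0, 65535].
depth_png = np.random.randint(0, 65536, size=(480, 640), dtype=np.uint16)  # stand-in for png_to_numpy(...)
depth_min, depth_max = 0.1, 3.0                                            # hypothetical metric range

depth_m = depth_png.astype(np.float32) * (depth_max - depth_min) / 65535.0 + depth_min
print(depth_m.min(), depth_m.max())   # approximately [depth_min, depth_max]
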
/scripts/scratch.txt:
--------------------------------------------------------------------------------
1 | robot_camera_pose = np.array([0.0,-2.8, 0.3,0.707,0.0,0.0,0.707]); t = as_float_array(from_euler_angles(-90.0 * 0.01745, 90.0 * 0.01745, 90 * 0.01745));robot_camera_pose[3:] = np.array([t[1], t[2], t[3], t[0]]);world_instance.camera_handle = spawn_camera(world_instance, env_ptr, 60, 640, 480, robot_camera_pose);camera_data = observe_camera(world_instance, env_ptr);plt.imshow(camera_data["color"]);plt.show()
2 |
3 |
4 |
5 |
6 |
7 |
8 | num_queries = args.concept_model.split("_")[-1][:-3]
9 | query_str = num_queries if num_queries.isnumeric() else ""
10 | self.save_dir = parent_dir + "/data/concept_shapenet/" + "{}_learned{}/".format(args.concept, query_str)
11 |
12 |
13 | example = self.examples[0]; f= h5py.File(self.data_dir + "/input/" + example + ".hdf5", 'r'); depth = png_to_numpy(f["depth"][()]).astype(np.float32); mask = png_to_numpy(f["mask"][()]).astype(np.uint16); proj_matrix = f["camera_intrinsics"]["proj_matrix"][()]; view_matrix = f["camera_intrinsics"]["view_matrix"][()]; depth_max = f["camera_intrinsics"]["depth_max"][()]; depth_min = f["camera_intrinsics"]["depth_min"][()]; f= h5py.File(self.data_dir + "/{}/".format(self.label_folder) + example + ".hdf5", 'r'); label = f["label"][()]; depth = depth * (depth_max - depth_min) / 65535.0 + depth_min; camera_data = {'depth':depth, 'mask':mask, 'proj_matrix':proj_matrix, 'view_matrix':view_matrix}; camera_data = get_pointcloud_from_depth(camera_data)
14 |
15 |
16 | import open3d as o3d; pcd = o3d.geometry.PointCloud(); pcd.points = o3d.utility.Vector3dVector(pts.cpu().detach().numpy()[:,:3]); o3d.visualization.draw_geometries([pcd])
17 |
18 | import open3d as o3d; pcd = o3d.geometry.PointCloud(); pcd.points = o3d.utility.Vector3dVector(camera_data['pc']); mesh_frame = open3d.geometry.TriangleMesh.create_coordinate_frame(size=0.6, origin=[0,0,0]); o3d.visualization.draw_geometries([pcd, mesh_frame])
19 |
20 |
21 | import open3d as o3d; pcd = o3d.geometry.PointCloud(); pcd.points = o3d.utility.Vector3dVector(points.T[:,:3]); mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.6, origin=[0,0,0]); o3d.visualization.draw_geometries([pcd, mesh_frame])
22 |
23 | import open3d as o3d; pcd = o3d.geometry.PointCloud(); pcd.points = o3d.utility.Vector3dVector(pcs_old[i][:,:3]); o3d.visualization.draw_geometries([pcd])
24 |
25 | for index in range(len(self.examples)): print(index); example = self.examples[index]; f= h5py.File(self.data_dir + "/input/" + example + ".hdf5", 'r'); depth = png_to_numpy(f["depth"][()]).astype(np.float32); mask = png_to_numpy(f["mask"][()]).astype(np.uint16); proj_matrix = f["camera_intrinsics"]["proj_matrix"][()]; view_matrix = f["camera_intrinsics"]["view_matrix"][()]; depth_max = f["camera_intrinsics"]["depth_max"][()]; depth_min = f["camera_intrinsics"]["depth_min"][()]; f.close(); f= h5py.File(self.data_dir + "/{}/".format(self.label_folder) + example + ".hdf5", 'r'); label = f["label"][()]; f.close();depth = depth * (depth_max - depth_min) / 65535.0 + depth_min; camera_data = {'depth':depth, 'mask':mask, 'proj_matrix':proj_matrix, 'view_matrix':view_matrix}; camera_data = get_pointcloud_from_depth(camera_data); anchor_pts = camera_data["pc"][camera_data["pc_seg"]==SegLabel.ANCHOR.value]; thing= np.mean(anchor_pts, axis=0)
26 |
27 | obj_idxs = set()
28 | for obj in upright_objs:
29 |     for i, urdf in enumerate(obj_urdf_files):
30 |         if obj in urdf:
31 |             print(i)
32 |     idxes = np.argwhere([obj in urdf for urdf in obj_urdf_files])
33 |     print(idxes)
34 |     obj_idxs.update(int(idx) for idx in idxes.ravel())
35 |
36 |
37 | return torch.cat((rot, pose[:,:3].unsqueeze(2)), axis=2)
38 |
39 | moved_mean = torch.mean(movingpts_new[best_idx][:,:3], axis=0)
40 | anchor_mean = torch.mean(pts[best_idx,pts[0, :, 4]==1,:3], axis=0)
41 | length = torch.norm(moved_mean - anchor_mean)
42 | print("PC length: ",length.item())
43 |
44 | 991, 948, 663, 952, 544
45 |
46 | obj_idxs = set()
47 | front_objs = ["CanOpener", "Fork", "Fruit_7", "Hammer", "Knife", "Mug", "Pan_a", "Scissors", "Spoon", "Stapler", "Teapot"]
48 | lr_objs = ["CanOpener", "Fork", "Fruit_7", "Knife", "Mug", "Pan_a", "Spoon", "Stapler", "Teapot"]
49 | on_objs = ["Book", "Bowl", "Calculator", "Box", "Pan", "Plate"]
50 | towards_objs = ["CanOpener", "Fork", "Fruit_7", "Hammer", "Knife", "Mug", "Pan_a", "Scissors", "Spoon", "Stapler", "Teapot"]
51 |
52 | for obj in front_objs:
53 | for i, urdf in enumerate(obj_urdf_files):
54 | if obj in urdf:
55 | print(i)
56 | obj_idxs.add(i)
57 |
58 |
59 |
60 | mining_errors = {}
61 | for mining in [0, 250, 500, 1000]:
62 | args.mining = mining
63 | objective_errors = []
64 | for objective in ["random", "confusion", "all", "allscheduled", "confrand", "allrand"]:
65 | args.objective = objective
66 | active_generator.reset_model()
67 |
68 | if args.passive_samples > 0:
69 | # Warmstart the model.
70 | active_generator.retrain(data_filename, label_filename)
71 |
72 | errors = active_generator.collect_data(args.concept, N_queries=args.active_samples, objective=args.objective,\
73 | warmstart=args.warmstart, mining=args.mining)
74 | objective_errors.append(errors)
75 | print(objective_errors)
76 | mining_errors[mining] = objective_errors
77 | print(mining_errors)
78 | active_generator.kill_instance()
79 |
80 |
81 | xyz = np.repeat(np.array([0,0,0]).reshape((1,-1)), pts_center.shape[0], axis=0)
82 | quat = np.repeat(np.array([1,0,0,0]).reshape((1,-1)), pts_center.shape[0], axis=0)
83 |
84 |
85 | movingpts=pc_old[0,pc_old[0, :, 3]==1,:3]
86 | movedpts=pc_new[0,pc_new[0, :, 3]==1,:3]
87 | moving_center = torch.mean(movingpts,axis=0)
88 | moved_center = torch.mean(movedpts,axis=0)
89 |
90 |
91 | R = tra3d.quaternion_to_matrix(T[:,3:])
92 | moving_center = torch.mean(pc_old[0,pc_old[0, :, 3]==1,:3],axis=0).unsqueeze(0).unsqueeze(0)
93 | rawstate_pt = old_rawstate[:3].unsqueeze(0)
94 | movingpts_rot = torch.matmul(rawstate_pt-moving_center, torch.transpose(R, 1, 2))
95 | movingpts_trans = torch.add(movingpts_rot+moving_center, T[:,:3].unsqueeze(1))
96 | pose_new = movingpts_trans.squeeze()
97 | new_rawstate = torch.cat((pose_new, old_rawstate[7:])).detach().numpy()
98 |
99 | moving_center = torch.mean(old_state[old_state[:, 3]==1,:3],axis=0).unsqueeze(0).to(device)
100 | rawstate = copy.deepcopy(old_rawstate).unsqueeze(0).to(device)
101 | rawstate[:,:3] -= moving_center
102 | new_rawstate = transform_rawstate(rawstate, T.unsqueeze(0)).float()
103 | new_rawstate[:,:3] += moving_center
104 | new_rawstate = new_rawstate.squeeze()
105 |
106 |
107 | idxes = np.hstack((moving_idx, anchor_idx)); pc_old = state[idxes].to(device).repeat(batch_size, 1, 1); pc_new = move_points(pc_old, T);
108 |
109 | movingpc_new = pc_new[:, pc_new[0, :, 3]==1, :]; movingpc_old = pc_old[:, pc_old[0, :, 3]==1, :]
110 |
111 | idx=0; new_pts = torch.cat((movingpc_new[idx], state[notmoving_idx].to(device)), dim=0); new_pts = torch.cat((new_pts, torch.zeros((new_pts.shape[0],1)).to(device)),dim=1); new_pts = new_pts.cpu().detach().numpy(); old_pts = torch.cat((movingpc_old[idx], state[notmoving_idx].to(device)), dim=0); old_pts = torch.cat((old_pts, torch.zeros((old_pts.shape[0],1)).to(device)),dim=1); old_pts = old_pts.cpu().detach().numpy()
112 |
113 | show_pcs_with_frame(old_pts, pts[0][:3].cpu().detach().numpy())
114 |
115 |
116 |
117 | moving_center = torch.mean(pc_old[:,pc_old[0, :, 3]==1,:3], axis=1); old_rawstate = copy.deepcopy(rawstate).unsqueeze(0).to(device); old_rawstate[:,:3] -= moving_center[idx]; new_rawstate = transform_rawstate(old_rawstate, T[idx].unsqueeze(0)).float(); new_rawstate[:,:3] += moving_center[idx]; new_rawstate = new_rawstate.squeeze().cpu().detach().numpy()
118 |
119 | show_pcs_with_frame(new_pts, new_rawstate[:3])
120 |
121 | self.hdf5cacher_data = Hdf5Cacher(self.data_path, 'r'); self.hdf5cacher_label = Hdf5Cacher(self.label_path, 'r')
122 |
123 | index=0; example = self.examples[index]; data = self.hdf5cacher_data.__getitem__(example);label = self.hdf5cacher_label.__getitem__(example);depth = png_to_numpy(data["depth"]).astype(np.float32);mask = png_to_numpy(data["mask"]).astype(np.uint16);proj_matrix = data["proj_matrix"];view_matrix = data["view_matrix"];depth_max = data["depth_max"];depth_min = data["depth_min"];label = label["label"];depth = depth * (depth_max - depth_min) / 65535.0 + depth_min;camera_data = {'depth':depth, 'mask':mask, 'proj_matrix':proj_matrix, 'view_matrix':view_matrix};camera_data = get_pointcloud_from_depth(camera_data)
124 |
125 |
126 |
127 | 30 CanOpener
128 | 31 CanOpener
129 | 32 CanOpener
130 | 33 CanOpener
131 | 34 CanOpener
132 | 35 CanOpener
133 | 54 Fork
134 | 55 Fork
135 | 56 Fork
136 | 57 Fork
137 | 58 Fork
138 | 59 Fork
139 | 63 Fruit_7
140 | 64 Fruit_7
141 | 65 Fruit_7
142 | 66 Hammer
143 | 67 Hammer
144 | 68 Hammer
145 | 69 Hammer
146 | 70 Hammer
147 | 71 Hammer
148 | 72 Knife
149 | 73 Knife
150 | 74 Knife
151 | 75 Knife
152 | 76 Knife
153 | 77 Knife
154 | 84 Mug
155 | 85 Mug
156 | 86 Mug
157 | 87 Mug
158 | 88 Mug
159 | 89 Mug
160 | 90 Pan_a
161 | 91 Pan_a
162 | 92 Pan_a
163 | 102 Scissors
164 | 103 Scissors
165 | 104 Scissors
166 | 105 Scissors
167 | 106 Scissors
168 | 107 Scissors
169 | 114 Spoon
170 | 115 Spoon
171 | 116 Spoon
172 | 117 Spoon
173 | 118 Spoon
174 | 119 Spoon
175 | 120 Stapler
176 | 121 Stapler
177 | 122 Stapler
178 | 123 Stapler
179 | 124 Stapler
180 | 125 Stapler
181 | 126 Teapot
182 | 127 Teapot
183 | 128 Teapot
184 | 129 Teapot
185 | 130 Teapot
186 | 131 Teapot
187 |
188 |
189 |
190 | 0 Bottle
191 | 1 Bottle
192 | 2 Bottle
193 | 3 Bottle
194 | 4 Bottle
195 | 5 Bottle
196 | 12 Bottle
197 | 13 Bottle
198 | 14 Bottle
199 | 15 Bottle
200 | 16 Bottle
201 | 17 Bottle
202 | 132 Bottle
203 | 133 Bottle
204 | 134 Bottle
205 | 135 Bottle
206 | 136 Bottle
207 | 137 Bottle
208 | 18 Bowl
209 | 19 Bowl
210 | 20 Bowl
211 | 21 Bowl
212 | 22 Bowl
213 | 23 Bowl
214 | 36 Candle
215 | 37 Candle
216 | 38 Candle
217 | 39 Candle
218 | 40 Candle
219 | 41 Candle
220 | 48 Cup
221 | 49 Cup
222 | 50 Cup
223 | 51 Cup
224 | 52 Cup
225 | 53 Cup
226 | 78 MilkCarton
227 | 79 MilkCarton
228 | 80 MilkCarton
229 | 81 MilkCarton
230 | 82 MilkCarton
231 | 83 MilkCarton
232 | 84 Mug
233 | 85 Mug
234 | 86 Mug
235 | 87 Mug
236 | 88 Mug
237 | 89 Mug
238 | 90 Pan
239 | 91 Pan
240 | 92 Pan
241 | 93 Pan
242 | 94 Pan
243 | 95 Pan
244 | 96 Plate
245 | 97 Plate
246 | 98 Plate
247 | 99 Plate
248 | 100 Plate
249 | 101 Plate
250 | 126 Teapot
251 | 127 Teapot
252 | 128 Teapot
253 | 129 Teapot
254 | 130 Teapot
255 | 131 Teapot
256 |
257 |
258 | elif args.concept in ["aligned"]:
259 | obj_idxes = [0, 1, 2, 3, 4, 5, 132, 133, 134, 135, 136, 137, 12, 13, 14, 15, 16, 17, \
260 | 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 52, 53, 78, 79, 80, 81, 82, 83, \
261 | 84, 85, 86, 87, 88, 89]
262 | obj_idxes = [0, 1, 2, 3, 4, 5, 132, 133, 134, 135, 136, 137, 12, 13, 14, 15, 16, 17, \
263 | 36, 37, 38, 39, 40, 41, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89]
264 | obj_idxes = [18, 19, 20, 21, 22, 23, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, \
265 | 126, 127, 128, 129, 130, 131]
266 |
267 |
268 | elif args.concept in ["front", "front45"]:
269 | obj_idxes = [30, 31, 32, 57, 66, 67, 68, 69, 70, 71, 75, 76, 77, 84, 85, 86, 87, 88, 89, 90, \
270 | 91, 92, 102, 103, 104, 105, 106, 107, 117, 118, 123, 124, 126, 127, 128, 129, 130, 131]
271 | obj_idxes = [30, 57, 66, 67, 68, 69, 70, 71, 75, 84, 85, 86, 87, 88, 89, 90, \
272 | 91, 92, 102, 103, 104, 105, 106, 107, 117, 123, 126, 127, 128, 129, 130, 131]
273 | obj_idxes = [32, 57, 66, 67, 68, 69, 70, 71, 90, 91, 92, 102, 103, 104, 105, 106, 107,\
274 | 117, 123, 126, 127, 128, 129, 130, 131]
275 | obj_idxes = [66, 67, 68, 69, 70, 71, 90, 91, 92, 102, 103, 104, 105, 106, 107, 126, 127, 128, 129, 130, 131]
276 | obj_idxes = [66, 67, 68, 69, 70, 71, 90, 91, 92, 126, 127, 128, 129, 130, 131]
277 |
278 |
279 |
280 | # Load object meshes for visualization purposes.
281 | parent_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../..")
282 | asset_root = os.path.abspath(parent_dir + "/data/shapenet_objects/")
283 | obj_mesh_files = sorted(glob.glob("{}/meshes/*.obj".format(asset_root)))
284 | obj_meshes = [trimesh.load(obj_mesh_file) for obj_mesh_file in obj_mesh_files]
285 |
286 |
287 | objs = dataset.examples[idx].split("_")
288 | moving_mesh = obj_meshes[int(objs[0])]
289 | anchor_mesh = obj_meshes[int(objs[1])]
290 | moving_T = pose_to_T(torch.tensor(old_rawstate[:7]).unsqueeze(0))
291 | anchor_T = pose_to_T(torch.tensor(old_rawstate[7:14]).unsqueeze(0))
292 |
293 | obj1_quat = torch.tensor([old_rawstate[6], old_rawstate[3], old_rawstate[4], old_rawstate[5]]).unsqueeze(0)
294 | obj2_quat = torch.tensor([old_rawstate[13], old_rawstate[10], old_rawstate[11], old_rawstate[12]]).unsqueeze(0)
295 | moving_vertices = tra3d.quaternion_apply(torch.tensor([0.707,0,0.707,0.0]).unsqueeze(0), torch.tensor(moving_mesh.vertices))
296 | anchor_vertices = tra3d.quaternion_apply(torch.tensor([0.707,-0.707,0.0,0.0]).unsqueeze(0), torch.tensor(anchor_mesh.vertices))
297 |
298 | moving_vertices = trimesh.transform_points(moving_mesh.vertices, moving_T.squeeze())
299 | anchor_vertices = trimesh.transform_points(anchor_mesh.vertices, anchor_T.squeeze())
300 | moving_faces = moving_mesh.faces
301 | anchor_faces = anchor_mesh.faces
302 | show_pcs_with_mesh(old_state, moving_vertices, moving_faces, shadow=True)
--------------------------------------------------------------------------------