├── __init__.py
├── visualize
│   ├── __init__.py
│   ├── visualize_data.py
│   ├── visualize_fk_loss.py
│   ├── visualize_fk_optimizer.py
│   ├── visualize_grasping_alt.py
│   ├── visualize_grasping.py
│   ├── visualize_ik_loss.py
│   ├── moveit
│   │   ├── visualize_nico_trajectory.py
│   │   └── visualize_redundancy.py
│   ├── visualize_multi_run_loss.py
│   ├── visualize_result_over_epochs.py
│   └── visualize_ik_optimizer.py
├── ga
│   ├── __init__.py
│   └── ga.py
├── assets
│   └── img
│       ├── generator.png
│       ├── _DSC0957__.JPG
│       ├── generator_gan.png
│       ├── cycle_ik_overview.jpg
│       ├── cycleik_ik_pipeline.jpg
│       ├── example_GAN_nicol.png
│       ├── vis_multi_run_loss.png
│       └── nico_nicol_grasp_success_alt__.png
├── optimize
│   ├── __init__.py
│   ├── load_optuna_result.py
│   ├── optimize.py
│   ├── optimize_fk.py
│   └── optimize_mlp.py
├── requirements.txt
├── test
│   ├── __init__.py
│   ├── test.py
│   ├── test_moveit.py
│   └── test_fk.py
├── train
│   ├── __init__.py
│   ├── mlp_multi_run.py
│   ├── train_fk.py
│   └── train.py
├── cycleik_pytorch
│   ├── optim.py
│   ├── __init__.py
│   ├── Bezier.py
│   ├── datasets.py
│   ├── utils.py
│   └── models.py
├── setup.py
├── data_samplers
│   ├── pytorch_datasampler.py
│   ├── nicol_data_vis.py
│   ├── panda_data_generator.py
│   ├── fetch_data_generator.py
│   ├── valkyrie_data_generator.py
│   ├── panda_data_generator_2.py
│   ├── nico_data_generator.py
│   └── moveit_datasampler.py
├── config
│   ├── fetch.yaml
│   ├── valkyrie.yaml
│   ├── panda.yaml
│   ├── nicol.yaml
│   └── nico.yaml
├── optimize.py
├── test.py
├── .gitignore
├── README.md
└── train.py
/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/visualize/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ga/__init__.py:
--------------------------------------------------------------------------------
1 | from .ga import GA
2 |
3 | __all__ = [
4 | "GA",
5 | ]
--------------------------------------------------------------------------------
/assets/img/generator.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jangerritha/CycleIK/HEAD/assets/img/generator.png
--------------------------------------------------------------------------------
/assets/img/_DSC0957__.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jangerritha/CycleIK/HEAD/assets/img/_DSC0957__.JPG
--------------------------------------------------------------------------------
/assets/img/generator_gan.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jangerritha/CycleIK/HEAD/assets/img/generator_gan.png
--------------------------------------------------------------------------------
/assets/img/cycle_ik_overview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jangerritha/CycleIK/HEAD/assets/img/cycle_ik_overview.jpg
--------------------------------------------------------------------------------
/assets/img/cycleik_ik_pipeline.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jangerritha/CycleIK/HEAD/assets/img/cycleik_ik_pipeline.jpg
--------------------------------------------------------------------------------
/assets/img/example_GAN_nicol.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jangerritha/CycleIK/HEAD/assets/img/example_GAN_nicol.png
--------------------------------------------------------------------------------
/assets/img/vis_multi_run_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jangerritha/CycleIK/HEAD/assets/img/vis_multi_run_loss.png
--------------------------------------------------------------------------------
/assets/img/nico_nicol_grasp_success_alt__.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jangerritha/CycleIK/HEAD/assets/img/nico_nicol_grasp_success_alt__.png
--------------------------------------------------------------------------------
/optimize/__init__.py:
--------------------------------------------------------------------------------
1 | from .optimize_mlp import MLPOptimizer
2 | from .optimize_fk import FKOptimizer
3 |
4 | __all__ = [
5 | "MLPOptimizer",
6 | "FKOptimizer",
7 | ]
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pillow
2 | tqdm
3 | matplotlib
4 | opencv-python
5 | numpy
6 | torch
7 | networkx
8 | open3d
9 | torchvision
10 | gdown
11 | optuna
12 | pytorch_kinematics
--------------------------------------------------------------------------------
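The pins above are unversioned, so a plain `pip install -r requirements.txt` (ideally inside a virtual environment) resolves to the latest release of each package; setup.py below mirrors the same list in its install_requires.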
/test/__init__.py:
--------------------------------------------------------------------------------
1 | from .test_mlp import MLPTester
2 | from .test_fk import FKTester
3 | from .evaluation_IROS import MoveitTester
4 |
5 | __all__ = [
6 | "MLPTester",
7 | "FKTester",
8 | "MoveitTester",
9 | ]
--------------------------------------------------------------------------------
/train/__init__.py:
--------------------------------------------------------------------------------
1 | from .train_mlp import MLPTrainer
2 | from .train_fk import FKTrainer
3 | from .fine_tune_mlp import FineTuneMLPTrainer
4 | from .mlp_multi_run import MLPMultiRun
5 |
6 | __all__ = [
7 | "MLPTrainer",
8 | "FKTrainer",
9 | "FineTuneMLPTrainer",
10 | "MLPMultiRun",
11 | ]
--------------------------------------------------------------------------------
/cycleik_pytorch/optim.py:
--------------------------------------------------------------------------------
1 |
2 | class DecayLR:
3 | def __init__(self, epochs, offset, decay_epochs):
4 | epoch_flag = epochs - decay_epochs
5 | assert (epoch_flag > 0), "training epochs < decay not allowed"
6 | self.epochs = epochs
7 | self.offset = offset
8 | self.decay_epochs = decay_epochs
9 |
10 | def step(self, epoch):
11 | return 1.0 - max(0, epoch + self.offset - self.decay_epochs) / (
12 | self.epochs - self.decay_epochs)
13 |
14 |
15 |
--------------------------------------------------------------------------------
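DecayLR implements the common linear-decay schedule: step() returns a multiplicative factor that stays at 1.0 until `decay_epochs` and then falls linearly to 0 at `epochs`. A minimal usage sketch with torch.optim.lr_scheduler.LambdaLR (model and hyperparameters are illustrative, not taken from the training scripts):

    import torch
    from cycleik_pytorch import DecayLR

    model = torch.nn.Linear(8, 7)  # stand-in model
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    # Hold the base LR for 50 epochs, then decay linearly to zero at epoch 100.
    lr_lambda = DecayLR(epochs=100, offset=0, decay_epochs=50).step
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

    for epoch in range(100):
        # ... one training epoch ...
        scheduler.step()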
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from distutils.core import setup
4 |
5 | setup(name='CycleIK',
6 | version='0.1.0',
7 | description='CycleIK - Neuro-inspired Inverse Kinematics - Neuro-Genetic Robot Kinematics Toolkit for PyTorch',
8 | author='Jan-Gerrit Habekost',
9 | author_email='jan-Gerrit.habekost@uni-hamburg.de',
10 | #url='https://www.inf.uni-hamburg.de/en/inst/ab/wtm.html',
11 | license='BSD 2-clause',
12 | packages=['cycleik_pytorch'],
13 | install_requires=[
14 | 'pillow',
15 | 'tqdm',
16 | 'matplotlib',
17 | 'opencv-python',
18 | 'numpy',
19 | 'torch',
20 | 'networkx',
21 | 'open3d',
22 | 'torchvision',
23 | 'gdown',
24 | 'optuna',
25 | 'pytorch_kinematics'
26 | ],
27 |
28 | )
29 |
--------------------------------------------------------------------------------
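Because the package list mirrors requirements.txt, an editable install from the repository root (`pip install -e .`) is enough to make `cycleik_pytorch` importable by the scripts in train/, test/, optimize/, and visualize/.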
/cycleik_pytorch/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import weights_init, load_config, get_kinematic_params, slice_fk_pose, normalize_pose, renormalize_pose, renormalize_joint_state, JSD
2 | from .datasets import IKDataset
3 | from .models import AutoEncoder, GenericDiscriminator, GenericGenerator, GenericNoisyGenerator, FineTuneModel
4 | from .optim import DecayLR
5 | from .utils import ReplayBuffer
6 | from .predictor import CycleIK
7 |
8 |
9 | __all__ = [
10 | "IKDataset",
11 | "FineTuneModel",
12 | "AutoEncoder",
13 | "DecayLR",
14 | "ReplayBuffer",
15 | "weights_init",
16 | "GenericDiscriminator",
17 | "GenericGenerator",
18 | "GenericNoisyGenerator",
19 | "load_config",
20 | "get_kinematic_params",
21 | "CycleIK",
22 | "slice_fk_pose",
23 | "normalize_pose",
24 | "renormalize_pose",
25 | "renormalize_joint_state",
26 | "JSD",
27 | ]
--------------------------------------------------------------------------------
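The CycleIK predictor class is the package's main entry point. Its module (predictor.py) is not included in this dump, so the sketch below is pieced together from the call sites in data_samplers/pytorch_datasampler.py and visualize/moveit/visualize_nico_trajectory.py; treat the exact signatures as assumptions:

    import numpy as np
    from cycleik_pytorch import CycleIK

    # Load the pretrained IK model for one kinematic chain of a robot.
    cycleik = CycleIK(robot="nicol", chain="right_arm", cuda_device="0")

    # Poses are 7-vectors: x, y, z position plus x, y, z, w quaternion.
    poses = np.array([[0.5, -0.2, 1.0, 0.0, 0.0, 0.75, 0.75]])
    js, fk_error, _, _ = cycleik.inverse_kinematics(poses, calculate_error=True)
    print(js)  # one joint configuration per input pose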
/data_samplers/pytorch_datasampler.py:
--------------------------------------------------------------------------------
1 | from cycleik_pytorch import CycleIK
2 | import pickle
3 | import argparse
4 |
5 | parser = argparse.ArgumentParser(
6 | description="Samples random Cartesian/joint-space training data with the CycleIK predictor")
7 |
8 | parser.add_argument("--robot", type=str, default="nicol", help="Robot model IK is trained for")
9 | parser.add_argument("--gpu", type=str, default="0", help="GPU used for sampling")
10 | parser.add_argument("--samples", type=int, default=1000000, help="Number of random samples to draw")
11 | args = parser.parse_args()
12 |
13 | def main():
14 |
15 | robot_name = args.robot
16 | data_file_name = f"results_{robot_name}_{int(args.samples / 1000)}"
17 |
18 | cycle_ik = CycleIK(robot=robot_name, cuda_device=args.gpu)
19 | random_points = cycle_ik.get_random_samples(args.samples)
20 | #print(random_points[1][5000])
21 | with open(f'/home/jan-gerrit/repositories/cycleik/data/{data_file_name}.p', 'wb') as f:
22 | pickle.dump(random_points, f)
23 | # file handle is closed automatically by the with-block
24 |
25 | if __name__ == '__main__':
26 | main()
--------------------------------------------------------------------------------
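The pickle written above is what the visualization and training scripts read back: judging from visualize/visualize_data.py and data_samplers/nicol_data_vis.py, index 0 holds the Cartesian poses and index 1 the matching joint states. A minimal read-back sketch (the file name is illustrative):

    import pickle

    with open('./data/results_nicol_1000.p', 'rb') as f:
        data = pickle.load(f)

    poses, joint_states = data[0], data[1]
    print(len(poses), "pose/joint-state pairs")
    print(poses[0])  # [x, y, z, qx, qy, qz, qw]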
/visualize/visualize_data.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import pickle
3 | from visualization_msgs.msg import Marker
4 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
5 | from std_msgs.msg import Header, ColorRGBA
6 | import rospy
7 | from tqdm import tqdm
8 |
9 | rospy.init_node('data_visualizer', anonymous=True)
10 |
11 | marker_publisher = rospy.Publisher("/moveit/visualization_marker", Marker, queue_size=100000)
12 | rospy.sleep(1)
13 |
14 |
15 | def main():
16 | data = pickle.load(open("./data/results_nicol_1000_3.p", 'rb'))
17 |
18 | for i in tqdm(range(len(data[0]))):
19 | pose = data[0][i]
20 | #if pose[2] > 0.85: continue
21 | #print(pose)
22 | marker = Marker(
23 | type=Marker.SPHERE,
24 | action=Marker.ADD,
25 | id=i,
26 | lifetime=rospy.Duration(),
27 | pose=Pose(Point(pose[0], pose[1], pose[2]), Quaternion(0, 0, 0, 1)),
28 | scale=Vector3(0.001, 0.001, 0.001),
29 | header=Header(frame_id='world'),
30 | color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
31 | # lifetime=0,
32 | frame_locked=False)
33 | marker_publisher.publish(marker)
34 | rospy.sleep(0.001)
35 |
36 |
37 | if __name__ == '__main__':
38 | main()
--------------------------------------------------------------------------------
/visualize/visualize_fk_loss.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import pickle
3 | import numpy as np
4 |
5 | def main():
6 | data = pickle.load(open("./results/train_fk_loss.p", 'rb'))
7 | time_series = []
8 | print(len(data))
9 | print(data[0])
10 | print(len(data[0]))
11 | print(data[1])
12 | print(len(data[1]))
13 | for i in range(len(data[0])):
14 | #if i == 0: continue
15 | time_series.append(i * 10)
16 |
17 | fig, ax = plt.subplots()
18 | ax.set_xlabel('Epoch')
19 | ax.set_ylabel('Error')
20 | kinematic_loss = ax.plot(time_series, data[0])
21 | kinematic_loss[0].set_label('Kinematic Loss (MAE)')
22 | kinematic_loss = ax.plot(time_series, data[1])
23 | kinematic_loss[0].set_label('Validation Loss (MAE)')
24 | ax.xaxis.label.set_fontweight('bold')
25 | ax.yaxis.label.set_fontweight('bold')
26 | ax.legend()
27 | plt.yticks(np.arange(0.0, 0.2, 0.01))
28 | plt.annotate('train: %0.5f' % data[0][-1], xy=(1, data[0][-1]), xytext=(-80, 18),
29 | xycoords=('axes fraction', 'data'), textcoords='offset points')
30 | plt.annotate('val: %0.5f' % data[1][-1], xy=(1, data[1][-1]), xytext=(-81, 5),
31 | xycoords=('axes fraction', 'data'), textcoords='offset points')
32 | plt.savefig('./img/losses/vis_fk_loss.png')
33 | plt.show()
34 |
35 |
36 | if __name__ == '__main__':
37 | main()
--------------------------------------------------------------------------------
/config/fetch.yaml:
--------------------------------------------------------------------------------
1 | arm:
2 | train_data: 'results_fetch_5000_3'
3 | test_data: 'results_fetch_100_3'
4 | val_data: 'results_fetch_200_3'
5 |
6 | robot_dof: 8
7 | limits:
8 | upper: [0.38615, 1.6056, 1.518, 3.14159, 2.251, 3.14159, 2.16, 3.14159]
9 | lower: [0., -1.6056, -1.221, -3.14159, -2.251, -3.14159, -2.16, -3.14159]
10 | workspace:
11 | upper: [0.962, 0.929, 1.959]
12 | lower: [-0.44, -0.929, -0.03]
13 |
14 | robot_urdf: './assets/urdf/fetch.urdf'
15 | robot_eef: 'wrist_roll_link'
16 | base_link: 'base_link'
17 |
18 | joint_name: [ "torso_lift_joint", "shoulder_pan_joint", "shoulder_lift_joint", "upperarm_roll_joint",
19 | "elbow_flex_joint", "forearm_roll_joint", "wrist_flex_joint", "wrist_roll_joint" ]
20 | home_js: [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
21 | move_group: 'arm_with_torso'
22 |
23 | #zero_joints_goal: [0]
24 |
25 | IKNet:
26 |
27 | pretrained_weights: "https://drive.google.com/uc?export=download&id=1O3iQ8BNS97J_TyphnbUdGXeMXh8oqG-N"
28 |
29 | position_weight: 19.
30 | orientation_weight: 3.
31 |
32 | training:
33 | batch_size: 100
34 | lr: 0.00032
35 |
36 | architecture:
37 | layers: [850, 620, 3210, 2680, 680, 3030, 2670]
38 | nbr_tanh: 1
39 | activation: "GELU"
40 |
41 | FKNet:
42 | training:
43 | batch_size: 700
44 | lr: 0.0001
45 |
46 | architecture:
47 | layers: [1900, 2700, 3000, 2900, 450, 60, 10, 160]
48 | nbr_tanh: 3
49 | activation: "GELU"
50 |
--------------------------------------------------------------------------------
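Each robot YAML is resolved through cycleik_pytorch.load_config and then indexed by kinematic chain, as in BaseOptimizer (optimize/optimize.py): load_config(robot)[chain]. A sketch, assuming the working directory is the repository root so the relative URDF and data paths resolve:

    from cycleik_pytorch import load_config

    # Select the 'arm' chain of the Fetch config shown above.
    config = load_config("fetch")["arm"]
    print(config["robot_dof"])          # 8
    print(config["IKNet"]["training"])  # {'batch_size': 100, 'lr': 0.00032}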
/visualize/visualize_fk_optimizer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import sklearn.datasets
3 | import sklearn.metrics
4 | from sklearn.model_selection import train_test_split
5 |
6 | import joblib
7 |
8 | import optuna
9 |
10 | # You can use Matplotlib instead of Plotly for visualization by simply replacing `optuna.visualization` with
11 | # `optuna.visualization.matplotlib` in the following examples.
12 | from optuna.visualization import plot_contour
13 | from optuna.visualization import plot_edf
14 | from optuna.visualization import plot_intermediate_values
15 | from optuna.visualization import plot_optimization_history
16 | from optuna.visualization import plot_parallel_coordinate
17 | from optuna.visualization import plot_param_importances
18 | from optuna.visualization import plot_slice
19 |
20 |
21 | def main():
22 | study = joblib.load('./optuna/cycleik_fk_optimizer.pkl')
23 | plot_optimization_history(study).show()
24 | #plot_parallel_coordinate(study).show()
25 | plot_contour(study, params=["lr", "activation", "batch_size"]).show()
26 | plot_contour(study, params=["lr", "activation", "nbr_layers"]).show()
27 | plot_contour(study, params=["lr", "batch_size", "nbr_layers"]).show()
28 | plot_slice(study, params=["layer0_neurons", "layer1_neurons", "layer2_neurons", "layer3_neurons", "layer6_neurons", "layer7_neurons", "layer8_neurons", "layer9_neurons"]).show()
29 | plot_slice(study,
30 | params=["lr", "activation", "batch_size", "nbr_layers", "nbr_tanh"]).show()
31 | plot_param_importances(study).show()
32 | print(study.best_trial)
33 |
34 | if __name__ == '__main__':
35 | main()
--------------------------------------------------------------------------------
/optimize/load_optuna_result.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import optuna
3 | import joblib
4 |
5 | def get_score(trial):
6 | if trial.values is not None:
7 | val = trial.values[0]
8 | return val
9 | else:
10 | return 10000
11 |
12 | parser = argparse.ArgumentParser(
13 | description="Loads an Optuna study from SQLite and prints its best trials")
14 | parser.add_argument("--robot", type=str, default="nicol", help="Robot model IK is trained for")
15 | parser.add_argument("--db", type=str, default="ik_optimizer_results.db", help="SQLite database that stores the Optuna studies")
16 | parser.add_argument("--gpu", type=int, default=0, help="GPU the study was run on (part of the generated study name)")
17 | parser.add_argument("--study_name", type=str, default="", help="Study name to load; overrides the generated default")
18 | args = parser.parse_args()
19 |
20 | if args.study_name != "":
21 | study_name = args.study_name
22 | else:
23 | study_name = f"cycleik_ik_optimizer_{args.robot}_robot_gpu_{args.gpu}"
24 |
25 |
26 | study_summaries = optuna.study.get_all_study_summaries(storage=f'sqlite:///{args.db}')
27 |
28 | print(f"Optuna study summary: \n")
29 |
30 | for i, study in enumerate(study_summaries):
31 | print(f'Study {i}: {study.study_name}\n')
32 |
33 |
34 | loaded_study = optuna.load_study(study_name=study_name,
35 | storage=f'sqlite:///{args.db}')
36 | joblib.dump(loaded_study, f"./optuna/{args.robot}/cycleik_ik_optimizer.pkl")
37 | print(len(loaded_study.trials))
38 | sorted_trials = sorted(loaded_study.trials, key=get_score)
39 |
40 | for trial in sorted_trials[:10]:
41 | print(trial)
42 |
43 | print("Best Config:\n {0}".format(sorted_trials[0]))
--------------------------------------------------------------------------------
/config/valkyrie.yaml:
--------------------------------------------------------------------------------
1 | right_arm:
2 | train_data: 'results_valkyrie_8000'
3 | test_data: 'results_valkyrie_800'
4 | val_data: 'results_valkyrie_200_1'
5 |
6 | robot_dof: 10
7 | limits:
8 | upper: [1.181, 0.666, 0.255, 2.0, 1.519, 2.18, 2.174, 3.14, 0.62, 0.36]
9 | lower: [-1.329, -0.13, -0.23, -2.85, -1.266, -3.1, -0.12, -2.019, -0.625, -0.49]
10 |
11 | workspace:
12 | upper: [0.974, 0.723, 0.986]
13 | lower: [-0.924, -0.973, -0.4]
14 |
15 | robot_urdf: './assets/urdf/valkyrie_D.urdf'
16 | robot_eef: 'rightPalm'
17 | base_link: 'pelvis'
18 | #zero_joints_goal: [ 0, 1, 2 ]
19 |
20 | joint_name: [ "torsoYaw", "torsoPitch", "torsoRoll", "rightShoulderPitch",
21 | "rightShoulderRoll", "rightShoulderYaw", "rightElbowPitch", "rightForearmYaw",
22 | "rightWristRoll", "rightWristPitch"]
23 | home_js: [-0.0032, -0.0016, -0.0051, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
24 | move_group: 'r_arm'
25 |
26 | #################### Full #######################
27 | FKNet:
28 | training:
29 | batch_size: 700
30 | lr: 0.0001
31 |
32 | architecture:
33 | layers: [1900, 2700, 3000, 2900, 450, 60, 10, 160]
34 | nbr_tanh: 3
35 | activation: "GELU"
36 |
37 | IKNet:
38 |
39 | pretrained_weights: "https://drive.google.com/uc?export=download&id=1NVzPwDPlKPUmq_q5Yjm6RIal9ZfXe3Oe"
40 |
41 | position_weight: 9.
42 | orientation_weight: 1.
43 | zero_controller_weight: 2.
44 |
45 | training:
46 | batch_size: 100
47 | lr: 0.00044
48 |
49 | architecture:
50 | layers: [ 2930, 1130, 1520, 570, 670, 770, 2250 ]
51 | nbr_tanh: 1
52 | activation: "GELU"
53 |
54 |
--------------------------------------------------------------------------------
/config/panda.yaml:
--------------------------------------------------------------------------------
1 | arm:
2 | train_data: 'results_panda_1000_4'
3 | test_data: 'results_panda_100_4'
4 | val_data: 'results_panda_200_4'
5 |
6 | limits:
7 | upper: [2.8973, 1.7628, 2.8973, -0.0698, 2.8973, 3.7525, 2.8973]
8 | lower: [-2.8973, -1.7628, -2.8973, -3.0718, -2.8973, -0.0175, -2.8973]
9 |
10 | workspace:
11 | upper: [0.945, 0.946, 1.28]
12 | lower: [-0.946, -0.946, -0.425]
13 |
14 | robot_dof: 7
15 | robot_urdf: './assets/urdf/panda.urdf'
16 | robot_eef: 'panda_hand_tcp'
17 | base_link: 'world'
18 |
19 | joint_name: ["panda_joint1", "panda_joint2", "panda_joint3", "panda_joint4", "panda_joint5", "panda_joint6", "panda_joint7"]
20 | home_js: [0.0, -0.785398, 0.0, -2.35619, 0.0, 1.5708, 0.785398]
21 | move_group: 'panda_manipulator'
22 | #zero_joints_goal: [ 1 ]
23 |
24 | debug:
25 | # front
26 | #pose: [0.2, 0., 0.6, 0.73237, 0.053969, 0.67814, -0.029085]
27 | # upward
28 | pose: [0.2, 0., 0.8, 0.0, 0.0, 0.0, 1.0]
29 | # downward
30 | #pose: [0.15, 0.05, 0.5, 1.0, 0., 0., 0.]
31 | axis: 'X'
32 | points: 1
33 | null_space_samples: 100
34 |
35 | IKNet:
36 |
37 | pretrained_weights: "https://drive.google.com/uc?export=download&id=1y99BsBXOUBBDVp0U723Mz1Ma2SnEkl7J"
38 |
39 | position_weight: 16.
40 | orientation_weight: 2.
41 |
42 | training:
43 | batch_size: 100
44 | lr: 0.00024
45 |
46 | architecture:
47 | layers: [1370, 880, 2980, 1000, 2710, 2290, 880]
48 | nbr_tanh: 1
49 | activation: "GELU"
50 |
51 | FKNet:
52 | training:
53 | batch_size: 700
54 | lr: 0.0001
55 |
56 | architecture:
57 | layers: [1900, 2700, 3000, 2900, 450, 60, 10, 160]
58 | nbr_tanh: 3
59 | activation: "GELU"
--------------------------------------------------------------------------------
/optimize.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from optimize import FKOptimizer, MLPOptimizer
3 |
4 |
5 | if __name__ == '__main__':
6 | parser = argparse.ArgumentParser(
7 | description="Optuna hyperparameter search for the CycleIK networks")
8 | parser.add_argument("--robot", type=str, default="nicol", help="Robot model IK is trained for")
9 | parser.add_argument("--chain", type=str, default="right_arm", help="Robot model Kinematic Chain")
10 | parser.add_argument("--network", type=str, default="MLP", help="Network architecture")
11 | parser.add_argument("--study_name", type=str, default="", help="Optuna study name; overrides the generated default")
12 | parser.add_argument("--trials", default=100, type=int, metavar="N", help="number of total trials in optimizer")
13 | parser.add_argument("--epochs", default=10, type=int, metavar="N", help="number of training epochs per trial")
14 | parser.add_argument("--gpu", type=str, default="0", help="GPU used for training")
15 | parser.add_argument("--autoencoder", action="store_true", help="Enables autoencoder")
16 | parser.add_argument("--two-stage", action="store_true", help="Enables two-stage training for autoencoder")
17 | parser.add_argument("--db", type=str, default="ik_optimizer_results.db", help="SQLite database that stores the Optuna study")
18 | parser.add_argument("--add_seed", action="store_true", help="Adds a manual seed for reproducibility")
19 | parser.add_argument("--compile", action="store_true", help="Enables pytorch compiling")
20 | opt_args = parser.parse_args()
21 |
22 | optimizer = None
23 |
24 | if opt_args.network == "MLP":
25 | optimizer = MLPOptimizer(opt_args)
26 | elif opt_args.network == "FK":
27 | optimizer = FKOptimizer(opt_args)
28 | else: parser.error(f"Unknown --network value: {opt_args.network}")
29 | optimizer.optimize()
30 |
--------------------------------------------------------------------------------
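A typical invocation, matching the flags above, would be `python optimize.py --robot nicol --chain right_arm --network MLP --trials 100 --epochs 10 --gpu 0`; the results accumulate in the SQLite database given by --db and can be inspected afterwards with optimize/load_optuna_result.py.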
/optimize/optimize.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | from numpy import array, array_equal, allclose
5 | import numpy as np
6 | import torch.backends.cudnn as cudnn
7 | import torch.utils.data
8 | import torch.utils.data.distributed
9 | import torchvision.transforms as transforms
10 | import torchvision.utils as vutils
11 | from tqdm import tqdm
12 | from scipy.spatial.transform import Rotation as R
13 |
14 | from cycleik_pytorch import IKDataset, GenericGenerator, GenericDiscriminator
15 | from cycleik_pytorch import load_config, renormalize_pose, normalize_pose, slice_fk_pose, renormalize_joint_state
16 | import pytorch_kinematics as pk
17 | import time
18 | import matplotlib.pyplot as plt
19 | import pandas as pd
20 | from abc import ABC, abstractmethod
21 |
22 | class BaseOptimizer(ABC):
23 |
24 | def __init__(self, args):
25 | self.args = args
26 | #self.args.compile = True
27 | self.robot = args.robot
28 | self.config = load_config(self.robot)[f'{args.chain}']
29 |
30 | try:
31 | os.makedirs("optuna")
32 | except OSError:
33 | pass
34 |
35 | try:
36 | os.makedirs(f"optuna/{self.robot}")
37 | except OSError:
38 | pass
39 |
40 | self.train_data = self.config["train_data"]
41 | self.test_data = self.config["test_data"]
42 | self.train_dataset, self.test_dataset = self.load_data(self.train_data, self.test_data, self.robot, self.config)
43 | self.args.cuda = True
44 | self.args.decay_epochs = 0
45 | self.args.fk_lr = None
46 |
47 | @abstractmethod
48 | def train_once(self, trial):
49 | pass
50 |
51 | @abstractmethod
52 | def optimize(self):
53 | pass
54 |
55 | def load_data(self, train_data, test_data, robot, config):
56 | train_dataset = IKDataset(root=train_data, robot=robot, config=config, mode="train")
57 | test_dataset = IKDataset(root=test_data, robot=robot, config=config, mode="test")
58 | return train_dataset, test_dataset
--------------------------------------------------------------------------------
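BaseOptimizer leaves train_once and optimize abstract; the concrete subclasses (optimize_fk.py, optimize_mlp.py, not shown in full here) fill them in with Optuna. A minimal sketch of the intended pattern, with a hypothetical search space:

    import optuna
    from optimize.optimize import BaseOptimizer

    class SketchOptimizer(BaseOptimizer):
        def train_once(self, trial):
            # Hypothetical search space; the real subclasses also tune the architecture.
            lr = trial.suggest_float("lr", 1e-5, 1e-3, log=True)
            batch_size = trial.suggest_int("batch_size", 100, 700, step=100)
            # ... train for self.args.epochs epochs on self.train_dataset ...
            return 0.0  # validation error for Optuna to minimize

        def optimize(self):
            study = optuna.create_study(direction="minimize")
            study.optimize(self.train_once, n_trials=self.args.trials)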
/test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from test import MLPTester, FKTester, MoveitTester
3 |
4 |
5 | if __name__ == '__main__':
6 | parser = argparse.ArgumentParser(
7 | description="Evaluation entry point for trained CycleIK models")
8 | parser.add_argument("--cuda", action="store_true", help="Enables cuda")
9 | parser.add_argument("--test_moveit", action="store_true", help="Runs the MoveIt-based evaluation")
10 | parser.add_argument("--use_ga", action="store_true", help="Enables genetic-algorithm post-optimization")
11 | parser.add_argument("--use_optimizer", action="store_true", help="Enables optimizer-based post-processing")
12 | parser.add_argument("--manualSeed", type=int,
13 | help="Seed for initializing training. (default:none)")
14 | parser.add_argument("--robot", type=str, default="nicol", help="Robot model IK is trained for")
15 | parser.add_argument("--gpu", type=int, default=0, help="GPU used for testing")
16 | parser.add_argument("--network", type=str, default="", help="Network under test (MLP or FK)")
17 | parser.add_argument("--core-model", type=str, default="nicol", help="Core model used for fine-tuning")
18 | parser.add_argument("--autoencoder", action="store_true", help="Enables learned FK")
19 | parser.add_argument("--two-stage", action="store_true", help="Enables two-stage learned FK training")
20 | parser.add_argument("--finetune", action="store_true", help="Enables fine-tuning evaluation")
21 | parser.add_argument("--chain", type=str, default="right_arm", help="Robot model Kinematic Chain")
22 | parser.add_argument("--ik_name", type=str, default="bioik", help="Name of the IK solver under evaluation")
23 |
24 | #print(args)
25 |
26 | test_args = parser.parse_args()
27 |
28 | tester = None
29 |
30 | if test_args.network == "MLP":
31 | tester = MLPTester(test_args)
32 | elif test_args.network == "FK":
33 | tester = FKTester(test_args)
34 | elif test_args.test_moveit:
35 | tester = MoveitTester(test_args)
36 | else: parser.error("select --network MLP, --network FK, or --test_moveit")
37 | tester.test()
38 |
--------------------------------------------------------------------------------
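Typical invocations, matching the branches above: `python test.py --robot nicol --chain right_arm --network MLP --cuda` for the MLP tester, or `python test.py --robot nicol --test_moveit` for the MoveIt-based comparison.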
/data_samplers/nicol_data_vis.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 |
5 | import random
6 | import time
7 |
8 | import pathlib
9 | import xml.etree.ElementTree
10 | from moveit_msgs.msg import RobotState
11 | from moveit_msgs.srv import GetPositionFK
12 | from sensor_msgs.msg import JointState
13 | from geometry_msgs.msg import Pose
14 | import std_msgs.msg
15 | from visualization_msgs.msg import Marker, MarkerArray
16 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
17 | from std_msgs.msg import Header, ColorRGBA
18 | import cv2 as cv
19 |
20 | import rospy
21 | import pickle
22 | from tqdm import tqdm
23 | import os
24 |
25 | use_platform = False
26 | timeout = 15
27 |
28 | rospy.init_node('ik_data_sampler', anonymous=True)
29 |
30 | """ setup """
31 | rospy.wait_for_service('moveit/compute_fk')
32 | calculate_fk = rospy.ServiceProxy('moveit/compute_fk', GetPositionFK)
33 |
34 | marker_publisher = rospy.Publisher("moveit/visualization_marker_array", MarkerArray, queue_size=2)
35 |
36 | def main():
37 | root = "results_nicol_1000_4"
38 |
39 | data_path = str(os.getcwd())
40 | print(data_path)
41 |
42 | with open(data_path + "/data/" + root + ".p", 'rb') as f:
43 | loaded_values = pickle.load(f)
44 |
45 | marker_array = MarkerArray()
46 | markers = []
47 |
48 | for i in tqdm(range(len(loaded_values[0]))):
49 | pose = np.array(loaded_values[0][i], dtype=np.float32)
50 | js = np.array(loaded_values[1][i], dtype=np.float32)
51 |
52 | # print(pose)
53 | marker = Marker(
54 | type=Marker.SPHERE,
55 | action=Marker.ADD,
56 | id=i,
57 | lifetime=rospy.Duration(),
58 | pose=Pose(Point(pose[0], pose[1], pose[2]), Quaternion(0, 0, 0, 1)),
59 | scale=Vector3(0.001, 0.001, 0.001),
60 | header=Header(frame_id='world'),
61 | color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
62 | # lifetime=0,
63 | frame_locked=False)
64 |
65 | marker_array.markers.append(marker)
66 |
67 | #marker_publisher.publish(marker)
68 | #rospy.sleep(0.00001)
69 |
70 | marker_publisher.publish(marker_array)
71 |
72 |
73 | if __name__ == '__main__':
74 | main()
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .nox/
42 | .coverage
43 | .coverage.*
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | .hypothesis/
49 | .pytest_cache/
50 |
51 | # Translations
52 | *.mo
53 | *.pot
54 |
55 | # Django stuff:
56 | *.log
57 | local_settings.py
58 | db.sqlite3
59 |
60 | # Flask stuff:
61 | instance/
62 | .webassets-cache
63 |
64 | # Scrapy stuff:
65 | .scrapy
66 |
67 | # Sphinx documentation
68 | docs/_build/
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Jupyter Notebook
74 | .ipynb_checkpoints
75 |
76 | # IPython
77 | profile_default/
78 | ipython_config.py
79 |
80 | # pyenv
81 | .python-version
82 |
83 | # celery beat schedule file
84 | celerybeat-schedule
85 |
86 | # SageMath parsed files
87 | *.sage.py
88 |
89 | # Environments
90 | .env
91 | .venv
92 | env/
93 | venv/
94 | ENV/
95 | env.bak/
96 | venv.bak/
97 |
98 | # Spyder project settings
99 | .spyderproject
100 | .spyproject
101 |
102 | # Rope project settings
103 | .ropeproject
104 |
105 | # mkdocs documentation
106 | /site
107 |
108 | # mypy
109 | .mypy_cache/
110 | .dmypy.json
111 | dmypy.json
112 |
113 | # Pyre type checker
114 | .pyre/
115 |
116 | # IDE file
117 | .idea
118 | .vscode
119 |
120 | # Mac file
121 | .DS_Store
122 |
123 | # custom
124 | /outputs/
125 | /results/
126 |
127 | /data/
128 | nicol_venv/
129 |
130 | /src/
131 | /fk_0.3mm_ik_0.5mm/
132 | /GAN_0.2mm_KL_div/
133 | /GAN_0.5mm_variance/
134 |
135 | /models/
136 | /weights/
137 | /optuna/
138 | /img/
139 | /tb_logs/
140 |
--------------------------------------------------------------------------------
/train/mlp_multi_run.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2024. Jan-Gerrit Habekost. GNU General Public License. https://www.gnu.org/licenses/gpl-3.0.html.
2 | import torch.utils.data
3 | from cycleik_pytorch import DecayLR, IKDataset, GenericNoisyGenerator
4 | import os
5 | import random
6 | import pickle
7 | import torch.backends.cudnn as cudnn
8 | import torch.utils.data
9 | from tqdm import tqdm
10 | import optuna
11 | from cycleik_pytorch import DecayLR, IKDataset, GenericGenerator
12 | from cycleik_pytorch import weights_init, load_config, slice_fk_pose, normalize_pose, renormalize_pose, renormalize_joint_state
13 | import pytorch_kinematics as pk
14 | from .train import BaseTrainer
15 | import copy
16 | import torch.multiprocessing as mp
17 | from train import MLPTrainer
18 | import time
19 | #torch.multiprocessing.set_start_method('forkserver', force=True)
20 |
21 | def train_model(gpu_queue, run_queue, args):
22 | gpu = gpu_queue.get(block=True)
23 | print(f'gpu: {gpu}')
24 |
25 | keep_running = run_queue.qsize() > 0
26 |
27 | while keep_running:
28 | next_suffix = run_queue.get(block=False)  # note: qsize() is only advisory; another worker may drain the queue first
29 | temp_args = copy.deepcopy(args)
30 | temp_args.suffix = copy.deepcopy(next_suffix)
31 | temp_args.gpu = copy.deepcopy(gpu)
32 | print(f'next suffix: {next_suffix}')
33 | del next_suffix
34 |
35 | temp_trainer = MLPTrainer(temp_args)
36 | temp_trainer.train()
37 |
38 | keep_running = run_queue.qsize() > 0
39 |
40 |
41 | del gpu
42 | # del next_suffix
43 |
44 | class MLPMultiRun():
45 |
46 | def __init__(self, args):
47 | self.args = args
48 | self.runs = args.runs
49 | self.gpus = self.args.gpus
50 | print(f'gpus: {self.gpus}')
51 | self.suffixes = []
52 | for i in range(self.runs):
53 | self.suffixes.append(f'IROS_train_run_{i}')
54 |
55 |
56 |
57 | def run(self):
58 | ctx = mp.get_context('spawn')
59 | gpu_queue = ctx.Queue()
60 | run_queue = ctx.Queue()
61 | for gpu in self.gpus:
62 | gpu_queue.put(gpu)
63 | for suffix in self.suffixes:
64 | run_queue.put(suffix)
65 |
66 | print(gpu_queue)
67 |
68 | processes = []
69 |
70 | for i in range(len(self.gpus)):
71 | process = ctx.Process(target=train_model, args=(gpu_queue, run_queue, self.args))
72 | process.start()
73 | processes.append(process)
74 | for p in processes:
75 | p.join()
--------------------------------------------------------------------------------
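MLPMultiRun fans the same training configuration out over several GPUs: one spawned worker per GPU pulls run suffixes from a shared queue until it is drained. A hedged driving sketch; `runs` and `gpus` are the only fields MLPMultiRun reads itself, and the hypothetical args object would also need every flag MLPTrainer expects:

    from types import SimpleNamespace
    from train import MLPMultiRun

    # Hypothetical argument object (MLPTrainer consumes the remaining fields).
    args = SimpleNamespace(runs=4, gpus=[0, 1], robot="nicol", chain="right_arm")

    if __name__ == '__main__':   # required for the 'spawn' start method
        MLPMultiRun(args).run()  # two workers share four training runs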
/visualize/visualize_grasping_alt.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2024. Jan-Gerrit Habekost. GNU General Public License. https://www.gnu.org/licenses/gpl-3.0.html.
2 | # plot adapted from a matplotlib bar-chart example (demo data source: https://allisonhorst.github.io/palmerpenguins/)
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import matplotlib.colors as mcolors
7 |
8 | font = {'weight' : 'bold',
9 | 'size' : 22}
10 |
11 | SMALL_SIZE = 10
12 | MEDIUM_SIZE = 14
13 | BIGGER_SIZE = 11
14 | another_size=12
15 |
16 | plt.rc('font', size=another_size) # controls default text sizes
17 | #plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
18 | plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
19 | plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
20 | plt.rc('ytick', labelsize=14) # fontsize of the tick labels
21 | plt.rc('legend', fontsize=another_size) # legend fontsize
22 | plt.rc('figure', titlesize=MEDIUM_SIZE)
23 |
24 |
25 |
26 | category_names = ("Success", "Slipped", "Not Grasped", "Detection", "LLM")#, "Slipped")#, "Lift")
27 | results = {
28 | 'NICO': (82, 2, 8, 8, 0),#, np.array([84,1 ])),
29 | 'NICOL': (72, 15, 7, 4, 2)#, np.array([87])),
30 | }
31 |
32 | #category_names = ['Strongly disagree', 'Disagree',
33 | # 'Neither agree nor disagree', 'Agree', 'Strongly agree']
34 | #results = {
35 | # 'Question 1': [10, 15, 17, 32, 26],
36 | # 'Question 2': [26, 22, 29, 10, 13],
37 | # 'Question 3': [35, 37, 7, 2, 19],
38 | # 'Question 4': [32, 11, 9, 15, 33],
39 | # 'Question 5': [21, 29, 5, 5, 40],
40 | # 'Question 6': [8, 19, 5, 30, 38]
41 | #}
42 |
43 |
44 | labels = list(results.keys())
45 | data = np.array(list(results.values()))
46 | data_cum = data.cumsum(axis=1)
47 | #category_colors = plt.colormaps['coolwarm'](
48 | # np.linspace(0.15, 0.85, data.shape[1]))
49 | category_colors = ['seagreen', 'cornflowerblue', 'firebrick', 'chocolate', 'gold']
50 | fig, ax = plt.subplots(figsize=(9.2, 4))
51 | ax.invert_yaxis()
52 | ax.xaxis.set_visible(True)
53 | ax.set_xlim(0, np.sum(data, axis=1).max())
54 | #ax.set_yticks([0., 1.], labels=labels)
55 | for i, (colname, color) in enumerate(zip(category_names, category_colors)):
56 | widths = data[:, i]
57 | starts = data_cum[:, i] - widths
58 | if i < 4:
59 | rects = ax.barh(labels, widths, left=starts, height=0.75,
60 | label=colname, color=color, align='center', alpha=0.75)
61 | else:
62 | rects = ax.barh(('NICOL'), 2, left=starts[1], height=0.75,
63 | label=colname, color=color, align='center', alpha=0.75)
64 | r, g, b = mcolors.to_rgb(color)
65 | text_color = 'white' if i != 4 else 'darkslategray'
66 | print(rects)
67 | ax.bar_label(rects, label_type='center', color=text_color)
68 |
69 | print(len(category_names))
70 | ax.legend(ncols=len(category_names), bbox_to_anchor=(0.5, 1.20),
71 | loc='upper center', fontsize=13)
72 | plt.tight_layout()
73 | #survey(results, category_names)
74 | #plt.show()
75 |
76 |
77 | # Add some text for labels, title and custom x-axis tick labels, etc.
78 | #ax.set_ylabel('Amount')
79 | #ax.set_title('Grasp')
80 | #species=list(category_names)
81 | #species.append('Slipped')
82 | #species.append('Lift')
83 | #species=tuple(species)
84 | #x = np.arange(len(species))
85 | #ax.set_xticks(x + width, species, weight = 'bold')
86 |
87 | #ax.set_ylim(0, 100)
88 |
89 | #ax.xaxis.label.set_fontweight('bold')
90 | ax.yaxis.label.set_fontweight('bold')
91 | fig.savefig('./img/grasping_IROS/nico_nicol_grasp_success_alt.png', format='png', dpi=300)
92 | plt.show()
--------------------------------------------------------------------------------
/visualize/visualize_grasping.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2024. Jan-Gerrit Habekost. GNU General Public License. https://www.gnu.org/licenses/gpl-3.0.html.
2 | # plot adapted from a matplotlib grouped-bar example (demo data source: https://allisonhorst.github.io/palmerpenguins/)
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 |
7 |
8 | font = {'weight' : 'bold',
9 | 'size' : 22}
10 |
11 | SMALL_SIZE = 10
12 | MEDIUM_SIZE = 14
13 | BIGGER_SIZE = 11
14 | another_size=12
15 |
16 | plt.rc('font', size=another_size) # controls default text sizes
17 | #plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
18 | plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
19 | plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
20 | plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
21 | plt.rc('legend', fontsize=another_size) # legend fontsize
22 | plt.rc('figure', titlesize=MEDIUM_SIZE)
23 |
24 |
25 | species = ("Success", "Missed", "Detection", "LLM")#, "Slipped")#, "Lift")
26 | penguin_means = {
27 | 'NICO': (82, 8, 8, 0),#, np.array([84,1 ])),
28 | 'NICOL': (72, 7, 4, 2)#, np.array([87])),
29 | }
30 |
31 | slipped_nico = (2)
32 | slipped_nicol = (15)
33 |
34 | upper = (2, 15 )
35 | lower = (82, 72)
36 |
37 | x = np.arange(len(species)) # the label locations
38 | width = 0.25 # the width of the bars
39 | multiplier = 0.5
40 |
41 | fig, ax = plt.subplots(layout='constrained')
42 | color = ['cornflowerblue', 'sandybrown']
43 |
44 | even=True
45 |
46 | count=0
47 | full_offset = 0
48 | for attribute, measurement in penguin_means.items():
49 | offset = width * multiplier
50 | color_index = 0 if even else 1
51 | print(attribute)
52 | print(measurement)
53 |
54 | rects = ax.bar(x + offset, measurement, width, label=attribute, color=color[color_index], edgecolor="black")
55 | #rects[4].linestyle='--'
56 | if even:
57 | even = False
58 | else:
59 | even = True
60 | ax.bar_label(rects, padding=3)
61 | multiplier += 1
62 | full_offset = width * multiplier
63 | count +=1
64 |
65 | ax.legend(loc='upper right', ncols=1, bbox_to_anchor=(0.83, 1.0))
66 |
67 | rects = ax.bar(3.5 + full_offset, slipped_nico, width, color=color[0], edgecolor="black",linestyle='--', alpha=0.8)
68 | ax.bar_label(rects, padding=3)
69 | rects = ax.bar(3.75 + full_offset, slipped_nicol, width,color=color[1], edgecolor="black",linestyle='--', alpha=0.8)
70 | ax.bar_label(rects, padding=3)
71 |
72 | rects = ax.bar(4.5 + full_offset, lower[0], width, color=color[0], edgecolor="black",bottom=[0] )
73 | ax.bar_label(rects, padding=-15)
74 | rects = ax.bar(4.5 + full_offset, upper[0], width,color=color[0], edgecolor="black",bottom=lower[0] ,linestyle='--', alpha=0.8)
75 | ax.bar_label(rects, padding=3)
76 | rects = ax.bar(4.75 + full_offset, lower[1], width, color=color[1], edgecolor="black",bottom=[0] )
77 | ax.bar_label(rects, padding=-15)
78 | rects = ax.bar(4.75 + full_offset, upper[1], width, color=color[1], edgecolor="black",bottom=lower[1],linestyle='--' , alpha=0.8)
79 | ax.bar_label(rects, padding=3)
80 | ax.vlines(x=[4.75], ymin=0, ymax=100, color='black', label='test lines', ls='--')
81 |
82 |
83 | # Add some text for labels, title and custom x-axis tick labels, etc.
84 | ax.set_ylabel('Amount')
85 | #ax.set_title('Grasp')
86 | species=list(species)
87 | species.append('Slipped')
88 | species.append('Lift')
89 | species=tuple(species)
90 | x = np.arange(len(species))
91 | ax.set_xticks(x + width, species, weight = 'bold')
92 |
93 | ax.set_ylim(0, 100)
94 |
95 | #ax.xaxis.label.set_fontweight('bold')
96 | ax.yaxis.label.set_fontweight('bold')
97 | fig.savefig('./img/grasping_IROS/nico_nicol_grasp_success.png', format='png', dpi=300)
98 | plt.show()
--------------------------------------------------------------------------------
/config/nicol.yaml:
--------------------------------------------------------------------------------
1 | right_arm:
2 |
3 | train_data: 'results_nicol_1400_5'
4 | test_data: 'results_nicol_140_5'
5 | val_data: 'results_nicol_200_5'
6 |
7 | robot_dof: 8
8 | limits:
9 | upper: [ 2.5, 1.8, 1.5, 2.9, 1.570796, 3.141592, 0.785398, 0.785398 ]
10 | lower: [ 0., -1.5, -2.25, -2.9, -1.570796, -3.141592, -0.785398, -0.785398 ]
11 |
12 | workspace:
13 | upper: [ 0.85, 0.48, 1.4 ]
14 | lower: [ 0.2, -0.9, 0.8 ]
15 |
16 |
17 | robot_urdf: './assets/urdf/NICOL.urdf'
18 | robot_eef: 'r_laser'
19 | base_link: world
20 | #zero_joints_goal: [ 3, 4, 5]
21 |
22 | joint_name: ["r_joint1", "r_joint2", "r_joint3", "r_joint4", "r_joint5", "r_joint6", "r_joint7", "r_joint8"]
23 | home_js: [1.57, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
24 | move_group: 'r_arm'
25 |
26 | debug:
27 | pose: [0.5, -0.2, 1.0, 0., 0., 0.75, 0.75]
28 | axis: 'Y'
29 | points: 15
30 | null_space_samples: 30
31 |
32 | FKNet:
33 | training:
34 | batch_size: 150
35 | lr: 0.00051
36 |
37 | architecture:
38 | layers: [ 3410, 2210, 2790, 2750, 1570, 1320, 1340, 340, 1990]
39 | nbr_tanh: 1
40 | activation: "GELU"
41 |
42 | IKNet:
43 | pretrained_weights: "https://drive.google.com/uc?export=download&id=1oSFayrzvXtVsbzSp7HS8vVJ83EL2-Ggn"
44 |
45 | position_weight: 9.
46 | orientation_weight: 2.
47 | zero_controller_weight: 2.
48 |
49 | training:
50 | batch_size: 100
51 | lr: 0.00018
52 |
53 | architecture:
54 | layers: [ 2780, 3480, 1710, 2880, 1750, 1090, 1470 ]
55 | nbr_tanh: 1
56 | activation: "GELU"
57 |
58 | GAN:
59 | position_weight: 100.
60 | orientation_weight: 20.
61 | variance_weight: 80.
62 |
63 | training:
64 | batch_size: 300
65 | lr: 0.00019
66 |
67 | architecture:
68 | noise_vector_size: 8
69 | layers: [ 1180, 1170, 2500, 1290, 700, 970, 440, 770 ]
70 | nbr_tanh: 2
71 | activation: "GELU"
72 |
73 |
74 | left_arm:
75 |
76 | train_data: 'results_nicol_1400_left'
77 | test_data: 'results_nicol_140_left'
78 | val_data: 'results_nicol_140_left'
79 |
80 | robot_dof: 8
81 | limits:
82 | upper: [0., 1.8, 1.5, 2.9, 1.570796, 3.141592, 0.785398, 0.785398]
83 | lower: [-2.5, -1.5, -2.25, -2.9, -1.570796, -3.141592, -0.785398, -0.785398]
84 |
85 | workspace:
86 | upper: [0.848, 0.9, 1.4]
87 | lower: [0.2, -0.4781, 0.8]
88 |
89 |
90 | robot_urdf: './assets/urdf/NICOL.urdf'
91 | robot_eef: 'l_laser'
92 | base_link: world
93 |
94 | zero_joints_goal: [3, 4, 5]
95 |
96 | FKNet:
97 | training:
98 | batch_size: 700
99 | lr: 0.0001
100 |
101 | architecture:
102 | layers: [1900, 2700, 3000, 2900, 450, 60, 10, 160]
103 | nbr_tanh: 3
104 | activation: "GELU"
105 |
106 | IKNet:
107 | pretrained_weights: "https://drive.google.com/uc?export=download&id=1pgben2__06Ro44q9_3_JQpof5GmUvlEB"
108 |
109 | position_weight: 9.
110 | orientation_weight: 2.
111 | zero_controller_weight: [5000., 200., 5000.]
112 |
113 | training:
114 | batch_size: 100
115 | lr: 0.00018
116 |
117 | architecture:
118 | layers: [ 2780, 3480, 1710, 2880, 1750, 1090, 1470 ]
119 | nbr_tanh: 1
120 | activation: "GELU"
121 |
122 | GAN:
123 | position_weight: 10.
124 | orientation_weight: 2.
125 | variance_weight: 100.
126 |
127 | training:
128 | batch_size: 300
129 | lr: 0.00019
130 |
131 | architecture:
132 | noise_vector_size: 10
133 | layers: [ 1180, 1170, 2500, 1290, 700, 970, 440, 770 ]
134 | nbr_tanh: 2
135 | activation: "GELU"
136 |
--------------------------------------------------------------------------------
/cycleik_pytorch/Bezier.py:
--------------------------------------------------------------------------------
1 | """Bezier, a module for creating Bezier curves.
2 | Version 1.1, from < BezierCurveFunction-v1.ipynb > on 2019-05-02
3 |
4 | copied from https://github.com/tayalmanan28/Bezier
5 |
6 | Repo instruction was to copy the file into own repo
7 | """
8 |
9 | import numpy as np
10 |
11 | __all__ = ["Bezier"]
12 |
13 |
14 | class Bezier():
15 | def TwoPoints(t, P1, P2):
16 | """
17 | Returns a point between P1 and P2, parameterised by t.
18 | INPUTS:
19 | t float/int; a parameterisation.
20 | P1 numpy array; a point.
21 | P2 numpy array; a point.
22 | OUTPUTS:
23 | Q1 numpy array; a point.
24 | """
25 |
26 | if not isinstance(P1, np.ndarray) or not isinstance(P2, np.ndarray):
27 | raise TypeError('Points must be an instance of the numpy.ndarray!')
28 | if not isinstance(t, (int, float)):
29 | raise TypeError('Parameter t must be an int or float!')
30 |
31 | Q1 = (1 - t) * P1 + t * P2
32 | return Q1
33 |
34 | def Points(t, points):
35 | """
36 | Returns a list of points interpolated by the Bezier process
37 | INPUTS:
38 | t float/int; a parameterisation.
39 | points list of numpy arrays; points.
40 | OUTPUTS:
41 | newpoints list of numpy arrays; points.
42 | """
43 | newpoints = []
44 | #print("points =", points, "\n")
45 | for i1 in range(0, len(points) - 1):
46 | #print("i1 =", i1)
47 | #print("points[i1] =", points[i1])
48 |
49 | newpoints += [Bezier.TwoPoints(t, points[i1], points[i1 + 1])]
50 | #print("newpoints =", newpoints, "\n")
51 | return newpoints
52 |
53 | def Point(t, points):
54 | """
55 | Returns a point interpolated by the Bezier process
56 | INPUTS:
57 | t float/int; a parameterisation.
58 | points list of numpy arrays; points.
59 | OUTPUTS:
60 | newpoint numpy array; a point.
61 | """
62 | newpoints = points
63 | #print("newpoints = ", newpoints)
64 | while len(newpoints) > 1:
65 | newpoints = Bezier.Points(t, newpoints)
66 | #print("newpoints in loop = ", newpoints)
67 |
68 | #print("newpoints = ", newpoints)
69 | #print("newpoints[0] = ", newpoints[0])
70 | return newpoints[0]
71 |
72 | def Curve(t_values, points):
73 | """
74 | Returns a sequence of points interpolated by the Bezier process
75 | INPUTS:
76 | t_values list of floats/ints; a parameterisation.
77 | points list of numpy arrays; points.
78 | OUTPUTS:
79 | curve list of numpy arrays; points.
80 | """
81 |
82 | if not hasattr(t_values, '__iter__'):
83 | raise TypeError("`t_values` Must be an iterable of integers or floats, of length greater than 0 .")
84 | if len(t_values) < 1:
85 | raise TypeError("`t_values` Must be an iterable of integers or floats, of length greater than 0 .")
86 | if not isinstance(t_values[0], (int, float)):
87 | raise TypeError("`t_values` Must be an iterable of integers or floats, of length greater than 0 .")
88 |
89 | curve = np.array([[0.0] * len(points[0])])
90 | for t in t_values:
91 | #print("curve \n", curve)
92 | #print("Bezier.Point(t, points) \n", Bezier.Point(t, points))
93 |
94 | curve = np.append(curve, [Bezier.Point(t, points)], axis=0)
95 |
96 | #print("curve after \n", curve, "\n--- --- --- --- --- --- ")
97 | curve = np.delete(curve, 0, 0)
98 | #print("curve final \n", curve, "\n--- --- --- --- --- --- ")
99 | return curve
100 |
--------------------------------------------------------------------------------
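Bezier.Curve is the primitive behind the Cartesian trajectory generation used elsewhere in the repo (see generate_cubic_bezier_trajectory in visualize/moveit/visualize_nico_trajectory.py). A self-contained example:

    import numpy as np
    from cycleik_pytorch.Bezier import Bezier

    # Quadratic Bezier curve: start point, one control point, end point.
    points = np.array([[0.0, 0.0], [0.5, 1.0], [1.0, 0.0]])
    t_values = np.linspace(0.0, 1.0, 25)

    curve = Bezier.Curve(t_values, points)
    print(curve.shape)  # (25, 2): one 2-D point per t value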
/visualize/visualize_ik_loss.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import pickle
3 | import numpy as np
4 |
5 | # Set the axes title font size
6 | plt.rc('axes', labelsize=14)
7 | # Set the font size for x tick labels
8 | plt.rc('xtick', labelsize=12)
9 | # Set the font size for y tick labels
10 | plt.rc('ytick', labelsize=12)
11 | # Set the legend font size
12 | plt.rc('legend', fontsize=12)
13 | # Set the font size of the figure title
14 | plt.rc('figure', titlesize=12)
15 |
16 | def main():
17 | robot = "nicol"
18 | #data = pickle.load(open(f"./weights/nicol/TRAINING_SAVES/100_epochs/1400/train_ik_loss_with_kinematics.p", 'rb'))
19 | data = pickle.load(open(f"/home/jan-gerrit/repositories/cycleik/results/train_ik_loss_1000.p", 'rb'))
20 | time_series = []
21 | small_time_series = [1]
22 | print(len(data))
23 | print(data[0])
24 | print(len(data[0]))
25 | print(data[1])
26 | print(len(data[1]))
27 | for i in range(len(data[0])):
28 | #if i == 0: continue
29 | time_series.append(i)
30 |
31 | for i in range(int(len(data[0]) / 10)):
32 | #if i == 0: continue
33 | small_time_series.append((i+1) * 10)
34 |
35 | print(len(time_series))
36 |
37 | fig, ax = plt.subplots()
38 | ax.set_xlabel('Epoch')
39 | ax.set_ylabel('Error')
40 | kinematic_loss = ax.plot(time_series, data[0][:])
41 | #kinematic_loss = ax.plot(time_series, data[0])
42 | kinematic_loss[0].set_label('Kinematic Loss (MAE)')
43 | kinematic_loss = ax.plot(time_series, data[2][:])
44 | #kinematic_loss = ax.plot(small_time_series, data[2])
45 | kinematic_loss[0].set_label('Validation Loss (MAE)')
46 | # kinematic_loss = ax.plot(time_series, data[1][1:])
47 | #kinematic_loss = ax.plot(time_series, data[1])
48 | #kinematic_loss[0].set_label('Zero-Controller Loss (MSE)')
49 | ax.legend(loc='upper right')#, bbox_to_anchor=(1.1, 0.5))
50 | #ax.legend(loc='lower right', bbox_to_anchor=(0.35, -1.15))
51 | ax.xaxis.label.set_fontweight('bold')
52 | ax.yaxis.label.set_fontweight('bold')
53 | #ax.legend()
54 | plt.yticks(np.arange(0.0, 0.025, 0.002))
55 | #plt.yticks(np.arange(0.0, 0.2, 0.02))
56 | plt.annotate('train: %0.5f' % data[0][-1], xy=(1, data[0][-1]), xytext=(-81, 22),
57 | xycoords=('axes fraction', 'data'), textcoords='offset points')
58 | plt.annotate('val: %0.5f' % data[2][-1], xy=(1, data[2][-1]), xytext=(-81, 14),
59 | xycoords=('axes fraction', 'data'), textcoords='offset points')
60 | plt.tight_layout()
61 | plt.savefig('./img/losses/vis_ik_loss_zoom.png')
62 | plt.show()
63 |
64 | fig, ax = plt.subplots()
65 | ax.set_xlabel('Epoch')
66 | ax.set_ylabel('Error')
67 | #kinematic_loss = ax.plot(time_series, data[0][:])
68 | kinematic_loss = ax.plot(time_series, data[0])
69 | kinematic_loss[0].set_label('Kinematic Loss (MAE)')
70 | #kinematic_loss = ax.plot(small_time_series, data[2][:])
71 | kinematic_loss = ax.plot(time_series, data[2])
72 | kinematic_loss[0].set_label('Validation Loss (MAE)')
73 | # kinematic_loss = ax.plot(time_series, data[1][1:])
74 | kinematic_loss = ax.plot(time_series, data[1])
75 | kinematic_loss[0].set_label('Zero-Controller Loss (MSE)')
76 | ax.legend(loc='center right') # , bbox_to_anchor=(1.1, 0.5))
77 | # ax.legend(loc='lower right', bbox_to_anchor=(0.35, -1.15))
78 | ax.xaxis.label.set_fontweight('bold')
79 | ax.yaxis.label.set_fontweight('bold')
80 | # ax.legend()
81 | #plt.yticks(np.arange(0.0, 0.025, 0.002))
82 | plt.yticks(np.arange(0.0, 0.2, 0.02))
83 | plt.annotate('train: %0.5f' % data[0][-1], xy=(1, data[0][-1]), xytext=(-81, 21),
84 | xycoords=('axes fraction', 'data'), textcoords='offset points')
85 | plt.annotate('val: %0.5f' % data[2][-1], xy=(1, data[2][-1]), xytext=(-81, 8),
86 | xycoords=('axes fraction', 'data'), textcoords='offset points')
87 | plt.tight_layout()
88 | plt.savefig('./img/losses/vis_ik_loss.png')
89 | plt.show()
90 |
91 | if __name__ == '__main__':
92 | main()
--------------------------------------------------------------------------------
/config/nico.yaml:
--------------------------------------------------------------------------------
1 | right_arm:
2 | train_data: 'results_nico_1250_6'
3 | test_data: 'results_nico_125_6'
4 | val_data: 'results_nico_200_6'
5 |
6 | robot_dof: 6
7 | limits:
8 | upper: [0.8, 3.142, 0.314, 0.0, 1.571, 0.872665]
9 | lower: [-0.8, -3.142, -1.57, -1.745, -1.571, 0.0]
10 |
11 | workspace:
12 | upper: [0.508, 0.2943, 1.23]
13 | lower: [-0.492, -0.5742, 0.342]
14 |
15 | robot_urdf: './assets/urdf/nico.urdf'
16 | robot_eef: 'right_tcp'
17 | base_link: 'torso:11'
18 |
19 | joint_name: ["r_shoulder_z", "r_shoulder_y", "r_arm_x", "r_elbow_y", "r_wrist_z", "r_wrist_x"]
20 | home_js: [ -0.157, 0.0, -1.57, -1.57, -1.39, 0.0 ]
21 | move_group: 'right_arm'
22 | #
23 |
24 | #################### Full #######################
25 | FKNet:
26 | training:
27 | batch_size: 700
28 | lr: 0.0001
29 |
30 | architecture:
31 | layers: [1900, 2700, 3000, 2900, 450, 60, 10, 160]
32 | nbr_tanh: 3
33 | activation: "GELU"
34 |
35 | IKNet:
36 |
37 | pretrained_weights: "https://drive.google.com/uc?export=download&id=1C_sB8_NDgtqIVJ3Ymen2Xkww8zCifIfX"
38 |
39 | position_weight: 7.
40 | orientation_weight: 1.
41 |
42 | training:
43 | batch_size: 300
44 | lr: 0.00037
45 |
46 | architecture:
47 | layers: [ 2270, 560, 1100, 1990, 2590, 870 ]
48 | nbr_tanh: 1
49 | activation: "GELU"
50 |
51 |
52 | left_arm:
53 | train_data: 'results_nico_1000_left_arm'
54 | test_data: 'results_nico_100_left_arm'
55 | val_data: 'results_nico_200_left_arm'
56 |
57 | robot_dof: 6
58 | limits:
59 | upper: [ 0.8, 3.142, 1.57, 1.745, 1.571, 0]
60 | lower: [ -0.8, -3.142, -0.314, 0.0, -1.571, -0.872665 ]
61 |
62 | workspace:
63 | upper: [0.509, 0.573, 1.229]
64 | lower: [-0.488, -0.289, 0.343]
65 |
66 | robot_urdf: './assets/urdf/nico.urdf'
67 | robot_eef: 'left_tcp'
68 | base_link: 'torso:11'
69 |
70 | joint_name: ["l_shoulder_z", "l_shoulder_y", "l_arm_x", "l_elbow_y", "l_wrist_z", "l_wrist_x"]
71 |
72 | debug:
73 | # front
74 | #pose: [0.2, 0., 0.6, 0.73237, 0.053969, 0.67814, -0.029085]
75 | # upward
76 | pose: [ 0.23185, -0.109, 0.75, 0.7, 0.0, 0., 0.7]
77 | # downward
78 | #pose: [0.15, 0.05, 0.5, 1.0, 0., 0., 0.]
79 | axis: 'Y'
80 | points: 10
81 | #################### Full #######################
82 | FKNet:
83 | training:
84 | batch_size: 700
85 | lr: 0.0001
86 |
87 | architecture:
88 | layers: [1900, 2700, 3000, 2900, 450, 60, 10, 160]
89 | nbr_tanh: 3
90 | activation: "GELU"
91 |
92 | IKNet:
93 |
94 | position_weight: 7.
95 | orientation_weight: 1.
96 |
97 | training:
98 | batch_size: 300
99 | lr: 0.00037
100 |
101 | architecture:
102 | layers: [ 2270, 560, 1100, 1990, 2590, 870 ]
103 | nbr_tanh: 1
104 | activation: "GELU"
105 |
106 |
107 | right_leg:
108 | train_data: 'results_nico_1000_right_leg'
109 | test_data: 'results_nico_10_right_leg'
110 | val_data: 'results_nico_100_right_leg'
111 |
112 | robot_dof: 6
113 | limits:
114 | upper: [0.6981, 1.2217, 0.5585, 1.2915, 0.7853, 0.7853]
115 | lower: [-0.6981, -0.4188, -1.5707, -1.5358, -0.7853, -0.7853]
116 |
117 | workspace:
118 | upper: [0.5027, 0.3971, 0.6776]
119 | lower: [-0.4262, -0.3806, -0.0063]
120 |
121 | robot_urdf: './assets/urdf/nico.urdf'
122 | robot_eef: 'right_foot:11'
123 | base_link: 'torso:11'
124 |
125 | #################### Full #######################
126 | FKNet:
127 | training:
128 | batch_size: 700
129 | lr: 0.0001
130 |
131 | architecture:
132 | layers: [1900, 2700, 3000, 2900, 450, 60, 10, 160]
133 | nbr_tanh: 3
134 | activation: "GELU"
135 |
136 | IKNet:
137 | training:
138 | batch_size: 250
139 | lr: 0.0002
140 |
141 | architecture:
142 | layers: [ 2760, 1040, 2910, 3390, 2690, 340, 710, 1170, 690 ]
143 | nbr_tanh: 1
144 | activation: "GELU"
145 |
146 | ###################################################
--------------------------------------------------------------------------------
/visualize/moveit/visualize_nico_trajectory.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2024. Jan-Gerrit Habekost. GNU General Public License. https://www.gnu.org/licenses/gpl-3.0.html.
2 | import numpy as np
3 | import rospy
4 | import std_msgs.msg
5 | from visualization_msgs.msg import Marker
6 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
7 | from std_msgs.msg import Header, ColorRGBA
8 | from cycleik_pytorch import CycleIK
9 |
10 |
11 | rospy.init_node('nico_vis_node', anonymous=True)
12 | marker_publisher = rospy.Publisher("/visualization_marker", Marker, queue_size=100000)
13 | marker_publisher_id_counter = 0
14 |
15 | cycleik = CycleIK(robot='nico', chain='right_arm', verbose=True, cuda_device=1)
16 |
17 |
18 | #(0.157, 0.0, 1.57, 1.57, 1.39, 0.0)
19 | #(4, -10, 90, 93, 80, 0.0)
20 | def create_marker(target_point):
21 | return Marker(
22 | type=Marker.SPHERE,
23 | action=Marker.ADD,
24 | id=marker_publisher_id_counter,
25 | lifetime=rospy.Duration(),
26 | pose=Pose(Point(target_point[0], target_point[1], target_point[2]), Quaternion(0, 0, 0, 1)),
27 | scale=Vector3(0.033, 0.033, 0.033),
28 | header=Header(frame_id='world'),
29 | color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
30 | # lifetime=0,
31 | frame_locked=False)
32 |
33 | def main():
34 |     global marker_publisher_id_counter  # module-level marker id, incremented per published marker
35 |     start_pose = np.array([0.2, -0.3, 0.82, -0.70619, 0.0888, -0.049169, 0.70071])
36 |     # target_pose = np.array([Bx, -np.abs(By), 0.65, 0.93529, -0.11321, 0.18413, -0.2802])
37 |     target_pose = np.array([0.35, 0., 0.66, -0.8957, 0.1506, -0.2052, 0.366])
38 |     ctrl_point = np.array([np.array([0.3, -0.3, 0.75, -0.8957, 0.1506, -0.2052, 0.366]),
39 |                            np.array([0.35, 0., 0.68, -0.8957, 0.1506, -0.2052, 0.366])])
40 |     cartesian_path = cycleik.generate_cubic_bezier_trajectory(start_pose=start_pose, target_pose=target_pose,
41 |                                                                control_points=ctrl_point, points=25)
42 | 
43 |     for target in cartesian_path[:, :3]:
44 |         marker_publisher.publish(create_marker(target))
45 |         marker_publisher_id_counter += 1
46 |         rospy.sleep(0.01)
47 |
48 | #js_solution, fk_error, _, _ = cycleik.inverse_kinematics(cartesian_path, calculate_error=True)
49 | #
50 | #print(js_solution)
51 | #print(fk_error)
52 | #
53 | #for jc in js_solution:
54 | # if arm_control.is_right:
55 | # arm_control.set_pose(arm_target=tuple(jc), stay_time=0.1, wait_finished=True, move_time=0.5,
56 | # auto_resolve=False)
57 | # else:
58 | # jc = np.multiply(jc, np.array([-1, -1, -1, -1, -1, -1]))
59 | # arm_control.set_pose(arm_target=tuple(jc), stay_time=0.1, wait_finished=True, move_time=0.5,
60 | # auto_resolve=False)
61 | ## arm_control.set_pose(arm_target=tuple((0.157, -1.57, 1.57, 1.57, -1.39, 0.0)), stay_time=0, wait_finished=True, move_time=0.5)
62 | #time.sleep(5.0)
63 | #print(f"detected object: {detected_object}")
64 | #
65 | ## backward_ctrl_points= [np.arr]
66 | #action_future = arm_control.set_pose(hand_target=tuple((-1.3089969389957472, -1.3089969389957472)), stay_time=5,
67 | # wait_finished=True, move_time=0.5)
68 | #cartesian_path = cycleik.generate_cubic_bezier_trajectory(start_pose=target_pose, target_pose=start_pose,
69 | # control_points=ctrl_point, points=20)
70 | #js_solution, fk_error, _, _ = cycleik.inverse_kinematics(cartesian_path, calculate_error=True)
71 | #for jc in js_solution:
72 | # if arm_control.is_right:
73 | # arm_control.set_pose(arm_target=tuple(jc), stay_time=0.1, wait_finished=True, move_time=0.5,
74 | # auto_resolve=False)
75 | # else:
76 | # jc = np.multiply(jc, np.array([-1, -1, -1, -1, -1, -1]))
77 | # arm_control.set_pose(arm_target=tuple(jc), stay_time=0.1, wait_finished=True, move_time=0.5,
78 | # auto_resolve=False)
79 |
80 |
81 | if __name__ == '__main__':
82 | main()
--------------------------------------------------------------------------------
/test/test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | from numpy import array, array_equal, allclose
5 | import numpy as np
6 | import torch.backends.cudnn as cudnn
7 | import torch.utils.data
8 | import torch.utils.data.distributed
9 | import torchvision.transforms as transforms
10 | import torchvision.utils as vutils
11 | from tqdm import tqdm
12 | from scipy.spatial.transform import Rotation as R
13 |
14 | from cycleik_pytorch import IKDataset, GenericGenerator, GenericDiscriminator
15 | from cycleik_pytorch import load_config, renormalize_pose, normalize_pose, slice_fk_pose, renormalize_joint_state
16 | import pytorch_kinematics as pk
17 | import time
18 | import matplotlib.pyplot as plt
19 | import pandas as ps
20 | from abc import abstractmethod
21 |
22 |
23 | class BaseTester:
24 |
25 | def __init__(self, args):
26 | torch.cuda.empty_cache()
27 |
28 | if args.manualSeed is None:
29 | self.manualSeed = random.randint(1, 10000)
30 | else:
31 | self.manualSeed = args.manualSeed
32 | print("Random Seed: ", self.manualSeed)
33 | random.seed(self.manualSeed)
34 | torch.manual_seed(self.manualSeed)
35 |
36 | cudnn.benchmark = True
37 |
38 | if torch.cuda.is_available() and not args.cuda:
39 | print("WARNING: You have a CUDA device, so you should probably run with --cuda")
40 |
41 | self.finetune = args.finetune
42 | self.robot = args.robot
43 | self.network = args.network
44 | self.config = load_config(self.robot)[f'{args.chain}']
45 | self.autoencoder = args.autoencoder
46 | self.two_stage = args.two_stage
47 | self.val_data = self.config["val_data"]
48 | self.robot_dof = self.config["robot_dof"]
49 | self.robot_urdf = self.config["robot_urdf"]
50 | self.robot_eef = self.config["robot_eef"]
51 | self.core_model_config = load_config(args.core_model)
52 | self.core_model = args.core_model
53 |
54 | self.use_ga = args.use_ga
55 | self.use_optimizer = args.use_optimizer
56 |
57 | # Dataset
58 | self.load_dataset()
59 |
60 | self.device = torch.device(f"cuda:{args.gpu}" if args.cuda else "cpu")
61 |
62 | self.model = None
63 | self.fk_model = None
64 | self.create_model()
65 |
66 | # Load state dicts
67 | if not self.finetune:
68 | self.model.load_state_dict(
69 | torch.load(os.path.join("weights", str(args.robot), f"model_{args.network}_with_kinematics.pth"), map_location=self.device))
70 |
71 | # Set model mode
72 | self.model.eval()
73 | if self.autoencoder:
74 | self.fk_model.eval()
75 |
76 | self.progress_bar = tqdm(enumerate(self.dataloader), total=len(self.dataloader))
77 |
78 |
79 | self.samples = self.dataset.get_size()
80 |
81 |
82 |
83 | # rospy.init_node('ik_data_sampler', anonymous=True)
84 |
85 | """ setup """
86 |
87 | # marker_publisher = rospy.Publisher("/moveit/visualization_marker", Marker, queue_size=100000)
88 |
89 | chain = pk.build_serial_chain_from_urdf(open(self.robot_urdf).read(), self.robot_eef)
90 | self.chain = chain.to(dtype=torch.float32, device=self.device)
91 | single_renormalize_move, single_renormalize, workspace_renormalize_move, workspace_renormalize = self.dataset.get_norm_params()
92 |
93 | self.single_renormalize_move = torch.Tensor(single_renormalize_move).to(self.device)
94 | self.single_renormalize = torch.Tensor(single_renormalize).to(self.device)
95 | self.workspace_renormalize_move = torch.Tensor(workspace_renormalize_move).to(self.device)
96 | self.workspace_renormalize = torch.Tensor(workspace_renormalize).to(self.device)
97 |
98 | self.count_bullshit = 0
99 | self.count_success = 0
100 |
101 | @abstractmethod
102 | def test_step(self, **kwargs):
103 | pass
104 |
105 | @abstractmethod
106 | def load_dataset(self):
107 | pass
108 |
109 | @abstractmethod
110 | def process_test_losses(self, **kwargs):
111 | pass
112 |
113 | @abstractmethod
114 | def create_model(self):
115 | pass
116 |
117 | @abstractmethod
118 | def test(self):
119 | pass
--------------------------------------------------------------------------------
/optimize/optimize_fk.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | from numpy import array, array_equal, allclose
5 | import numpy as np
6 | import torch.backends.cudnn as cudnn
7 | import torch.utils.data
8 | import torch.utils.data.distributed
9 | import torchvision.transforms as transforms
10 | import torchvision.utils as vutils
11 | from tqdm import tqdm
12 | from scipy.spatial.transform import Rotation as R
13 |
14 | from cycleik_pytorch import IKDataset, GenericGenerator, GenericDiscriminator
15 | from cycleik_pytorch import load_config, renormalize_pose, normalize_pose, slice_fk_pose, renormalize_joint_state
16 | import pytorch_kinematics as pk
17 | import time
18 | import matplotlib.pyplot as plt
19 | import pandas as ps
20 | from abc import abstractmethod
21 | from train.train_fk import FKTrainer
22 | import optuna
23 | from .optimize import BaseOptimizer
24 | import joblib
25 |
26 |
27 | class FKOptimizer(BaseOptimizer):
28 |
29 | def train_once(self, trial):
30 | torch.set_num_threads(2)
31 |
32 | args = self.args
33 |
34 | args.batch_size = trial.suggest_int('batch_size', 100, 1000, step=50)
35 | # epochs = args.epochs
36 | args.lr = None
37 | args.fk_lr = trial.suggest_float('lr', 0.00001, 0.001, step=0.00001)
38 | # if robot_dof <= 6:
39 | # nbr_layers = trial.suggest_int('nbr_layers', 5, 10)
40 | # else:
41 | nbr_layers = trial.suggest_int('nbr_layers', 7, 9)
42 | layers = []
43 | for hidden_layer in range(nbr_layers):
44 | # layer = None
45 | if hidden_layer < nbr_layers / 2:
46 | layer = trial.suggest_int('layer{0}_neurons'.format(hidden_layer), 500, 3500, step=10)
47 | else:
48 | layer = trial.suggest_int('layer{0}_neurons'.format(hidden_layer), 10, 2000, step=10)
49 | layers.append(layer)
50 | args.layers = layers
51 | args.nbr_tanh = trial.suggest_int('nbr_tanh', 1, 3)
52 | # activation = trial.suggest_int('activation', 0, 3)
53 | args.activation = "GELU" # trial.suggest_categorical("activation", ["GELU", "LeakyReLu"])#, "SELU", "CELU"])
54 |
55 | print(
56 | "Hyperparams - batch_size: {0}, lr: {1}, nbr_layers: {4}, nbr_tanh: {2}, activation: {3}\n neurons: {5}".format(
57 | args.batch_size, args.fk_lr, args.nbr_tanh, args.activation, nbr_layers, layers))
58 |
59 | trainer = FKTrainer(args, trial=trial, config=self.config, train_dataset=self.train_dataset, test_dataset=self.test_dataset)
60 |
61 | return trainer.train()
62 |
63 | def optimize(self):
64 | study = None
65 | if self.args.db is not None:
66 |             # persist the study in a SQLite database so it can be resumed later
67 | study = optuna.create_study(study_name="cycleik_fk_optimizer",
68 | direction='minimize',
69 | pruner=optuna.pruners.HyperbandPruner(),
70 | sampler=optuna.samplers.TPESampler(),
71 | storage=f'sqlite:///{self.args.db}',
72 | load_if_exists=True)
73 | else:
74 |             # in-memory study; results are only persisted via joblib after optimization
75 | study = optuna.create_study(study_name="cycleik_fk_optimizer",
76 | direction='minimize',
77 | pruner=optuna.pruners.HyperbandPruner(),
78 | sampler=optuna.samplers.TPESampler())
79 |
80 | if self.args.add_seed:
81 | initial_layers = self.config["FKNet"]["architecture"]["layers"]
82 | initial_nbr_layers = len(initial_layers)
83 |
84 | initial_params = {"batch_size": self.config["FKNet"]["training"]["batch_size"],
85 | "lr": self.config["FKNet"]["training"]["lr"],
86 | "nbr_layers": initial_nbr_layers,
87 | "nbr_tanh": self.config["FKNet"]["architecture"]["nbr_tanh"], }
88 | # "activation": config["IKNet"]["architecture"]["activation"]}
89 |
90 | for i in range(initial_nbr_layers):
91 | initial_params[f"layer{i}_neurons"] = initial_layers[i]
92 |
93 | study.enqueue_trial(initial_params)
94 |
95 | study.optimize(self.train_once, n_trials=self.args.trials)
96 |
97 | joblib.dump(study, f"./optuna/{self.robot}/cycleik_fk_optimizer.pkl")
98 | print("Best Config:\n {0}".format(study.best_params))
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CycleIK
2 |
3 | ## Overview
4 |
5 | Implementation of CycleIK - Neuro-inspired Inverse Kinematics
6 |
7 |
8 |
9 | 
CycleIK used in a human-in-the-loop grasping scenario on the Neuro-Inspired Collaborator (NICOL)
10 |
11 |
12 |
13 | 
Neuro-inspired inverse kinematics training architecture. A batch of
14 | poses X is inferenced by either the MLP or GAN to obtain the batch of joint angles
15 | Θ. The joint angles are transformed back to Cartesian space X by the FK(Θ)
16 | function to be evaluated by the multi-objective function under constraints L.
17 |
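The weighted multi-objective loss described above can be sketched as follows. This is a conceptual illustration only, not the exact implementation: the `ik_net` and `fk` callables and the plain L2 error terms are assumptions, while the default weights mirror `position_weight`/`orientation_weight` from the per-robot configs (e.g. 7.0 and 1.0 for the NICO arm).

```python
import torch

def cycle_loss(ik_net, fk, target_poses, w_pos=7.0, w_rot=1.0):
    # Θ = IK(X): predict a batch of joint angles for a batch of target poses
    theta = ik_net(target_poses)
    # X' = FK(Θ): map the joint angles back to Cartesian space
    reconstructed = fk(theta)
    # weighted position (x, y, z) and orientation (quaternion) reconstruction errors
    pos_err = torch.norm(reconstructed[:, :3] - target_poses[:, :3], dim=-1)
    rot_err = torch.norm(reconstructed[:, 3:] - target_poses[:, 3:], dim=-1)
    return (w_pos * pos_err + w_rot * rot_err).mean()
```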
18 |
19 | ### Publications
20 | ```
21 | Inverse Kinematics for Neuro-Robotic Grasping with Humanoid Embodied Agents. (2024).
22 | Habekost, JG., Gäde, C., Allgeuer, P., Wermter, S.
23 | Accepted at IROS 2024.
24 | ```
25 | Arxiv: https://arxiv.org/html/2404.08825
26 |
27 | ```
28 | CycleIK: Neuro-inspired Inverse Kinematics. (2023).
29 | Habekost, JG., Strahl, E., Allgeuer, P., Kerzel, M., Wermter, S.
30 | In: Artificial Neural Networks and Machine Learning – ICANN 2023.
31 | Lecture Notes in Computer Science, vol. 14254, pp. 457-470.
32 | ```
33 | Arxiv: https://arxiv.org/abs/2307.11554 \
34 | Open Access: https://link.springer.com/chapter/10.1007/978-3-031-44207-0_38
35 |
36 |
37 | ### Installation
38 |
39 | #### Clone and install requirements
40 |
41 | Best practice is to use anaconda or miniconda
42 |
43 | ```bash
44 | git clone https://github.com/jangerritha/CycleIK.git
45 | cd cycleik/
46 | pip install -r requirements.txt
47 | pip install -e .
48 | ```
49 | ### Usage
50 |
51 | ```python
52 | import numpy as np
53 | from cycleik_pytorch import CycleIK
54 | cycleik = CycleIK(robot="nicol", cuda_device='0', verbose=True, chain="right_arm")
55 |
56 | start_pose = np.array([0.3, -0.5, 0.99, 0.0015305, 0.0009334, 0.70713, 0.70708])
57 |
58 | target_pose = np.array([0.6, -0.3, 0.895, -0.5, 0.5, 0.5, 0.5])
59 |
60 | control_points = np.array([np.array([0.3, -0.5, 1.2, -0.5, 0.5, 0.5, 0.5]),
61 | np.array([0.6, -0.3, 1.2, -0.5, 0.5, 0.5, 0.5])])
62 |
63 | bezier_curve = cycleik.generate_cubic_bezier_trajectory(start_pose=start_pose,
64 | target_pose=target_pose,
65 | control_points=control_points,
66 | points=100)
67 |
68 | joint_trajectory, _, _, _ = cycleik.inverse_kinematics(bezier_curve, calculate_error=True)
69 | ```
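
When `calculate_error=True` is set, `inverse_kinematics` returns error diagnostics alongside the joint trajectory (the two trailing return values are unused above); the same call drives a NICO trajectory in `visualize/moveit/visualize_nico_trajectory.py`.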
70 |
71 |
72 | ### Test
73 | The MLP network can be tested with the following command; remove the `--cuda` flag in case you are running a CPU-only system.
74 |
75 | Exchange `<robot>` with one of the following options: `nicol, nico, fetch, panda, or valkyrie`.
76 |
77 | The `<chain>` tag refers to the kinematic chain that is selected for evaluation. \
78 | It must match one of the move groups in the config yaml of the corresponding robot.
79 |
80 | ```bash
81 | python test.py --cuda --robot <robot> --chain <chain> --network MLP
82 | ```
83 |
84 | #### Example
85 | ```bash
86 | python test.py --cuda --robot nicol --chain right_arm --network MLP
87 | ```
88 | #### Download pretrained weights
89 |
90 | Pre-trained weights are available for each of the five robot platforms on which CycleIK was evaluated.
91 |
92 |
93 | ```bash
94 | cd <CycleIK root>
95 | mkdir weights/
96 | cd weights
97 | mkdir <robot>
98 | ```
99 |
100 | You can download a zip that contains all pre-trained weights from [here](https://drive.google.com/file/d/1SdZVdi4KtpBleBPvAcVP9s_GOpJ6zcOt/view?usp=sharing).
101 |
102 | ### Train
103 | Training is executed very similarly to running the tests.
104 |
105 | ```bash
106 | python train.py --cuda --robot <robot> --chain <chain> --network MLP --epochs 10
107 | ```
108 |
109 | #### Example
110 | ```bash
111 | python train.py --cuda --robot nicol --chain right_arm --network MLP --epochs 10
112 | ```
113 |
114 |
115 | #### Download dataset
116 |
117 | For every robot there is a `train, test, and validation` dataset. Every dataset is delivered in a single file and must be added under `/data`.
118 |
119 | ```bash
120 | cd <CycleIK root>
121 | mkdir data
122 | ```
123 |
124 | You can download a zip that contains all datasets used for our publication from [here](https://drive.google.com/file/d/1wc-YI9v0aEh0V0k5YqABckaJdNRqnNy7/view?usp=sharing).
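
Each dataset file is a pickled list `[poses, joint_states]`, as written by the sampler scripts in `data_samplers/`: every pose is a 7-tuple `[x, y, z, qx, qy, qz, qw]` with the matching joint angles at the same index. A minimal sketch for inspecting a downloaded file (the file name is illustrative):

```python
import pickle

# Assumed layout, matching data_samplers/* and cycleik_pytorch/datasets.py:
# a pickled two-element list [poses, joint_states].
with open('./data/results_nicol_1000_right_arm.p', 'rb') as f:  # illustrative name
    poses, joint_states = pickle.load(f)

print(len(poses), 'samples')
print('first pose (x, y, z, qx, qy, qz, qw):', poses[0])
print('first joint state:', joint_states[0])
```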
125 |
126 |
127 |
128 |
129 |
130 | ### Contributing
131 |
132 | If you find a bug, create a GitHub issue, or even better, submit a pull request. Similarly, if you have questions, simply post them as GitHub issues.
133 |
--------------------------------------------------------------------------------
/data_samplers/panda_data_generator.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import numpy as np
4 |
5 | import random
6 | import time
7 |
8 | import pathlib
9 | import xml.etree.ElementTree
10 | from moveit_msgs.msg import RobotState
11 | from moveit_msgs.srv import GetPositionFK
12 | from sensor_msgs.msg import JointState
13 | from geometry_msgs.msg import Pose
14 | import std_msgs.msg
15 | from visualization_msgs.msg import Marker
16 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
17 | from std_msgs.msg import Header, ColorRGBA
18 | import cv2 as cv
19 |
20 | import rospy
21 | import pickle
22 | from tqdm import tqdm
23 | import os
24 |
25 | use_platform = False
26 | timeout = 15
27 |
28 | rospy.init_node('ik_data_sampler', anonymous=True)
29 |
30 | """ setup """
31 | rospy.wait_for_service('compute_fk')
32 | calculate_fk = rospy.ServiceProxy('compute_fk', GetPositionFK)
33 |
34 | marker_publisher = rospy.Publisher("/visualization_marker", Marker, queue_size=100000)
35 |
36 | def main():
37 | correct_poses = 0
38 | data = []
39 | iterations = 200000
40 |
41 | #if os.path.exists('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p'):
42 | # with open('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p',
43 | # 'rb') as f:
44 | # data = pickle.load(f)
45 | # f.close()
46 | # print(data)
47 |
48 | joint_states = []
49 | poses = []
50 | jacobians = []
51 |
52 | with tqdm(total=iterations) as pbar:
53 | while correct_poses < iterations and not rospy.is_shutdown():
54 |
55 | in_workspace = False
56 |
57 | random_js = [random.uniform(-2.8973, 2.8973), random.uniform(-1.7628, 1.7628),
58 | random.uniform(-2.8973, 2.8973), random.uniform(-3.0718, -0.0698),
59 | random.uniform(-2.8973, 2.8973), random.uniform(-0.0175, 3.7525),
60 | random.uniform(-2.8973, 2.8973)]
61 |
62 | # rospy.loginfo("Position: \n%s", correct_poses)
63 | # rospy.loginfo("Joint State: \n%s", random_js)
64 |
65 | joint_state_msg = JointState()
66 | joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3", "panda_joint4", "panda_joint5", "panda_joint6", "panda_joint7"]
67 | joint_state_msg.position = random_js
68 | robot_state_msg = RobotState()
69 | robot_state_msg.joint_state = joint_state_msg
70 | robot_state_msg.is_diff = True
71 |
72 | h = std_msgs.msg.Header()
73 | h.stamp = rospy.Time.now()
74 |
75 | result = None
76 | try:
77 | result = calculate_fk(h, ["panda_hand_tcp"], robot_state_msg)
78 | except rospy.ServiceException as exc:
79 | rospy.logerr("Service did not process request: %s", exc)
80 | rospy.sleep(0.00001)
81 | continue
82 |
83 | if result is not None and (result.error_code.val == -10 or result.error_code.val == -12):
84 | # rospy.logerr("Joint State in collision")
85 | rospy.sleep(0.00001)
86 | continue
87 |
88 | if result is not None:
89 | if result.error_code.val == 1:
90 | correct_poses += 1
91 | # rospy.loginfo("Call successfull. Pose: \n%s", result.pose_stamped[0].pose)
92 |
93 | pose_msg = result.pose_stamped[0].pose
94 | pose = [pose_msg.position.x, pose_msg.position.y, pose_msg.position.z, pose_msg.orientation.x,
95 | pose_msg.orientation.y, pose_msg.orientation.z, pose_msg.orientation.w, ]
96 | js = joint_state_msg.position
97 | #print(pose)
98 | poses.append(pose)
99 | joint_states.append(js)
100 |
101 | data.append([result.pose_stamped[0].pose, joint_state_msg])
102 | pbar.update(1)
103 |
104 | # print(pose)
105 | marker = Marker(
106 | type=Marker.SPHERE,
107 | action=Marker.ADD,
108 | id=correct_poses,
109 | lifetime=rospy.Duration(),
110 | pose=Pose(Point(pose[0], pose[1], pose[2]), Quaternion(0, 0, 0, 1)),
111 | scale=Vector3(0.001, 0.001, 0.001),
112 | header=Header(frame_id='world'),
113 | color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
114 | # lifetime=0,
115 | frame_locked=False)
116 | marker_publisher.publish(marker)
117 | rospy.sleep(0.001)
118 | # else:
119 | # rospy.loginfo("Call unsuccessfull")
120 |
121 | rospy.sleep(0.00001)
122 |
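    # final dataset layout: [poses, joint_states] - the two-list format IKDataset unpickles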
123 | data = [poses, joint_states]
124 |
125 | with open('./data/results_panda_200_4.p', 'wb') as f:
126 | pickle.dump(data, f)
127 | f.close()
128 |
129 |
130 | if __name__ == '__main__':
131 | main()
--------------------------------------------------------------------------------
/data_samplers/fetch_data_generator.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import numpy as np
4 |
5 | import random
6 | import time
7 |
8 | import pathlib
9 | import xml.etree.ElementTree
10 | from moveit_msgs.msg import RobotState
11 | from moveit_msgs.srv import GetPositionFK
12 | from sensor_msgs.msg import JointState
13 | from geometry_msgs.msg import Pose
14 | import std_msgs.msg
15 | from visualization_msgs.msg import Marker
16 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
17 | from std_msgs.msg import Header, ColorRGBA
18 | import cv2 as cv
19 |
20 | import rospy
21 | import pickle
22 | from tqdm import tqdm
23 | import os
24 |
25 | use_platform = False
26 | timeout = 15
27 |
28 | rospy.init_node('ik_data_sampler', anonymous=True)
29 |
30 | """ setup """
31 | rospy.wait_for_service('compute_fk')
32 | calculate_fk = rospy.ServiceProxy('compute_fk', GetPositionFK)
33 |
34 | marker_publisher = rospy.Publisher("/visualization_marker", Marker, queue_size=1000000)
35 |
36 | def main():
37 | correct_poses = 0
38 | data = []
39 | iterations = 200000
40 |
41 | #if os.path.exists('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p'):
42 | # with open('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p',
43 | # 'rb') as f:
44 | # data = pickle.load(f)
45 | # f.close()
46 | # print(data)
47 |
48 | joint_states = []
49 | poses = []
50 | jacobians = []
51 |
52 | with tqdm(total=iterations) as pbar:
53 | while correct_poses < iterations and not rospy.is_shutdown():
54 |
55 | in_workspace = False
56 |
57 | random_js = [random.uniform(0., 0.38615),
58 | random.uniform(-1.6056, 1.6056),
59 | random.uniform(-1.221, 1.518),
60 | random.uniform(-3.14159, 3.14159),
61 | random.uniform(-2.251, 2.251),
62 | random.uniform(-3.14159, 3.14159),
63 | random.uniform(-2.16, 2.16),
64 | random.uniform(-3.14159, 3.14159)]
65 |
66 | # rospy.loginfo("Position: \n%s", correct_poses)
67 | # rospy.loginfo("Joint State: \n%s", random_js)
68 |
69 | joint_state_msg = JointState()
70 | joint_state_msg.name = ["torso_lift_joint", "shoulder_pan_joint", "shoulder_lift_joint", "upperarm_roll_joint", "elbow_flex_joint", "forearm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
71 | joint_state_msg.position = random_js
72 | robot_state_msg = RobotState()
73 | robot_state_msg.joint_state = joint_state_msg
74 | robot_state_msg.is_diff = True
75 |
76 | h = std_msgs.msg.Header()
77 | h.stamp = rospy.Time.now()
78 |
79 | result = None
80 | try:
81 | result = calculate_fk(h, ["wrist_roll_link"], robot_state_msg)
82 | except rospy.ServiceException as exc:
83 | rospy.logerr("Service did not process request: %s", exc)
84 | rospy.sleep(0.00001)
85 | continue
86 |
87 | if result is not None and (result.error_code.val == -10 or result.error_code.val == -12):
88 | # rospy.logerr("Joint State in collision")
89 | rospy.sleep(0.00001)
90 | continue
91 |
92 | if result is not None:
93 | if result.error_code.val == 1:
94 | correct_poses += 1
95 | # rospy.loginfo("Call successfull. Pose: \n%s", result.pose_stamped[0].pose)
96 |
97 | pose_msg = result.pose_stamped[0].pose
98 | pose = [pose_msg.position.x, pose_msg.position.y, pose_msg.position.z, pose_msg.orientation.x,
99 | pose_msg.orientation.y, pose_msg.orientation.z, pose_msg.orientation.w, ]
100 | js = joint_state_msg.position
101 | #print(pose)
102 | poses.append(pose)
103 | joint_states.append(js)
104 |
105 | data.append([result.pose_stamped[0].pose, joint_state_msg])
106 | pbar.update(1)
107 |
108 | # print(pose)
109 | marker = Marker(
110 | type=Marker.SPHERE,
111 | action=Marker.ADD,
112 | id=correct_poses,
113 | lifetime=rospy.Duration(),
114 | pose=Pose(Point(pose[0], pose[1], pose[2]), Quaternion(0, 0, 0, 1)),
115 | scale=Vector3(0.001, 0.001, 0.001),
116 | header=Header(frame_id='base_link'),
117 | color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
118 | # lifetime=0,
119 | frame_locked=False)
120 | marker_publisher.publish(marker)
121 | rospy.sleep(0.001)
122 | # else:
123 | # rospy.loginfo("Call unsuccessfull")
124 |
125 | rospy.sleep(0.00001)
126 |
127 | data = [poses, joint_states]
128 | with open('./data_samplers/results_fetch_200_3.p', 'wb') as f:
129 | pickle.dump(data, f)
130 | f.close()
131 |
132 |
133 | if __name__ == '__main__':
134 | main()
--------------------------------------------------------------------------------
/data_samplers/valkyrie_data_generator.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import numpy as np
4 |
5 | import random
6 | import time
7 |
8 | import pathlib
9 | import xml.etree.ElementTree
10 | from moveit_msgs.msg import RobotState
11 | from moveit_msgs.srv import GetPositionFK
12 | from sensor_msgs.msg import JointState
13 | from geometry_msgs.msg import Pose
14 | import std_msgs.msg
15 | from visualization_msgs.msg import Marker
16 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
17 | from std_msgs.msg import Header, ColorRGBA
18 | import cv2 as cv
19 |
20 | import rospy
21 | import pickle
22 | from tqdm import tqdm
23 | import os
24 |
25 | use_platform = False
26 | timeout = 15
27 |
28 | rospy.init_node('ik_data_sampler', anonymous=True)
29 |
30 | """ setup """
31 | rospy.wait_for_service('compute_fk')
32 | calculate_fk = rospy.ServiceProxy('compute_fk', GetPositionFK)
33 |
34 | marker_publisher = rospy.Publisher("/visualization_marker", Marker, queue_size=1000000)
35 |
36 | def main():
37 | correct_poses = 0
38 | data = []
39 | iterations = 200000
40 |
41 | #if os.path.exists('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p'):
42 | # with open('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p',
43 | # 'rb') as f:
44 | # data = pickle.load(f)
45 | # f.close()
46 | # print(data)
47 |
48 | joint_states = []
49 | poses = []
50 | jacobians = []
51 |
52 | with tqdm(total=iterations) as pbar:
53 | while correct_poses < iterations and not rospy.is_shutdown():
54 |
55 | in_workspace = False
56 |
57 | random_js = [random.uniform(-1.329, 1.181),
58 | random.uniform(-0.13, 0.666),
59 | random.uniform(-0.23, 0.255),
60 | random.uniform(-2.85, 2.0),
61 | random.uniform(-1.266, 1.519),
62 | random.uniform(-3.1, 2.18),
63 | random.uniform(-0.12, 2.174),
64 | random.uniform(-2.019, 3.14),
65 | random.uniform(-0.625, 0.62),
66 | random.uniform(-0.49, 0.36)]
67 |
68 | # rospy.loginfo("Position: \n%s", correct_poses)
69 | # rospy.loginfo("Joint State: \n%s", random_js)
70 |
71 | joint_state_msg = JointState()
72 | joint_state_msg.name = ["torsoYaw", "torsoPitch", "torsoRoll", "rightShoulderPitch", "rightShoulderRoll", "rightShoulderYaw", "rightElbowPitch", "rightForearmYaw", "rightWristRoll", "rightWristPitch"]
73 | joint_state_msg.position = random_js
74 | robot_state_msg = RobotState()
75 | robot_state_msg.joint_state = joint_state_msg
76 | robot_state_msg.is_diff = True
77 |
78 | h = std_msgs.msg.Header()
79 | h.stamp = rospy.Time.now()
80 |
81 | result = None
82 | try:
83 | result = calculate_fk(h, ["rightPalm"], robot_state_msg)
84 | except rospy.ServiceException as exc:
85 | rospy.logerr("Service did not process request: %s", exc)
86 | rospy.sleep(0.00001)
87 | continue
88 |
89 | if result is not None and (result.error_code.val == -10 or result.error_code.val == -12):
90 | # rospy.logerr("Joint State in collision")
91 | rospy.sleep(0.00001)
92 | continue
93 |
94 | if result is not None:
95 | if result.error_code.val == 1:
96 | correct_poses += 1
97 | # rospy.loginfo("Call successfull. Pose: \n%s", result.pose_stamped[0].pose)
98 |
99 | pose_msg = result.pose_stamped[0].pose
100 | pose = [pose_msg.position.x, pose_msg.position.y, pose_msg.position.z, pose_msg.orientation.x,
101 | pose_msg.orientation.y, pose_msg.orientation.z, pose_msg.orientation.w, ]
102 | js = joint_state_msg.position
103 | #print(pose)
104 | poses.append(pose)
105 | joint_states.append(js)
106 |
107 | data.append([result.pose_stamped[0].pose, joint_state_msg])
108 | pbar.update(1)
109 |
110 | # print(pose)
111 | marker = Marker(
112 | type=Marker.SPHERE,
113 | action=Marker.ADD,
114 | id=correct_poses,
115 | lifetime=rospy.Duration(),
116 | pose=Pose(Point(pose[0], pose[1], pose[2]), Quaternion(0, 0, 0, 1)),
117 | scale=Vector3(0.001, 0.001, 0.001),
118 | header=Header(frame_id='world'),
119 | color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
120 | # lifetime=0,
121 | frame_locked=False)
122 | marker_publisher.publish(marker)
123 | rospy.sleep(0.001)
124 | # else:
125 | # rospy.loginfo("Call unsuccessfull")
126 |
127 | rospy.sleep(0.00001)
128 |
129 | data = [poses, joint_states]
130 | with open('./data_samplers/results_valkyrie_200_1.p', 'wb') as f:
131 | pickle.dump(data, f)
132 | f.close()
133 |
134 |
135 | if __name__ == '__main__':
136 | main()
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from typing import List
3 |
4 | from train import MLPTrainer, FKTrainer, FineTuneMLPTrainer, MLPMultiRun
5 |
6 |
7 | if __name__ == '__main__':
8 |     parser = argparse.ArgumentParser(
9 |         description="Training entry point for CycleIK - Neuro-inspired Inverse Kinematics")
10 | parser.add_argument("--dataroot", type=str, default="./data",
11 | help="path to datasets. (default:./data)")
12 | parser.add_argument("--dataset", type=str, default="horse2zebra",
13 | help="dataset name. (default:`horse2zebra`)"
14 | "Option: [apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, "
15 | "cezanne2photo, ukiyoe2photo, vangogh2photo, maps, facades, selfie2anime, "
16 | "iphone2dslr_flower, ae_photos, ]")
17 | parser.add_argument("--epochs", default=100, type=int, metavar="N",
18 | help="number of total epochs to run")
19 |     parser.add_argument("--decay_epochs", type=int, default=0,
20 |                         help="epoch to start linearly decaying the learning rate to 0. (default:0)")
21 |     parser.add_argument("--runs", type=int, default=1,
22 |                         help="number of training runs for multi-run training. (default:1)")
23 | parser.add_argument("-b", "--batch-size", default=None, type=int,
24 | metavar="N",
25 | help="mini-batch size (default: 1), this is the total "
26 | "batch size of all GPUs on the current node when "
27 | "using Data Parallel or Distributed Data Parallel")
28 |     parser.add_argument("--lr", type=float, default=None,
29 |                         help="learning rate. (default:None, taken from the robot config)")
30 |     parser.add_argument("--fk_lr", type=float, default=None,
31 |                         help="learning rate for the FK network. (default:None, taken from the robot config)")
32 |     parser.add_argument("--noise_vector_size", type=int, default=None,
33 |                         help="size of the noise vector for GAN training. (default:None)")
34 |     parser.add_argument("--noise", type=float, default=None,
35 |                         help="noise on position for fk. (default:None)")
36 | parser.add_argument("-p", "--print-freq", default=100, type=int,
37 | metavar="N", help="print frequency. (default:100)")
38 | parser.add_argument("--cuda", action="store_true", help="Enables cuda")
39 | parser.add_argument("--her", action="store_true", help="Enables Hindsight Experience Replay (HER)")
40 | parser.add_argument("--netG_A2B", default="", help="path to netG_A2B (to continue training)")
41 | parser.add_argument("--netG_B2A", default="", help="path to netG_B2A (to continue training)")
42 | parser.add_argument("--netD_A", default="", help="path to netD_A (to continue training)")
43 | parser.add_argument("--netD_B", default="", help="path to netD_B (to continue training)")
44 |
45 | parser.add_argument("--enc_A", default="", help="path to netD_A (to continue training)")
46 | parser.add_argument("--enc_B", default="", help="path to netD_B (to continue training)")
47 |
48 | parser.add_argument("--image-size", type=int, default=256,
49 | help="size of the data crop (squared assumed). (default:256)")
50 | parser.add_argument("--outf", default="./outputs",
51 | help="folder to output images. (default:`./outputs`).")
52 | parser.add_argument("--manualSeed", type=int,
53 | help="Seed for initializing training. (default:none)")
54 | parser.add_argument("--robot", type=str, default="nicol", help="Robot model IK is trained for")
55 | parser.add_argument("--gpu", type=str, default="0", help="GPU used for training")
56 | parser.add_argument("--gpus", nargs='+', type=int, default=[0], help="GPU used for training")
57 | parser.add_argument("--network", type=str, default="MLP", help="Network type to be trained")
58 | parser.add_argument("--autoencoder", action="store_true", help="Enables autoencoder")
59 | parser.add_argument("--two-stage", action="store_true", help="Enables two-stage training for autoencoder")
60 |     parser.add_argument("--finetune", action="store_true", help="Enables fine-tuning of a pretrained model")
61 |     parser.add_argument("--multi-run", action="store_true", help="Enables multiple consecutive training runs")
62 |     parser.add_argument("--core-model", type=str, default="nicol", help="Core model the fine-tuned network is derived from")
63 | parser.add_argument("--chain", type=str, default="right_arm", help="Robot model Kinematic Chain")
64 | parser.add_argument("--core_model_chain", type=str, default="right_arm", help="Robot model Kinematic Chain")
65 | parser.add_argument("--suffix", type=str, default="", help="suffix for persistent weights and stats")
66 | parser.add_argument("--compile", action="store_true", help="Enables pytorch compiling")
67 | train_args = parser.parse_args()
68 |
69 |
70 |
71 | if train_args.multi_run:
72 | multi_runner = None
73 | if train_args.network == "GAN":
74 | raise NotImplementedError
75 | elif train_args.network == "MLP":
76 | multi_runner = MLPMultiRun(train_args)
77 | multi_runner.run()
78 | else:
79 | trainer = None
80 | if train_args.network == "MLP":
81 | if train_args.finetune:
82 | trainer = FineTuneMLPTrainer(train_args)
83 | else:
84 | trainer = MLPTrainer(train_args)
85 | elif train_args.network == "FK":
86 | trainer = FKTrainer(train_args)
87 | trainer.train()
88 |
--------------------------------------------------------------------------------
/data_samplers/panda_data_generator_2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import numpy as np
3 |
4 | import random
5 | import time
6 |
7 | import pathlib
8 | import xml.etree.ElementTree
9 | from moveit_msgs.msg import RobotState
10 | from moveit_msgs.srv import GetPositionFK
11 | from sensor_msgs.msg import JointState
12 | from geometry_msgs.msg import Pose
13 | import std_msgs.msg
14 | from visualization_msgs.msg import Marker
15 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
16 | from std_msgs.msg import Header, ColorRGBA
17 | import cv2 as cv
18 |
19 | import rospy
20 | import pickle
21 | from tqdm import tqdm
22 | import os
23 |
24 | use_platform = False
25 | timeout = 15
26 |
27 | rospy.init_node('ik_data_sampler', anonymous=True)
28 |
29 | """ setup """
30 | rospy.wait_for_service('compute_fk')
31 | calculate_fk = rospy.ServiceProxy('compute_fk', GetPositionFK)
32 |
33 | marker_publisher = rospy.Publisher("/visualization_marker", Marker, queue_size=100000)
34 |
35 |
36 | def create_single_sample():
37 | random_js = [random.uniform(-2.8973, 2.8973), random.uniform(-1.7628, 1.7628),
38 | random.uniform(-2.8973, 2.8973), random.uniform(-3.0718, -0.0698),
39 | random.uniform(-2.8973, 2.8973), random.uniform(-0.0175, 3.7525),
40 | random.uniform(-2.8973, 2.8973)]
41 |
42 | # rospy.loginfo("Position: \n%s", correct_poses)
43 | # rospy.loginfo("Joint State: \n%s", random_js)
44 |
45 | joint_state_msg = JointState()
46 | joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3", "panda_joint4", "panda_joint5",
47 | "panda_joint6", "panda_joint7"]
48 | joint_state_msg.position = random_js
49 | robot_state_msg = RobotState()
50 | robot_state_msg.joint_state = joint_state_msg
51 | robot_state_msg.is_diff = True
52 |
53 | h = std_msgs.msg.Header()
54 | h.stamp = rospy.Time.now()
55 |
56 | result = None
57 | try:
58 | result = calculate_fk(h, ["panda_hand_tcp"], robot_state_msg)
59 | except rospy.ServiceException as exc:
60 | rospy.logerr("Service did not process request: %s", exc)
61 | rospy.sleep(0.00001)
62 | return None
63 |
64 | if result is not None and (result.error_code.val == -10 or result.error_code.val == -12):
65 | # rospy.logerr("Joint State in collision")
66 | rospy.sleep(0.00001)
67 | return None
68 |
69 | if result is not None:
70 | if result.error_code.val == 1:
71 | # rospy.loginfo("Call successfull. Pose: \n%s", result.pose_stamped[0].pose)
72 |
73 | pose_msg = result.pose_stamped[0].pose
74 | pose = [pose_msg.position.x, pose_msg.position.y, pose_msg.position.z, pose_msg.orientation.x,
75 | pose_msg.orientation.y, pose_msg.orientation.z, pose_msg.orientation.w, ]
76 | js = joint_state_msg.position
77 | return [js, pose]
78 | else:
79 | return None
80 | else:
81 | return None
82 |
83 |
84 | def main():
85 | correct_poses = 0
86 | data = []
87 | iterations = 1000000
88 |
89 | #if os.path.exists('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p'):
90 | # with open('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p',
91 | # 'rb') as f:
92 | # data = pickle.load(f)
93 | # f.close()
94 | # print(data)
95 |
96 | joint_states = []
97 | poses = []
98 | jacobians = []
99 |
100 | with tqdm(total=iterations) as pbar:
101 | while correct_poses < iterations and not rospy.is_shutdown():
102 |
103 |             result = create_single_sample()
104 | 
105 |             if result is not None:
106 |                 # create_single_sample already filters out FK failures and
107 |                 # collision states, so a non-None result is a valid sample
108 |                 js, pose = result
109 |                 correct_poses += 1
110 | 
111 |                 poses.append(pose)
112 |                 joint_states.append(js)
113 |                 pbar.update(1)
114 | 
115 |                 # publish a small green sphere at the sampled TCP position
116 |                 marker = Marker(
117 |                     type=Marker.SPHERE,
118 |                     action=Marker.ADD,
119 |                     id=correct_poses,
120 |                     lifetime=rospy.Duration(),
121 |                     pose=Pose(Point(pose[0], pose[1], pose[2]), Quaternion(0, 0, 0, 1)),
122 |                     scale=Vector3(0.001, 0.001, 0.001),
123 |                     header=Header(frame_id='world'),
124 |                     color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
125 |                     frame_locked=False)
126 |                 marker_publisher.publish(marker)
127 |                 rospy.sleep(0.001)
128 | 
129 |             rospy.sleep(0.00001)
130 | 
131 |         data = [poses, joint_states]
132 | 
133 |         with open('/home/jan-gerrit/repositories/cycleik_2/data/results_panda_200_4.p', 'wb') as f:
134 |             pickle.dump(data, f)
135 | 
136 | 
137 | if __name__ == '__main__':
138 |     main()
--------------------------------------------------------------------------------
/data_samplers/nico_data_generator.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import numpy as np
4 |
5 | import random
6 | import time
7 |
8 | import pathlib
9 | import xml.etree.ElementTree
10 | from moveit_msgs.msg import RobotState
11 | from moveit_msgs.srv import GetPositionFK
12 | from sensor_msgs.msg import JointState
13 | from geometry_msgs.msg import Pose
14 | import std_msgs.msg
15 | from visualization_msgs.msg import Marker
16 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
17 | from std_msgs.msg import Header, ColorRGBA
18 | import cv2 as cv
19 |
20 | import rospy
21 | import pickle
22 | from tqdm import tqdm
23 | import os
24 |
25 | use_platform = False
26 | timeout = 15
27 |
28 | rospy.init_node('ik_data_sampler', anonymous=True)
29 |
30 | """ setup """
31 | rospy.wait_for_service('compute_fk')
32 | calculate_fk = rospy.ServiceProxy('compute_fk', GetPositionFK)
33 |
34 | marker_publisher = rospy.Publisher("/visualization_marker", Marker, queue_size=100000)
35 |
36 |
37 | def main():
38 | correct_poses = 0
39 | data = []
40 | iterations = 200000
41 | leg = False
42 |
43 | #if os.path.exists('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p'):
44 | # with open('/home/jan-gerrit/repositories/cycleik/data_samplers/results_ur5_1mio.p',
45 | # 'rb') as f:
46 | # data = pickle.load(f)
47 | # f.close()
48 | # print(data)
49 |
50 | joint_states = []
51 | poses = []
52 | jacobians = []
53 |
54 | with tqdm(total=iterations) as pbar:
55 | while correct_poses < iterations and not rospy.is_shutdown():
56 |
57 | in_workspace = False
58 | if leg:
59 | random_js = [random.uniform(-0.6981, 0.6981), random.uniform(-0.4188, 1.2217),
60 | random.uniform(-1.5707, 0.5585), random.uniform(-1.5358, 1.2915),
61 | random.uniform(-0.7853, 0.7853), random.uniform(-0.7853, 0.7853)]
62 | else:
63 | #upper: [0.8, 3.142, 0.314, 0.0, 1.571, 0.872665]
64 | #lower: [-0.8, -3.142, -1.57, -1.745, -1.571, 0.0]
65 | random_js = [random.uniform(-0.8, 0.8), random.uniform(-3.142, 3.142),
66 | random.uniform(-1.57, 0.314), random.uniform(-1.745, 0.0),
67 | random.uniform(-1.571, 1.571), random.uniform(0.0, 0.872665)]
68 |
69 | # rospy.loginfo("Position: \n%s", correct_poses)
70 | # rospy.loginfo("Joint State: \n%s", random_js)
71 |
72 | joint_state_msg = JointState()
73 | if leg:
74 | joint_state_msg.name = ["r_hip_z", "r_hip_x", "r_hip_y", "r_knee_y", "r_ankle_y", "r_ankle_x"]
75 | else:
76 | joint_state_msg.name = ["l_shoulder_z", "l_shoulder_y", "l_arm_x", "l_elbow_y", "l_wrist_z", "l_wrist_x"]
77 |             #print(random_js)
78 | joint_state_msg.position = random_js
79 | robot_state_msg = RobotState()
80 | robot_state_msg.joint_state = joint_state_msg
81 | robot_state_msg.is_diff = True
82 |
83 | h = std_msgs.msg.Header()
84 | h.stamp = rospy.Time.now()
85 |
86 | result = None
87 | try:
88 | if leg:
89 | result = calculate_fk(h, ["right_foot:11"], robot_state_msg)
90 | else:
91 | result = calculate_fk(h, ["left_tcp"], robot_state_msg)
92 | except rospy.ServiceException as exc:
93 | rospy.logerr("Service did not process request: %s", exc)
94 | rospy.sleep(0.00000001)
95 | continue
96 |
97 | #print(result.error_code.val)
98 |
99 | if result is not None and (result.error_code.val == -10 or result.error_code.val == -12):
100 | # rospy.logerr("Joint State in collision")
101 | rospy.sleep(0.000000001)
102 | continue
103 |
104 | if result is not None:
105 | if result.error_code.val == 1:
106 | correct_poses += 1
107 | # rospy.loginfo("Call successfull. Pose: \n%s", result.pose_stamped[0].pose)
108 |
109 | pose_msg = result.pose_stamped[0].pose
110 | pose = [pose_msg.position.x, pose_msg.position.y, pose_msg.position.z, pose_msg.orientation.x,
111 | pose_msg.orientation.y, pose_msg.orientation.z, pose_msg.orientation.w, ]
112 | js = joint_state_msg.position
113 | #print(pose)
114 | poses.append(pose)
115 | joint_states.append(js)
116 |
117 | data.append([result.pose_stamped[0].pose, joint_state_msg])
118 | pbar.update(1)
119 |
120 | # print(pose)
121 | marker = Marker(
122 | type=Marker.SPHERE,
123 | action=Marker.ADD,
124 | id=correct_poses,
125 | lifetime=rospy.Duration(),
126 | pose=Pose(Point(pose[0], pose[1], pose[2]), Quaternion(0, 0, 0, 1)),
127 | scale=Vector3(0.001, 0.001, 0.001),
128 | header=Header(frame_id='torso:11'),
129 | color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
130 | # lifetime=0,
131 | frame_locked=False)
132 | marker_publisher.publish(marker)
133 | rospy.sleep(0.001)
134 | # else:
135 | # rospy.loginfo("Call unsuccessfull")
136 |
137 | rospy.sleep(0.00000001)
138 |
139 | data = [poses, joint_states]
140 |
141 | with open('./data/results_nico_200_3_left.p', 'wb') as f:
142 | pickle.dump(data, f)
143 | f.close()
144 |
145 |
146 | if __name__ == '__main__':
147 | main()
--------------------------------------------------------------------------------
/optimize/optimize_mlp.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | from numpy import array, array_equal, allclose
5 | import numpy as np
6 | import torch.backends.cudnn as cudnn
7 | import torch.utils.data
8 | import torch.utils.data.distributed
9 | import torchvision.transforms as transforms
10 | import torchvision.utils as vutils
11 | from tqdm import tqdm
12 | from scipy.spatial.transform import Rotation as R
13 |
14 | from cycleik_pytorch import IKDataset, GenericGenerator, GenericDiscriminator
15 | from cycleik_pytorch import load_config, renormalize_pose, normalize_pose, slice_fk_pose, renormalize_joint_state
16 | import pytorch_kinematics as pk
17 | import time
18 | import matplotlib.pyplot as plt
19 | import pandas as ps
20 | from abc import abstractmethod
21 | from train import MLPTrainer
22 | import optuna
23 | from .optimize import BaseOptimizer
24 | import joblib
25 |
26 |
27 | class MLPOptimizer(BaseOptimizer):
28 |
29 | def train_once(self, trial):
30 | torch.set_num_threads(2)
31 |
32 | args = self.args
33 |
34 | args.batch_size = trial.suggest_int('batch_size', 100, 600, step=50)
35 | # epochs = args.epochs
36 | args.lr = trial.suggest_float('lr', 0.00001, 0.001, step=0.00001)
37 | # if robot_dof <= 6:
38 | # nbr_layers = trial.suggest_int('nbr_layers', 5, 10)
39 | # else:
40 | nbr_layers = trial.suggest_int('nbr_layers', 5, 9)
41 | layers = []
42 | for hidden_layer in range(nbr_layers):
43 | # layer = None
44 | layer = trial.suggest_int('layer{0}_neurons'.format(hidden_layer), 500, 3500, step=10)
45 | #if hidden_layer < nbr_layers / 2:
46 | # layer = trial.suggest_int('layer{0}_neurons'.format(hidden_layer), 500, 3500, step=10)
47 | #else:
48 | # layer = trial.suggest_int('layer{0}_neurons'.format(hidden_layer), 10, 2000, step=10)
49 | layers.append(layer)
50 | args.layers = layers
51 | args.nbr_tanh = trial.suggest_int('nbr_tanh', 0, 3)
52 | # activation = trial.suggest_int('activation', 0, 3)
53 | args.activation = "GELU" # trial.suggest_categorical("activation", ["GELU", "LeakyReLu"])#, "SELU", "CELU"])
54 |
55 | args.position_weight = trial.suggest_float('position_weight', 1., 20., step=1.)
56 | args.orientation_weight = trial.suggest_float('orientation_weight', 1., 20., step=1.)
57 |
58 | print("Hyperparams - batch_size: {0}, lr: {1}, nbr_layers: {4}, nbr_tanh: {2}, activation: {3}\n neurons: {5}".format(
59 | args.batch_size, args.lr, args.nbr_tanh, args.activation, nbr_layers, layers))
60 |
61 | trainer = MLPTrainer(args, trial=trial, config=self.config, train_dataset=self.train_dataset, test_dataset=self.test_dataset)
62 |
63 | return trainer.train()
64 |
65 | def optimize(self):
66 | study = None
67 | if self.args.study_name != "":
68 | study_name = self.args.study_name
69 | else:
70 | study_name = f"cycleik_ik_optimizer_{self.args.robot}_robot_gpu_{self.args.gpu}"
71 | if self.args.db is not None:
72 |             # persist the study in a SQLite database so it can be resumed later
73 | study = optuna.create_study(study_name=study_name,
74 | direction='minimize',
75 | pruner=optuna.pruners.HyperbandPruner(),
76 | sampler=optuna.samplers.TPESampler(),
77 | storage=f'sqlite:///{self.args.db}',
78 | load_if_exists=True)
79 | else:
80 |             # in-memory study; results are only persisted via joblib after optimization
81 | study = optuna.create_study(study_name=study_name,
82 | direction='minimize',
83 | pruner=optuna.pruners.HyperbandPruner(),
84 | sampler=optuna.samplers.TPESampler())
85 |
86 | if self.args.add_seed:
87 | initial_layers = self.config["IKNet"]["architecture"]["layers"]
88 | initial_nbr_layers = len(initial_layers)
89 |
90 | initial_params = {"batch_size": self.config["IKNet"]["training"]["batch_size"],
91 | "lr": self.config["IKNet"]["training"]["lr"],
92 | "nbr_layers": initial_nbr_layers,
93 | "nbr_tanh": self.config["IKNet"]["architecture"]["nbr_tanh"],
94 | "position_weight": self.config["IKNet"]["position_weight"],
95 | "orientation_weight": self.config["IKNet"]["orientation_weight"]}
96 | # "activation": config["IKNet"]["architecture"]["activation"]}
97 |
98 | for i in range(initial_nbr_layers):
99 | initial_params[f"layer{i}_neurons"] = initial_layers[i]
100 |
101 | study.enqueue_trial(initial_params)
102 |
103 | #if self.robot != "nicol":
104 | # initial_layers = [2200, 2400, 2400, 1900, 250, 220, 30, 380]
105 | # initial_nbr_layers = len(initial_layers)
106 | #
107 | # initial_params = {"batch_size": 300,
108 | # "lr": 0.0001,
109 | # "nbr_layers": initial_nbr_layers,
110 | # "nbr_tanh": 3, }
111 | # # "activation": config["IKNet"]["architecture"]["activation"]}
112 | #
113 | # for i in range(initial_nbr_layers):
114 | # initial_params[f"layer{i}_neurons"] = initial_layers[i]
115 | #
116 | # study.enqueue_trial(initial_params)
117 |
118 | study.optimize(self.train_once, n_trials=self.args.trials)
119 |
120 | joblib.dump(study, f"./optuna/{self.robot}/cycleik_ik_optimizer_{self.args.gpu}.pkl")
121 | print("Best Config:\n {0}".format(study.best_params))
--------------------------------------------------------------------------------
/cycleik_pytorch/datasets.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os, sys
3 | import random
4 | import time
5 | from threading import Thread
6 |
7 | import cv2
8 | import numpy as np
9 | from PIL import Image
10 | from torch.utils.data import Dataset
11 | import pickle
12 | from tqdm import tqdm
13 | from .utils import get_kinematic_params
14 | from scipy.spatial import ConvexHull, convex_hull_plot_2d
15 | from scipy.spatial.transform import Rotation as R
16 | import open3d as o3d
17 | np.set_printoptions(threshold=20000)
18 | np.set_printoptions(suppress=True)
19 |
20 |
21 | class IKDataset(Dataset):
22 | def __init__(self, root, mode="train", test=False, robot="nicol", config=None, augmented=False):
23 | super(IKDataset, self).__init__()
24 | self.test = test
25 | self.ground_truth_A = []
26 | self.ground_truth_B = []
27 | self.unnormalized_A = []
28 | self.unnormalized_B = []
29 |
30 | print(robot)
31 | assert config is not None
32 |
33 | workspace_interval_array, workspace_center_array, limits_upper, limits_lower, normalize_interval_array, normalize_center_array = get_kinematic_params(config)
34 |
35 | #print(diff_upper_lower)
36 |
37 | data_path = os.path.dirname(os.path.abspath(__file__))
38 | print(data_path)
39 |
40 | with open(data_path + "/../data/" + root + ".p", 'rb') as f:
41 | loaded_values = pickle.load(f)
42 |
43 |
44 | if mode=="train":
45 | loaded_values[0] = loaded_values[0][:1000000]
46 | loaded_values[1] = loaded_values[1][:1000000]
47 |
48 | if mode == "test":
49 | loaded_values[0] = loaded_values[0][:100000]
50 | loaded_values[1] = loaded_values[1][:100000]
51 | # print(loaded_values2)
52 | if mode == "val":
53 | loaded_values[0] = loaded_values[0][:200000]
54 | loaded_values[1] = loaded_values[1][:200000]
55 |
56 | for i in range(len(loaded_values[0])):
57 |
58 | pose = np.array(loaded_values[0][i], dtype=np.float32)
59 | js = np.array(loaded_values[1][i], dtype=np.float32)
60 |
61 | self.unnormalized_A.append(np.array(js, dtype=np.float32))
62 | self.unnormalized_B.append(np.array(pose, dtype=np.float32))
63 |
64 | #pose[:3] = np.true_divide(pose[:3], workspace)
65 | pose[:3] = np.add(pose[:3], workspace_center_array)
66 | pose[:3] = np.true_divide(pose[:3], workspace_interval_array)
67 |
68 | if i == 0: assert len(limits_upper) == len(limits_lower) == len(js) == config["robot_dof"]
69 |
70 | js = np.add(js, normalize_center_array)
71 | js = np.true_divide(js, normalize_interval_array)
72 |
73 | self.ground_truth_A.append(np.array(js, dtype=np.float32))
74 | self.ground_truth_B.append(np.array(pose, dtype=np.float32))
75 |
76 | print("Max. val joint states: {0}".format(np.max(self.unnormalized_A, axis=0)))
77 | print("Min. val joint states: {0}".format(np.min(self.unnormalized_A, axis=0)))
78 | print("\n")
79 | print("Max. val poses: {0}".format(np.max(self.unnormalized_B, axis=0)))
80 | print("Min. val poses: {0}".format(np.min(self.unnormalized_B, axis=0)))
81 | print("\n")
82 | print("Max. val normalized joint states: {0}".format(np.max(self.ground_truth_A, axis=0)))
83 | print("Min. val normalized joint states: {0}".format(np.min(self.ground_truth_A, axis=0)))
84 | print("\n")
85 | print("Max. val normalized poses: {0}".format(np.max(self.ground_truth_B, axis=0)))
86 | print("Min. val normalized poses: {0}".format(np.min(self.ground_truth_B, axis=0)))
87 |
88 | points = np.array(self.unnormalized_B)[:, :3]
89 |
90 | # Initialize a point cloud object
91 | pcd = o3d.geometry.PointCloud()
92 |         # Add the sampled end-effector positions as points
93 | pcd.points = o3d.utility.Vector3dVector(points)
94 | workspace_convex_hull, point_indices = pcd.compute_convex_hull()
95 | workspace_convex_hull.compute_vertex_normals()
96 |
97 | workspace_volume = workspace_convex_hull.get_volume()
98 | workspace_volume *= 1000000 # cubic meter to cubic centimeter
99 | print(f'Volume of Action Space: {workspace_volume} cm³')
100 |
101 | workspace_sample_density = len(self.unnormalized_B) / workspace_volume
102 |
103 | print(f'Sample Density: {workspace_sample_density} samples per cm³')
104 |
105 | self.data_size = len(self.ground_truth_B)
106 | self.normalize_center_array = normalize_center_array
107 | self.normalize_interval_array = normalize_interval_array
108 | self.workspace_center_array = workspace_center_array
109 | self.workspace_interval_array = workspace_interval_array
110 | print("Size Dataset: ", self.data_size)
111 |
112 | #voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.05)
113 | # Initialize a visualizer object
114 | #vis = o3d.visualization.Visualizer()
115 | # Create a window, name it and scale it
116 | #vis.create_window(window_name='Bunny Visualize', width=800, height=600)
117 |
118 | # Add the voxel grid to the visualizer
119 | #vis.add_geometry(voxel_grid)
120 |
121 |         # We run the visualizer
122 | #vis.run()
123 | # Once the visualizer is closed destroy the window and clean up
124 | #vis.destroy_window()
125 | def __getitem__(self, index):
126 | item_gt_A = self.ground_truth_A[index % len(self.ground_truth_A)]
127 | item_gt_B = self.ground_truth_B[index % len(self.ground_truth_B)]
128 | item_real_A = self.unnormalized_A[index % len(self.unnormalized_A)]
129 | item_real_B = self.unnormalized_B[index % len(self.unnormalized_B)]
130 | return {"gt_A": item_gt_A, "gt_B": item_gt_B, "real_A": item_real_A, "real_B": item_real_B}
131 |
132 | def __len__(self):
133 | return len(self.ground_truth_B)
134 |
135 | def get_size(self):
136 | return self.data_size
137 |
138 | def get_norm_params(self):
139 | return [self.normalize_center_array, self.normalize_interval_array, self.workspace_center_array, self.workspace_interval_array]
140 |
141 |
142 |
--------------------------------------------------------------------------------
/visualize/visualize_multi_run_loss.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2024. Jan-Gerrit Habekost. GNU General Public License. https://www.gnu.org/licenses/gpl-3.0.html.
2 |
3 |
4 | import matplotlib.pyplot as plt
5 | import pickle
6 | import numpy as np
7 |
8 | # Set the axes title font size
9 | plt.rc('axes', labelsize=14)
10 | # Set the font size for x tick labels
11 | plt.rc('xtick', labelsize=12)
12 | # Set the font size for y tick labels
13 | plt.rc('ytick', labelsize=12)
14 | # Set the legend font size
15 | plt.rc('legend', fontsize=12)
16 | # Set the font size of the figure title
17 | plt.rc('figure', titlesize=12)
18 |
19 | def main():
20 | robots = ["nicol", "nico", "fetch", "valkyrie", "panda"]
21 | robots_CAP = ["NICOL", "NICO", "Fetch", "Valkyrie", "Panda"]
22 | colors = ['blue', 'orange', 'green', 'red', 'purple']
23 | #data = pickle.load(open(f"./weights/nicol/TRAINING_SAVES/100_epochs/1400/train_ik_loss_with_kinematics.p", 'rb'))
24 | all_data = {}
25 | for robot in robots:
26 | #data_runs = []
27 |
28 | all_pos_errors = np.empty((0,10))
29 | all_orientation_errors = np.empty((0, 10))
30 | for run in range(10):
31 | current_run = pickle.load(open(f"/home/jan-gerrit/repositories/cycleik_2/weights/IROS_saves_precision_26_02/results/{robot}/train_MLP_with_kinematics_loss_IROS_train_run_{run}.p", 'rb'))
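            # indices 3 and 4 of the pickled training stats hold the per-epoch
            # position and orientation errors (10 epochs per run)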
32 | position = current_run[3]
33 | orientation = current_run[4]
34 | all_pos_errors = np.concatenate((all_pos_errors, np.array(position).reshape((1,10))), axis=0)
35 | all_orientation_errors = np.concatenate((all_orientation_errors, np.array(orientation).reshape((1,10))), axis=0)
36 | #data_runs.append([position, orientation])
37 | min_error_pos = np.min(all_pos_errors, axis=0)
38 | max_error_pos = np.max(all_pos_errors, axis=0)
39 | avg_error_pos = np.mean(all_pos_errors, axis=0)
40 | min_error_rot = np.min(all_orientation_errors, axis=0)
41 | max_error_rot = np.max(all_orientation_errors, axis=0)
42 | avg_error_rot = np.mean(all_orientation_errors, axis=0)
43 | error_dict = {}
44 | error_dict['pos'] = [avg_error_pos, min_error_pos, max_error_pos]
45 | error_dict['rot'] = [avg_error_rot, min_error_rot, max_error_rot]
46 | all_data[robot] = error_dict
47 |
48 |         lowest_err = np.inf
49 |         lowest_index = -1
50 |         pos_error = all_pos_errors[:, 9]            # final-epoch position error per run
51 |         rot_error = all_orientation_errors[:, 9]    # final-epoch orientation error per run
52 |         all_err = (pos_error * 10) + rot_error      # position weighted 10x against orientation
53 |         for i in range(10):
54 |             if all_err[i] < lowest_err:
55 |                 lowest_err = all_err[i]
56 |                 lowest_index = i
57 | 
58 |         print(f'{robot} best run: {lowest_index}, with error: {lowest_err}')
59 |
60 |
61 | time_series = []
62 | small_time_series = [1]
63 | print(len(all_data['nicol']['pos'][0]))
64 | print(all_data['nicol']['pos'][0])
65 |
66 | for i in range(len(all_data['nicol']['pos'][0])):
67 | #if i == 0: continue
68 | time_series.append(i)
69 |
70 | for i in range(int(len(all_data['nicol']['pos'][0]) / 10)):
71 | #if i == 0: continue
72 | small_time_series.append((i+1) * 10)
73 |
74 | print(len(time_series))
75 |
76 | fig, ax = plt.subplots()
77 | ax.set_xlabel('Epoch')
78 | ax.set_ylabel('Pos. Loss')
79 | #kinematic_loss = ax.plot(time_series, data[0][:])
80 | for e, robot in enumerate(robots):
81 | kinematic_loss = ax.plot(time_series, all_data[robot]['pos'][0])
82 | poly = ax.fill_between(time_series, all_data[robot]['pos'][1], all_data[robot]['pos'][2], facecolor=colors[e], alpha=0.5,interpolate=True)
83 | kinematic_loss[0].set_label(robots_CAP[e])
84 | #kinematic_loss = ax.plot(small_time_series, data[2][:])
85 | #kinematic_loss = ax.plot(time_series, data[4])
86 | #kinematic_loss[0].set_label('Validation Loss (MAE)')
87 | # kinematic_loss = ax.plot(time_series, data[1][1:])
88 | #kinematic_loss = ax.plot(time_series, data[1])
89 | #kinematic_loss[0].set_label('Zero-Controller Loss (MSE)')
90 | ax.legend(loc='upper right') # , bbox_to_anchor=(1.1, 0.5))
91 | # ax.legend(loc='lower right', bbox_to_anchor=(0.35, -1.15))
92 | ax.xaxis.label.set_fontweight('bold')
93 | ax.yaxis.label.set_fontweight('bold')
94 | # ax.legend()
95 | #plt.yticks(np.arange(0.0, 0.025, 0.002))
96 | plt.yticks(np.arange(0.0, 0.04, 0.005))
97 | #plt.annotate('train: %0.5f' % data[3][-1], xy=(1, data[3][-1]), xytext=(-81, 21),
98 | # xycoords=('axes fraction', 'data'), textcoords='offset points')
99 | #plt.annotate('val: %0.5f' % data[4][-1], xy=(1, data[4][-1]), xytext=(-81, 8),
100 | # xycoords=('axes fraction', 'data'), textcoords='offset points')
101 | plt.tight_layout()
102 | plt.savefig('./img/losses/vis_multi_run_loss.png')
103 | plt.show()
104 |
105 | fig, ax = plt.subplots()
106 | ax.set_xlabel('Epoch')
107 | ax.set_ylabel('Rot. Loss')
108 | # kinematic_loss = ax.plot(time_series, data[0][:])
109 | for e, robot in enumerate(robots):
110 | kinematic_loss = ax.plot(time_series, all_data[robot]['rot'][0])
111 | poly = ax.fill_between(time_series, all_data[robot]['rot'][1], all_data[robot]['rot'][2], facecolor=colors[e],
112 | alpha=0.5, interpolate=True)
113 | kinematic_loss[0].set_label(robots_CAP[e])
114 | # kinematic_loss = ax.plot(small_time_series, data[2][:])
115 | # kinematic_loss = ax.plot(time_series, data[4])
116 | # kinematic_loss[0].set_label('Validation Loss (MAE)')
117 | # kinematic_loss = ax.plot(time_series, data[1][1:])
118 | # kinematic_loss = ax.plot(time_series, data[1])
119 | # kinematic_loss[0].set_label('Zero-Controller Loss (MSE)')
120 | ax.legend(loc='upper right') # , bbox_to_anchor=(1.1, 0.5))
121 | # ax.legend(loc='lower right', bbox_to_anchor=(0.35, -1.15))
122 | ax.xaxis.label.set_fontweight('bold')
123 | ax.yaxis.label.set_fontweight('bold')
124 | # ax.legend()
125 | # plt.yticks(np.arange(0.0, 0.025, 0.002))
126 | plt.yticks(np.arange(0.0, 0.3, 0.03))
127 | # plt.annotate('train: %0.5f' % data[3][-1], xy=(1, data[3][-1]), xytext=(-81, 21),
128 | # xycoords=('axes fraction', 'data'), textcoords='offset points')
129 | # plt.annotate('val: %0.5f' % data[4][-1], xy=(1, data[4][-1]), xytext=(-81, 8),
130 | # xycoords=('axes fraction', 'data'), textcoords='offset points')
131 | plt.tight_layout()
132 |     plt.savefig('./img/losses/vis_multi_run_rot_loss.png')  # distinct name, otherwise the position plot above is overwritten
133 | plt.show()
134 |
135 | if __name__ == '__main__':
136 | main()
--------------------------------------------------------------------------------
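
Note: the script above reduces ten training runs per robot to a per-epoch mean with a
min/max envelope. The underlying plotting pattern, as a minimal sketch on synthetic data:

    import numpy as np
    import matplotlib.pyplot as plt

    errors = np.abs(np.random.randn(10, 10)) * 0.01   # (runs, epochs), synthetic data
    epochs = np.arange(errors.shape[1])
    fig, ax = plt.subplots()
    ax.plot(epochs, errors.mean(axis=0), label="mean over runs")
    ax.fill_between(epochs, errors.min(axis=0), errors.max(axis=0), alpha=0.5, interpolate=True)
    ax.legend(loc='upper right')
    plt.show()

--------------------------------------------------------------------------------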
/visualize/visualize_result_over_epochs.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import matplotlib
3 | import pickle
4 | import numpy as np
5 |
6 | font = {'family' : 'normal',
7 | 'weight' : 'bold',
8 | 'size' : 10}
9 |
10 | #plt.rcParams['font.size'] = 12
11 | #plt.rcParams['font.weight'] = 'bold'
12 | # Set the axes title font size
13 | plt.rc('axes', labelsize=15)
14 | # Set the font size for x tick labels
15 | plt.rc('xtick', labelsize=12)
16 | # Set the font size for y tick labels
17 | plt.rc('ytick', labelsize=12)
18 | # Set the legend font size
19 | plt.rc('legend', fontsize=12)
20 | # Set the font size of the figure title
21 | plt.rc('figure', titlesize=12)
22 | #matplotlib.rc('label', labelsize=20)
23 | #matplotlib.rc('ytick', labelsize=20)
24 |
25 | def main():
26 | robot = "nicol"
27 |
28 | ik_full_on_small = [1.6215, 1.110, 0.6139, 0.7318, 1.0699, 1.0425, 6.0]
29 | ik_small_errors = [1.3425, 0.6016, 0.4583, 0.3472, 0.2953, 0.2964, 0.2749]
30 | ik_full_errors = [2.0238, 1.457, 1.0221, 1.172, 1.5190, 1.681, 6.0]
31 | ik_full_1mio_errors = [3.1720, 1.5167, 1.654, 1.7188, 1.3619, 6.0]
32 | gan_small_errors = [6.2028, 3.9612, 4.2949, 3.3177, 2.892, 20.0]
33 | gan_full_errors = [4.0819, 4.5565, 3.5515, 3.2947, 2.7955, 20.0]
34 | gan_full_1mio_errors = [7.0762, 4.4462, 3.9986, 4.2755, 20.0, 20.0]
35 | time_series = [10, 50, 100, 200, 300, 400, 500]
36 | time_series_GAN = [10, 20, 30, 40, 50, 60]
37 |
38 | rot_ik_full_on_small = [0.1715, 0.0952, 0.0583, 0.0722, 0.1250, 0.1173, 1.0]
39 | rot_ik_small_errors = [0.1665, 0.0621, 0.0473, 0.0381, 0.0335, 0.0345, 0.0328]
40 | rot_ik_full_errors = [0.274, 0.3359, 0.1523, 0.1503, 0.4008, 0.3214, 1.0]
41 | rot_ik_full_1mio_errors = [0.4616, 0.2918, 0.248, 0.3542, 0.2616, 1.0]
42 | rot_gan_small_errors = [0.6659, 0.3803, 0.3963, 0.2805, 0.2662, 4.0]
43 | rot_gan_full_errors = [0.5079, 0.7302, 0.5737, 0.6564, 0.5631, 4.0]
44 | rot_gan_full_1mio_errors = [0.9354, 0.7674, 0.6493, 0.4917, 4.0, 4.0]
45 |
46 | fig, ax = plt.subplots(2, 2, figsize=(16,9))
47 | #print(ax)
48 | ax[0][0].set_xlabel('Training Epochs')
49 | ax[0][0].set_ylabel('Error (mm)')
50 | kinematic_loss = ax[0][0].plot(time_series, ik_small_errors, marker="^", color="royalblue")
51 | kinematic_loss[0].set_label(r'MLP - Training: $Small_{1000}$ - Test: $Small_{100}$', )
52 | kinematic_loss = ax[0][0].plot(time_series, ik_full_errors, marker="^", color="forestgreen")
53 | kinematic_loss[0].set_label(r'MLP - Training: $Full_{1400}$ - Test: $Full_{140}$')
54 | kinematic_loss = ax[0][0].plot(time_series[:6], ik_full_1mio_errors, marker="^", color="saddlebrown")
55 | kinematic_loss[0].set_label(r'MLP - Training: $Full_{1000}$ - Test: $Full_{100}$')
56 | kinematic_loss = ax[0][0].plot(time_series, ik_full_on_small, linestyle='dashed', marker="^", color="forestgreen")
57 | kinematic_loss[0].set_label(r'MLP - Training: $Full_{1400}$ - Test: $Small_{100}$')
58 | ax[0][0].legend(loc='upper center', bbox_to_anchor=(0.35, 1.15))
59 | ax[0][0].set_yticks(np.arange(0.0, 3.5, 0.5))
60 | x_axis_scale = np.arange(0, 600, 100)
61 | x_axis_scale[0] = 10
62 | print(x_axis_scale)
63 | ax[0][0].set_xticks(x_axis_scale)
64 | ax[0][0].set_ylim(top=3.5, bottom=0.0)
65 |
66 |
67 | ax[1][0].set_xlabel('Training Epochs')
68 | ax[1][0].set_ylabel('Error (mm)')
69 | kinematic_loss = ax[1][0].plot(time_series_GAN, gan_small_errors, marker="^", color="orangered")
70 | kinematic_loss[0].set_label(r'GAN - Training: $Small_{1000}$ - Test: $Small_{100}$')
71 | kinematic_loss = ax[1][0].plot(time_series_GAN, gan_full_errors, marker="^", color="purple")
72 | kinematic_loss[0].set_label(r'GAN - Training: $Full_{1400}$ - Test: $Full_{140}$')
73 | kinematic_loss = ax[1][0].plot(time_series_GAN, gan_full_1mio_errors, marker="^", color="magenta")
74 | kinematic_loss[0].set_label(r'GAN - Training: $Full_{1000}$ - Test: $Full_{100}$')
75 | ax[1][0].legend(loc='upper center', bbox_to_anchor=(0.4, 1.1))
76 | ax[1][0].set_yticks(np.arange(0.0, 10.5, 1.0))
77 | x_axis_scale = np.arange(0, 60, 10)
78 | x_axis_scale[0] = 10
79 | print(x_axis_scale)
80 | ax[1][0].set_xticks(x_axis_scale)
81 | ax[1][0].set_ylim(top=10.5)
82 | ax[1][0].set_xlim(right=55)
83 |
84 | # print(ax)
85 | ax[0][1].set_xlabel('Training Epochs')
86 | ax[0][1].set_ylabel('Error (degree)')
87 | kinematic_loss = ax[0][1].plot(time_series, rot_ik_small_errors, marker="D", color="royalblue")
88 | kinematic_loss[0].set_label(r'MLP - Training: $Small_{1000}$ - Test: $Small_{100}$', )
89 | kinematic_loss = ax[0][1].plot(time_series, rot_ik_full_errors, marker="D", color="forestgreen")
90 | kinematic_loss[0].set_label(r'MLP - Training: $Full_{1400}$ - Test: $Full_{140}$')
91 | kinematic_loss = ax[0][1].plot(time_series[:6], rot_ik_full_1mio_errors, marker="D", color="saddlebrown")
92 | kinematic_loss[0].set_label(r'MLP - Training: $Full_{1000}$ - Test: $Full_{100}$')
93 | kinematic_loss = ax[0][1].plot(time_series, rot_ik_full_on_small, linestyle='dashed', marker="D", color="forestgreen")
94 | kinematic_loss[0].set_label(r'MLP - Training: $Full_{1400}$ - Test: $Small_{100}$')
95 | ax[0][1].legend(loc='upper center', bbox_to_anchor=(0.35, 1.15))
96 | ax[0][1].set_yticks(np.arange(0.0, 0.6, 0.1))
97 | x_axis_scale = np.arange(0, 600, 100)
98 | x_axis_scale[0] = 10
99 | print(x_axis_scale)
100 | ax[0][1].set_xticks(x_axis_scale)
101 | ax[0][1].set_ylim(top=0.6, bottom=0.0)
102 |
103 | ax[1][1].set_xlabel('Training Epochs')
104 | ax[1][1].set_ylabel('Error (degree)')
105 | kinematic_loss = ax[1][1].plot(time_series_GAN, rot_gan_small_errors, marker="D", color="orangered")
106 | kinematic_loss[0].set_label(r'GAN - Training: $Small_{1000}$ - Test: $Small_{100}$')
107 | kinematic_loss = ax[1][1].plot(time_series_GAN, rot_gan_full_errors, marker="D", color="purple")
108 | kinematic_loss[0].set_label(r'GAN - Training: $Full_{1400}$ - Test: $Full_{140}$')
109 | kinematic_loss = ax[1][1].plot(time_series_GAN, rot_gan_full_1mio_errors, marker="D", color="magenta")
110 | kinematic_loss[0].set_label(r'GAN - Training: $Full_{1000}$ - Test: $Full_{100}$')
111 | ax[1][1].legend(loc='upper center', bbox_to_anchor=(0.4, 1.1))
112 | ax[1][1].set_yticks(np.arange(0.0, 1.6, 0.2))
113 | x_axis_scale = np.arange(0, 60, 10)
114 | x_axis_scale[0] = 10
115 | print(x_axis_scale)
116 | ax[1][1].set_xticks(x_axis_scale)
117 | ax[1][1].set_ylim(top=1.6)
118 | ax[1][1].set_xlim(right=55)
119 |
120 | fig.tight_layout()
121 | plt.savefig('./img/losses/losses_for_diff_epochs.png', format='png', dpi=150)
122 | plt.show()
123 |
124 |
125 | if __name__ == '__main__':
126 | main()
--------------------------------------------------------------------------------
/train/train_fk.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import pickle
4 | import torch.utils.data
5 | import torch.backends.cudnn as cudnn
6 | from tqdm import tqdm
7 | import optuna
8 | import pytorch_kinematics as pk
9 | from cycleik_pytorch import DecayLR, IKDataset, GenericGenerator, GenericNoisyGenerator, GenericDiscriminator
10 | from cycleik_pytorch import weights_init, load_config, slice_fk_pose, normalize_pose, renormalize_pose, renormalize_joint_state
11 | from .train import BaseTrainer
12 |
13 |
14 |
15 |
16 | class FKTrainer(BaseTrainer):
17 |
18 | def __init__(self, args, trial=None, config=None, train_dataset=None, test_dataset=None):
19 | super().__init__(args, trial, config, train_dataset, test_dataset)
20 |
21 | self.position_losses = []
22 | self.orientation_losses = []
23 | self.val_position_losses = []
24 | self.val_orientation_losses = []
25 |
26 | self.train_position_loss = 0
27 | self.train_orientation_loss = 0
28 | self.val_position_loss = 0
29 | self.val_orientation_loss = 0
30 |
31 | self.average_loss = 0
32 |
33 | self.kinematic_loss = torch.nn.L1Loss().to(self.device)
34 |
35 | def training_step(self, data, i):
36 | gt_A = data["gt_A"].to(self.device)
37 | real_B = data["real_B"].to(self.device)
38 |
39 | forward_A2B = self.fk_model(gt_A)
40 | forward_result = renormalize_pose(forward_A2B.clone(), batch_size=len(forward_A2B),
41 | workspace_renormalize=self.workspace_renormalize, workspace_move=self.workspace_move)
42 | position_error = self.kinematic_loss(forward_result[:, :3], real_B[:, :3])
43 | orientation_error = self.kinematic_loss(forward_result[:, 3:], real_B[:, 3:])
44 | errG = position_error + orientation_error
45 | errG.backward()
46 |
47 | return position_error, orientation_error
48 |
49 | def validation_step(self, data):
50 | gt_A = data["gt_A"].to(self.device)
51 | real_B = data["real_B"].to(self.device)
52 |
53 | with torch.no_grad():
54 | forward_A2B = self.fk_model(gt_A)
55 | forward_A2B = renormalize_pose(forward_A2B, batch_size=len(forward_A2B),
56 | workspace_renormalize=self.workspace_renormalize,
57 | workspace_move=self.workspace_move)
58 | cycle_loss_B2A_position = self.kinematic_loss(forward_A2B[:, :3], real_B[:, :3])
59 | cycle_loss_B2A_orientation = self.kinematic_loss(forward_A2B[:, 3:], real_B[:, 3:])
60 |             # the per-batch losses are accumulated once in process_validation_losses;
61 |             # summing them here as well would count every validation batch twice
62 |
63 | return cycle_loss_B2A_position, cycle_loss_B2A_orientation
64 |
65 | def process_training_losses(self, cycle_loss_B2A_position, cycle_loss_B2A_orientation, bs, epoch):
66 | position_error = cycle_loss_B2A_position.item()
67 | orientation_error = cycle_loss_B2A_orientation.item()
68 |
69 | loss_for_lr = (position_error + orientation_error) / 2
70 | self.average_loss += loss_for_lr * (bs / self.batch_size)
71 | self.train_position_loss += position_error * (bs / self.batch_size)
72 | self.train_orientation_loss += orientation_error * (bs / self.batch_size)
73 |
74 |         self.progress_bar.set_description(
75 |             f"[{epoch}/{self.epochs - 1}] "
76 |             f"position_loss: {cycle_loss_B2A_position.item():.8f} "
77 |             f"orientation_loss: {cycle_loss_B2A_orientation.item():.8f} ")
78 |
79 | def process_validation_losses(self, cycle_loss_B2A_position, cycle_loss_B2A_orientation, bs, epoch):
80 | pos_loss = cycle_loss_B2A_position.item()
81 | rot_loss = cycle_loss_B2A_orientation.item()
82 |
83 | self.val_position_loss += pos_loss * (bs / 10000)
84 | self.val_orientation_loss += rot_loss * (bs / 10000)
85 |
86 |         # update the progress bar with the per-batch validation losses
87 |
88 |         self.progress_bar.set_description(
89 |             f"[{epoch}/{self.epochs - 1}] "
90 |             f"position_loss: {pos_loss:.8f} "
91 |             f"orientation_loss: {rot_loss:.8f} "
92 |         )
93 |
94 | def create_checkpoint(self, epoch):
95 | if epoch % 5 == 0 and not self.optimizer_run:
96 | torch.save(self.fk_model.state_dict(), f"weights/{self.robot}/model_FK_epoch_{epoch}_with_kinematics.pth")
97 |
98 | def train(self):
99 | torch.cuda.empty_cache()
100 |
101 | for epoch in range(0, self.epochs):
102 | self.progress_bar = tqdm(enumerate(self.train_dataloader), total=len(self.train_dataloader))
103 | self.train_position_loss = 0
104 | self.train_orientation_loss = 0
105 | self.average_loss = 0
106 |
107 | for i, data in self.progress_bar:
108 | self.fk_optimizer.zero_grad()
109 | cycle_loss_B2A_position, cycle_loss_B2A_orientation = self.training_step(data, i)
110 | self.fk_optimizer.step()
111 | self.process_training_losses(cycle_loss_B2A_position, cycle_loss_B2A_orientation,
112 | len(data["gt_B"]), epoch)
113 |
114 | self.position_losses.append(self.train_position_loss)
115 | self.orientation_losses.append(self.train_orientation_loss)
116 |
117 | self.create_checkpoint(epoch)
118 |
119 | self.val_position_loss = 0
120 | self.val_orientation_loss = 0
121 |
122 | if epoch == 0 or (epoch + 1) % 10 == 0:
123 | self.progress_bar = tqdm(enumerate(self.test_dataloader), total=len(self.test_dataloader))
124 | for i, data in self.progress_bar:
125 | # get batch size data
126 | cycle_loss_B2A_val_position, cycle_loss_B2A_val_orientation = self.validation_step(data)
127 | self.process_validation_losses(cycle_loss_B2A_val_position, cycle_loss_B2A_val_orientation,
128 | len(data["gt_B"]), epoch)
129 |
130 | self.val_orientation_losses.append(self.val_orientation_loss)
131 | self.val_position_losses.append(self.val_position_loss)
132 |
133 | avg_loss = self.average_loss / (self.train_data_size / self.batch_size)
134 |
135 | if self.optimizer_run:
136 | self.trial.report(avg_loss, epoch)
137 |
138 | if self.trial.should_prune():
139 | raise optuna.TrialPruned()
140 |
141 | print("Avg Loss: {0}".format(avg_loss))
142 |
143 | self.fk_lr_scheduler.step()
144 |
145 | print([self.position_losses, self.orientation_losses, self.val_position_losses, self.val_orientation_losses])
146 |
147 | if not self.optimizer_run:
148 | # save last check pointing
149 | torch.save(self.fk_model.state_dict(), f"weights/{self.robot}/model_FK_with_kinematics.pth")
150 | with open(rf"results/{self.robot}/train_FK_with_kinematics_loss.p", "wb") as output_file:
151 | pickle.dump([self.position_losses, self.orientation_losses,
152 | self.val_position_losses, self.val_orientation_losses], output_file)
153 |
154 |         return (self.val_position_losses[-1] + self.val_orientation_losses[-1]) / 2
--------------------------------------------------------------------------------
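
Note: FKTrainer is driven by the argparse namespace that BaseTrainer in train/train.py
expects; the exact flag set lives in the training entry point and is assumed here.
A rough invocation sketch:

    from train.train_fk import FKTrainer

    def run_fk_training(args):
        """args: the argparse.Namespace built by the training entry point."""
        trainer = FKTrainer(args)   # trial/config/datasets can be passed for Optuna runs
        return trainer.train()      # combined validation position/rotation loss

--------------------------------------------------------------------------------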
/ga/ga.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 | import pytorch_kinematics as pk
3 | import torch
4 | from cycleik_pytorch import load_config, renormalize_pose, normalize_pose, slice_fk_pose, renormalize_joint_state, IKDataset
5 | from tqdm import tqdm
6 |
7 | class GA:
8 |
9 | def __init__(self, nbr_generations, population_size, mutation_factor, recombination_factor, config, robot, cuda=False, gpu=0):
10 | self.config = config
11 |
12 | self.nbr_generations = nbr_generations
13 | self.population_size = population_size
14 | self.mutation_factor = mutation_factor
15 | self.recombination_factor = recombination_factor
16 |
17 | self.train_data = self.config["train_data"]
18 | self.test_data = self.config["test_data"]
19 | self.robot_dof = self.config["robot_dof"]
20 | self.robot_urdf = self.config["robot_urdf"]
21 | self.robot_eef = self.config["robot_eef"]
22 | self.robot = robot
23 |
24 | device_name = f"cuda:{gpu}" if cuda else "cpu"
25 | self.device = torch.device(device_name)
26 |
27 | self.population = None
28 | self.target = None
29 | self.elite = None
30 |
31 | chain = pk.build_serial_chain_from_urdf(open(self.robot_urdf).read(), self.robot_eef)
32 | self.chain = chain.to(dtype=torch.float32, device=self.device)
33 |
34 | self.mutation_arrays = torch.zeros((self.nbr_generations, self.population_size, self.robot_dof + 1)).to(self.device)
35 | for i in range(self.nbr_generations):
36 | self.mutation_arrays[i, :, :self.robot_dof] = (
37 | torch.randn((self.population_size, self.robot_dof)).to(self.device) * 0.3)
38 |
39 | def recombine(self):
40 | mating_pool = None
41 | new_population = None
42 | for individual in self.population.clone():
43 | recombination_probability = torch.rand(1,1)
44 | #print(individual[8])
45 |             if recombination_probability[0, 0] < 1 - individual[self.robot_dof]:  # last entry holds the normalized fitness
46 | if mating_pool is None:
47 |                     mating_pool = individual.unsqueeze(0)  # keep the mating pool two-dimensional
48 | else:
49 | mating_pool = torch.vstack((mating_pool, individual))
50 |
51 | #print(mating_pool)
52 |         while mating_pool is not None and len(mating_pool) > 1:
53 | mating_index = torch.randint(size=(1, 2), high=len(mating_pool))
54 |             individual_1 = mating_pool[mating_index[0, 0], :self.robot_dof]
55 |             individual_2 = mating_pool[mating_index[0, 1], :self.robot_dof]
56 |
57 | new_individual_1 = torch.zeros(size=(1, self.robot_dof + 1)).to(self.device)
58 | new_individual_2 = torch.zeros(size=(1, self.robot_dof + 1)).to(self.device)
59 |
60 | crossover_points = torch.randint(size=(1,4), low=2,high=self.robot_dof)
61 | crossover_points = torch.unique(crossover_points, sorted=True)
62 |             last_point = 0
63 |             take_from_2 = True
64 |             for crossover_point in [*crossover_points, self.robot_dof]:   # include the tail segment
65 |                 source_1 = individual_2 if take_from_2 else individual_1
66 |                 source_2 = individual_1 if take_from_2 else individual_2
67 |                 new_individual_1[0, last_point:crossover_point] = source_1[last_point:crossover_point]
68 |                 new_individual_2[0, last_point:crossover_point] = source_2[last_point:crossover_point]
69 |                 take_from_2 = not take_from_2   # alternate the parent per segment (multi-point crossover)
70 |                 last_point = crossover_point
71 |
72 | if new_population is None:
73 | #print(new_individual_1)
74 | new_population = new_individual_1
75 | #print(new_individual_2)
76 | #print(new_population)
77 | new_population = torch.concatenate((new_population, new_individual_2), dim=0)
78 | #print(len(new_population))
79 | else:
80 | new_population = torch.concatenate((new_population, new_individual_1, new_individual_2), dim=0)
81 |
82 |             for idx in sorted([mating_index[0, 0].item(), mating_index[0, 1].item()], reverse=True):
83 |                 mating_pool = torch.concatenate((mating_pool[:idx], mating_pool[idx + 1:]), dim=0)   # larger index first so the smaller stays valid
84 |
85 | if len(new_population) > int(self.population_size * 0.8):
86 | new_population = new_population[:int(self.population_size * 0.8)]
87 |
88 | nbr_new_individuals = len(new_population)
89 | #print("\n\nNew Individuals: ", nbr_new_individuals)
90 | nbr_elites = self.population_size - nbr_new_individuals
91 | self.elite = self.population[:nbr_elites]
92 |
93 | return new_population, nbr_new_individuals
94 |
95 | def mutate(self, new_population, generation_idx):
96 | #print(new_population.shape)
97 | #print(self.mutation_arrays[generation_idx, :len(new_population)].shape)
98 | #print(self.mutation_arrays[generation_idx].shape)
99 | new_population = torch.add(new_population,
100 | self.mutation_arrays[generation_idx, :len(new_population)])
101 | #new_population[:, :8] = new_population[:, :8] + torch.randn((len(new_population), self.robot_dof)).to(self.device) * 0.3
102 | return new_population
103 |
104 | def evaluate_fitness(self, solution, target, sort=True):
105 | fk_tensor = self.chain.forward_kinematics(solution)
106 | forward_result = slice_fk_pose(fk_tensor, batch_size=len(solution))
107 | position_error = torch.sum(torch.abs(torch.subtract(forward_result[:, :3], target[:, :3])), dim=1) * 100
108 | orientation_error = torch.sum(torch.abs(torch.subtract(forward_result[:, 3:], target[:, 3:])), dim=1)
109 | full_error = position_error + orientation_error
110 | min_val = torch.min(full_error)
111 | max_val = torch.max(full_error)
112 |         full_error -= min_val
113 |         full_error /= (max_val - min_val)   # min-max normalization to [0, 1]
114 | new_population = torch.zeros((self.population_size, self.robot_dof)).to(self.device)
115 | if sort:
116 | sorted, indices = torch.sort(full_error)
117 | #print(sorted)
118 | for e, index in enumerate(indices):
119 | new_population[e] = solution[index, :self.robot_dof]
120 | self.population = torch.concat((new_population, torch.reshape(sorted, shape=(len(sorted), 1))), dim=1)
121 | return sorted
122 | return full_error
123 |
124 |
125 | def init_population(self, initial_seeds):
126 | nbr_seeds = len(initial_seeds)
127 | nbr_random_individuals = self.population_size - nbr_seeds
128 | random_individuals = (torch.rand(nbr_random_individuals, self.robot_dof).to(self.device) * 2) - 1
129 | population = torch.concat((initial_seeds, random_individuals), dim=0)
130 | idx = torch.randperm(population.shape[0])
131 | population = population[idx].view(population.size())
132 | fitness_value = torch.zeros((self.population_size, 1)).to(self.device)
133 |         population = torch.concat((population, fitness_value), dim=1)   # append the fitness column, matching later generations
134 | return population
135 |
136 | def run(self, target, initial_seeds=torch.Tensor([])):
137 | self.population = self.init_population(initial_seeds)
138 | #print(len(self.population))
139 | self.target = target.repeat((self.population_size, 1))
140 |
141 | for i in tqdm(range(self.nbr_generations)):
142 | self.evaluate_fitness(self.population, self.target)
143 | if i == self.nbr_generations - 1:
144 | return self.population[0]
145 | new_population, nbr_new_individuals = self.recombine()
146 | new_population = self.mutate(new_population, i)
147 | new_population = torch.concatenate((new_population, self.elite), dim=0)
148 | self.population = new_population[:self.population_size]
149 |
--------------------------------------------------------------------------------
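
Note: a minimal sketch of how the GA class above can be run against a single target pose
(seven values: position xyz plus orientation quaternion). The config access mirrors the
pattern used elsewhere in this repository; the hyperparameter values are illustrative:

    import torch
    from cycleik_pytorch import load_config
    from ga import GA

    config = load_config("nicol")["right_arm"]
    ga = GA(nbr_generations=50, population_size=512, mutation_factor=0.3,
            recombination_factor=0.8, config=config, robot="nicol")
    target = torch.tensor([[0.3, 0.0, 0.5, -1.0, 0.0, 0.0, 0.0]])
    best = ga.run(target)                       # fittest individual of the final generation
    joint_state = best[:config["robot_dof"]]    # strip the trailing fitness entry

--------------------------------------------------------------------------------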
/data_samplers/moveit_datasampler.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2024. Jan-Gerrit Habekost. GNU General Public License. https://www.gnu.org/licenses/gpl-3.0.html.
2 |
3 | import argparse
4 | import copy
5 | import os
6 | import random
7 | from numpy import array, array_equal, allclose
8 | import numpy as np
9 | import torch.backends.cudnn as cudnn
10 | import torch.utils.data
11 | import torch.utils.data.distributed
12 | import torchvision.transforms as transforms
13 | import torchvision.utils as vutils
14 | from tqdm import tqdm
15 | from scipy.spatial.transform import Rotation as R
16 | import os
17 | from cycleik_pytorch import load_config, IKDataset
18 | import pytorch_kinematics as pk
19 | import time
20 | import matplotlib.pyplot as plt
21 | import pandas as ps
22 | import multiprocessing as mp
23 | from multiprocessing import Manager
24 | import rospy
25 | from moveit_msgs.msg import RobotState
26 | from moveit_msgs.srv import GetPositionFK, GetPositionIK, GetPositionIKRequest, GetPositionFKRequest
27 | from sensor_msgs.msg import JointState
28 | from geometry_msgs.msg import Pose, PoseStamped
29 | import std_msgs.msg
30 | import moveit_msgs.msg
31 | import moveit_msgs.srv
32 | import bio_ik_msgs.msg
33 | import bio_ik_msgs.srv
34 | from visualization_msgs.msg import Marker
35 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
36 | from std_msgs.msg import Header, ColorRGBA
37 | import pickle
38 | np.set_printoptions(precision=32)
39 |
40 |
41 | def execute_fk_request(result_queue, robot_dof, joint_names, robot_eef, process_id, lower_js_limit, upper_js_limit, samples):
42 | rospy.init_node(f"moveit_ik_experiment_{process_id}", anonymous=True)
43 | rospy.wait_for_service("/compute_fk")
44 | moveit_ik = rospy.ServiceProxy("/compute_fk", GetPositionFK)
45 |
46 | lower_js_limit = copy.deepcopy(lower_js_limit)
47 | upper_js_limit = copy.deepcopy(upper_js_limit)
48 | samples = copy.deepcopy(samples)
49 |
50 | keep_running = len(result_queue[0]) < samples
51 |
52 | while keep_running:
53 | #target_pose_input = pose_queue.get(block=True)
54 | #real_b_np = copy.deepcopy(target_pose_input)
55 | #del target_pose_input
56 |
57 | in_workspace = False
58 |
59 | random_js = []
60 | for i in range(robot_dof):
61 | joint_val = random.uniform(lower_js_limit[i], upper_js_limit[i])
62 | random_js.append(joint_val)
63 |
64 | # rospy.loginfo("Position: \n%s", correct_poses)
65 | # rospy.loginfo("Joint State: \n%s", random_js)
66 |
67 | joint_state_msg = JointState()
68 |
69 | joint_state_msg.name = copy.deepcopy(joint_names)
70 | #print(random_js)
71 | joint_state_msg.position = random_js
72 | robot_state_msg = RobotState()
73 | robot_state_msg.joint_state = joint_state_msg
74 | robot_state_msg.is_diff = True
75 |
76 | h = std_msgs.msg.Header()
77 | h.stamp = rospy.Time.now()
78 |
79 | result = None
80 | try:
81 |
82 | result = moveit_ik(h, [copy.deepcopy(robot_eef)], robot_state_msg)
83 | rospy.sleep(0.0001)
84 | except rospy.ServiceException as exc:
85 | rospy.logerr("Service did not process request: %s", exc)
86 | rospy.sleep(0.0001)
87 | continue
88 |
89 | if result is not None and result.error_code.val == 1:
90 | pose_msg = result.pose_stamped[0].pose
91 | pose = [pose_msg.position.x, pose_msg.position.y, pose_msg.position.z, pose_msg.orientation.x,
92 | pose_msg.orientation.y, pose_msg.orientation.z, pose_msg.orientation.w, ]
93 | js = joint_state_msg.position
94 | result_queue[0].append(pose)
95 | result_queue[1].append(js)
96 |
97 | keep_running = len(result_queue[0]) < samples
98 | #print(pose_queue.qsize())
99 | #print(return_positions)
100 |
101 | del moveit_ik
102 | rospy.signal_shutdown("Shutting down FK sampler")
103 |
104 | def watch_progress(result_queue, nbr_samples):
105 | progress_bar = tqdm(total=nbr_samples)
106 |
107 | keep_running = len(result_queue[0]) < nbr_samples
108 | last_val = 0
109 | while keep_running:
110 | #print(keep_running)
111 | curr_val = copy.deepcopy(len(result_queue[0]))
112 | progress_bar.update(curr_val - last_val)
113 | last_val = copy.deepcopy(curr_val)
114 | keep_running = len(result_queue[0]) < nbr_samples
115 | time.sleep(2)
116 |
117 | del progress_bar
118 |
119 | def main(args):
120 |
121 | torch.cuda.empty_cache()
122 |
123 | if args.manualSeed is None:
124 | args.manualSeed = random.randint(1, 10000)
125 | print("Random Seed: ", args.manualSeed)
126 | random.seed(args.manualSeed)
127 | torch.manual_seed(args.manualSeed)
128 |
129 | cudnn.benchmark = True
130 |
131 | if torch.cuda.is_available() and not args.cuda:
132 | print("WARNING: You have a CUDA device, so you should probably run with --cuda")
133 |
134 | config = load_config(args.robot)[f'{args.chain}']
135 |
136 | val_data = config["val_data"]
137 | robot_dof = config["robot_dof"]
138 | #robot_urdf = config["robot_urdf"]
139 | robot_eef = config["robot_eef"]
140 | joint_name_list = config['joint_name']
141 | upper_js_limit = config['limits']['upper']
142 | lower_js_limit = config['limits']['lower']
143 |
144 | #move_group = config['move_group']
145 | #zero_js = config['home_js']
146 |
147 | # Dataset
148 | #dataset = IKDataset(root=val_data, test=True, robot=args.robot, config=config, mode='val')
149 | #dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, pin_memory=True)
150 |
151 | device = torch.device(f"cuda:{args.gpu}" if args.cuda else "cpu")
152 |
153 | #progress_bar = tqdm(enumerate(dataloader), total=len(dataloader))
154 |
155 |
156 |
157 | ctx = mp.get_context('forkserver')
158 | manager = Manager()
159 | result_queue = manager.list()
160 | result_queue.append(manager.list())
161 | result_queue.append(manager.list())
162 |
163 | processes = []
164 |
165 | for i in range(os.cpu_count() - 2):
166 |         process = ctx.Process(target=execute_fk_request, args=(result_queue, robot_dof, joint_name_list, robot_eef, i, lower_js_limit, upper_js_limit, args.samples))
167 | process.start()
168 | processes.append(process)
169 |
170 | watcher_process = ctx.Process(target=watch_progress, args=(result_queue, args.samples))
171 | watcher_process.start()
172 | watcher_process.join()
173 | #watcher_process.terminate()
174 | #processes.append(watcher_process)
175 |
176 | #print('here')
177 |
178 | for p in processes:
179 | p.join()
180 | #p.terminate()
181 |     # keep only the requested number of samples from the shared result lists
182 |     res = [list(result_queue[0][:args.samples]), list(result_queue[1][:args.samples])]
183 | #print(res)
184 |
185 | with open(f'./data_samplers/results_{args.robot}_{int(args.samples / 1000)}_{args.chain}.p', 'wb') as f:
186 | pickle.dump(res, f)
187 |     # the context manager closes the file on exit
188 |
189 |
190 | if __name__ == '__main__':
191 |     parser = argparse.ArgumentParser(
192 |         description="Multi-process FK data sampler that queries the MoveIt compute_fk service")
193 | parser.add_argument("--cuda", action="store_true", help="Enables cuda")
194 | parser.add_argument("--manualSeed", type=int,
195 | help="Seed for initializing training. (default:none)")
196 | parser.add_argument("--robot", type=str, default="nicol", help="Robot model IK is trained for")
197 |     parser.add_argument("--gpu", type=int, default=0, help="GPU index used when --cuda is set")
198 |     parser.add_argument("--chain", type=str, default='', help="Kinematic chain to sample data for")
199 |     parser.add_argument("--samples", type=int, default=100000, help="Number of FK samples to generate")
200 | args = parser.parse_args()
201 | print(args)
202 |
203 | main(args)
--------------------------------------------------------------------------------
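
Note: the sampler is started directly as a script and requires a running MoveIt instance
that provides the /compute_fk service; an illustrative invocation:

    python data_samplers/moveit_datasampler.py --robot nicol --chain right_arm --samples 100000

--------------------------------------------------------------------------------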
/visualize/moveit/visualize_redundancy.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2024. Jan-Gerrit Habekost. GNU General Public License. https://www.gnu.org/licenses/gpl-3.0.html.
2 | import numpy as np
3 | import torch
4 |
5 | from cycleik_pytorch import CycleIK
6 | import argparse
7 | from moveit_msgs.msg import DisplayRobotState, RobotState, DisplayTrajectory, RobotTrajectory
8 | from sensor_msgs.msg import JointState
9 | from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
10 | from cycleik_pytorch import load_config
11 |
12 | import rospy
13 |
14 |
15 | def display_redundancy(pose_np, z, js_samples, display_trajectory_msg, cycleik, config, t):
16 |
17 | joint_distribution = cycleik.inverse_kinematics_distribution(pose=pose_np, noise=z, js_samples=js_samples,
18 | calculate_error=True)
19 |
20 | #for i in range(0):
21 | # joint_distribution = joint_distribution.detach().cpu().numpy()
22 | # min_noise = np.min(joint_distribution, axis=0)
23 | # max_noise = np.max(joint_distribution, axis=0)
24 | # joint_distribution = np.random.uniform(low=min_noise, high=max_noise,
25 | # size=(js_samples, config['robot_dof'])) * 1.5
26 | # joint_distribution = np.clip(a=joint_distribution, a_min=-1, a_max=1)
27 | #
28 | # joint_distribution = cycleik.inverse_kinematics_distribution(pose=pose_np, noise=joint_distribution,
29 | # js_samples=js_samples,
30 | # calculate_error=True)
31 | #
32 | print(joint_distribution)
33 | display_trajectory_msg.trajectory.append(RobotTrajectory())
34 | display_trajectory_msg.trajectory[t].joint_trajectory = JointTrajectory()
35 | display_trajectory_msg.trajectory[t].joint_trajectory.joint_names = config["joint_name"]
36 |
37 | for e, joint_position in enumerate(joint_distribution):
38 | trajectory_point = JointTrajectoryPoint()
39 | trajectory_point.positions = list(joint_position)
40 |
41 | display_trajectory_msg.trajectory[t].joint_trajectory.points.append(trajectory_point)
42 |
43 | return display_trajectory_msg
44 |
45 | def display_simple_path(js, display_trajectory_msg, config, t):
46 |
47 | display_trajectory_msg.trajectory.append(RobotTrajectory())
48 | display_trajectory_msg.trajectory[t].joint_trajectory = JointTrajectory()
49 | display_trajectory_msg.trajectory[t].joint_trajectory.joint_names = config["joint_name"]
50 |
51 | trajectory_point = JointTrajectoryPoint()
52 | trajectory_point.positions = list(js)
53 |
54 | display_trajectory_msg.trajectory[t].joint_trajectory.points.append(trajectory_point)
55 |
56 | return display_trajectory_msg
57 |
58 |
59 | def main(args):
60 |
61 | ros_node = rospy.init_node("cycleik_vis", anonymous=True)
62 |
63 | print(args.robot)
64 |
65 | config = load_config(args.robot)
66 | config = config[f"{args.chain}"]
67 | if args.network == 'GAN':
68 | js_samples = config['debug']['null_space_samples']
69 |
70 | nbr_points = config['debug']['points']
71 |
72 | cycleik = CycleIK(robot=args.robot, cuda_device=args.gpu, chain=args.chain, verbose=True)
73 |
74 | display_pub = rospy.Publisher('move_group/display_planned_path', DisplayTrajectory, queue_size=10)
75 |
76 | start_pose_np = np.array(config['debug']['pose'])
77 | position_axis = 0
78 | if config['debug']['axis'] == 'X':
79 | position_axis = 0
80 | elif config['debug']['axis'] == 'Y':
81 | position_axis = 1
82 | elif config['debug']['axis'] == 'Z':
83 | position_axis = 2
84 |     else:
85 |         raise NotImplementedError(f'You specified a wrong value for the debug axis under config/{args.robot}.yaml,'
86 |                                   f' choose from the following: \'X\', \'Y\' or \'Z\'')
87 | #start_pose_np = np.array([0.3, 0.0, 0.5, -1., 0., 0., 0.])
88 | start_pose_np = np.reshape(start_pose_np, newshape=(1, 7))
89 |
90 | joint_state_msg = JointState()
91 | joint_state_msg.name = config["joint_name"]
92 | joint_state_msg.position = list(np.zeros(config['robot_dof']))
93 |
94 | robot_state_msg = RobotState()
95 | robot_state_msg.joint_state = joint_state_msg
96 | robot_state_msg.is_diff = True
97 |
98 | display_trajectory_msg = DisplayTrajectory()
99 | display_trajectory_msg.trajectory_start = robot_state_msg
100 |
101 |
102 |
103 | if args.network == 'GAN':
104 | for t in range(nbr_points):
105 |
106 | pose_np = np.copy(start_pose_np)
107 | #pose_np[0, 0] -= t * 0.05
108 | pose_np[0, position_axis] -= t * 0.05
109 | pose_np = np.repeat(pose_np, js_samples, axis=0)
110 |
111 | z = np.random.uniform(low=-1.0, high=1.0, size=(js_samples, config['robot_dof']))
112 |
113 | display_trajectory_msg = display_redundancy(pose_np, z, js_samples, display_trajectory_msg, cycleik, config, t)
114 | else:
115 |
116 | pose_np = np.copy(start_pose_np)
117 | joint_distribution = None
118 | for t in range(1, nbr_points):
119 | next_pose = np.copy(start_pose_np)
120 | next_pose[:,position_axis] = np.copy(start_pose_np)[:,position_axis] + (t * 0.05)
121 | pose_np = np.concatenate((pose_np, next_pose), axis=0)
122 |
123 | joint_distribution, _, _, _ = cycleik.inverse_kinematics(poses=pose_np, calculate_error=args.calculate_error)
124 |
125 | print(joint_distribution)
126 |
127 | for t, js in enumerate(joint_distribution):
128 | display_trajectory_msg = display_simple_path(js, display_trajectory_msg, config, t)
129 |
130 |
131 |
132 |
133 |
134 |
135 | #joint_state_msg = JointState()
136 | #joint_state_msg.name =
137 | #joint_state_msg.position = list(joint_position)
138 | #
139 | #robot_state_msg = RobotState()
140 | #robot_state_msg.joint_state = joint_state_msg
141 | #robot_state_msg.is_diff = True
142 | #
143 | #display_state_msg = DisplayRobotState()
144 | #display_state_msg.state = robot_state_msg
145 | display_pub.publish(display_trajectory_msg)
146 | #
147 | rospy.sleep(1)
148 | display_pub.publish(display_trajectory_msg)
149 | #
150 | rospy.sleep(1)
151 | #display_pub.publish(display_trajectory_msg)
152 |
153 |
154 | if __name__ == '__main__':
155 |     parser = argparse.ArgumentParser(
156 |         description="Visualizes the kinematic redundancy of CycleIK solutions via MoveIt")
157 |     parser.add_argument("--cuda", action="store_true", help="Enables cuda")
158 |     parser.add_argument("--calculate_error", action="store_true", help="Calculates the FK error of the IK solutions")
159 |     parser.add_argument("--use_ga", action="store_true", help="Enables the genetic-algorithm optimizer")
160 |     parser.add_argument("--use_optimizer", action="store_true", help="Enables the gradient-based optimizer")
161 |     parser.add_argument("--manualSeed", type=int,
162 |                         help="Seed for initializing training. (default:none)")
163 |     parser.add_argument("--robot", type=str, default="nicol", help="Robot model the IK is trained for")
164 |     parser.add_argument("--gpu", type=int, default=0, help="GPU index used when --cuda is set")
165 |     parser.add_argument("--network", type=str, default="MLP", help="Network type, either 'MLP' or 'GAN'")
166 |     parser.add_argument("--core-model", type=str, default="nicol", help="Core model the fine-tuned network is based on")
167 |     parser.add_argument("--autoencoder", action="store_true", help="Enables learned FK")
168 |     parser.add_argument("--two-stage", action="store_true", help="Enables two-stage learned FK training")
169 |     parser.add_argument("--finetune", action="store_true", help="Enables fine-tuning of a pretrained core model")
170 |     parser.add_argument("--chain", type=str, default="right_arm", help="Kinematic chain of the robot")
171 |
172 | #print(args)
173 |
174 | test_args = parser.parse_args()
175 | main(test_args)
--------------------------------------------------------------------------------
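
Note: the CycleIK convenience class used above exposes batched IK directly; a minimal
sketch following the calls made in this script (constructor defaults are assumed for
the arguments not shown):

    import numpy as np
    from cycleik_pytorch import CycleIK

    cycleik = CycleIK(robot="nicol", cuda_device=0, chain="right_arm")
    poses = np.array([[0.3, 0.0, 0.5, -1.0, 0.0, 0.0, 0.0]])   # x, y, z + quaternion
    joint_states, _, _, _ = cycleik.inverse_kinematics(poses=poses, calculate_error=False)

--------------------------------------------------------------------------------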
/visualize/visualize_ik_optimizer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import sklearn.datasets
3 | import sklearn.metrics
4 | from sklearn.model_selection import train_test_split
5 | import joblib
6 |
7 | import optuna
8 |
9 | # You can use Matplotlib instead of Plotly for visualization by simply replacing `optuna.visualization` with
10 | # `optuna.visualization.matplotlib` in the following examples.
11 | from optuna.visualization.matplotlib import plot_contour
12 | from optuna.visualization.matplotlib import plot_edf
13 | from optuna.visualization.matplotlib import plot_intermediate_values
14 | from optuna.visualization.matplotlib import plot_optimization_history
15 | from optuna.visualization.matplotlib import plot_parallel_coordinate
16 | from optuna.visualization.matplotlib import plot_param_importances
17 | from optuna.visualization.matplotlib import plot_slice
18 |
19 | import matplotlib.pyplot as plt
20 | #font = {'family' : 'normal',
21 | # 'weight' : 'bold',
22 | # 'size' : 10}
23 | plt.rc('axes', labelsize=15)
24 | plt.rc('legend', fontsize=10)
25 |
26 | def main():
27 | study = joblib.load('./optuna/nicol/OPTIMIZER_SAVES/1000_dataset/cycleik_ik_optimizer.pkl')
28 | #study = optuna.load_study(study_name="cycleik_ik_optimizer", storage=f'sqlite:///optuna/nicol/OPTIMIZER_SAVES/1400_dataset/ik_optimizer_results.db')
29 |
30 | print(len(study.trials))
31 | ### optimization history ###
32 | opt_history_subplot = plot_optimization_history(study, target_name='Mean Absolute Error')
33 | opt_history_subplot.set_ylim(top=0.01, bottom=0.0)
34 | opt_history_subplot.xaxis.label.set_fontsize(18)
35 | opt_history_subplot.xaxis.label.set_fontweight('bold')
36 | opt_history_subplot.yaxis.label.set_fontsize(18)
37 | opt_history_subplot.yaxis.label.set_fontweight('bold')
38 | opt_history_subplot.tick_params(axis='x', labelsize=14)
39 | opt_history_subplot.tick_params(axis='y', labelsize=14)
40 | opt_history_subplot.set_title('')
41 | opt_history_subplot.legend(loc='upper right', fontsize="16")
42 | opt_history = opt_history_subplot.figure
43 | opt_history.set_size_inches(10, 8)
44 | plt.tight_layout()
45 |
46 |
47 | ### param importance ###
48 | param_importance_subplot = plot_param_importances(study, target_name='Mean Absolute Error')
49 | param_importance_subplot.xaxis.label.set_fontweight('bold')
50 | param_importance_subplot.xaxis.label.set_fontsize(18)
51 | #param_importance_subplot.yaxis.set_label('Hyperparameters')
52 | param_importance_subplot.yaxis.label.set_fontweight('bold')
53 | param_importance_subplot.yaxis.label.set_fontsize(18)
54 | param_importance_subplot.tick_params(axis='x', labelsize=14)
55 | param_importance_subplot.tick_params(axis='y', labelsize=14)
56 | param_importance_subplot.set_title('')
57 | param_importance = param_importance_subplot.figure
58 | param_importance.set_size_inches(10, 8)
59 | plt.tight_layout()
60 |
61 | ### contours ###
62 | nbr_tanh_to_nbr_layers_subplot = plot_contour(study, params=["nbr_tanh", "nbr_layers"], target_name='Mean Absolute Error', )
63 | nbr_tanh_to_nbr_layers_subplot.xaxis.label.set_fontweight('bold')
64 | nbr_tanh_to_nbr_layers_subplot.yaxis.label.set_fontweight('bold')
65 | nbr_tanh_to_nbr_layers_subplot.set_title('')
66 | nbr_tanh_to_nbr_layers = nbr_tanh_to_nbr_layers_subplot.figure
67 | plt.tight_layout()
68 |
69 | nbr_tanh_to_batch_size_subplot = plot_contour(study, params=["nbr_tanh", "batch_size"], target_name='Mean Absolute Error', )
70 | nbr_tanh_to_batch_size_subplot.xaxis.label.set_fontweight('bold')
71 | nbr_tanh_to_batch_size_subplot.yaxis.label.set_fontweight('bold')
72 | nbr_tanh_to_batch_size_subplot.set_title('')
73 | nbr_tanh_to_batch_size = nbr_tanh_to_batch_size_subplot.figure
74 | plt.tight_layout()
75 |
76 | nbr_tanh_to_lr_subplot = plot_contour(study, params=["nbr_tanh", "lr"], target_name='Mean Absolute Error', )
77 | nbr_tanh_to_lr_subplot.xaxis.label.set_fontweight('bold')
78 | nbr_tanh_to_lr_subplot.yaxis.label.set_fontweight('bold')
79 | nbr_tanh_to_lr_subplot.set_title('')
80 | nbr_tanh_to_lr = nbr_tanh_to_lr_subplot.figure
81 | plt.tight_layout()
82 |
83 | lr_to_batch_size_subplot = plot_contour(study, params=["lr", "batch_size"], target_name='Mean Absolute Error', )
84 | lr_to_batch_size_subplot.xaxis.label.set_fontweight('bold')
85 | lr_to_batch_size_subplot.yaxis.label.set_fontweight('bold')
86 | lr_to_batch_size_subplot.set_title('')
87 | lr_to_batch_size = lr_to_batch_size_subplot.figure
88 | plt.tight_layout()
89 |
90 | batch_size_to_nbr_layers_subplot = plot_contour(study, params=["batch_size", "nbr_layers"], target_name='Mean Absolute Error', )
91 | batch_size_to_nbr_layers_subplot.xaxis.label.set_fontweight('bold')
92 | batch_size_to_nbr_layers_subplot.yaxis.label.set_fontweight('bold')
93 | batch_size_to_nbr_layers_subplot.set_title('')
94 | batch_size_to_nbr_layers = batch_size_to_nbr_layers_subplot.figure
95 | plt.tight_layout()
96 |
97 | lr_to_nbr_layers_subplot = plot_contour(study, params=["lr", "nbr_layers"], target_name='Mean Absolute Error', )
98 | lr_to_nbr_layers_subplot.xaxis.label.set_fontweight('bold')
99 | lr_to_nbr_layers_subplot.yaxis.label.set_fontweight('bold')
100 | lr_to_nbr_layers_subplot.set_title('')
101 | lr_to_nbr_layers = lr_to_nbr_layers_subplot.figure
102 | plt.tight_layout()
103 |
104 | ### slices ###
105 | slice_all_params_subplot = plot_parallel_coordinate(study, params=["lr", "batch_size", "nbr_layers", "nbr_tanh", "layer0_neurons", "layer1_neurons", "layer2_neurons", "layer3_neurons", "layer6_neurons", "layer7_neurons", "layer8_neurons"], target_name='Mean Absolute Error')
106 | slice_all_params_subplot.xaxis.label.set_fontweight('bold')
107 | slice_all_params_subplot.yaxis.label.set_fontweight('bold')
108 | slice_all_params = slice_all_params_subplot.figure
109 | slice_all_params.set_size_inches(20, 8)
110 | plt.tight_layout()
111 |
112 | slice_networks_structure_subplot = plot_parallel_coordinate(study, params=["layer0_neurons", "layer1_neurons", "layer2_neurons", "layer3_neurons", "layer6_neurons", "layer7_neurons", "layer8_neurons"], target_name='Mean Absolute Error')
113 | slice_networks_structure_subplot.xaxis.label.set_fontweight('bold')
114 | slice_networks_structure_subplot.yaxis.label.set_fontweight('bold')
115 | slice_networks_structure = slice_networks_structure_subplot.figure
116 | slice_networks_structure.set_size_inches(20, 8)
117 | plt.tight_layout()
118 |
119 | slice_training_params_subplot = plot_parallel_coordinate(study, params=["lr", "batch_size", "nbr_layers", "nbr_tanh"])
120 | slice_training_params_subplot.xaxis.label.set_fontweight('bold')
121 | slice_training_params_subplot.yaxis.label.set_fontweight('bold')
122 | slice_training_params = slice_training_params_subplot.figure
123 | slice_training_params.set_size_inches(20, 8)
124 | plt.tight_layout()
125 |
126 | # save images
127 | opt_history.savefig('./img/optimizer/ik/opt_history.png', format='png', dpi=150)
128 | param_importance.savefig('./img/optimizer/ik/param_importance.png', format='png', dpi=150)
129 | nbr_tanh_to_nbr_layers.savefig('./img/optimizer/ik/nbr_tanh_to_nbr_layers.png', format='png', dpi=150)
130 | nbr_tanh_to_batch_size.savefig('./img/optimizer/ik/nbr_tanh_to_batch_size.png', format='png', dpi=150)
131 | nbr_tanh_to_lr.savefig('./img/optimizer/ik/nbr_tanh_to_lr.png', format='png', dpi=150)
132 | lr_to_batch_size.savefig('./img/optimizer/ik/lr_to_batch_size.png', format='png', dpi=150)
133 | batch_size_to_nbr_layers.savefig('./img/optimizer/ik/batch_size_to_nbr_layers.png', format='png', dpi=150)
134 | lr_to_nbr_layers.savefig('./img/optimizer/ik/lr_to_nbr_layers.png', format='png', dpi=150)
135 | slice_all_params.savefig('./img/optimizer/ik/slice_all_params.png', format='png', dpi=150)
136 | slice_networks_structure.savefig('./img/optimizer/ik/slice_networks_structure.png', format='png', dpi=150)
137 | slice_training_params.savefig('./img/optimizer/ik/slice_training_params.png', format='png', dpi=150)
138 |
139 | print(study.best_trial)
140 |
141 | if __name__ == '__main__':
142 | main()
--------------------------------------------------------------------------------
/cycleik_pytorch/utils.py:
--------------------------------------------------------------------------------
1 | import random
2 | import torch
3 | import yaml
4 | import os
5 | import numpy as np
6 | from pathlib import Path
7 | import pytorch_kinematics as pk
8 |
9 | class ReplayBuffer:
10 | def __init__(self, max_size=10000):
11 | assert (max_size > 0), "Empty buffer or trying to create a black hole. Be careful."
12 | self.max_size = max_size
13 | self.data = []
14 |
15 | def push_and_pop(self, data):
16 | to_return = []
17 | #print("unsqueeze: {0}".format(data))
18 | if len(self.data) < self.max_size:
19 | self.data.append(data)
20 | to_return.append(data)
21 | else:
22 | if random.uniform(0, 1) > 0.5:
23 | to_return.append(data)
24 | i = random.randint(0, self.max_size - 1)
25 | self.data[i] = data
26 | else:
27 | to_return.append(data)
28 | return torch.cat(to_return)
29 |
30 | def load_config(robot):
31 | data_path = str(Path(__file__).parent.parent.absolute())
32 | with open(data_path + f"/config/{robot}.yaml", "r") as f:
33 | config = yaml.safe_load(f)
34 | return config
35 |
36 | # custom weights initialization called on netG and netD
37 | def weights_init(m):
38 | classname = m.__class__.__name__
39 | if classname.find("Conv") != -1:
40 | torch.nn.init.normal_(m.weight, 0.0, 0.02)
41 | elif classname.find("BatchNorm") != -1:
42 | torch.nn.init.normal_(m.weight, 1.0, 0.02)
43 | torch.nn.init.zeros_(m.bias)
44 |
45 | def get_kinematic_params(config):
46 | workspace_upper = np.array(config["workspace"]["upper"])
47 | workspace_lower = np.array(config["workspace"]["lower"])
48 | orig_workspace_upper = np.copy(workspace_upper)
49 | orig_workspace_lower = np.copy(workspace_lower)
50 | diff_upper_lower = np.abs(config["workspace"]["lower"]) - np.abs(config["workspace"]["upper"])
51 | workspace_interval_array = np.ones(3)
52 | workspace_center_array = np.zeros(3)
53 | assert len(diff_upper_lower) == 3 == len(config["workspace"]["lower"]) == len(config["workspace"]["upper"])
54 |
55 | for r in range(len(diff_upper_lower)):
56 | if diff_upper_lower[r] == 0 and workspace_upper[r] >= 0. >= workspace_lower[r]:
57 | workspace_interval_array[r] = abs(workspace_upper[r])
58 | elif diff_upper_lower[r] != 0 and workspace_upper[r] >= 0. >= workspace_lower[r]:
59 | half_diff = (diff_upper_lower[r] / 2)
60 | workspace_center_array[r] = half_diff
61 | workspace_interval_array[r] = max([abs(workspace_lower[r]), abs(workspace_upper[r])]) - abs(half_diff)
62 | elif diff_upper_lower[r] != 0 and ((workspace_upper[r] > 0. and workspace_lower[r] > 0.) or
63 | (workspace_upper[r] < 0. and workspace_lower[r] < 0.)):
64 | if workspace_upper[r] > 0. and workspace_lower[r] > 0.:
65 | workspace_center_array[r] = -workspace_lower[r]
66 | workspace_upper[r] = workspace_upper[r] - workspace_lower[r]
67 | workspace_lower[r] = 0.
68 | print(workspace_center_array[r])
69 |
70 | elif workspace_upper[r] < 0. and workspace_lower[r] < 0.:
71 | #print("Here")
72 | workspace_center_array[r] = abs(workspace_upper[r])
73 | workspace_lower[r] = workspace_lower[r] + abs(workspace_upper[r])
74 | workspace_upper[r] = 0.
75 |
76 | # local_diff_upper_lower = np.abs(config["limits"]["lower"]) - np.abs(config["limits"]["upper"])
77 | local_diff = workspace_lower[r] - workspace_upper[r]
78 | #print(local_diff)
79 | half_diff = (local_diff / 2)
80 | workspace_center_array[r] += half_diff
81 | print(workspace_center_array[r])
82 |
83 | workspace_interval_array[r] = abs(half_diff)
84 |
85 | else:
86 | raise NotImplementedError
87 |
88 | #workspace_interval_array = workspace_upper
89 | print(workspace_center_array)
90 | print(workspace_interval_array)
91 |
92 | limits_upper = np.array(config["limits"]["upper"])
93 | limits_lower = np.array(config["limits"]["lower"])
94 | orig_limits_upper = np.copy(limits_upper)
95 | orig_limits_lower = np.copy(limits_lower)
96 | diff_upper_lower = np.abs(config["limits"]["lower"]) - np.abs(config["limits"]["upper"])
97 | normalize_interval_array = np.ones(config["robot_dof"])
98 | normalize_center_array = np.zeros(config["robot_dof"])
99 | assert len(diff_upper_lower) == config["robot_dof"] == len(config["limits"]["lower"]) == len(config["limits"]["upper"])
100 |
101 | for r in range(len(diff_upper_lower)):
102 | if diff_upper_lower[r] == 0 and limits_upper[r] >= 0. >= limits_lower[r]:
103 | normalize_interval_array[r] = abs(limits_upper[r])
104 | elif diff_upper_lower[r] != 0 and limits_upper[r] >= 0. >= limits_lower[r]:
105 | half_diff = (diff_upper_lower[r] / 2)
106 | normalize_center_array[r] = half_diff
107 | normalize_interval_array[r] = max([abs(limits_lower[r]), abs(limits_upper[r])]) - abs(half_diff)
108 | elif diff_upper_lower[r] != 0 and ((limits_upper[r] > 0. and limits_lower[r] > 0.) or
109 | (limits_upper[r] < 0. and limits_lower[r] < 0.)):
110 | if limits_upper[r] > 0. and limits_lower[r] > 0.:
111 | normalize_center_array[r] = -limits_lower[r]
112 | limits_upper[r] = limits_upper[r] - limits_lower[r]
113 | limits_lower[r] = 0.
114 | elif limits_upper[r] < 0. and limits_lower[r] < 0.:
115 | print("Here")
116 | normalize_center_array[r] = abs(limits_upper[r])
117 | limits_lower[r] = limits_lower[r] + abs(limits_upper[r])
118 | limits_upper[r] = 0.
119 |
120 | # local_diff_upper_lower = np.abs(config["limits"]["lower"]) - np.abs(config["limits"]["upper"])
121 | local_diff = limits_lower[r] - limits_upper[r]
122 | print(local_diff)
123 | half_diff = -(local_diff / 2)
124 | normalize_center_array[r] += half_diff
125 | print(normalize_center_array[r])
126 | normalize_interval_array[r] = abs(half_diff)
127 | print(normalize_interval_array[r])
128 | else:
129 | raise NotImplementedError
130 |
131 | return workspace_interval_array, workspace_center_array, orig_limits_upper, orig_limits_lower, normalize_interval_array, normalize_center_array
132 |
133 |
134 | def renormalize_joint_state(joint_state, batch_size, single_renormalize, single_renormalize_move):
135 | joint_state = torch.mul(joint_state, single_renormalize.repeat(batch_size, 1))
136 | joint_state = torch.subtract(joint_state, single_renormalize_move.repeat(batch_size, 1))
137 | return joint_state
138 |
139 |
140 | def slice_fk_pose(pose, batch_size, rotation='quaternion'):
141 | pos = torch.reshape(pose.get_matrix()[:, :3, 3:], shape=(batch_size, 3))
142 | rot = None
143 | if rotation == 'quaternion':
144 | rot = pk.matrix_to_quaternion(pose.get_matrix()[:, :3, :3])
145 | rot = torch.concat((rot[:, 1:], rot[:, :1]), dim=1)
146 | elif rotation == 'angle':
147 | rot = pk.matrix_to_euler_angles(pose.get_matrix()[:, :3, :3], convention='ZYX')
148 | else:
149 | raise NotImplementedError
150 | return torch.concat((pos, rot), dim=1)
151 |
152 |
153 | def normalize_pose(pose, batch_size, workspace_move, workspace_renormalize, slice_pk_result=True):
154 | if slice_pk_result:
155 | forward_result = slice_fk_pose(pose, batch_size)
156 | else:
157 | forward_result = pose
158 | forward_result[:, :3] = torch.add(forward_result[:, :3], workspace_move)
159 | forward_result[:, :3] = torch.true_divide(forward_result[:, :3], workspace_renormalize)
160 | return forward_result
161 |
162 |
163 | def renormalize_pose(pose, batch_size, workspace_move, workspace_renormalize):
164 | pose[:, :3] = torch.mul(pose[:, :3], workspace_renormalize.repeat(batch_size, 1))
165 | pose[:, :3] = torch.subtract(pose[:, :3], workspace_move.repeat(batch_size, 1))
166 | return pose
167 |
168 |
169 | class JSD(torch.nn.Module):
170 | def __init__(self):
171 | super(JSD, self).__init__()
172 | self.kl = torch.nn.KLDivLoss(reduction='batchmean', log_target=True)
173 |
174 | def forward(self, p: torch.tensor, q: torch.tensor):
175 | p, q = p.view(-1, p.size(-1)), q.view(-1, q.size(-1))
176 | m = (0.5 * (p + q)).log()
177 | return 0.5 * (self.kl(m, p.log()) + self.kl(m, q.log()))
--------------------------------------------------------------------------------
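
Note: normalize_pose and renormalize_pose above are exact inverses over the Cartesian
position (the orientation part is left untouched); a minimal round-trip sketch with
illustrative workspace parameters:

    import torch
    from cycleik_pytorch import normalize_pose, renormalize_pose

    workspace_move = torch.tensor([0.0, 0.0, -0.5])        # illustrative offset
    workspace_renormalize = torch.tensor([0.8, 0.8, 0.5])  # illustrative interval
    pose = torch.tensor([[0.3, 0.1, 0.9, 0.0, 0.0, 0.0, 1.0]])
    norm = normalize_pose(pose.clone(), batch_size=1, workspace_move=workspace_move,
                          workspace_renormalize=workspace_renormalize, slice_pk_result=False)
    back = renormalize_pose(norm, batch_size=1, workspace_move=workspace_move,
                            workspace_renormalize=workspace_renormalize)
    assert torch.allclose(back, pose)   # (pos + move) / interval, then * interval - move

--------------------------------------------------------------------------------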
/cycleik_pytorch/models.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import numpy as np
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 |
8 |
9 | class FineTuneModel(nn.Module):
10 |
11 | def __init__(self, core_model, output_dimension):
12 | super(FineTuneModel, self).__init__()
13 |
14 |         self.prior_activation = torch.tanh
15 |         self.posteriori_activation = torch.tanh
16 | self.activation = core_model.activation
17 | input_size = core_model.input_dim
18 | output_size = core_model.output_dim
19 |
20 |         # NOTE: the core weights stay trainable here; set requires_grad to
21 |         #       False on core_model.parameters() to train only the adapters
22 |
23 |         self.layers = nn.ModuleList()
24 |         # input adapter: maps the (possibly different) input dimension onto
25 |         # the width expected by the first reused core layer
26 |
27 |         self.layers.append(nn.Linear(input_size, core_model.layers[1].in_features))
28 |
29 |         # reuse the core model's hidden layers; its input and output layers
30 |         # are dropped and replaced by the new adapter layers
31 |         for i in range(len(core_model.layers)):
32 |             if i == 0 or i == len(core_model.layers) - 1:
33 |                 continue
34 |             self.layers.append(core_model.layers[i])
35 |
36 |         # output adapter: maps the last reused hidden width onto the
37 |         # requested output dimension
38 |
39 |         self.layers.append(nn.Linear(core_model.layers[-2].out_features, output_dimension))
40 |
41 |
42 |
43 |
44 |     def forward(self, x):
45 |         # route each layer through its matching activation: adapter layers
46 |         # first, reused core layers in the middle, adapters again at the end
47 |         for e, layer in enumerate(self.layers):
48 |             if e <= 2:
49 |                 # input adapter
50 |                 x = self.prior_activation(layer(x))
51 |             elif 2 < e < (len(self.layers) - 3):
52 |                 # reused core layers
53 |                 x = self.activation(layer(x))
54 |             else:
55 |                 # output adapter
56 |                 x = self.posteriori_activation(layer(x))
57 |         return x
58 |
59 |
60 | class GenericGenerator(nn.Module):
61 | def __init__(self, input_size=8, output_size=8, layers=[], activation="GELU", nbr_tanh=2):
62 | super(GenericGenerator, self).__init__()
63 |
64 | self.activation = None
65 | if activation == "GELU":
66 | self.activation = F.gelu
67 | elif activation == "LeakyReLu":
68 | self.activation = F.leaky_relu
69 | elif activation == "CELU":
70 | self.activation = F.celu
71 | elif activation == "SELU":
72 | self.activation = F.selu
73 | else:
74 | self.activation = F.gelu
75 |
76 | self.nbr_tanh = nbr_tanh
77 | self.input_dim = input_size
78 | self.output_dim = output_size
79 | self.hidden_dim = layers
80 | current_dim = input_size
81 | self.layers = nn.ModuleList()
82 | for hdim in layers:
83 | next_layer = nn.Linear(current_dim, hdim)
84 | torch.nn.init.xavier_normal_(next_layer.weight)
85 | self.layers.append(next_layer)
86 |             # weights use Xavier-normal init (above); batch norm and other
87 |             # init schemes are deliberately not part of the active path
88 |
89 |
90 | current_dim = hdim
91 | self.layers.append(nn.Linear(current_dim, output_size))
92 |
93 |     def forward(self, x):
94 |         # hidden layers use the configured activation; the last nbr_tanh
95 |         # layers apply tanh so the output stays in the normalized range [-1, 1]
96 |         for e, layer in enumerate(self.layers):
97 |             if e < len(self.layers) - self.nbr_tanh:
98 |                 x = self.activation(layer(x))
99 |             else:
100 |                 x = torch.tanh(layer(x))
101 |         return x
102 |
103 |
104 |
105 |
106 |
107 |
108 | class GenericNoisyGenerator(nn.Module):
109 | def __init__(self, noise_vector_size=8, input_size=7, output_size=8, layers=[], activation="GELU", nbr_tanh=2):
110 | super(GenericNoisyGenerator, self).__init__()
111 | self.activation = None
112 | if activation == "GELU":
113 | self.activation = F.gelu
114 | elif activation == "LeakyReLu":
115 | self.activation = F.leaky_relu
116 | elif activation == "CELU":
117 | self.activation = F.celu
118 | elif activation == "SELU":
119 | self.activation = F.selu
120 | else:
121 | self.activation = F.gelu
122 |
123 | self.nbr_tanh = nbr_tanh
124 | self.input_dim = input_size
125 | self.output_dim = output_size
126 | self.hidden_dim = layers
127 | current_dim = noise_vector_size + input_size
128 | self.layers = nn.ModuleList()
129 | for hdim in layers:
130 | next_layer = nn.Linear(current_dim, hdim)
131 | torch.nn.init.xavier_normal_(next_layer.weight)
132 | self.layers.append(next_layer)
133 | current_dim = hdim
134 | self.layers.append(nn.Linear(current_dim, output_size))
135 |
136 | def forward(self, z, x):
137 |         input_tensor = torch.concat((z, x), dim=1)  # condition the noise vector on the target pose
138 | for e, layer in enumerate(self.layers):
139 | if e < len(self.layers) - self.nbr_tanh:
140 | input_tensor = self.activation(layer(input_tensor))
141 | else:
142 | input_tensor = torch.tanh(layer(input_tensor))
143 | return input_tensor
144 |
145 |
146 |
147 | class GenericDiscriminator(nn.Module):
148 |
149 | def __init__(self, input_size=8, output_size=8, layers=[], activation="GELU", nbr_tanh=2):
150 | super(GenericDiscriminator, self).__init__()
151 |
152 | self.activation = None
153 | if activation == "GELU":
154 | self.activation = F.gelu
155 | elif activation == "LeakyReLu":
156 | self.activation = F.leaky_relu
157 | elif activation == "CELU":
158 | self.activation = F.celu
159 | elif activation == "SELU":
160 | self.activation = F.selu
161 | else:
162 | self.activation = F.gelu
163 |
164 | self.nbr_tanh = nbr_tanh
165 | self.input_dim = input_size
166 | self.output_dim = output_size
167 | self.hidden_dim = layers
168 | current_dim = input_size
169 | self.layers = nn.ModuleList()
170 | for hdim in layers:
171 | self.layers.append(nn.Linear(current_dim, hdim))
172 | current_dim = hdim
173 | self.layers.append(nn.Linear(current_dim, output_size))
174 |
175 | def forward(self, x):
176 | for e, layer in enumerate(self.layers):
177 | if e < len(self.layers) - self.nbr_tanh:
178 | x = self.activation(layer(x))
179 | else:
180 | x = torch.tanh(layer(x))
181 | return x
182 |
183 |
184 | class AutoEncoder(nn.Module):
185 | def __init__(self, size):
186 | super(AutoEncoder, self).__init__()
187 | self.to_latent = nn.Sequential(
188 | # Input
189 | #nn.Flatten(),
190 | #nn.BatchNorm1d(size),
191 | nn.Linear(in_features=size, out_features=6),
192 | #nn.BatchNorm1d(200),
193 | nn.ReLU(inplace=True),
194 |
195 | # hidden layers
196 | nn.Linear(in_features=6, out_features=1),
197 | #nn.BatchNorm1d(200),
198 | nn.ReLU(inplace=True),
199 |
200 | #nn.Linear(in_features=200, out_features=3)
201 | )
202 |
203 | self.to_origin = nn.Sequential(
204 | #nn.BatchNorm1d(3),
205 | nn.Linear(in_features=1, out_features=6),
206 | #nn.BatchNorm1d(200),
207 | nn.ReLU(inplace=True),
208 |
209 | nn.Linear(in_features=6, out_features=size),
210 | #nn.BatchNorm1d(200),
211 | nn.ReLU(inplace=True),
212 |
213 | #nn.Linear(in_features=200, out_features=size)
214 | )
215 |
216 | self.softmax = nn.Softmax(dim=1)
217 |
218 |     def forward(self, x):
219 |         x = self.to_latent(x.float())  # encode to the 1-D latent code
220 |
221 |         x = self.to_origin(x)          # decode back to the input size
222 |
223 |         return x
224 |
--------------------------------------------------------------------------------
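Forward-pass sketch for the generic MLP classes above. The layer widths here
are invented; in the repository they are read from the per-robot YAML configs
(e.g. config/nicol.yaml). GenericNoisyGenerator works the same way but takes a
(noise, pose) pair:

import torch
from cycleik_pytorch import GenericGenerator

iknet = GenericGenerator(input_size=7, output_size=8,
                         layers=[512, 1024, 512],
                         activation="GELU", nbr_tanh=2)
pose_batch = torch.rand(16, 7) * 2 - 1  # normalized poses in [-1, 1]
joint_batch = iknet(pose_batch)         # (16, 8), tanh-bounded joint estimate
assert joint_batch.shape == (16, 8) and joint_batch.abs().max() <= 1.0
--------------------------------------------------------------------------------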
/test/test_moveit.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2024. Jan-Gerrit Habekost. GNU General Public License. https://www.gnu.org/licenses/gpl-3.0.html.
2 | import argparse
3 | import copy
4 | import os
5 | import random
6 | from numpy import array, array_equal, allclose
7 | import numpy as np
8 | import torch.backends.cudnn as cudnn
9 | import torch.utils.data
10 | import torch.utils.data.distributed
11 | import torchvision.transforms as transforms
12 | import torchvision.utils as vutils
13 | from tqdm import tqdm
14 | from scipy.spatial.transform import Rotation as R
15 |
16 | from cycleik_pytorch import load_config, IKDataset
17 | import pytorch_kinematics as pk
18 | import time
19 | import matplotlib.pyplot as plt
20 | import pandas as pd
21 | import multiprocessing as mp
22 | from multiprocessing import Manager
23 | import rospy
24 | from moveit_msgs.msg import RobotState
25 | from moveit_msgs.srv import GetPositionFK, GetPositionIK, GetPositionIKRequest
26 | from sensor_msgs.msg import JointState
27 | from geometry_msgs.msg import Pose, PoseStamped
28 | import std_msgs.msg
29 | import moveit_msgs.msg
30 | import moveit_msgs.srv
31 | import bio_ik_msgs.msg
32 | import bio_ik_msgs.srv
33 | from visualization_msgs.msg import Marker
34 | from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
35 | from std_msgs.msg import Header, ColorRGBA
36 | import pickle
37 | np.set_printoptions(precision=32)
38 |
39 | def execute_ik_request(pose_queue, result_queue, robot_dof, joint_names, robot_eef, group_name, zero_js, process_id):
40 | rospy.init_node(f"moveit_ik_experiment_{process_id}", anonymous=True)
41 | rospy.wait_for_service("/compute_ik")
42 | moveit_ik = rospy.ServiceProxy("/compute_ik", GetPositionIK)
43 |
44 |     keep_running = pose_queue.qsize() > 0  # qsize() is approximate, but sufficient as a stop signal here
45 |
46 | while keep_running:
47 | target_pose_input = pose_queue.get(block=True)
48 | real_b_np = copy.deepcopy(target_pose_input)
49 | del target_pose_input
50 | request = GetPositionIKRequest()
51 | #print(request)
52 | request.ik_request.group_name = copy.deepcopy(group_name)
53 | #request.ik_request.timeout.nsecs = 5000000
54 | request.ik_request.timeout.nsecs = 1000000
55 | request.ik_request.avoid_collisions = False
56 | request.ik_request.ik_link_name = copy.deepcopy(robot_eef)
57 |
58 | current_state = RobotState()
59 | current_state.is_diff = True
60 | #print(joint_names)
61 | #print(zero_js)
62 | current_state.joint_state.name = copy.deepcopy(joint_names)
63 | current_state.joint_state.position = copy.deepcopy(zero_js)
64 | request.ik_request.robot_state = current_state
65 |
66 | target_pose = PoseStamped()
67 | target_pose.pose.position.x = real_b_np[0]
68 | target_pose.pose.position.y = real_b_np[1]
69 | target_pose.pose.position.z = real_b_np[2]
70 | target_pose.pose.orientation.x = real_b_np[3]
71 | target_pose.pose.orientation.y = real_b_np[4]
72 | target_pose.pose.orientation.z = real_b_np[5]
73 | target_pose.pose.orientation.w = real_b_np[6]
74 | request.ik_request.pose_stamped = target_pose
75 |
76 | response = moveit_ik(request)
77 | rospy.sleep(0.001)
78 | return_positions = []
79 | if response.error_code.val != 1:
80 | for i in range(robot_dof):
81 | return_positions.append(-10)
82 | else:
83 | all_names = response.solution.joint_state.name
84 | full_js = response.solution.joint_state.position
85 | selected_js_unordered = []
86 | selected_names_unordered = []
87 | for i in range(len(full_js)):
88 | curr_name = all_names[i]
89 | if curr_name in joint_names:
90 | selected_names_unordered.append(curr_name)
91 | selected_js_unordered.append(full_js[i])
92 | for joint_name in joint_names:
93 | for e, unordered_name in enumerate(selected_names_unordered):
94 | if unordered_name == joint_name:
95 | return_positions.append(selected_js_unordered[e])
96 | keep_running = pose_queue.qsize() > 0
97 | #print(pose_queue.qsize())
98 | #print(return_positions)
99 | assert len(return_positions) == robot_dof
100 | result_queue.append([real_b_np, return_positions])
101 | #rospy.spin_once()
102 | del request
103 | del response
104 | del moveit_ik
105 | rospy.signal_shutdown("Shutting down IK test")
106 |
107 | def watch_progress(result_queue, nbr_samples):
108 | progress_bar = tqdm(total=nbr_samples)
109 |
110 | keep_running = len(result_queue) < nbr_samples
111 | last_val = 0
112 | while keep_running:
113 | #print(keep_running)
114 | curr_val = copy.deepcopy(len(result_queue))
115 | progress_bar.update(curr_val - last_val)
116 | last_val = copy.deepcopy(curr_val)
117 | keep_running = len(result_queue) < nbr_samples
118 | time.sleep(2)
119 |
120 | del progress_bar
121 |
122 | def main(args):
123 |
124 | torch.cuda.empty_cache()
125 |
126 | if args.manualSeed is None:
127 | args.manualSeed = random.randint(1, 10000)
128 | print("Random Seed: ", args.manualSeed)
129 | random.seed(args.manualSeed)
130 | torch.manual_seed(args.manualSeed)
131 |
132 | cudnn.benchmark = True
133 |
134 | if torch.cuda.is_available() and not args.cuda:
135 | print("WARNING: You have a CUDA device, so you should probably run with --cuda")
136 |
137 | config = load_config(args.robot)[f'{args.chain}']
138 |
139 | val_data = config["val_data"]
140 | robot_dof = config["robot_dof"]
141 | robot_urdf = config["robot_urdf"]
142 | robot_eef = config["robot_eef"]
143 | joint_name_list = config['joint_name']
144 | move_group = config['move_group']
145 | zero_js = config['home_js']
146 |
147 | try:
148 | os.makedirs(f"results/IROS/precision/{args.robot}/")
149 | except OSError:
150 | pass
151 |
152 | # Dataset
153 | dataset = IKDataset(root=val_data, test=True, robot=args.robot, config=config, mode='val')
154 | dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, pin_memory=True)
155 |
156 | device = torch.device(f"cuda:{args.gpu}" if args.cuda else "cpu")
157 |
158 | progress_bar = tqdm(enumerate(dataloader), total=len(dataloader))
159 |
160 |
161 |
162 | ctx = mp.get_context('forkserver')
163 | pose_queue = ctx.Queue()
164 | manager = Manager()
165 | result_queue = manager.list()
166 |
167 |     # fill the work queue with every validation pose before the worker
168 |     # processes start; each entry is a 7-D pose [x, y, z, qx, qy, qz, qw]
169 |     for i, data in progress_bar:
170 |         real_B = data["real_B"]
171 |
172 |
173 |
174 |         real_b_np = real_B.clone().numpy()[0]
175 |
176 |         real_b_np = list(real_b_np)
177 |         pose_queue.put(copy.deepcopy(real_b_np))
178 |
179 | processes = []
180 |
181 | for i in range(os.cpu_count() - 2):
182 | process = ctx.Process(target=execute_ik_request, args=(pose_queue, result_queue, robot_dof, joint_name_list, robot_eef, move_group, zero_js, i))
183 | process.start()
184 | processes.append(process)
185 |
186 | watcher_process = ctx.Process(target=watch_progress, args=(result_queue, pose_queue.qsize()))
187 | watcher_process.start()
188 | watcher_process.join()
189 | #watcher_process.terminate()
190 | #processes.append(watcher_process)
191 |
192 | #print('here')
193 |
194 | for p in processes:
195 | p.join()
196 | #p.terminate()
197 | res = list(result_queue)
198 | #print(res)
199 |
200 | with open(rf"results/IROS/precision/{args.robot}/{args.ik_name}.p", "wb") as output_file:
201 | pickle.dump(res, output_file)
202 |
203 |
204 | if __name__ == '__main__':
205 |     parser = argparse.ArgumentParser(
206 |         description="MoveIt IK baseline test: queries /compute_ik for every validation pose")
207 |     parser.add_argument("--cuda", action="store_true", help="Enables cuda")
208 |     parser.add_argument("--manualSeed", type=int,
209 |                         help="Seed for initializing the test. (default: none)")
210 |     parser.add_argument("--robot", type=str, default="nicol", help="Robot model the IK is tested for")
211 |     parser.add_argument("--gpu", type=int, default=0, help="Index of the GPU to use")
212 |     parser.add_argument("--chain", type=str, default='', help="Kinematic chain from the robot config")
213 |     parser.add_argument("--ik_name", type=str, default='', help="Name of the tested IK solver (used for the results file)")
214 | args = parser.parse_args()
215 | print(args)
216 |
217 | main(args)
--------------------------------------------------------------------------------
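The script above writes a list of [target_pose, joint_solution] pairs to a
pickle file; failed IK queries store -10 for every joint. A hypothetical
invocation and result inspection (the solver name and chain are placeholders,
and a ROS master with MoveIt's /compute_ik service must be running):

# python -m test.test_moveit --robot nicol --chain right_arm --ik_name bio_ik
import pickle

with open("results/IROS/precision/nicol/bio_ik.p", "rb") as f:
    results = pickle.load(f)
for target_pose, joint_solution in results[:3]:
    print(target_pose, joint_solution)
--------------------------------------------------------------------------------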
/test/test_fk.py:
--------------------------------------------------------------------------------
1 | from .test import BaseTester
2 | import numpy as np
3 | import torch
4 | from cycleik_pytorch import load_config, renormalize_pose, normalize_pose, slice_fk_pose, renormalize_joint_state, IKDataset
5 | from cycleik_pytorch import GenericGenerator, GenericDiscriminator
6 | from scipy.spatial.transform import Rotation as R
7 | import matplotlib.pyplot as plt
8 |
9 |
10 | class FKTester(BaseTester):
11 |
12 | def __init__(self, args):
13 | super().__init__(args)
14 | self.error_list = []
15 | self.min_error = 10000
16 | self.max_error = 0
17 | self.avg_error = 0
18 |
19 | self.min_error_ik = 10000
20 | self.max_error_ik = 0
21 | self.avg_ik_error = [0., 0., 0., 0., 0., 0.]
22 |
23 | self.over_1mm = 0
24 | self.over_2_5mm = 0
25 | self.over_5mm = 0
26 | self.over_1cm = 0
27 | self.over_1_5cm = 0
28 | self.over_2cm = 0
29 | self.over_5cm = 0
30 | self.over_10cm = 0
31 |
32 | self.over_1mm_ik = 0
33 | self.over_2_5mm_ik = 0
34 | self.over_5mm_ik = 0
35 | self.over_1cm_ik = 0
36 | self.over_1_5cm_ik = 0
37 | self.over_2cm_ik = 0
38 | self.over_5cm_ik = 0
39 | self.over_10cm_ik = 0
40 |
41 | self.it_counter = 0
42 |
43 | def test_step(self, **kwargs):
44 | pass
45 |
46 | def process_test_losses(self, **kwargs):
47 | pass
48 |
49 | def create_model(self):
50 | self.model = GenericDiscriminator(input_size=self.robot_dof, output_size=7,
51 | nbr_tanh=self.config["FKNet"]["architecture"]["nbr_tanh"],
52 | activation=self.config["FKNet"]["architecture"]["activation"],
53 | layers=self.config["FKNet"]["architecture"]["layers"]).to(self.device)
54 |
55 | def load_dataset(self):
56 | self.dataset = IKDataset(root=self.val_data, test=True, robot=self.robot, config=self.config)
57 | self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=10000, shuffle=False, pin_memory=True)
58 |
59 | def test(self):
60 | avg_ik_error = [0., 0., 0., 0., 0., 0.]
61 |         for i, data in self.progress_bar:
62 |             # gt_A: normalized joint states, gt_B: normalized target poses,
63 |             # real_B: unnormalized target poses
64 |             gt_A = data["gt_A"].to(self.device)
65 |             gt_B = data["gt_B"].to(self.device)
66 |             real_B = data["real_B"].to(self.device)
67 |
68 |
69 |             # run the learned FK model on the ground-truth joint states
70 |             with torch.no_grad():
71 |                 result_B = self.model(gt_A)
72 |
73 |                 # alternative evaluation path: renormalize the predicted joint
74 |                 # state and push it through the analytical kinematics chain:
75 |                 #js = renormalize_joint_state(result_B, batch_size=len(gt_B),
76 |                 #                             single_renormalize_move=self.single_renormalize_move,
77 |                 #                             single_renormalize=self.single_renormalize)
78 |                 #fk_tensor = self.chain.forward_kinematics(js)
79 |                 #forward_result = slice_fk_pose(fk_tensor, batch_size=len(gt_B))
80 |
81 | gt_b_np = gt_B.detach().cpu().numpy()
82 | real_b_np = real_B.detach().cpu().numpy()
83 | forward_result = renormalize_pose(result_B, len(gt_A), workspace_move=self.workspace_renormalize_move,
84 | workspace_renormalize=self.workspace_renormalize)
85 | forward_result_np = forward_result.clone().detach().cpu().numpy()
86 |
87 | r_target = R.from_quat(list(gt_b_np[:, 3:]))
88 | r_result = R.from_quat(list(forward_result_np[:, 3:]))
89 | # print(r_result)
90 | rotation_diff = r_target * r_result.inv()
91 | rotation_diff_save = rotation_diff.as_euler('zyx', degrees=True)
92 | rotation_diff = np.abs(rotation_diff_save)
93 | # print(rotation_diff_save.shape)
94 | # print(abs(result_A_np[:,:3] - gt_b_np[:,:3]))
95 | # print(np.array(rotation_diff))
96 |
97 | # print(np.abs(np.multiply(forward_result[:, :3], single_normalize_pos_np) - np.multiply(gt_b_np[:, :3], single_normalize_pos_np)))
98 | # print(np.array(rotation_diff))
99 | ik_error_batch = np.concatenate(
100 |                 (np.reshape(np.abs(forward_result_np[:, :3] - real_b_np[:, :3]), newshape=(10000, 3)),  # 10000 = dataloader batch size
101 | np.array(rotation_diff)), axis=1)
102 |
103 | # print(ik_error)
104 | # print("FK Moveit: ", moveit_fk_array)
105 |
106 | # print(error)
107 | for k, ik_error in enumerate(ik_error_batch):
108 | if np.sum(ik_error[:3]) / 3 < 0.01 and np.sum(ik_error[3:]) / 3 < 20.:
109 | self.count_success += 1
110 |                 # an exactly zero position error usually indicates a duplicate
111 |                 # validation sample rather than a perfect prediction
112 |
113 |
114 | self.error_list.append(ik_error)
115 | avg_ik_error = np.add(avg_ik_error, ik_error)
116 |
117 | # ik_error[:3] = np.true_divide(ik_error[:3], 1000)
118 | ik_error = np.average(ik_error[:3])
119 |
120 | if self.min_error_ik > ik_error:
121 | self.min_error_ik = ik_error
122 | if self.max_error_ik < ik_error:
123 | self.max_error_ik = ik_error
124 |
125 | if ik_error > 0.001:
126 | self.over_1mm_ik += 1
127 | if ik_error > 0.0025:
128 | self.over_2_5mm_ik += 1
129 | if ik_error > 0.005:
130 | self.over_5mm_ik += 1
131 | if ik_error > 0.01:
132 | self.over_1cm_ik += 1
133 | if ik_error > 0.015:
134 | self.over_1_5cm_ik += 1
135 | if ik_error > 0.02:
136 | self.over_2cm_ik += 1
137 | if ik_error > 0.05:
138 | self.over_5cm_ik += 1
139 | if ik_error > 0.1:
140 | self.over_10cm_ik += 1
141 |
142 | # rospy.sleep(0.00001)
143 |
144 | avg_ik_error = np.true_divide(avg_ik_error, self.samples)
145 | avg_ik_error_mm = np.copy(avg_ik_error)
146 | avg_ik_error_mm[:3] = np.multiply(avg_ik_error_mm[:3], 1000)
147 |
148 | error_array = np.array(self.error_list, dtype=np.float32)
149 |
150 | error_array_mean = np.concatenate((
151 | np.reshape(np.average(error_array[:, :3], axis=1), newshape=(self.samples, 1)),
152 | np.reshape(np.average(error_array[:, 3:], axis=1), newshape=(self.samples, 1))
153 | ), axis=1)
154 |
155 | error_array_mean_cleaned = np.copy(error_array_mean)
156 |
157 | outlier_rows = []
158 | for r, row in enumerate(error_array_mean):
159 | if row[0] > 0.01 or row[1] > 1.:
160 | outlier_rows.append(r)
161 |
162 | error_array_mean_cleaned = np.delete(error_array_mean_cleaned, outlier_rows, axis=0)
163 |
164 | print(len(error_array_mean_cleaned))
165 |
166 | max_pos_error_all_axis, max_rot_error_all_axis = np.max(error_array[:, :3], axis=0), np.max(error_array[:, 3:],
167 | axis=0)
168 | min_pos_error_all_axis, min_rot_error_all_axis = np.min(error_array[:, :3], axis=0), np.min(error_array[:, 3:],
169 | axis=0)
170 | max_pos_err_all_axis_mm = np.multiply(max_pos_error_all_axis, 1000)
171 | min_pos_err_all_axis_mm = np.multiply(min_pos_error_all_axis, 1000)
172 |
173 | max_pos_err, max_rot_error = np.max(error_array_mean, axis=0)[0], np.max(error_array_mean, axis=0)[1]
174 | min_pos_err, min_rot_error = np.min(error_array_mean, axis=0)[0], np.min(error_array_mean, axis=0)[1]
175 | max_pos_err_mm = np.multiply(max_pos_err, 1000)
176 | min_pos_err_mm = np.multiply(min_pos_err, 1000)
177 | mean_pos_err = np.average(avg_ik_error_mm[:3])
178 | mean_rot_err = np.average(avg_ik_error_mm[3:])
179 |
180 | # dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, pin_memory=False)
181 | # progress_bar = tqdm(enumerate(dataloader), total=len(dataloader))
182 | measure_time = 0.
183 | # for i, data in progress_bar:#for i, data in progress_bar
184 | # # get batch size data
185 | # gt_A = data["gt_A"].to(device)
186 | # gt_B = data["gt_B"].to(device)
187 | #
188 | #
189 | #
190 | # with torch.no_grad():
191 | # start_time = time.time()
192 | # result_B = netG_B2A(gt_B)
193 | # end_time = time.time()
194 | # measure_time += end_time - start_time
195 |
196 |         print(f'\nAverage error per axis: XYZ (mm): {avg_ik_error_mm[:3]}, RPY: {avg_ik_error_mm[3:]}')
197 |         print(f'Max error per axis: XYZ (mm): {max_pos_err_all_axis_mm}, RPY: {max_rot_error_all_axis}')
198 |         print(f'Min error per axis: XYZ (mm): {min_pos_err_all_axis_mm}, RPY: {min_rot_error_all_axis}\n')
199 |
200 |         print(f'\nAverage error over all axes: XYZ (mm): {mean_pos_err}, RPY: {mean_rot_err}')
201 |         print(f'Max error: XYZ (mm): {max_pos_err_mm}, RPY: {max_rot_error}')
202 |         print(f'Min error: XYZ (mm): {min_pos_err_mm}, RPY: {min_rot_error}\n')
203 |
204 |         print(f"Avg inference_time: {measure_time / self.samples}")  # stays 0 while the timing loop above is commented out
205 | print(f"Success Rate: {self.count_success / self.samples}")
206 |
207 | print("--------- IK Results -------------")
208 | # print("Average IK Error: ", avg_ik_error[0,:3])
209 | print("Average IK Error: \n"
210 | " Position (mm): {0}".format(avg_ik_error[:3]) + "\n"
211 | " Orientation (degree): {0}".format(
212 | avg_ik_error[3:]))
213 | print("Min Error: " + str(self.min_error_ik))
214 | print("Max Error: " + str(self.max_error_ik))
215 | print("Error > 1mm: ", self.over_1mm_ik)
216 | print("Percent > 1mm: ", self.over_1mm_ik / self.samples)
217 | print("Error > 2.5mm: ", self.over_2_5mm_ik)
218 | print("Percent > 2.5mm: ", self.over_2_5mm_ik / self.samples)
219 | print("Error > 5mm: ", self.over_5mm_ik)
220 | print("Percent > 5mm: ", self.over_5mm_ik / self.samples)
221 | print("Error > 1cm: ", self.over_1cm_ik)
222 | print("Percent > 1cm: ", self.over_1cm_ik / self.samples)
223 | print("Error > 1.5cm: ", self.over_1_5cm_ik)
224 | print("Percent > 1.5cm: ", self.over_1_5cm_ik / self.samples)
225 | print("Error > 2cm: ", self.over_2cm_ik)
226 | print("Percent > 2cm: ", self.over_2cm_ik / self.samples)
227 | print("Error > 5cm: ", self.over_5cm_ik)
228 | print("Percent > 5cm: ", self.over_5cm_ik / self.samples)
229 | print("Error > 10cm: ", self.over_10cm_ik)
230 | print("Percent > 10cm: ", self.over_10cm_ik / self.samples)
231 | # print("Average IK Error: " + str(ik_error))
232 |
233 | bins = [0.0]
234 | for i in range(240):
235 | bins.append(bins[-1] + 0.005)
236 |
237 | rot_bins = [0.0]
238 | for i in range(320):
239 | rot_bins.append(rot_bins[-1] + 0.0005)
240 |
241 | fig, ax = plt.subplots(2)
242 |
243 | ax[0].set_xlabel('Error (mm)')
244 | ax[0].set_ylabel('Count')
245 | n, bins, patches = ax[0].hist(np.multiply(error_array_mean[:, :1], 1000), bins=bins)
246 | mean_error = ax[0].axvline(np.mean(np.multiply(error_array_mean[:, :1], 1000), axis=0), color='orange',
247 | linestyle='dashed', linewidth=1)
248 | mean_error.set_label('Mean Absolute Error (all solutions)')
249 | mean_error = ax[0].axvline(np.mean(np.multiply(error_array_mean_cleaned[:, :1], 1000), axis=0), color='green',
250 | linestyle='dashed', linewidth=1)
251 | mean_error.set_label('Mean Absolute Error (only valid solutions)')
252 | bins = bins + 0.0025
253 | ax[0].plot(bins[:240], n, linewidth=1, linestyle=(0, (5, 1)), color="black")
254 | ax[0].set_xticks(np.arange(0.0, 1.4, 0.2))
255 | ax[0].set_yticks(np.arange(0.0, 7000., 1000.))
256 | ax[0].xaxis.label.set_fontweight('bold')
257 | ax[0].yaxis.label.set_fontweight('bold')
258 | ax[0].legend()
259 |
260 | ax[1].set_xlabel('Error (degree)')
261 | ax[1].set_ylabel('Count')
262 | n, bins, patches = ax[1].hist(error_array_mean[:, 1:], bins=rot_bins)
263 | mean_error = ax[1].axvline(error_array_mean[:, 1:].mean(), color='orange', linestyle='dashed', linewidth=1)
264 | mean_error.set_label('Mean Absolute Error (all solutions)')
265 | mean_error = ax[1].axvline(error_array_mean_cleaned[:, 1:].mean(), color='green', linestyle='dashed',
266 | linewidth=1)
267 | mean_error.set_label('Mean Absolute Error (only valid solutions)')
268 | bins = bins + 0.00025
269 | ax[1].plot(bins[:320], n, linewidth=1, linestyle=(0, (5, 1)), color="black")
270 | ax[1].set_xticks(np.arange(0.0, 0.18, 0.02))
271 | # ax[1].set_xlim(right=0.16)
272 | ax[1].set_yticks(np.arange(0.0, 5000., 1000.))
273 | ax[1].xaxis.label.set_fontweight('bold')
274 | ax[1].yaxis.label.set_fontweight('bold')
275 | ax[1].legend()
276 |
277 | fig.tight_layout()
278 | plt.savefig('./img/losses/vis_error_dist_iknet.png')
279 | plt.show()
--------------------------------------------------------------------------------
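The rotation-error metric used in FKTester.test, shown in isolation with
invented values: the difference rotation between target and result is
converted to absolute Euler angles in degrees:

import numpy as np
from scipy.spatial.transform import Rotation as R

target = R.from_quat([[0.0, 0.0, 0.0, 1.0]])              # identity (xyzw)
result = R.from_quat([[0.0, 0.0, 0.0871557, 0.9961947]])  # ~10 deg about z
diff = target * result.inv()
print(np.abs(diff.as_euler('zyx', degrees=True)))         # ~[[10. 0. 0.]]
--------------------------------------------------------------------------------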
/train/train.py:
--------------------------------------------------------------------------------
1 | import torch.utils.data
2 | import os
3 | import random
4 | import pickle
5 | import torch.backends.cudnn as cudnn
6 |
7 | from tqdm import tqdm
8 | import optuna
9 | from cycleik_pytorch import DecayLR, IKDataset, GenericGenerator, GenericDiscriminator
10 | from cycleik_pytorch import weights_init, load_config, slice_fk_pose, normalize_pose, renormalize_pose, renormalize_joint_state, JSD
11 | import pytorch_kinematics as pk
12 | from abc import abstractmethod
13 | #from softadapt import SoftAdapt, NormalizedSoftAdapt, LossWeightedSoftAdapt
14 | from torch.functional import F
15 | import math
16 | import scipy.linalg as linalg
17 |
18 | jsd = JSD()
19 | sqrt_2 = None
20 |
21 | class BaseTrainer:
22 |
23 | def __init__(self, args, trial=None, config=None, train_dataset=None, test_dataset=None):
24 |
25 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
26 |
27 | try:
28 | os.makedirs("weights")
29 | except OSError:
30 | pass
31 |
32 | try:
33 | os.makedirs("img/losses")
34 | except OSError:
35 | pass
36 |
37 | try:
38 | os.makedirs("img")
39 | except OSError:
40 | pass
41 |
42 | try:
43 | os.makedirs("results")
44 | except OSError:
45 | pass
46 |
47 | try:
48 | os.makedirs(f"weights/{args.robot}")
49 | except OSError:
50 | pass
51 |
52 | try:
53 | os.makedirs(f"results/{args.robot}")
54 | except OSError:
55 | pass
56 |
57 |         self.optimizer_run = trial is not None
58 | torch.set_num_threads(2)
59 |
60 | try:
61 | if args.manualSeed is None:
62 | self.manualSeed = random.randint(1, 10000)
63 | else:
64 | self.manualSeed = args.manualSeed
65 | except AttributeError:
66 | self.manualSeed = random.randint(1, 10000)
67 |
68 | if self.optimizer_run:
69 |             # hyperparameter-search trials force the fixed settings below
70 | self.cuda = True
71 | self.decay_epochs = 0
72 | self.noise = None
73 | self.her = False
74 | self.trial = trial
75 | self.nbr_tanh = args.nbr_tanh
76 | self.activation = args.activation
77 | self.layers = args.layers
78 |
79 | print("Random Seed: ", self.manualSeed)
80 | random.seed(self.manualSeed)
81 | torch.manual_seed(self.manualSeed)
82 |
83 | cudnn.benchmark = True
84 |
85 | if torch.cuda.is_available() and not args.cuda:
86 | print("WARNING: You have a CUDA device, so you should probably run with --cuda")
87 |
88 | if config is None:
89 | self.config = load_config(args.robot)[f'{args.chain}']
90 | else:
91 | self.config = config
92 |
93 | self.autoencoder = args.autoencoder
94 | self.two_stage = args.two_stage
95 |
96 | self.train_data = self.config["train_data"]
97 | self.test_data = self.config["test_data"]
98 | self.robot_dof = self.config["robot_dof"]
99 | self.robot_urdf = self.config["robot_urdf"]
100 | self.robot_eef = self.config["robot_eef"]
101 | self.robot = args.robot
102 | if not self.optimizer_run:
103 | self.core_model = args.core_model
104 | self.core_model_config = load_config(args.core_model)[f'{args.core_model_chain}']
105 |
106 | self.js_samples = 100
107 | self.manifold_sample_nbr = 100
108 | self.epochs = args.epochs
109 | self.decay_epochs = args.decay_epochs
110 |
111 | self.network = None
112 | if args.network == "GAN":
113 | self.network = "GAN"
114 | elif args.network == "MLP":
115 | self.network = "IKNet"
116 | elif args.network == "FK":
117 | self.network = "FKNet"
118 |
119 | if self.autoencoder or self.network == "FKNet":
120 | if args.fk_lr is None:
121 | self.fk_lr = self.config["FKNet"]["training"]["lr"]
122 | else:
123 | self.fk_lr = args.fk_lr
124 |
125 | if args.lr is None:
126 | self.lr = self.config[self.network]["training"]["lr"]
127 | else:
128 | self.lr = args.lr
129 | if args.batch_size is None:
130 | self.batch_size = self.config[self.network]["training"]["batch_size"]
131 | else:
132 | self.batch_size = args.batch_size
133 | if self.network == "GAN":
134 | if args.noise_vector_size is None:
135 | self.noise_vector_size = self.config["GAN"]["architecture"]["noise_vector_size"]
136 | else:
137 | self.noise_vector_size = args.noise_vector_size
138 | print(args)
139 |
140 | device_name = f"cuda:{args.gpu}" if args.cuda else "cpu"
141 | self.device = torch.device(device_name)
142 |
143 | # Dataset
144 | if not self.optimizer_run:
145 | self.train_dataset, self.test_dataset = self.load_data(self.train_data, self.test_data, self.robot, self.config)
146 | else:
147 | self.train_dataset, self.test_dataset = train_dataset, test_dataset
148 |
149 | self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, pin_memory=True, num_workers=1)
150 | self.test_dataloader = torch.utils.data.DataLoader(self.test_dataset, batch_size=10000, shuffle=True, pin_memory=True, num_workers=1)
151 | self.train_data_size, self.test_data_size = self.train_dataset.get_size(), self.test_dataset.get_size()
152 |
153 |         torch.autograd.set_detect_anomaly(True)  # debugging aid: catches NaNs in backward, but slows training
154 |
155 | try:
156 | chain = pk.build_serial_chain_from_urdf(open(self.robot_urdf).read(), self.robot_eef, self.config['base_link'])
157 | self.chain = chain.to(dtype=torch.float32, device=self.device)
158 | except ValueError:
159 | chain = pk.build_serial_chain_from_urdf(open(self.robot_urdf).read(), self.robot_eef)
160 | self.chain = chain.to(dtype=torch.float32, device=self.device)
161 | single_renormalize_move, single_renormalize, workspace_move, workspace_renormalize = self.train_dataset.get_norm_params()
162 | self.single_renormalize_move = torch.Tensor(single_renormalize_move).to(self.device)
163 | self.single_renormalize = torch.Tensor(single_renormalize).to(self.device)
164 | self.workspace_move = torch.Tensor(workspace_move).to(self.device)
165 | self.workspace_renormalize = torch.Tensor(workspace_renormalize).to(self.device)
166 |
167 |
168 | self.fk_model = None
169 | self.model = None
170 | self.fk_optimizer = None
171 | self.optimizer = None
172 | self.fk_lr_scheduler = None
173 | self.lr_scheduler = None
174 | self.create_model()
175 | self.create_optimizer()
176 | self.create_lr_scheduler()
177 | self.progress_bar = None
178 |
179 | self.cycle_loss = torch.nn.SmoothL1Loss(beta=0.01)#torch.nn.L1Loss().to(self.device)
180 | self.position_cycle_loss = torch.nn.SmoothL1Loss(beta=0.001)#torch.nn.L1Loss().to(self.device)
181 | self.rotation_cycle_loss = torch.nn.SmoothL1Loss(beta=0.01, reduction='none')#torch.nn.L1Loss().to(self.device)
182 | self.kl_divergence_loss = torch.nn.KLDivLoss(reduction="none", log_target=True)
183 |
184 | self.compile = args.compile
185 | self.adaptive_loss_order = 10
186 | self.gpu = args.gpu
187 | #self.softadapt = NormalizedSoftAdapt(beta=0.1, accuracy_order=self.adaptive_loss_order)
188 |
189 |         # precompute sqrt(2) once on the target device
190 |         global sqrt_2
191 |         sqrt_tensor = torch.tensor([2.0])
192 |         sqrt_2 = torch.sqrt(sqrt_tensor).to(self.device)
193 |
194 | def load_data(self, train_data, test_data, robot, config):
195 | train_dataset = IKDataset(root=train_data, robot=robot, config=config, mode="train")
196 | test_dataset = IKDataset(root=test_data, robot=robot, config=config, mode="test")
197 | return [train_dataset, test_dataset]
198 |
199 | @abstractmethod
200 | def training_step(self, **kwargs):
201 | pass
202 |
203 | @abstractmethod
204 | def validation_step(self, **kwargs):
205 | pass
206 |
207 | @abstractmethod
208 | def process_training_losses(self, **kwargs):
209 | pass
210 |
211 | @abstractmethod
212 | def process_validation_losses(self, **kwargs):
213 | pass
214 |
215 | def create_optimizer(self):
216 | if self.autoencoder or self.network == "FKNet":
217 | self.fk_optimizer = torch.optim.Adam(self.fk_model.parameters(), lr=self.fk_lr, betas=(0.9, 0.999))
218 |
219 | def create_lr_scheduler(self):
220 | if self.autoencoder or self.network == "FKNet":
221 | lr_lambda = DecayLR(self.epochs, 0, self.decay_epochs).step
222 | self.fk_lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.fk_optimizer, lr_lambda=lr_lambda, verbose=True)
223 |
224 | def create_model(self):
225 | if self.autoencoder or self.network == "FKNet":
226 | if self.optimizer_run:
227 | self.fk_model = GenericDiscriminator(input_size=self.robot_dof, output_size=7,
228 | nbr_tanh=self.nbr_tanh,
229 | activation=self.activation,
230 | layers=self.layers).to(self.device)
231 | else:
232 | self.fk_model = GenericDiscriminator(input_size=self.robot_dof, output_size=7,
233 | nbr_tanh=self.config["FKNet"]["architecture"]["nbr_tanh"],
234 | activation=self.config["FKNet"]["architecture"]["activation"],
235 | layers=self.config["FKNet"]["architecture"]["layers"]).to(self.device)
236 | if self.two_stage:
237 | self.fk_model.load_state_dict(torch.load(f'./weights/{self.robot}/model_FK_with_kinematics.pth'))
238 |
239 | @abstractmethod
240 | def create_checkpoint(self, epoch):
241 | pass
242 |
243 | @abstractmethod
244 | def train(self):
245 | pass
246 |
247 | @staticmethod
248 | def orientation_loss_phi_2(quat_1, quat_2):
249 |         cycle_loss_B2A_orientation_positive = torch.sum(torch.pow(torch.subtract(quat_1, quat_2), 2), dim=1)  # ||q1 - q2||^2
250 |         cycle_loss_B2A_orientation_negative = torch.sum(torch.pow(torch.add(quat_1, quat_2), 2), dim=1)  # ||q1 + q2||^2
251 |         cycle_loss_orientation = torch.minimum(cycle_loss_B2A_orientation_positive, cycle_loss_B2A_orientation_negative)  # q and -q encode the same rotation
252 | return torch.mean(cycle_loss_orientation, dim=0)
253 |
254 | @staticmethod
255 | def orientation_loss_phi_4(quat_1, quat_2):
256 |         cycle_loss_orientation = torch.subtract(1, torch.abs(torch.sum(torch.multiply(quat_1, quat_2), dim=1)))  # 1 - |<q1, q2>|: zero iff both quaternions describe the same rotation
257 | return torch.mean(cycle_loss_orientation, dim=0)
258 |
259 | @staticmethod
260 | def orientation_loss_phi_7(quat_1, quat_2):
261 | cycle_loss_B2A_orientation_positive = torch.sum(torch.nn.functional.smooth_l1_loss(quat_1, quat_2, beta=0.01, reduction='none'), dim=1)
262 | cycle_loss_B2A_orientation_negative = torch.sum(torch.nn.functional.smooth_l1_loss(-quat_1, quat_2, beta=0.01, reduction='none'), dim=1)
263 | cycle_loss_orientation = torch.minimum(cycle_loss_B2A_orientation_positive, cycle_loss_B2A_orientation_negative)
264 | return torch.mean(cycle_loss_orientation, dim=0)
265 |
266 | @staticmethod
267 | def kl_divergence(predicted_distribution, target_distribution, beta=0.1):
268 | joint_space_distribution = torch.nn.functional.log_softmax(predicted_distribution, dim=1)
269 | target_distribution = torch.nn.functional.log_softmax(target_distribution, dim=1)
270 |
271 |         # point-wise KL divergence between the two log-softmax distributions
272 |         point_wise_kl_div = torch.nn.functional.kl_div(joint_space_distribution, target_distribution, reduction="none", log_target=True)
273 |         row_wise_kl_div = torch.sum(point_wise_kl_div, dim=1)
274 |         row_wise_kl_div = torch.subtract(row_wise_kl_div, beta)
275 |         row_wise_kl_div = torch.maximum(row_wise_kl_div, torch.zeros_like(row_wise_kl_div))
276 |         # beta acts as a margin: row-wise divergences below beta incur no
277 |         # penalty, anything above it is penalized linearly
278 |
279 |
280 |
281 |         return torch.mean(row_wise_kl_div, dim=0)
282 |
283 |
284 | @staticmethod
285 | def min_distance_js_loss(predicted_distribution, target_distribution, beta=0.1):
286 | point_wise_diff = torch.abs(torch.subtract(predicted_distribution, target_distribution))
287 | #print(point_wise_diff.size())
288 | #print(point_wise_diff)
289 | point_wise_diff = point_wise_diff.where(
290 | (point_wise_diff > beta),
291 | torch.multiply(torch.square(point_wise_diff), beta)
292 | )
293 | return torch.mean(torch.mean(point_wise_diff, dim=1), dim=0)
294 |
295 | @staticmethod
296 | def batchmean_kl_div(predicted_distribution, target_distribution):
297 | joint_space_distribution = torch.nn.functional.log_softmax(predicted_distribution, dim=1)
298 | target_distribution = torch.nn.functional.log_softmax(target_distribution, dim=1)
299 |
300 |         # "batchmean" already averages over the batch, so the result can be
301 |         # returned directly as a scalar loss
302 |         point_wise_kl_div = torch.nn.functional.kl_div(joint_space_distribution, target_distribution, reduction="batchmean",
303 |                                                        log_target=True)
304 |
305 |
306 |
307 |
308 |
309 |         return point_wise_kl_div
310 |
311 |
312 |
313 | @staticmethod
314 | def max_variance_loss(predicted_distribution):
315 |         # 4/12 is the variance of a uniform distribution over [-1, 1], i.e. the maximum achievable spread
316 |         return torch.subtract(torch.div(4, 12), torch.mean(torch.var(predicted_distribution, dim=0)))
--------------------------------------------------------------------------------
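Sanity-check sketch for the quaternion losses above: q and -q describe the
same rotation, so every phi loss must score an antipodal pair exactly like an
identical pair (the import path is an assumption about how the package is laid
out on the PYTHONPATH):

import torch
from train.train import BaseTrainer  # assumed import path

q = torch.nn.functional.normalize(torch.rand(8, 4) * 2 - 1, dim=1)
for phi in (BaseTrainer.orientation_loss_phi_2,
            BaseTrainer.orientation_loss_phi_4,
            BaseTrainer.orientation_loss_phi_7):
    assert torch.allclose(phi(q, q), phi(q, -q), atol=1e-6)
    assert phi(q, q).abs() < 1e-6  # identical rotations cost nothing
--------------------------------------------------------------------------------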