├── .gitattributes
├── .gitignore
├── .gitmodules
├── Readme.md
├── code
├── __init__.py
├── agent
│ ├── __init__.py
│ └── traj_opt_single.py
├── engine
│ ├── BaseScene.py
│ ├── __init__.py
│ ├── analytic_grad_single.py
│ ├── analytic_grad_system.py
│ ├── blob.py
│ ├── build_luisa_script.py
│ ├── contact_diff.py
│ ├── convert_luisa.py
│ ├── convert_piece.py
│ ├── geometry.py
│ ├── geometry_self.py
│ ├── gripper_single.py
│ ├── gripper_tactile.py
│ ├── linalg.py
│ ├── model_elastic_offset.py
│ ├── model_elastic_tactile.py
│ ├── model_fold_offset.py
│ ├── readfile.py
│ ├── render_engine.py
│ └── sparse_solver.py
├── optimizer
│ ├── __init__.py
│ └── optim.py
├── scripts
│ ├── run_RL_balance.sh
│ ├── run_RL_flatlift.sh
│ ├── run_RL_folding.sh
│ ├── run_RL_following.sh
│ ├── run_RL_forming.sh
│ ├── run_RL_lifting.sh
│ ├── run_RL_pick_fold.sh
│ ├── run_RL_separating.sh
│ ├── run_cmaes_balance.sh
│ ├── run_cmaes_bouncing.sh
│ ├── run_cmaes_card.sh
│ ├── run_cmaes_folding.sh
│ ├── run_cmaes_following.sh
│ ├── run_cmaes_forming.sh
│ ├── run_cmaes_lifting.sh
│ ├── run_cmaes_pick_fold.sh
│ ├── run_cmaes_separate_soft.sh
│ ├── run_cmaes_separating.sh
│ ├── run_cmaes_slide.sh
│ ├── run_cmaes_throwing.sh
│ ├── run_dp_bouncing.sh
│ ├── run_dp_card.sh
│ ├── run_dp_slide.sh
│ ├── run_trajopt_balancing.sh
│ ├── run_trajopt_balancing_luisa.sh
│ ├── run_trajopt_folding.sh
│ ├── run_trajopt_following.sh
│ ├── run_trajopt_forming.sh
│ ├── run_trajopt_lifting.sh
│ ├── run_trajopt_pickfolding.sh
│ ├── run_trajopt_separating.sh
│ └── run_trajopt_throwing.sh
├── task_scene
│ ├── Scene_balancing.py
│ ├── Scene_bouncing.py
│ ├── Scene_card.py
│ ├── Scene_folding.py
│ ├── Scene_forming.py
│ ├── Scene_interact.py
│ ├── Scene_lifting.py
│ ├── Scene_pick.py
│ ├── Scene_sliding.py
│ └── __init__.py
└── training
│ ├── RL_env.py
│ ├── RL_eval_env.py
│ ├── run_cmaes_all.py
│ ├── run_cmaes_parameter.py
│ ├── training_env.py
│ ├── trajopt_balancing.py
│ ├── trajopt_bouncing.py
│ ├── trajopt_card.py
│ ├── trajopt_folding.py
│ ├── trajopt_forming.py
│ ├── trajopt_interact.py
│ ├── trajopt_lifting.py
│ ├── trajopt_pick_fold.py
│ └── trajopt_silding.py
├── data
├── balance_state
│ ├── F_x_lower.npy
│ ├── F_x_lower_world.npy
│ ├── F_x_upper.npy
│ ├── F_x_upper_world.npy
│ ├── border_flag.npy
│ ├── half_gripper_dist.npy
│ ├── pos.npy
│ ├── proj_dir.npy
│ ├── proj_flag.npy
│ ├── rot.npy
│ ├── rotmat.npy
│ └── state
├── ball.ele
├── ball.face
├── ball.node
├── ball_surface_mesh.ply
├── scene_texture_options.json
├── tactile.ele
├── tactile.face
└── tactile.node
├── images
└── teaser.gif
└── pyproject.toml
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.egg-info
2 | imgs/*
3 | __pycache__
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "data/AssetLoader"]
2 | path = data/AssetLoader
3 | url = https://github.com/ACMLCZH/AssetLoader.git
4 |
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | # ThinShellLab: Thin-Shell Object Manipulations With Differentiable Physics Simulations
2 |
3 | ---
4 |
5 |

6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 | Official repo for the paper:
18 |
19 | > **[Thin-Shell Object Manipulations With Differentiable Physics Simulations](https://vis-www.cs.umass.edu/ThinShellLab/)**
20 |
21 | ThinShellLab is a **fully differentiable** simulation platform tailored for **robotic interactions** with diverse thin-shell materials.
22 |
23 | ## Installation
24 | You can create a Conda environment for this simulator first:
25 | ```bash
26 | conda create -n thinshelllab python=3.9.16
27 | conda activate thinshelllab
28 | ```
29 |
30 | And install the package with its dependencies using
31 | ```bash
32 | git clone https://github.com/wangyian-me/thinshelllab.git
33 | cd thinshelllab
34 | pip install -e .
35 | ```
36 |
37 | ### Render
38 | - There are two ways to render our scene: Taichi GGUI and LuisaRender Script. Taichi GGUI renders real-time images in GUI windows at low resolution, while LuisaRender Script generates meta-data script files for high-resolution, more realistic rendering outputs. This can be specified using the option `--render_option`.
39 | - To run LuisaRender Script, necessary assets should be loaded. Run `git submodule update --init --recursive` to load the submodule `AssetLoader` and run `export PYTHONPATH=$PYTHONPATH:${PWD}/data/AssetLoader` to add the asset path to `PYTHONPATH`.
40 | - To see the rendering results of LuisaRender Script, you should set up LuisaRender and run its renderer on the generated script files to produce the output images.
41 |
42 | ## Usage example
43 |
44 | We put running scripts under code/scripts, you can simply run
45 | ```bash
46 | cd thinshelllab
47 | cd code
48 | sh scripts/run_trajopt_folding.sh
49 | ```
50 | to train a trajectory optimization policy for the folding task, or use other scripts to train on different tasks.
51 |
52 | ## Citation
53 | If you find this codebase/paper useful for your research, please consider citing:
54 | ```
55 | @inproceedings{wang2023thin,
56 | title={Thin-Shell Object Manipulations With Differentiable Physics Simulations},
57 | author={Wang, Yian and Zheng, Juntian and Chen, Zhehuan and Xian, Zhou and Zhang, Gu and Liu, Chao and Gan, Chuang},
58 | booktitle={The Twelfth International Conference on Learning Representations},
59 | year={2023}
60 | }
61 | ```
62 |
63 |
--------------------------------------------------------------------------------
/code/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/code/__init__.py
--------------------------------------------------------------------------------
/code/agent/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/code/agent/__init__.py
--------------------------------------------------------------------------------
/code/agent/traj_opt_single.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 |
@ti.data_oriented
class agent_trajopt:
    """Dense 6-dof gripper trajectory used by the trajectory optimizers.

    ``traj[t, g, d]`` holds the pose target of gripper ``g`` at frame ``t``;
    d = 0..2 are xyz translation components, d = 3..5 are xyz rotation
    components. Taichi fields are zero-initialized, so every trajectory
    starts from the all-zero pose at frame 0.
    """

    def __init__(self, tot_timestep, cnt, max_moving_dist=0.0005):
        # Full trajectory plus a scratch buffer for a single-frame action.
        self.traj = ti.field(ti.f64, (tot_timestep, cnt, 6))
        self.tmp_action = ti.field(ti.f64, (cnt, 6))
        # Per-gripper frame-to-frame deltas filled by get_action().
        self.delta_pos = ti.Vector.field(3, ti.f64, shape=cnt)
        self.delta_rot = ti.Vector.field(3, ti.f64, shape=cnt)
        self.action_dim = 6 * cnt
        self.tot_timestep = tot_timestep
        self.max_moving_dist = max_moving_dist  # per-frame motion budget
        self.n_part = cnt

    def fix_action(self, max_dist):
        """Rescale every frame-to-frame step so its combined translation +
        scaled rotation magnitude stays within ``max_moving_dist``.

        ``max_dist`` converts the rotation norm into an equivalent
        translation distance before the two magnitudes are summed.
        """
        for i in range(1, self.tot_timestep):
            # Plain Python-scope loop; ti.static is only meaningful inside
            # Taichi kernels, so it is not used here.
            for j in range(self.n_part):
                delta_pos = ti.Vector([self.traj[i, j, 0] - self.traj[i - 1, j, 0],
                                       self.traj[i, j, 1] - self.traj[i - 1, j, 1],
                                       self.traj[i, j, 2] - self.traj[i - 1, j, 2]])
                delta_rot = ti.Vector([self.traj[i, j, 3] - self.traj[i - 1, j, 3],
                                       self.traj[i, j, 4] - self.traj[i - 1, j, 4],
                                       self.traj[i, j, 5] - self.traj[i - 1, j, 5]])

                moving_dist = ti.math.sqrt(delta_pos.dot(delta_pos)) + ti.math.sqrt(delta_rot.dot(delta_rot)) * max_dist
                weight = self.max_moving_dist / (moving_dist + 1e-8)
                if weight < 1.0:
                    # BUGFIX: shrink only the 6 dofs of gripper j. The old
                    # loop ran over range(self.action_dim) == 6 * n_part and
                    # indexed past the last axis whenever n_part > 1.
                    for k in range(6):
                        self.traj[i, j, k] = self.traj[i - 1, j, k] + (self.traj[i, j, k] - self.traj[i - 1, j, k]) * weight

    def calculate_dist(self, frame, max_dist, j):
        """Return the combined motion magnitude of gripper ``j`` between
        ``frame - 1`` and ``frame`` (rotation norm scaled by ``max_dist``)."""
        i = frame

        delta_pos = ti.Vector(
            [self.traj[i, j, 0] - self.traj[i - 1, j, 0], self.traj[i, j, 1] - self.traj[i - 1, j, 1],
             self.traj[i, j, 2] - self.traj[i - 1, j, 2]])
        delta_rot = ti.Vector(
            [self.traj[i, j, 3] - self.traj[i - 1, j, 3], self.traj[i, j, 4] - self.traj[i - 1, j, 4],
             self.traj[i, j, 5] - self.traj[i - 1, j, 5]])

        moving_dist = ti.math.sqrt(delta_pos.dot(delta_pos)) + ti.math.sqrt(delta_rot.dot(delta_rot)) * max_dist
        return moving_dist

    def get_action(self, step):
        """Cache the per-gripper deltas between ``step - 1`` and ``step``
        into ``delta_pos`` / ``delta_rot`` for the simulator to consume."""
        i = step
        for j in range(self.n_part):
            self.delta_pos[j] = ti.Vector([self.traj[i, j, 0] - self.traj[i - 1, j, 0], self.traj[i, j, 1] - self.traj[i - 1, j, 1],
                                           self.traj[i, j, 2] - self.traj[i - 1, j, 2]])
            self.delta_rot[j] = ti.Vector([self.traj[i, j, 3] - self.traj[i - 1, j, 3], self.traj[i, j, 4] - self.traj[i - 1, j, 4],
                                           self.traj[i, j, 5] - self.traj[i - 1, j, 5]])

    def init_traj_forming(self):
        """Seed trajectory for the forming task: press down while sliding
        forward, then level out."""
        for i in range(1, 20):
            self.traj[i, 0, 2] = -0.00011 * i
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0] + 0.00023
        for i in range(20, 35):
            self.traj[i, 0, 2] = self.traj[i - 1, 0, 2] - 0.0002
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0] + 0.00027
        for i in range(35, 50):
            self.traj[i, 0, 2] = self.traj[i - 1, 0, 2]
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0] + 0.0002

    def init_traj_pick_fold(self):
        """Seed trajectory for pick-and-fold: both grippers descend for the
        first 8 frames, then hold their pose."""
        # Frame 0 keeps the all-zero pose already stored in the field, so the
        # loop starts at 1 (the original started at 0 and read traj[-1, ...],
        # an out-of-range access on a Taichi field; the values written at
        # i = 0 were all zero anyway).
        for i in range(1, 8):
            self.traj[i, 0, 2] = -0.0006 * i
            self.traj[i, 1, 2] = -0.0006 * i
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0]
            self.traj[i, 1, 0] = self.traj[i - 1, 1, 0]
        for i in range(8, 50):
            self.traj[i, 0, 2] = self.traj[i - 1, 0, 2]
            self.traj[i, 1, 2] = self.traj[i - 1, 1, 2]
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0]
            self.traj[i, 1, 0] = self.traj[i - 1, 1, 0]

    def init_traj_card(self):
        """Seed trajectory for the card task: pinch, lift-and-slide with a
        growing rotation, then hold."""
        # Explicit first step from the zero pose. The original code read
        # traj[-1, ...] at i = 0 (out of range on a Taichi field); starting
        # from the zero-initialized frame gives the intended first step.
        self.traj[0, 0, 0] = 0.0003
        self.traj[0, 1, 0] = -0.0003
        for i in range(1, 5):
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0] + 0.0003
            self.traj[i, 1, 0] = self.traj[i - 1, 1, 0] - 0.0003

        for i in range(5, 20):
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0] + 0.0001
            self.traj[i, 0, 2] = self.traj[i - 1, 0, 2] + 0.0003
            self.traj[i, 0, 4] = self.traj[i - 1, 0, 4]
            self.traj[i, 1, 0] = self.traj[i - 1, 1, 0]

        for i in range(20, 35):
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0] + 0.0001
            self.traj[i, 0, 2] = self.traj[i - 1, 0, 2] + 0.0002
            self.traj[i, 0, 4] = self.traj[i - 1, 0, 4]
            self.traj[i, 1, 0] = self.traj[i - 1, 1, 0]

        for i in range(35, 50):
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0] + 0.0002
            self.traj[i, 0, 2] = self.traj[i - 1, 0, 2] + 0.0005
            self.traj[i, 0, 4] = self.traj[i - 1, 0, 4] + 0.02
            self.traj[i, 1, 0] = self.traj[i - 1, 1, 0]

        for i in range(50, 150):
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0]
            self.traj[i, 0, 2] = self.traj[i - 1, 0, 2]
            self.traj[i, 0, 4] = self.traj[i - 1, 0, 4]
            self.traj[i, 1, 0] = self.traj[i - 1, 1, 0]

    def init_traj_slide(self):
        """Seed trajectory for sliding: press down for 10 frames, then drag
        along -x at constant depth."""
        for i in range(10):
            self.traj[i, 0, 2] = -0.00035 * i
        for i in range(10, 50):
            self.traj[i, 0, 0] = self.traj[i - 1, 0, 0] - 0.0005
            self.traj[i, 0, 2] = self.traj[i - 1, 0, 2]
--------------------------------------------------------------------------------
/code/engine/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/code/engine/__init__.py
--------------------------------------------------------------------------------
/code/engine/analytic_grad_system.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 |
@ti.data_oriented
class Grad:
    """Reverse-mode (adjoint) gradient bookkeeping for a cloth system ``sys``.

    Buffers the forward trajectory (vertex positions, gripper poses and
    per-face reference bending angles), then walks it backwards one step at
    a time in ``transfer_grad``: each step solves a linear system with the
    simulator's Hessian to turn the loss gradient on positions into
    gradients on earlier states and on scalar material parameters
    (mu/lambda, bending stiffness kb, friction coefficient).
    """

    def __init__(self, sys, tot_timestep, n_parts):
        self.tot_NV = sys.tot_NV          # total vertex count of the system
        self.n_part = n_parts             # number of gripper parts (0 = none)
        # Forward-pass position history: [step, vertex, xyz].
        self.pos_buffer = ti.field(ti.f64, (tot_timestep, sys.tot_NV, 3))
        if n_parts > 0:
            # Gripper pose history per step: position (vec3) + rotation (vec4).
            self.gripper_pos_buffer = ti.Vector.field(3, dtype=ti.f64, shape=(tot_timestep, n_parts))
            self.gripper_rot_buffer = ti.Vector.field(4, dtype=ti.f64, shape=(tot_timestep, n_parts))
        # Reference bending angles per cloth face, per step (assumes all
        # cloths share cloths[0].NF faces).
        self.ref_angle_buffer = ti.Vector.field(3, ti.f64, shape=(tot_timestep, sys.cloth_cnt, sys.cloths[0].NF))
        self.dt = sys.dt
        # Adjoint of the positions (dL/dx), accumulated during the backward pass.
        self.pos_grad = ti.field(ti.f64, (tot_timestep, sys.tot_NV, 3))
        # Adjoint of the inertial target x_hat, flattened to 3*NV scalars.
        self.x_hat_grad = ti.field(ti.f64, sys.tot_NV * 3)
        self.gripper_grad = ti.field(dtype=ti.f64, shape=(tot_timestep, 7))
        self.mass = ti.field(ti.f64, sys.tot_NV)
        # Flattened right-hand side for the per-step Hessian solve.
        self.F = ti.field(ti.f64, shape=(self.tot_NV * 3,))
        self.tot_timestep = tot_timestep
        # Scalar parameter gradients accumulated over the whole backward pass.
        self.grad_lam = ti.field(ti.f64, ())
        self.grad_mu = ti.field(ti.f64, ())
        self.grad_friction_coef = ti.field(ti.f64, ())
        self.grad_kb = ti.field(ti.f64, ())
        # Adjoint of the reference bending angles.
        self.angleref_grad = ti.Vector.field(3, ti.f64, shape=(tot_timestep, sys.cloth_cnt, sys.cloths[0].NF))
        self.cloth_cnt = sys.cloth_cnt
        # Velocity-damping factor used when chaining x_hat back to the two
        # previous position frames (see get_prev_grad / get_prev_prev_grad).
        self.damping = 1.0
        # Toggles for which parameter gradients to accumulate.
        self.count_friction_grad = False
        self.count_mu_lam_grad = False
        self.count_kb_grad = True

    def reset(self):
        """Clear trajectory buffers and all accumulated gradients."""
        self.pos_buffer.fill(0)
        self.pos_grad.fill(0)
        self.grad_mu[None] = 0
        self.grad_lam[None] = 0
        self.grad_friction_coef[None] = 0
        self.grad_kb[None] = 0

    @ti.kernel
    def init_mass(self, sys: ti.template()):
        # Snapshot per-vertex masses from the system.
        for i in range(self.tot_NV):
            self.mass[i] = sys.mass[i]

    @ti.kernel
    def copy_pos(self, sys: ti.template(), step: int):
        """Record the system state at ``step``: positions, reference bending
        angles and (if present) gripper poses."""
        for i in range(self.tot_NV):
            self.pos_buffer[step, i, 0] = sys.pos[i][0]
            self.pos_buffer[step, i, 1] = sys.pos[i][1]
            self.pos_buffer[step, i, 2] = sys.pos[i][2]

        for i in range(sys.cloths[0].NF):
            for j in ti.static(range(sys.cloth_cnt)):
                for l in ti.static(range(3)):
                    self.ref_angle_buffer[step, j, i][l] = sys.cloths[j].ref_angle[i][l]

        for j in ti.static(range(self.n_part)):
            self.gripper_pos_buffer[step, j] = sys.gripper.pos[j]
            self.gripper_rot_buffer[step, j] = sys.gripper.rot[j]


    @ti.kernel
    def get_F(self, step: int, sys: ti.template()):
        """Flatten pos_grad[step] into the RHS vector F for the Hessian solve."""
        for i in range(self.tot_NV):
            self.F[i * 3 + 0] = self.pos_grad[step, i, 0]
            self.F[i * 3 + 1] = self.pos_grad[step, i, 1]
            self.F[i * 3 + 2] = self.pos_grad[step, i, 2]

    @ti.kernel
    def get_parameters_grad(self, p: ti.types.ndarray(), sys: ti.template()):
        """Accumulate mu/lambda and kb gradients from the solved adjoint ``p``
        using the system's per-vertex parameter sensitivities (d_mu, d_lam,
        d_kb), skipping frozen dofs."""
        for i in range(self.tot_NV):
            for j in ti.static(range(3)):
                if self.count_mu_lam_grad:
                    if not sys.frozen[i * 3 + j]:
                        self.grad_mu[None] += p[i * 3 + j] * sys.d_mu[i][j]

                        self.grad_lam[None] += p[i * 3 + j] * sys.d_lam[i][j]
                if self.count_kb_grad:
                    if not sys.frozen[i * 3 + j]:
                        self.grad_kb[None] += p[i * 3 + j] * sys.d_kb[i][j]


    @ti.kernel
    def get_grad(self, p: ti.types.ndarray()):
        """x_hat adjoint: scale the solved adjoint by m / dt^2 (the inertia
        coefficient of the implicit step)."""
        for i in range(self.tot_NV):
            self.x_hat_grad[i * 3 + 0] = p[i * 3 + 0] * self.mass[i] / (self.dt ** 2)
            self.x_hat_grad[i * 3 + 1] = p[i * 3 + 1] * self.mass[i] / (self.dt ** 2)
            self.x_hat_grad[i * 3 + 2] = p[i * 3 + 2] * self.mass[i] / (self.dt ** 2)

    @ti.kernel
    def get_prev_grad(self, sys: ti.template(), step: int):
        # x_hat depends on pos[step-1] with coefficient (1 + damping).
        for i in range(self.tot_NV):
            for j in ti.static(range(3)):
                if not sys.frozen[i * 3 + j]:
                    self.pos_grad[step - 1, i, j] += self.x_hat_grad[i * 3 + j] * (1 + self.damping)

    @ti.kernel
    def get_prev_prev_grad(self, sys: ti.template(), step: int):
        # ... and on pos[step-2] with coefficient -damping.
        for i in range(self.tot_NV):
            for j in ti.static(range(3)):
                if not sys.frozen[i * 3 + j]:
                    self.pos_grad[step - 2, i, j] -= self.x_hat_grad[i * 3 + j] * self.damping

    @ti.kernel
    def clamp_grad(self, step: int):
        # Clamp the position adjoint to [-1, 1] componentwise to keep the
        # backward pass numerically stable.
        for i in range(self.tot_NV):
            self.pos_grad[step, i, 0] = ti.math.clamp(self.pos_grad[step, i, 0], -1, 1)
            self.pos_grad[step, i, 1] = ti.math.clamp(self.pos_grad[step, i, 1], -1, 1)
            self.pos_grad[step, i, 2] = ti.math.clamp(self.pos_grad[step, i, 2], -1, 1)

    @ti.kernel
    def copy_z(self, sys: ti.template(), p: ti.types.ndarray()):
        # Hand the solved adjoint vector back to the system's scratch buffer.
        for i in range(self.tot_NV * 3):
            sys.tmp_z_not_frozen[i] = p[i]

    def transfer_grad(self, step, sys, f_contact):
        """Back-propagate the adjoint one timestep: from pos_grad[step] to
        pos_grad[step-1], pos_grad[step-2] and the parameter gradients.

        ``f_contact`` recomputes contact state for the restored frame
        ``step - 1`` before the Hessian is rebuilt.
        """
        self.clamp_grad(step)
        # self.print_grad(step, sys)
        # Restore frame step-1 and refresh normals/contacts for it.
        sys.copy_pos_only(self.pos_buffer, step - 1)
        sys.calc_vn()
        f_contact(sys)
        sys.contact_analysis()

        # Restore frame `step` (positions + reference angles) for the solve.
        sys.copy_pos_and_refangle(self, step)

        sys.init_folding()
        sys.ref_angle_backprop_a2ax(self, step)

        # sys.copy_refangle(self, step - 1)
        # sys.init_folding()
        sys.get_paramters_grad()
        sys.H.clear_all()

        sys.compute_Hessian(False)

        self.get_F(step, sys)

        # NOTE(review): requires a CUDA device for the sparse solve.
        F_array = self.F.to_torch(device='cuda:0')

        # Solve H p = F for the step's adjoint vector p.
        p = sys.H.solve(F_array)
        self.copy_z(sys, p)
        sys.tmp_z_frozen.fill(0)
        # Second Hessian pass with counting_z_frozen set — presumably
        # accumulates frozen-dof contributions; confirm against sys.
        sys.counting_z_frozen[None] = True
        sys.compute_Hessian(False)
        sys.counting_z_frozen[None] = False
        self.get_grad(p)
        # self.max_abs(step, sys)
        sys.contact_energy_backprop(True, self, step - 1, p)
        sys.ref_angle_backprop_x2a(self, step, p)
        if self.count_friction_grad:
            sys.contact_energy_backprop_friction(True, self, step - 1, p)
        else:
            self.get_parameters_grad(p, sys)

        # print(self.grad_friction_coef[None], end=" ")

        # Chain x_hat's dependence back to the two previous frames.
        if step > 0:
            self.get_prev_grad(sys, step)
        if step > 1:
            self.get_prev_prev_grad(sys, step)

    def get_loss(self, sys, pos_grad=False):
        """Seed pos_grad for every step from the system's loss kernels
        (position-based or force-based)."""
        for step in range(1, self.tot_timestep):
            # print(step)
            sys.copy_pos_and_refangle(self, step)
            if pos_grad:
                sys.compute_pos_grad(self, step)
            else:
                sys.compute_force_grad(self, step)

    def get_loss_slide(self, sys, pos_grad=False):
        """Sliding loss seed: unit x-gradient on every cloth-0 vertex at
        every step (runs in Python scope, elementwise on the host)."""
        for i, j in ti.ndrange(sys.cloths[0].NV, self.tot_timestep - 1):
            self.pos_grad[j + 1, sys.cloths[0].offset + i, 0] = 1

    def get_loss_card(self, sys):
        """Card loss seed: unit x-gradient on cloth-0 vertices at the final
        step only."""
        for i in range(sys.cloths[0].NV):
            self.pos_grad[self.tot_timestep - 1, sys.cloths[0].offset + i, 0] = 1

    @ti.kernel
    def get_loss_table(self, sys: ti.template()):
        """Table loss seed: -z gradient on cloth rows 5 and 10 at every step."""
        for i, j in ti.ndrange(sys.cloths[0].NV, self.tot_timestep - 1):
            if (ti.cast(i / (sys.cloths[0].N + 1), ti.i32) == 5) or (ti.cast(i / (sys.cloths[0].N + 1), ti.i32) == 10):
                self.pos_grad[j + 1, sys.cloths[0].offset + i, 2] = -1
185 |
186 |
--------------------------------------------------------------------------------
/code/engine/blob.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import cv2
4 | import matplotlib.pyplot as plt
5 | import os
6 |
def project(a, f=8.445e-04, m=173913.04, cx=3.200e+02, cy=2.400e+02, h=0.014):
    """Project a 3-D sensor-frame point onto 2-D fisheye pixel coordinates.

    Equidistant fisheye model: the pixel radius is ``m * f * theta`` where
    ``theta`` is the angle between the (height-shifted) point and the +z
    optical axis, and ``omega`` (shifted by pi) is its azimuth.

    Args:
        a: (3,) array-like point in the sensor frame.
        f, m: focal length and pixel-scale of the fisheye model.
        cx, cy: principal point in pixels.
        h: height offset added to a[2] before projecting.

    Returns:
        (2,) ndarray of pixel coordinates.
    """
    # BUGFIX: work on a copy — the old code did `a[2] += h` in place and
    # silently mutated the caller's array.
    a = np.asarray(a, dtype=np.float64).copy()
    a[2] += h
    b = np.array([[0., 0., 1.]])
    inner_product = (a * b).sum()
    a_norm = np.linalg.norm(a)
    b_norm = np.linalg.norm(b)
    # Clamp so floating-point rounding cannot push arccos out of [-1, 1].
    cos = np.clip(inner_product / (a_norm * b_norm), -1.0, 1.0)

    theta = np.arccos(cos)
    omega = np.arctan2(a[1], a[0]) + np.pi
    r = m * f * theta

    p = np.zeros((2))
    p[0] = r * np.cos(omega) + cx
    p[1] = r * np.sin(omega) + cy

    return p
26 |
def project_inverse(p, f=8.445e-04, m=173913.04,
                    cx=3.200e+02, cy=2.400e+02, r=0.015, h=0.014):
    """Invert the equidistant fisheye projection.

    Maps pixel coordinates ``p`` of shape (N, 2) back to 3-D points in the
    sensor frame. Each pixel defines a viewing direction; the scale ``k``
    places the point at distance ``r`` from a center offset by ``h`` along z
    (the quadratic-root form suggests a ray/sphere intersection — confirm
    against the sensor geometry).
    """
    centered = p - torch.tensor([cx, cy])
    omega = torch.atan2(centered[:, 1], centered[:, 0])   # azimuth
    theta = torch.norm(centered, dim=1) / (m * f)         # polar angle
    # Unit viewing direction per pixel.
    sin_t = torch.sin(theta)
    dx = -torch.cos(omega) * sin_t
    dy = -torch.sin(omega) * sin_t
    dz = torch.cos(theta)

    # Scale along the ray (kept in the original arithmetic form).
    k = (h*dz + (-h**2*dx**2 - h**2*dy**2 + r**2*dx**2 + r**2*dy**2 + r**2*dz**2)**0.5)
    points = k.unsqueeze(-1) * torch.stack([dx, dy, dz], dim=1)
    points[:, 2] -= h
    return points
44 |
# Shared state for the (commented-out) GelSight blob-tracking script below:
# set on the first processed frame, updated on every subsequent frame.
rest_pos = None
last_pos = None
47 |
48 | # for i in range(215, 400):
49 | # idx = i
50 | # path = "../data/gelsight-force-capture-press-twist-x_2023-06-27-16-48-28/frame%05d.png" % idx
51 | #
52 | # save_path = "../data/blob_data_rot_x"
53 | # if not os.path.exists(save_path):
54 | # os.mkdir(save_path)
55 | # img = cv2.imread(path, 0)
56 | # curve1 = 50
57 | # curve2 = 100
58 | # mask = img260, pos[:, 0]<390)
91 | # rest_pos = pos[mask]
92 | # last_pos = pos[mask]
93 | # pos_3d = project_inverse(rest_pos)
94 | # # for j in range(1):
95 | # # pos_now = project(pos_3d[j].detach().cpu().numpy())
96 | # # print(pos_now, rest_pos[j])
97 | # # fig = plt.figure()
98 | # # ax = fig.add_subplot(projection='3d')
99 | # # ax.set_xlim(-0.01, 0.01)
100 | # # ax.set_ylim(-0.01, 0.01)
101 | # # ax.set_zlim(-0.01, 0.01)
102 | # # ax.scatter(pos_3d[:, 0], pos_3d[:, 1], pos_3d[:, 2])
103 | # np.save(os.path.join(save_path, "pos_ori_3d.npy"), pos_3d.detach().cpu().numpy())
104 | # # np.save(os.path.join(save_path, "pos_ori_2d.npy"), rest_pos.detach().cpu().numpy())
105 | #
106 | # # for ww in range(pos_3d.shape[0]):
107 | # # print(pos_3d[ww], torch.norm(pos_3d[ww]))
108 | # # plt.show()
109 | #
110 | # pos = step_pos(last_pos, pos)
111 | # pos_3d = project_inverse(pos)
112 | # np.save(os.path.join(save_path, f"pos_3d_{i}.npy"), pos_3d.detach().cpu().numpy())
113 | # # print(pos.shape)
114 | # delta = pos-rest_pos
115 | #
116 | # last_pos = pos
117 | #
118 | # # plt.scatter(rest_pos[:, 0], rest_pos[:, 1])
119 | # # plt.scatter(pos[:, 0], pos[:, 1])
120 | # # plt.savefig(os.path.join(save_path, 'blob_tmp/%i.jpg'%i))
121 | # # plt.close()
122 | # print(i)
--------------------------------------------------------------------------------
/code/engine/contact_diff.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | import taichi.math as tm
3 |
@ti.func
def det(a:tm.vec3, b:tm.vec3, c:tm.vec3, diff, H, H_idx, G, G_idx):
    """Determinant of the 3x3 matrix with rows a, b, c (= a . (b x c)).

    When ``diff`` is true, also writes the gradient w.r.t. the 9 stacked
    inputs (a, b, c) into row ``G_idx`` of ``G`` and the 9x9 Hessian into
    slice ``H_idx`` of ``H``.
    """
    d = a[0]*b[1]*c[2] + a[1]*b[2]*c[0] + a[2]*b[0]*c[1] \
        - a[2]*b[1]*c[0] - a[1]*b[0]*c[2] - a[0]*b[2]*c[1]
    if diff:
        # Gradients of the triple product: d/da = b x c, d/db = c x a,
        # d/dc = a x b.
        grad_a = tm.vec3(b[1]*c[2]-b[2]*c[1], b[2]*c[0]-b[0]*c[2], b[0]*c[1]-b[1]*c[0])
        grad_b = tm.vec3(c[1]*a[2]-c[2]*a[1], c[2]*a[0]-c[0]*a[2], c[0]*a[1]-c[1]*a[0])
        grad_c = tm.vec3(a[1]*b[2]-a[2]*b[1], a[2]*b[0]-a[0]*b[2], a[0]*b[1]-a[1]*b[0])
        for i in range(3):
            G[G_idx, 0*3+i] = grad_a[i]
            G[G_idx, 1*3+i] = grad_b[i]
            G[G_idx, 2*3+i] = grad_c[i]
        # The determinant is trilinear, so each 3x3 off-diagonal Hessian
        # block is built from components of the remaining vector; the
        # on-diagonal blocks are zero and are left untouched.
        for i in range(3):
            j = i+1 if i<2 else 0   # next index, cyclic
            k = i-1 if i>0 else 2   # previous index, cyclic
            H[H_idx, 0*3+i, 1*3+j] = H[H_idx, 1*3+j, 0*3+i] = c[k]
            H[H_idx, 1*3+i, 2*3+j] = H[H_idx, 2*3+j, 1*3+i] = a[k]
            H[H_idx, 2*3+i, 0*3+j] = H[H_idx, 0*3+j, 2*3+i] = b[k]
            H[H_idx, 1*3+i, 0*3+j] = H[H_idx, 0*3+j, 1*3+i] = -c[k]
            H[H_idx, 2*3+i, 1*3+j] = H[H_idx, 1*3+j, 2*3+i] = -a[k]
            H[H_idx, 0*3+i, 2*3+j] = H[H_idx, 2*3+j, 0*3+i] = -b[k]
    return d
26 |
@ti.func
def cross(a:tm.vec3, b:tm.vec3, diff, H, H_idx, G, G_idx):
    """Norm of the cross product, |a x b| (despite the name, a scalar).

    When ``diff`` is true, writes the gradient w.r.t. the 6 stacked inputs
    (a, b) into row ``G_idx`` of ``G`` and the 6x6 Hessian into slice
    ``H_idx`` of ``H``. The derivatives divide by the norm ``c``, so they
    are undefined when a and b are parallel (c == 0).
    """
    # Scalar aliases consumed by the SymPy-generated Hessian code below.
    a0, a1, a2 = a[0], a[1], a[2]
    b0, b1, b2 = b[0], b[1], b[2]
    c1 = a[0]*b[1]-b[0]*a[1]
    c2 = a[0]*b[2]-b[0]*a[2]
    c3 = a[1]*b[2]-b[1]*a[2]
    c0 = c1**2 + c2**2 + c3**2
    c = c0**0.5

    if diff:
        grad_a = tm.vec3( c1*b[1]+c2*b[2], -c1*b[0]+c3*b[2], -c2*b[0]-c3*b[1]) / c
        grad_b = tm.vec3(-c1*a[1]-c2*a[2],  c1*a[0]-c3*a[2],  c2*a[0]+c3*a[1]) / c
        for i in range(3):
            G[G_idx, 0*3+i] = grad_a[i]
            G[G_idx, 1*3+i] = grad_b[i]

        # Hessian from SymPy (common-subexpression form; do not hand-edit).
        x0 = b1**2
        x1 = b2**2
        x2 = a0*b1
        x3 = a1*b0
        x4 = x2 - x3
        x5 = a0*b2
        x6 = a2*b0
        x7 = x5 - x6
        x8 = a1*b2
        x9 = a2*b1
        x10 = x8 - x9
        x11 = x10**2 + x4**2 + x7**2
        x12 = x11**(-0.5)
        x13 = b1*x4 + b2*x7
        x14 = x11**(-1.5)
        x15 = b0*x12
        x16 = b0*x4 - b2*x10
        x17 = -b1*x15 + x13*x14*x16
        x18 = b0*x7 + b1*x10
        x19 = -b2*x15 + x13*x14*x18
        x20 = a1*b1
        x21 = a2*b2
        x22 = a1*x4 + a2*x7
        x23 = -x12*(x20 + x21) + x13*x14*x22
        x24 = a0*x4 - a2*x10
        x25 = x13*x14
        x26 = x12*(2.0*x2 - x3) - x24*x25
        x27 = a0*x7 + a1*x10
        x28 = x12*(2.0*x5 - x6) - x25*x27
        x29 = b0**2
        x30 = x14*x16
        x31 = -b1*b2*x12 - x18*x30
        x32 = x30
        x33 = -x12*(x2 - 2.0*x3) - x22*x32
        x34 = a0*b0
        x35 = -x12*(x21 + x34) + x14*x16*x24
        x36 = x12*(2.0*x8 - x9) + x27*x32
        x37 = x18
        x38 = -x12*(x5 - 2.0*x6) - x14*x22*x37
        x39 = -x12*(x8 - 2.0*x9) + x14*x24*x37
        x40 = -x12*(x20 + x34) + x14*x18*x27
        x41 = a1**2
        x42 = a2**2
        x43 = a0*x12
        x44 = -a1*x43 + x14*x22*x24
        x45 = -a2*x43 + x14*x22*x27
        x46 = a0**2
        x47 = -a1*a2*x12 - x14*x24*x27

        # Symmetric 6x6 Hessian, rows/cols ordered (a0,a1,a2,b0,b1,b2).
        H[H_idx, 0, 0] = x12*(x0 + x1) - x13**2*x14
        H[H_idx, 0, 1] = x17
        H[H_idx, 0, 2] = x19
        H[H_idx, 0, 3] = x23
        H[H_idx, 0, 4] = x26
        H[H_idx, 0, 5] = x28
        H[H_idx, 1, 0] = x17
        H[H_idx, 1, 1] = x12*(x1 + x29) - x14*x16**2
        H[H_idx, 1, 2] = x31
        H[H_idx, 1, 3] = x33
        H[H_idx, 1, 4] = x35
        H[H_idx, 1, 5] = x36
        H[H_idx, 2, 0] = x19
        H[H_idx, 2, 1] = x31
        H[H_idx, 2, 2] = x12*(x0 + x29) - x14*x18**2
        H[H_idx, 2, 3] = x38
        H[H_idx, 2, 4] = x39
        H[H_idx, 2, 5] = x40
        H[H_idx, 3, 0] = x23
        H[H_idx, 3, 1] = x33
        H[H_idx, 3, 2] = x38
        H[H_idx, 3, 3] = x12*(x41 + x42) - x14*x22**2
        H[H_idx, 3, 4] = x44
        H[H_idx, 3, 5] = x45
        H[H_idx, 4, 0] = x26
        H[H_idx, 4, 1] = x35
        H[H_idx, 4, 2] = x39
        H[H_idx, 4, 3] = x44
        H[H_idx, 4, 4] = x12*(x42 + x46) - x14*x24**2
        H[H_idx, 4, 5] = x47
        H[H_idx, 5, 0] = x28
        H[H_idx, 5, 1] = x36
        H[H_idx, 5, 2] = x40
        H[H_idx, 5, 3] = x45
        H[H_idx, 5, 4] = x47
        H[H_idx, 5, 5] = x12*(x41 + x46) - x14*x27**2
    return c
--------------------------------------------------------------------------------
/code/engine/geometry.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import taichi as ti
3 | import taichi.math as tm
4 | import matplotlib.pyplot as plt
5 |
vec3 = ti.types.vector(3, ti.f64)

# Uniform background grid used to bucket surface triangles (keyed by their
# centroid) for broad-phase proximity queries.
grid_h = 0.003                        # cell edge length
grid_n = int(0.2 // grid_h) * 2       # number of cells per axis
grid_bound = grid_h * (grid_n-1) / 2  # half-extent used to clamp query points
max_n_particles = 100000              # capacity of the flat triangle buffers

grid_cnt = ti.field(ti.i32, shape=(grid_n, grid_n, grid_n))       # triangles per cell
grid_scnt = ti.field(ti.i32, shape=(grid_n, grid_n))              # running totals per (i, j) column (built in p2g)
grid_s2cnt = ti.field(ti.i32, shape=(grid_n,))                    # running totals per i-slab (built in p2g)
grid_baseidx = ti.field(ti.i32, shape=(grid_n, grid_n, grid_n))   # first flat-buffer slot of each cell
grid_active_range = ti.Vector.field(3, ti.i32, shape=(2,))        # [min, max] occupied cell indices
particle_v = ti.Vector.field(3, ti.f64, shape=(max_n_particles, 3))  # vertex positions of each bucketed triangle
particle_idx = ti.field(ti.i32, shape=(max_n_particles, 3))          # vertex indices of each bucketed triangle

# proj_num = ti.field(ti.i32, shape=())
22 |
@ti.func
def pt2tri(x:tm.vec3, p1:tm.vec3, p2:tm.vec3, p3:tm.vec3):
    """Closest feature of triangle (p1, p2, p3) to point x.

    Returns (c, d, w):
      c -- feature code: 0 face interior; 1/2/3 nearest vertex p1/p2/p3;
           -3/-1/-2 nearest edge p1p2 / p2p3 / p3p1
      d -- distance from x to the closest point on the triangle
      w -- barycentric weights of the closest point w.r.t. (p1, p2, p3)
    """
    eps = 0
    # Unit edge directions and the (outward-ordered) face normal.
    e1 = (p2-p1).normalized()
    e2 = (p3-p2).normalized()
    e3 = (p1-p3).normalized()
    n = -e1.cross(e3).normalized()
    # x1: x projected onto the triangle's plane.
    x1 = x - (x-p1).dot(n)*n
    d = 0.0
    c = 0
    w = vec3(0)
    # Each outer branch tests whether x1 lies outside one edge; the inner
    # branches pick the vertex or the edge-interior point as the closest
    # feature. If x1 is inside all three edges, it is in the face interior.
    if (x1-p1).cross(e1).dot(n) > eps:
        if (x1-p1).dot(e1) < -eps:
            c = 1 # p1
            d = (x-p1).norm()
            w = vec3(1, 0, 0)
        elif (x1-p2).dot(e1) > eps:
            c = 2 # p2
            d = (x-p2).norm()
            w = vec3(0, 1, 0)
        else:
            c = -3 # e12
            alpha = (x1-p1).dot(e1) / (p2-p1).dot(e1)
            x2 = p1 + alpha*(p2-p1)
            d = (x-x2).norm()
            w = vec3(1-alpha, alpha, 0)
    elif (x1-p2).cross(e2).dot(n) > eps:
        if (x1-p2).dot(e2) < -eps:
            c = 2 # p2
            d = (x-p2).norm()
            w = vec3(0, 1, 0)
        elif (x1-p3).dot(e2) > eps:
            c = 3 # p3
            d = (x-p3).norm()
            w = vec3(0, 0, 1)
        else:
            c = -1 # e23
            alpha = (x1-p2).dot(e2) / (p3-p2).dot(e2)
            x2 = p2 + alpha*(p3-p2)
            d = (x-x2).norm()
            w = vec3(0, 1-alpha, alpha)
    elif (x1-p3).cross(e3).dot(n) > eps:
        if (x1-p3).dot(e3) < -eps:
            c = 3 # p3
            d = (x-p3).norm()
            w = vec3(0, 0, 1)
        elif (x1-p1).dot(e3) > eps:
            c = 1 # p1
            d = (x-p1).norm()
            w = vec3(1, 0, 0)
        else:
            c = -2 # e31
            alpha = (x1-p3).dot(e3) / (p1-p3).dot(e3)
            x2 = p3 + alpha*(p1-p3)
            d = (x-x2).norm()
            w = vec3(alpha, 0, 1-alpha)
    else:
        # f123: inside the face — barycentric weights from signed sub-areas.
        d = (x-x1).norm()
        S = (p3 - p1).cross(p2 - p1).norm()
        w1 = (p3 - p2).cross(x1 - p2).dot(n) / S
        w2 = (p1 - p3).cross(x1 - p3).dot(n) / S
        w3 = (p2 - p1).cross(x1 - p1).dot(n) / S
        w = vec3(w1, w2, w3)
    return c, d, w
88 |
@ti.func
def grid_idx(x: vec3):
    """Cell coordinates (i, j, k) for point ``x``; each component is clamped
    to the grid bounds first, so every point maps to a valid cell."""
    half = grid_n // 2
    ci = ti.floor(tm.clamp(x[0], -grid_bound, grid_bound) / grid_h, ti.i32) + half
    cj = ti.floor(tm.clamp(x[1], -grid_bound, grid_bound) / grid_h, ti.i32) + half
    ck = ti.floor(tm.clamp(x[2], -grid_bound, grid_bound) / grid_h, ti.i32) + half
    return ci, cj, ck
95 |
@ti.kernel
def p2g(v:ti.template(), f:ti.template(), start: ti.i32, end: ti.i32):
    """Bucket triangles f[start:end] (vertices in v) into the uniform grid.

    Counting sort keyed by triangle centroid:
      1. clear counts over the previously active cell range;
      2. count triangles per cell and shrink grid_active_range to the
         occupied bounding box;
      3. build flat base offsets with prefix sums along z (grid_baseidx),
         then y (grid_scnt), then x (grid_s2cnt, serialized);
      4. scatter each triangle's vertices/indices into particle_v /
         particle_idx at its cell's slots.
    """
    # 1. Reset counts only over cells that were active last call.
    active_old = (
        (grid_active_range[0][0], grid_active_range[1][0]+1),
        (grid_active_range[0][1], grid_active_range[1][1]+1),
        (grid_active_range[0][2], grid_active_range[1][2]+1),
    )
    for i, j, k in ti.ndrange(active_old[0], active_old[1], active_old[2]):
        grid_cnt[i, j, k] = 0

    # 2. Count triangles per cell and track the occupied index range.
    grid_active_range[0] = (grid_n, grid_n, grid_n)
    grid_active_range[1] = (0, 0, 0)
    for i in range(start, end):
        mid_v = (v[f[i][0]] + v[f[i][1]] + v[f[i][2]])/3
        idx = grid_idx(mid_v)
        grid_cnt[idx] += 1
        ti.atomic_min(grid_active_range[0], idx)
        ti.atomic_max(grid_active_range[1], idx)

    active = (
        (grid_active_range[0][0], grid_active_range[1][0]+1),
        (grid_active_range[0][1], grid_active_range[1][1]+1),
        (grid_active_range[0][2], grid_active_range[1][2]+1),
    )

    # 3a. Prefix sums along z within each (i, j) column.
    for i, j in ti.ndrange(active[0], active[1]):
        grid_baseidx[i, j, active[2][0]] = grid_cnt[i, j, active[2][0]]
        for k in range(active[2][0]+1, active[2][1]):
            grid_baseidx[i, j, k] = grid_baseidx[i, j, k-1] + grid_cnt[i, j, k]

    # 3b. Prefix sums along y using each column's total.
    for i in ti.ndrange(active[0]):
        grid_scnt[i, active[1][0]] = grid_baseidx[i, active[1][0], active[2][1]-1]
        for j in range(active[1][0]+1, active[1][1]):
            grid_scnt[i, j] = grid_scnt[i, j-1] + grid_baseidx[i, j, active[2][1]-1]

    # 3c. Prefix sums along x (must run serially: each entry needs i-1).
    grid_s2cnt[active[0][0]] = grid_scnt[active[0][0], active[1][1]-1]
    ti.loop_config(serialize=True)
    for i in range(active[0][0]+1, active[0][1]):
        grid_s2cnt[i] = grid_s2cnt[i-1] + grid_scnt[i, active[1][1]-1]

    # 3d. Combine the three sums into exclusive base offsets per cell and
    # reset the counts so they can serve as scatter cursors.
    for i, j, k in ti.ndrange(active[0], active[1], active[2]):
        if i>active[0][0]:
            grid_baseidx[i, j, k] += grid_s2cnt[i-1]
        if j>active[1][0]:
            grid_baseidx[i, j, k] += grid_scnt[i, j-1]
        grid_baseidx[i, j, k] -= grid_cnt[i, j, k]
        grid_cnt[i, j, k] = 0

    max_margin = 0.0

    # 4. Scatter triangles; also measure the largest centroid-to-vertex
    # distance, i.e. how far a triangle can stick out of its cell.
    for i in range(start, end):
        mid_v = (v[f[i][0]] + v[f[i][1]] + v[f[i][2]])/3
        ti.atomic_max(max_margin, (mid_v-v[f[i][0]]).norm())
        ti.atomic_max(max_margin, (mid_v-v[f[i][1]]).norm())
        ti.atomic_max(max_margin, (mid_v-v[f[i][2]]).norm())
        idx = grid_idx(mid_v)
        pid = grid_baseidx[idx] + ti.atomic_add(grid_cnt[idx], 1)
        particle_v[pid, 0] = v[f[i][0]]
        particle_v[pid, 1] = v[f[i][1]]
        particle_v[pid, 2] = v[f[i][2]]
        particle_idx[pid, 0] = f[i][0]
        particle_idx[pid, 1] = f[i][1]
        particle_idx[pid, 2] = f[i][2]

    # Leftover from the commented-out sanity check below; currently unused.
    detect_ub = grid_h-max_margin
    # if detect_ub<0.01:
    #     print("detect ub:", detect_ub)
    #     exit(0)
164 |
@ti.kernel
def project_pair(sys:ti.template(), start: ti.i32, end: ti.i32, body_idx:ti.i32, debug:ti.i32):
    # For each query vertex sys.pos[i], i in [start, end), search the grid cells
    # filled by p2g for the closest triangle of body `body_idx`, and record the
    # projection (triangle vertex indices, barycentric weights, validity flag)
    # into sys.proj_* for later contact handling.
    for i in range(start, end):
        xq = sys.pos[i]
        q_idx = grid_idx(xq)
        # 3x3x3 cell neighborhood around the query, clipped to the active range
        r0 = ti.max(q_idx-ti.Vector([1, 1, 1]), grid_active_range[0])
        r1 = ti.min(q_idx+ti.Vector([1, 1, 1]), grid_active_range[1]) + ti.Vector([1, 1, 1])
        d_min = 1e6       # best (smallest) point-triangle distance so far
        cos_max = -1e6    # tie-break: prefer the triangle whose normal faces xq
        proj_flag = 0
        proj_idx = ti.Vector([0, 0, 0])
        proj_w = vec3(0)
        for gi, gj, gk in ti.ndrange((r0[0], r1[0]), (r0[1], r1[1]), (r0[2], r1[2])):
            for pid in range(grid_baseidx[gi, gj, gk], grid_baseidx[gi, gj, gk]+grid_cnt[gi, gj, gk]):
                v1 = particle_v[pid, 0]
                v2 = particle_v[pid, 1]
                v3 = particle_v[pid, 2]

                # c encodes the closest feature (0: face interior, >0: vertex,
                # <0: edge), d the distance, w the barycentric weights.
                c, d, w = pt2tri(xq, v1, v2, v3)
                vt = v1*w[0] + v2*w[1] + v3*w[2]
                nt = (v2-v1).cross(v3-v1).normalized()
                cos = (xq-vt).dot(nt)
                # accept if strictly closer, or near-equal distance but better aligned
                if d < d_min-1e-5 or (d < d_min+1e-5 and cos>cos_max):
                    d_min = d
                    cos_max = cos
                    proj_idx[0] = particle_idx[pid, 0]
                    proj_idx[1] = particle_idx[pid, 1]
                    proj_idx[2] = particle_idx[pid, 2]
                    proj_w = w
                    if c==0:
                        proj_flag = 1  # interior projection: always usable
                    elif c>0:
                        # closest to a vertex: unusable if that vertex is on the mesh border
                        proj_flag = not sys.border_flag[particle_idx[pid, c-1]]
                    else:
                        # closest to an edge: unusable only if both endpoints are border vertices
                        p1 = particle_idx[pid, 2] if c!=-3 else particle_idx[pid, 0]
                        p2 = particle_idx[pid, 2+c] if c!=-3 else particle_idx[pid, 1]
                        proj_flag = not (sys.border_flag[p1] and sys.border_flag[p2])
        v1 = sys.pos[proj_idx[0]]
        v2 = sys.pos[proj_idx[1]]
        v3 = sys.pos[proj_idx[2]]
        n1 = sys.vn[proj_idx[0]]
        n2 = sys.vn[proj_idx[1]]
        n3 = sys.vn[proj_idx[2]]
        v = proj_w[0]*v1 + proj_w[1]*v2 + proj_w[2]*v3
        n = proj_w[0]*n1 + proj_w[1]*n2 + proj_w[2]*n3
        temp_proj_dir = sys.proj_dir[body_idx, i]
        # latch which side of the surface the vertex is on when contact first appears
        if sys.proj_flag[body_idx, i] == 0 and proj_flag == 1:
            sys.proj_dir[body_idx, i] = (xq-v).dot(n) > 0
        if sys.proj_flag[body_idx, i] and sys.proj_dir[body_idx, i] != temp_proj_dir:
            print("???")  # debug marker: contact side flipped between queries
        # if proj_flag == 1 and body_idx == 2 and sys.bel(start + 1) == 1:
        #     proj_num[None] += 1
        #     if proj_num[None] % 10 == 0:
        #         print(n.normalized(), sys.proj_dir[body_idx, i])
        sys.proj_flag[body_idx, i] = proj_flag
        sys.proj_idx[body_idx, i] = proj_idx
        sys.proj_w[body_idx, i] = proj_w
222 |
def projection_query(sys, debug=False):
    """Run the broad-phase projection for every ordered pair of bodies.

    For each body, rasterize its faces into the spatial grid (p2g), then
    project the vertices of every *other* body against it (project_pair).
    """
    # proj_num[None] = 0
    bodies = list(enumerate(sys.body_list))
    for src_idx, src in bodies:
        p2g(sys.pos, sys.faces, src.f_start, src.f_end)
        for qry_idx, qry in bodies:
            if qry_idx == src_idx:
                continue  # TODO: contact relationship
            project_pair(sys, qry.v_start, qry.v_end, src_idx, debug)

    # print("potential contact", proj_num[None])
--------------------------------------------------------------------------------
/code/engine/gripper_single.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import numpy as np
4 | import torch
5 | import taichi as ti
6 | import matplotlib.pyplot as plt
7 |
@ti.func
def quat_to_rotmat(quat):
    # Convert a unit quaternion (w, x, y, z) to a 3x3 rotation matrix
    # (standard Hamilton convention). Assumes |quat| == 1; no normalization
    # is performed here.
    s = quat[0]
    x = quat[1]
    y = quat[2]
    z = quat[3]
    rotmat = ti.Matrix([[s * s + x * x - y * y - z * z, 2 * (x * y - s * z), 2 * (x * z + s * y)],
                        [2 * (x * y + s * z), s * s - x * x + y * y - z * z, 2 * (y * z - s * x)],
                        [2 * (x * z - s * y), 2 * (y * z + s * x), s * s - x * x - y * y + z * z]])
    return rotmat
18 |
@ti.func
def rotmat_to_quat(rotmat):
    # Inverse of quat_to_rotmat: recover (w, x, y, z) from a rotation matrix,
    # using only the w-dominant branch.
    # NOTE(review): divides by 4*s, so rotations with trace near -1 (angle
    # near pi) would divide by ~0 — confirm inputs stay away from that case.
    s = ti.math.sqrt(1 + rotmat[0][0] + rotmat[1][1] + rotmat[2][2]) / 2
    x = (rotmat[2][1] - rotmat[1][2]) / (4 * s)
    y = (rotmat[0][2] - rotmat[2][0]) / (4 * s)
    z = (rotmat[1][0] - rotmat[0][1]) / (4 * s)
    return ti.Vector([s, x, y, z])
26 |
@ti.data_oriented
class gripper:
    """Rigid multi-part gripper driven by per-part translation + quaternion.

    Vertex rest positions are stored relative to each part's origin (F_x);
    world positions (F_x_world) are pos + R @ F_x. gather_grad folds
    per-vertex position gradients back into per-part translational (d_pos)
    and rotational (d_angle) gradients.
    """
    def __init__(self, dt, n_verts, n_bound, n_surf, cnt):
        # dt: simulation timestep; n_verts: vertices per part;
        # n_bound / n_surf: boundary / surface vertex counts per part;
        # cnt: number of gripper parts.
        self.n_verts = n_verts
        self.dt = dt
        self.n_bound = n_bound
        self.n_surf = n_surf
        self.n_part = cnt

        # per-part vertex positions: local (rest) frame and world frame
        self.F_x = ti.Vector.field(3, dtype=ti.f64, shape=(cnt, n_verts))
        self.F_x_world = ti.Vector.field(3, dtype=ti.f64, shape=(cnt, n_verts))

        self.bound_idx = ti.field(ti.i32, shape=n_bound) # it's the same in all tactile
        self.surface_idx = ti.field(ti.i32, shape=n_surf)

        # rigid state per part: translation and unit quaternion (w, x, y, z)
        self.pos = ti.Vector.field(3, dtype=ti.f64, shape=cnt)
        self.rot = ti.Vector.field(4, dtype=ti.f64, shape=cnt)
        # accumulated gradients w.r.t. translation / rotation increments
        self.d_pos = ti.Vector.field(3, dtype=ti.f64, shape=cnt)
        self.d_angle = ti.Vector.field(3, dtype=ti.f64, shape=cnt)

        # NOTE(review): f32 while every other field is f64 — confirm intended
        self.rotmat = ti.Matrix.field(3, 3, dtype=ti.f32, shape=cnt)

    @ti.kernel
    def init_kernel(self, sys: ti.template(), pos_array: ti.types.ndarray()):
        # Part j corresponds to sys.elastics[j + 1]; effector 0 is not a part.
        for i in ti.static(range(1, sys.effector_cnt)):
            self.pos[i - 1] = ti.Vector([pos_array[i - 1, 0], pos_array[i - 1, 1], pos_array[i - 1, 2]])
            self.rot[i - 1] = ti.Vector([1.0, 0., 0., 0.])  # identity rotation
        for i in range(self.n_verts):
            for jj in ti.static(range(1, sys.effector_cnt)):
                j = jj - 1
                # store rest positions relative to the part origin
                self.F_x[j, i] = sys.elastics[jj].F_x[i] - self.pos[j]

        # classify vertices of elastics[1]; topology is shared by all parts
        cnt0 = 0
        cnt1 = 0
        for i in range(sys.elastics[1].n_verts):
            if sys.elastics[1].is_bottom(i) or sys.elastics[1].is_inner_circle(i):
                ii = ti.atomic_add(cnt0, 1)
                self.bound_idx[ii] = i
            else:
                if sys.elastics[1].is_surf(i):
                    ii = ti.atomic_add(cnt1, 1)
                    self.surface_idx[ii] = i
        # print(cnt0, cnt1)

    def init(self, sys, pos_array):
        # Initialize state from sys, then cache the rotation matrices.
        self.init_kernel(sys, pos_array)
        self.get_rotmat()

    def set(self, pos: ti.template(), rot: ti.template(), step: int):
        # Python-scope copy of the state recorded at `step` into the live state.
        for i in range(self.n_part):
            self.pos[i] = pos[step, i]
            self.rot[i] = rot[step, i]

    @ti.kernel
    def get_vert_pos(self):
        # world position = part translation + R @ local rest position
        for i in range(self.n_verts):
            for j in ti.static(range(self.n_part)):
                self.F_x_world[j, i] = self.pos[j] + self.rotmat[j] @ self.F_x[j, i]

    def get_rotmat(self):
        # Python-scope quaternion -> rotation-matrix refresh for every part.
        for j in range(self.n_part):
            s = self.rot[j][0]
            x = self.rot[j][1]
            y = self.rot[j][2]
            z = self.rot[j][3]
            self.rotmat[j] = ti.Matrix([[s*s+x*x-y*y-z*z, 2*(x*y-s*z), 2*(x*z+s*y)],
                                        [2*(x*y+s*z), s*s-x*x+y*y-z*z, 2*(y*z-s*x)],
                                        [2*(x*z-s*y), 2*(y*z+s*x), s*s-x*x-y*y+z*z]])
    @ti.func
    def quat_to_rotmat(self, quat):
        # Taichi-scope duplicate of the module-level quat_to_rotmat helper.
        s = quat[0]
        x = quat[1]
        y = quat[2]
        z = quat[3]
        rotmat = ti.Matrix([[s * s + x * x - y * y - z * z, 2 * (x * y - s * z), 2 * (x * z + s * y)],
                            [2 * (x * y + s * z), s * s - x * x + y * y - z * z, 2 * (y * z - s * x)],
                            [2 * (x * z - s * y), 2 * (y * z + s * x), s * s - x * x - y * y + z * z]])
        return rotmat

    @ti.func
    def rotmat_to_quat(self, rotmat):
        # Taichi-scope duplicate of the module-level rotmat_to_quat helper.
        # NOTE(review): divides by 4*s; degenerate for trace near -1.
        s = ti.math.sqrt(1 + rotmat[0][0] + rotmat[1][1] + rotmat[2][2]) / 2
        x = (rotmat[2][1] - rotmat[1][2]) / (4 * s)
        y = (rotmat[0][2] - rotmat[2][0]) / (4 * s)
        z = (rotmat[1][0] - rotmat[0][1]) / (4 * s)
        return ti.Vector([s, x, y, z])

    def step_simple(self, delta_pos: ti.template(), delta_rot: ti.template()):
        # Apply one control step (Python scope): translate each part and apply
        # the small-rotation increment delta_rot (3-vector) via the quaternion
        # product increment: real part = -d·v, vector part = w*d + d×v.
        for j in range(self.n_part):
            self.pos[j] += delta_pos[j]
            # NOTE(review): rotmat is refreshed again after the loop; this
            # in-loop call looks redundant — confirm before removing.
            self.get_rotmat()

            v2 = ti.Vector([self.rot[j][1], self.rot[j][2], self.rot[j][3]])
            real = -delta_rot[j].dot(v2)
            res = self.rot[j][0] * delta_rot[j] + delta_rot[j].cross(v2)

            self.rot[j][0] += real
            self.rot[j][1] += res[0]
            self.rot[j][2] += res[1]
            self.rot[j][3] += res[2]
            self.rot[j] = self.rot[j].normalized()  # keep the quaternion unit length

        self.get_rotmat()
        self.get_vert_pos()

    @ti.kernel
    def gather_grad(self, grad: ti.template(), sys: ti.template()):
        # Fold the flat per-vertex gradient vector (3 entries per vertex) of
        # the boundary vertices into per-part gradients.
        self.d_pos.fill(0)
        self.d_angle.fill(0)

        for i in range(self.n_bound):
            for j in ti.static(range(self.n_part)):
                xx = sys.elastics[j + 1].offset + self.bound_idx[i]
                gradient = ti.Vector([grad[xx * 3 + 0], grad[xx * 3 + 1], grad[xx * 3 + 2]])
                self.d_pos[j] += gradient
                # torque-style term: rotated lever arm crossed with the force
                self.d_angle[j] += (self.rotmat[j] @ self.F_x[j, self.bound_idx[i]]).cross(gradient)

        for j in ti.static(range(self.n_part)):
            # average over boundary vertices, then clamp to bound the update
            self.d_pos[j] /= 1.0 * self.n_bound
            self.d_angle[j] /= 1.0 * self.n_bound
            for k in ti.static(range(3)):
                self.d_pos[j][k] = ti.math.clamp(self.d_pos[j][k], -10, 10)
                self.d_angle[j][k] = ti.math.clamp(self.d_angle[j][k], -100, 100)

    @ti.kernel
    def update_bound(self, sys: ti.template()):
        # Push the gripper's world-space boundary vertices into each elastic.
        for i in range(self.n_bound):
            for j in ti.static(range(self.n_part)):
                sys.elastics[j + 1].F_x[self.bound_idx[i]] = self.F_x_world[j, self.bound_idx[i]]

    @ti.kernel
    def update_all(self, sys: ti.template()):
        # Push all gripper world-space vertices into each elastic.
        for i in range(self.n_verts):
            for j in ti.static(range(self.n_part)):
                sys.elastics[j + 1].F_x[i] = self.F_x_world[j, i]
--------------------------------------------------------------------------------
/code/engine/linalg.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | import numpy as np
3 | import random
4 |
@ti.func
def SPD_project_2d(A):
    # Project a symmetric 2x2 matrix onto the PSD cone: take the SVD and zero
    # every singular value whose left/right singular vectors point in opposite
    # directions (negative dot product), i.e. the corresponding eigenvalue is
    # negative.
    U, S, V = ti.svd(A)
    if U[0, 0]*V[0, 0] + U[1, 0]*V[1, 0] < 0:
        S[0, 0] = 0
    if U[0, 1]*V[0, 1] + U[1, 1]*V[1, 1] < 0:
        S[1, 1] = 0
    return U @ S @ V.transpose()
13 |
14 | @ti.data_oriented
15 | class SPD_Projector:
16 | def __init__(self, N, D, K) -> None:
17 | self.N, self.D, self.K = N, D, K
18 | self.T = ti.field(ti.f64, (N, D, D))
19 | self.Q = ti.field(ti.f64, (N, D, D))
20 |
    @ti.func
    def clear(self, T, Q, t, n):
        # Reset slice t of the scratch fields: T <- 0, Q <- identity
        # (upper-left n x n block only).
        for i in range(n):
            for j in range(n):
                T[t, i, j] = 0
                Q[t, i, j] = i==j
27 |
    @ti.func
    def Householder(self, A, T, Q, t, n): # transform A[t, :, :] in-place
        # Householder tridiagonalization of the symmetric n x n slice A[t].
        # Householder vectors are accumulated in T[t]; the orthogonal basis
        # is accumulated into Q[t] so eigenvectors can be recovered later.
        for i in range(n-2):
            # b = norm of the sub-column below the diagonal
            b = 0.0
            for j in range(i+1, n):
                b += A[t, j, i]**2
            b = b**0.5
            if b<1e-6:
                # column already (numerically) zero: nothing to reflect
                T[t, i, i] = -1
                for j in range(i+1, n):
                    A[t, i, j] = 0
            else:
                T[t, i, i] = 1
                if A[t, i+1, i]<0:
                    b *= -1  # choose sign to avoid cancellation
                T[t, i+1, i] = A[t, i+1, i] + b
                c = T[t, i+1, i]**2
                for j in range(i+2, n):
                    T[t, j, i] = A[t, j, i]
                    c += A[t, j, i]**2
                c = (2/c)**0.5
                for j in range(i+1, n):
                    T[t, j, i] *= c  # normalized Householder vector (scaled by sqrt(2))
                for j in range(i+1, n):
                    T[t, i, j] = 0
                # T[t, i, j] accumulates (A v)_j using only the lower triangle
                for j in range(i+1, n):
                    for k in range(i+1, j+1):
                        T[t, i, j] += A[t, j, k] * T[t, k, i]
                    for k in range(j+1, n):
                        T[t, i, j] += A[t, k, j] * T[t, k, i]
                d = 0.0
                for j in range(i+1, n):
                    d += T[t, i, j] * T[t, j, i]
                d *= 0.5
                for j in range(i+1, n):
                    T[t, i, j] -= T[t, j, i] * d  # w = Av - (v.Av/2) v
                    A[t, i, j] = A[t, j, i] = 0
                A[t, i+1, i] = A[t, i, i+1] = -b
                # rank-2 symmetric update: A <- A - v w^T - w v^T (lower triangle)
                for j in range(i+1, n):
                    for k in range(i+1, j+1):
                        A[t, j, k] -= T[t, i, j] * T[t, k, i] + T[t, i, k] * T[t, j, i]
                # accumulate the reflection into the basis Q
                for k in range(n):
                    s = 0.0
                    for j in range(i+1, n):
                        s += Q[t, k, j] * T[t, j, i]
                    for j in range(i+1, n):
                        Q[t, k, j] -= s*T[t, j, i]
        # mirror the last off-diagonal entry into the upper triangle
        A[t, n-2, n-1] = A[t, n-1, n-2]
76 |
77 | @ti.func
78 | def QR(self, A, T, Q, t, n, k):
79 | # subd = 0.0
80 | for j in range(k):
81 | m = 0
82 | for i in range(n-1):
83 | if ti.abs(A[t, i+1, i])>1e-5:
84 | m = i+2
85 | if m==0:
86 | break
87 | a = A[t, m-2, m-2]
88 | b = A[t, m-2, m-1]
89 | c = A[t, m-1, m-1]
90 | d = (a-c)/2
91 | sd = 1 if d>0 else -1
92 | mu = c
93 | if ti.abs(b)>1e-6:
94 | mu -= (sd * b**2)/(ti.abs(d)+(d**2+b**2)**0.5)
95 | for i in range(n):
96 | A[t, i, i] -= mu
97 | for i in range(m-1):
98 | a = A[t, i, i]
99 | b = A[t, i, i+1]
100 | e = A[t, i+1, i]
101 | d = A[t, i+1, i+1]
102 | s = ti.abs(e/(a**2+e**2)**0.5) if ti.abs(e)>1e-5 else 0
103 | if a*e<0:
104 | s *= -1
105 | c = ti.max(1-s**2, 0)**0.5
106 | T[t, 0, i] = s
107 | A[t, i, i] = a*c+e*s
108 | A[t, i, i+1] = b*c+d*s
109 | A[t, i+1, i+1] = d*c-b*s
110 | if i0:
145 | for j in range(n):
146 | v2 = v*self.Q[t, j, i]
147 | for k in range(n):
148 | A[t, j, k] += v2*self.Q[t, k, i]
149 |
@ti.kernel
def test_kernel(A:ti.template(), p:ti.template()):
    # Single-iteration kernel wrapper: SPD_Projector.project is a ti.func and
    # can only be invoked from inside a kernel.
    for i in range(1):
        p.project(A, 0, 9)
154 |
if __name__ == '__main__':
    # Self-test: project a random symmetric 9x9 matrix onto the PSD cone and
    # print its eigenvalues before and after (negatives should become ~0).
    ti.init(ti.gpu, random_seed=5, debug=True, default_fp=ti.f64)
    random.seed(1)
    n = 9
    p = SPD_Projector(1, n, n)
    A = ti.field(ti.f64, (1, n, n))
    for i in range(n):
        for j in range(n):
            A[0, i, j] = random.random()
    for i in range(n):
        for j in range(i):
            A[0, i, j] = A[0, j, i]  # symmetrize: copy upper triangle down
    A0 = A.to_numpy(dtype='float64')[0].copy()  # keep the original for comparison
    test_kernel(A, p)
    A = A.to_numpy(dtype='float64')[0]
    print(np.linalg.eigh(A0)[0])
    print(np.linalg.eigh(A)[0])
--------------------------------------------------------------------------------
/code/engine/readfile.py:
--------------------------------------------------------------------------------
def read_node(filename="../data/tactile.node"):
    """Read a TetGen-style .node file.

    The first whitespace-separated field of the header line is the vertex
    count; each following line is "<index> <x> <y> <z> ...". The leading
    index column is dropped.

    Returns:
        (tot_cnt, positions): vertex count and a list of per-vertex float
        coordinate lists.
    """
    # FIX: use a context manager so the handle is closed even on a parse
    # error; also drop the unused `maxx` accumulator and stop shadowing the
    # loop variable inside the comprehension.
    with open(filename, encoding='utf-8') as file:
        header = [tok for tok in file.readline().split(' ') if tok.strip()]
        tot_cnt = int(header[0])
        positions = []
        for _ in range(tot_cnt):
            line = file.readline()
            fields = [float(tok) for tok in line.split(" ") if tok.strip()]
            positions.append(fields[1:])
    return tot_cnt, positions
16 |
def read_smesh(filename="../data/tactile.face"):
    """Read a TetGen-style .face file.

    The first field of the header line is the face count; each following
    line is "<index> <v0> <v1> <v2> ...". Only the three vertex indices
    (columns 1..3) are kept.

    Returns:
        (tot_cnt, f2v): face count and a list of 3-int vertex index lists.
    """
    # FIX: context manager guarantees the file is closed on any exit path.
    with open(filename, encoding='utf-8') as file:
        header = [tok for tok in file.readline().split(' ') if tok.strip()]
        tot_cnt = int(header[0])

        f2v = []
        for _ in range(tot_cnt):
            line = file.readline()
            fields = [int(tok) for tok in line.split(" ") if tok.strip()]
            f2v.append(fields[1:4])

    return tot_cnt, f2v
34 |
def read_ele(filename="../data/tactile.ele"):
    """Read a TetGen-style .ele (tetrahedral element) file.

    The first field of the header line is the element count; each following
    line is "<index> <v0> <v1> <v2> <v3> ...". The leading element index is
    dropped; all remaining columns are kept.

    Returns:
        (tot_cnt, F_verts): element count and a list of vertex index lists.
    """
    # FIX: context manager guarantees the file is closed on any exit path.
    with open(filename, encoding='utf-8') as file:
        header = [tok for tok in file.readline().split(' ') if tok.strip()]
        tot_cnt = int(header[0])

        F_verts = []
        for _ in range(tot_cnt):
            line = file.readline()
            fields = [int(tok) for tok in line.split(" ") if tok.strip()]
            F_verts.append(fields[1:])

    return tot_cnt, F_verts
52 |
def build_tactile_mesh():
    """Assemble the default tactile .node/.face data into an OBJ via trimesh."""
    import trimesh
    filename = "../data/tactile3.obj"
    n_verts, nodes = read_node()
    n_surfaces, faces = read_smesh()
    mesh = trimesh.Trimesh(vertices=nodes, faces=faces)
    mesh.export(filename)
60 |
def save_ply():
    """Write the default tactile surface (.node/.face) to ../surf_mesh.ply."""
    import open3d as o3d
    import numpy as np

    _, tri = read_smesh()
    _, pts = read_node()

    geometry = o3d.geometry.TriangleMesh()
    geometry.triangles = o3d.utility.Vector3iVector(np.array(tri))
    geometry.vertices = o3d.utility.Vector3dVector(np.array(pts))
    geometry.compute_vertex_normals()

    o3d.io.write_triangle_mesh("../surf_mesh.ply", geometry)
73 |
def read_hdf5(filename):
    """Open an HDF5 file read-only and return the handle.

    The caller owns the handle and is responsible for closing it.
    """
    import h5py
    f = h5py.File(filename, "r")
    return f
78 |
def read_force(filename):
    """Load the "force_measure" dataset from an HDF5 capture as a numpy array.

    The z component (column 2) is shifted by -0.44 — presumably a sensor
    tare/bias offset; NOTE(review): confirm against the capture setup.
    """
    import numpy as np
    data = read_hdf5(filename)
    # for key in data.keys():
    #     print(key)
    force_data = np.array(data["force_measure"])
    # print(force_data)
    # force_data = force_data[::-1]
    # print("Force shape:", force_data.shape)
    force_data[:, 2] -= 0.44
    # print(force_data[450:550])
    return force_data
91 |
def read_pos(filename):
    """Load the "tool_pose" dataset from an HDF5 capture as a numpy array.

    Also prints every dataset key and the first pose row (debug output that
    callers currently rely on seeing).
    """
    import numpy as np
    data = read_hdf5(filename)
    for key in data.keys():
        print(key)
    pos_data = np.array(data["tool_pose"])
    print(pos_data[0])
    # print(pos_data[140:190])
    # print(pos_data[450:550])
    # print(pos_data[:, 2].min())
    return pos_data
103 |
def get_surface_new():
    """Radially inflate vertices near the origin of ../tactile.ply and write
    the result to ../tactile_new.ply.

    Vertices within 0.007 of the origin are scaled by 0.0075/0.006 (~1.25x).
    NOTE(review): the 0.007 threshold vs the 0.006 divisor looks inconsistent
    — confirm the intended radii.
    """
    import open3d as o3d
    import numpy as np
    mesh = o3d.io.read_triangle_mesh("../tactile.ply")
    vert = mesh.vertices
    vert = np.array(vert)
    for i in range(vert.shape[0]):
        if np.linalg.norm(vert[i]) < 0.007:
            vert[i] *= 0.0075 / 0.006
    mesh.vertices = o3d.utility.Vector3dVector(vert)
    o3d.io.write_triangle_mesh(f"../tactile_new.ply", mesh)
115 |
116 |
def save_cloth_mesh(cloth, path):
    """Export a cloth (Taichi fields `pos` and `f2v`) as a triangle mesh.

    Args:
        cloth: object exposing `pos` (vertex positions) and `f2v` (triangle
            vertex indices) as Taichi fields with `.to_numpy()`.
        path: output mesh path passed to Open3D.
    """
    import open3d as o3d
    import numpy as np
    mesh = o3d.geometry.TriangleMesh()
    vertices = cloth.pos.to_numpy(dtype='float64')
    # FIX: triangle indices must be integers — Vector3iVector rejects a
    # float64 array; the original exported f2v as float64.
    faces = cloth.f2v.to_numpy(dtype='int32')

    mesh.triangles = o3d.utility.Vector3iVector(np.array(faces))
    mesh.vertices = o3d.utility.Vector3dVector(np.array(vertices))
    mesh.compute_vertex_normals()

    o3d.io.write_triangle_mesh(path, mesh)
129 |
def get_score(path, step, cmaes=False):
    """Best reward among the first `step` entries of a saved reward array.

    CMA-ES runs store negated, offset costs, so they are mapped back with
    r -> -r - 5.1 before taking the maximum.
    """
    import numpy as np
    window = np.load(path)[:step]
    if cmaes:
        window = -window - 5.1
    return window.max()
136 |
137 | # get_surface_new()
138 |
139 | # read_pos("../data/gelsight-force-capture-press-twist-x_2023-06-27-16-48-28.hdf5")
140 | # read_force("../data/gelsight-force-capture-press-sphere_2023-06-16-12-38-26.hdf5")
--------------------------------------------------------------------------------
/code/engine/sparse_solver.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | import torch
3 | from torch.utils.dlpack import from_dlpack, to_dlpack
4 | import cupy as cp
5 | from cupyx.scipy.sparse import csr_matrix
6 | from cupyx.scipy.sparse.linalg import spsolve
7 | from cupyx.scipy.sparse.linalg import cg
8 |
@ti.data_oriented
class SparseMatrix:
    """Dense-backed n x n sparse matrix assembled on the GPU.

    Values live in a dense (n, n) Taichi field; per-row occupancy
    (row_cnt / row_idx) records which columns are active so the matrix can
    be exported to CSR and solved with CuPy (direct spsolve or CG). The
    timestamp / active_flag pair lets add() register an entry as active at
    most once per generation without scanning the whole (n, n) field.
    """
    def __init__(self, n, use_cg=False, device="cuda:0"):
        self.n = n
        self.timestamp = ti.field(ti.i32, shape=())        # generation counter for lazy activation
        self.active_flag = ti.field(ti.i32, shape=(n, n))  # last generation each entry was touched
        self.value = ti.field(ti.f64, shape=(n, n))        # dense entry storage
        self.row_cnt = ti.field(ti.i32, shape=(n,))        # number of active entries per row
        self.row_idx = ti.field(ti.i32, shape=(n, n))      # active column indices per row
        self.use_cg = use_cg
        self.device = device

    @ti.kernel
    def clear_all(self):
        # Start a new generation: zero only the previously-active entries,
        # then reset the occupancy counters.
        self.timestamp[None] += 1
        for i in range(self.n):
            for _j in range(self.row_cnt[i]):
                j = self.row_idx[i, _j]
                self.value[i, j] = 0
        for i in range(self.n):
            self.row_cnt[i] = 0

    @ti.func
    def add(self, i, j, v):
        # Accumulate v into entry (i, j); on first touch this generation,
        # register the column in row i's active list. atomic_max + compare
        # ensures only one thread registers the entry.
        timestamp = self.timestamp[None]
        self.value[i, j] += v
        t0 = ti.atomic_max(self.active_flag[i, j], timestamp)
        if t0 != timestamp:
            self.active_flag[i, j] = timestamp
            self.row_idx[i, ti.atomic_add(self.row_cnt[i], 1)] = j

    @ti.func
    def sub(self, i, j, v):
        # Convenience wrapper: subtract v from entry (i, j).
        self.add(i, j, -v)

    @ti.kernel
    def to_ndarray(self, arr:ti.types.ndarray()):
        # Scatter the active entries into a caller-provided dense array.
        for i in range(self.n):
            for _j in range(self.row_cnt[i]):
                j = self.row_idx[i, _j]
                arr[i, j] = self.value[i, j]

    @ti.kernel
    def check_nan(self):
        # Debug helper: scan the full dense storage and report any NaN.
        has_nan = False
        for i in range(self.n):
            for j in range(self.n):
                # j = self.row_idx[i, _j]
                if ti.math.isnan(self.value[i, j]):
                    has_nan = True
        if has_nan:
            print("nan in matrix!!!")

    def check_PD(self):
        # Debug helper: eigendecompose the dense matrix on the CPU and warn
        # if it is not (numerically) positive definite.
        H = self.value.to_torch().double()
        L, V = torch.linalg.eigh(H.cpu())
        if L.min() < -1e-5:
            print("not PD")
            return False
        return True

    @ti.kernel
    def fill_indptr(self, indptr:ti.types.ndarray()):
        # CSR row-pointer array via a serialized prefix sum over row counts.
        ti.loop_config(serialize=True)
        indptr[0] = 0
        for i in range(self.n):
            indptr[i+1] = indptr[i] + self.row_cnt[i]

    @ti.kernel
    def fill_value(self, indptr:ti.types.ndarray(), ind:ti.types.ndarray(), v:ti.types.ndarray()):
        # CSR column-index and value arrays (columns within a row stay in
        # insertion order, i.e. unsorted — accepted by cupyx csr_matrix).
        for i in range(self.n):
            offset = indptr[i]
            for j in range(self.row_cnt[i]):
                ind[offset+j] = j1 = self.row_idx[i, j]
                v[offset+j] = self.value[i, j1]

    def solve(self, b):
        # Solve H x = b on the GPU: export to CSR, hand the buffers to CuPy
        # zero-copy via DLPack, then run CG or a direct sparse solve.
        indptr = torch.zeros((self.n+1,), device=self.device, dtype=torch.int32)
        self.fill_indptr(indptr)
        nzz = indptr[-1]
        ind = torch.zeros((nzz,), device=self.device, dtype=torch.int32)
        v = torch.zeros((nzz,), device=self.device, dtype=torch.float64)
        self.fill_value(indptr, ind, v)
        indptr = cp.from_dlpack(to_dlpack(indptr))
        ind = cp.from_dlpack(to_dlpack(ind))
        v = cp.from_dlpack(to_dlpack(v))
        b = cp.from_dlpack(to_dlpack(b.double()))
        H = csr_matrix((v, ind, indptr), shape=(self.n, self.n))
        # print('solve: n = %d, nzz = %d' % (self.n, int(nzz)))
        if self.use_cg:
            x0 = cp.zeros_like(b)
            x, info = cg(H, b, x0, tol=1e-6)
            x = from_dlpack(x.toDlpack())
        else:
            x = from_dlpack(spsolve(H, b).toDlpack())
        # print('done')
        return x

    def build(self):
        # NOTE(review): self.to_taichi_sparse_builder is not defined in this
        # class (not visible in this file) — confirm it exists elsewhere or
        # this method is dead code.
        # self.check_PD()
        builder = ti.linalg.SparseMatrixBuilder(self.n, self.n, 10000000)
        self.to_taichi_sparse_builder(builder)
        return builder.build()

    # def solve(self, b, tol=1e-6, max_iter=1000):
    #
    #     indptr = torch.zeros((self.n + 1,), device='cuda:0', dtype=torch.int32)
    #     self.fill_indptr(indptr)
    #     nzz = indptr[-1]
    #     ind = torch.zeros((nzz,), device='cuda:0', dtype=torch.int32)
    #     v = torch.zeros((nzz,), device='cuda:0', dtype=torch.float64)
    #     self.fill_value(indptr, ind, v)
    #     indptr = cp.from_dlpack(to_dlpack(indptr))
    #     ind = cp.from_dlpack(to_dlpack(ind))
    #     v = cp.from_dlpack(to_dlpack(v))
    #     b = cp.from_dlpack(to_dlpack(b.double()))
    #     H = csr_matrix((v, ind, indptr), shape=(self.n, self.n))
    #
    #     x0 = cp.zeros_like(b)
    #     x = x0.copy()
    #     r = b - H.dot(x)
    #     p = r.copy()
    #     r_dot_r = cp.dot(r, r)
    #
    #     for i in range(max_iter):
    #         if r_dot_r < tol:
    #             break
    #         Hp = H.dot(p)
    #         alpha = r_dot_r / cp.dot(p, Hp)
    #         x += alpha * p
    #         r -= alpha * Hp
    #
    #         new_r_dot_r = cp.dot(r, r)
    #         beta = new_r_dot_r / r_dot_r
    #         p = r + beta * p
    #         r_dot_r = new_r_dot_r
    #
    #     x = from_dlpack(x.toDlpack())
    #     return x
--------------------------------------------------------------------------------
/code/optimizer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/code/optimizer/__init__.py
--------------------------------------------------------------------------------
/code/optimizer/optim.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 |
@ti.data_oriented
class Adam:
    """Adam optimizer over a (tot_timestep, action_dim) Taichi parameter field."""
    def __init__(self, parameters_shape, lr, beta_1, beta_2, eps):
        self.tot_timestep = parameters_shape[0]
        self.action_dim = parameters_shape[1]
        self.lr = lr
        self.beta_1 = float(beta_1)
        self.beta_2 = beta_2
        self.eps = eps
        # first / second moment accumulators
        self.momentum_buffer = ti.field(ti.f64, shape=(self.tot_timestep, self.action_dim))
        self.v_buffer = ti.field(ti.f64, shape=(self.tot_timestep, self.action_dim))
        self.iter = ti.field(ti.f64, shape=())  # step counter for bias correction

    @ti.kernel
    def step(self, parameters: ti.template(), grads: ti.template()):
        # One Adam update; self.iter is only read inside the parallel loop,
        # so every element uses the same bias-correction factor.
        beta_1 = self.beta_1
        beta_2 = self.beta_2
        epsilon = self.eps
        for i, j in ti.ndrange(self.tot_timestep, self.action_dim):
            mij = self.momentum_buffer[i, j]
            grad = grads[i, j]
            self.momentum_buffer[i, j] = beta_1 * mij + (1 - beta_1) * grad
            self.v_buffer[i, j] = beta_2 * self.v_buffer[i, j] + (1 - beta_2) * (grad * grad)
            m_cap = self.momentum_buffer[i, j] / (1 - (beta_1 ** (self.iter[None] + 1))) # calculates the bias-corrected estimates
            v_cap = self.v_buffer[i, j] / (1 - (beta_2 ** (self.iter[None] + 1))) # calculates the bias-corrected estimates
            # NOTE(review): epsilon sits inside the sqrt (sqrt(v+eps)), unlike
            # canonical Adam's sqrt(v)+eps — kept as-is.
            parameters[i, j] -= (self.lr * m_cap) / (ti.math.sqrt(v_cap + epsilon))
        self.iter[None] += 1.0

    def reset(self):
        # Restart optimizer state (fresh moments and bias correction).
        self.iter[None] = 0.0
        self.momentum_buffer.fill(0)
        self.v_buffer.fill(0)
35 |
@ti.data_oriented
class Adam_single:
    """Adam over a 3-D (tot_timestep, dim1, dim2) parameter field, with a
    learning rate that decays by `discount` every 10 steps and a NaN check
    on the incoming gradients."""
    def __init__(self, parameters_shape, lr, beta_1, beta_2, eps, discount=0.9):
        self.tot_timestep = parameters_shape[0]
        self.action_dim1 = parameters_shape[1]
        self.action_dim2 = parameters_shape[2]
        self.beta_1 = float(beta_1)
        self.beta_2 = beta_2
        self.eps = eps
        # first / second moment accumulators
        self.momentum_buffer = ti.field(ti.f64, shape=(self.tot_timestep, self.action_dim1, self.action_dim2))
        self.v_buffer = ti.field(ti.f64, shape=(self.tot_timestep, self.action_dim1, self.action_dim2))
        self.iter = ti.field(ti.f64, shape=())  # step counter for bias correction
        self.lr = ti.field(ti.f64, shape=())    # mutable so it can decay over time
        self.lr[None] = lr
        self.ori_lr = lr            # kept to restore the rate on reset()
        self.discount = discount    # decay factor applied every 10 steps

    @ti.kernel
    def step(self, parameters: ti.template(), grads: ti.template()):
        beta_1 = self.beta_1
        beta_2 = self.beta_2
        epsilon = self.eps
        has_nan = False
        for i, j, k in ti.ndrange(self.tot_timestep, self.action_dim1, self.action_dim2):
            mij = self.momentum_buffer[i, j, k]
            grad = grads[i, j, k]
            self.momentum_buffer[i, j, k] = beta_1 * mij + (1 - beta_1) * grad
            self.v_buffer[i, j, k] = beta_2 * self.v_buffer[i, j, k] + (1 - beta_2) * (grad * grad)
            m_cap = self.momentum_buffer[i, j, k] / (1 - (beta_1 ** (self.iter[None] + 1))) # calculates the bias-corrected estimates
            v_cap = self.v_buffer[i, j, k] / (1 - (beta_2 ** (self.iter[None] + 1))) # calculates the bias-corrected estimates
            parameters[i, j, k] -= (self.lr[None] * m_cap) / (ti.math.sqrt(v_cap + epsilon))
            if ti.math.isnan(grads[i, j, k]):
                has_nan = True

        if has_nan:
            print("nan in gripper grid!!")
        self.iter[None] += 1.0

        # decay the learning rate every 10 optimizer steps
        if int(self.iter[None]) % 10 == 0:
            self.lr[None] *= self.discount

    def reset(self):
        # Restore the initial learning rate and clear optimizer state.
        self.iter[None] = 0.0
        self.lr[None] = self.ori_lr
        self.momentum_buffer.fill(0)
        self.v_buffer.fill(0)
82 |
83 |
@ti.data_oriented
class SGD_single:
    """Plain gradient descent over a 3-D parameter field.

    Mirrors Adam_single's constructor signature (beta_1/beta_2/eps are
    accepted but unused) so the two optimizers are interchangeable.
    """
    def __init__(self, parameters_shape, lr, beta_1, beta_2, eps):
        self.tot_timestep = parameters_shape[0]
        self.action_dim1 = parameters_shape[1]
        self.action_dim2 = parameters_shape[2]
        self.lr = lr
        self.beta_1 = float(beta_1)
        self.beta_2 = beta_2
        self.eps = eps
        # self.momentum_buffer = ti.field(ti.f64, shape=(self.tot_timestep, self.action_dim1, self.action_dim2))
        # self.v_buffer = ti.field(ti.f64, shape=(self.tot_timestep, self.action_dim1, self.action_dim2))
        # self.iter = ti.field(ti.f64, shape=())

    @ti.kernel
    def step(self, parameters: ti.template(), grads: ti.template()):
        # p <- p - lr * g, elementwise
        for i, j, k in ti.ndrange(self.tot_timestep, self.action_dim1, self.action_dim2):
            parameters[i, j, k] -= self.lr * grads[i, j, k]

    def reset(self):
        # Stateless optimizer: nothing to reset.
        pass
--------------------------------------------------------------------------------
/code/scripts/run_RL_balance.sh:
--------------------------------------------------------------------------------
# Train RL (SAC) on the balance task from a saved state.
# FIX: invoke training/training_env.py — every sibling run_RL_* script uses
# that path and training_env.py lives under code/training/.
python training/training_env.py \
--num_env 1 \
--num_eval_envs 1 \
--tot_step 50 \
--env balance \
--task_name balance_RL \
--Kb 100.0 \
--mu 5.0 \
--load_dir ../data/balance_state \
--model SAC
--------------------------------------------------------------------------------
/code/scripts/run_RL_flatlift.sh:
--------------------------------------------------------------------------------
1 | python training/training_env.py \
2 | --num_env 1 \
3 | --num_eval_envs 1 \
4 | --tot_step 50 \
5 | --env balancing \
6 | --task_name throwing_RL \
7 | --Kb 100.0 \
8 | --mu 5.0 \
9 | --load_dir ../data/throwing_state \
10 | --reward_name compute_reward_throwing_RL \
11 | --model SAC
--------------------------------------------------------------------------------
/code/scripts/run_RL_folding.sh:
--------------------------------------------------------------------------------
1 | python training/training_env.py \
2 | --num_env 1 \
3 | --num_eval_envs 1 \
4 | --tot_step 40 \
5 | --env folding \
6 | --task_name folding_reward_7 \
7 | --Kb 400.0 \
8 | --mu 10.0 \
9 | --reward_name compute_reward_7 \
10 | --model RecurrentPPO
--------------------------------------------------------------------------------
/code/scripts/run_RL_following.sh:
--------------------------------------------------------------------------------
1 | python training/training_env.py \
2 | --num_env 1 \
3 | --num_eval_envs 1 \
4 | --tot_step 50 \
5 | --env interact \
6 | --task_name following \
7 | --Kb 100.0 \
8 | --mu 5.0 \
9 | --reward_name compute_reward_1 \
10 | --model RecurrentPPO
--------------------------------------------------------------------------------
/code/scripts/run_RL_forming.sh:
--------------------------------------------------------------------------------
1 | python training/training_env.py \
2 | --num_env 1 \
3 | --num_eval_envs 1 \
4 | --tot_step 50 \
5 | --env forming \
6 | --task_name forming_RL \
7 | --Kb 200.0 \
8 | --mu 5.0 \
9 | --model RecurrentPPO
--------------------------------------------------------------------------------
/code/scripts/run_RL_lifting.sh:
--------------------------------------------------------------------------------
1 | python training/training_env.py \
2 | --num_env 1 \
3 | --num_eval_envs 1 \
4 | --tot_step 50 \
5 | --env lifting \
6 | --task_name lift_RL \
7 | --Kb 100.0 \
8 | --mu 5.0 \
9 | --model SAC
--------------------------------------------------------------------------------
/code/scripts/run_RL_pick_fold.sh:
--------------------------------------------------------------------------------
# Train RL (RecurrentPPO) on the pick-and-fold task.
# FIX: invoke training/training_env.py — every sibling run_RL_* script uses
# that path and training_env.py lives under code/training/.
python training/training_env.py \
--num_env 1 \
--num_eval_envs 1 \
--tot_step 50 \
--env pick \
--task_name pick_fold_RL \
--Kb 100.0 \
--mu 5.0 \
--reward_name compute_reward_pick_fold \
--model RecurrentPPO
--------------------------------------------------------------------------------
/code/scripts/run_RL_separating.sh:
--------------------------------------------------------------------------------
# Train RL (SAC) on the separating task (interact env, soft bending Kb).
# FIX: invoke training/training_env.py — every sibling run_RL_* script uses
# that path and training_env.py lives under code/training/.
python training/training_env.py \
--num_env 1 \
--num_eval_envs 1 \
--tot_step 50 \
--env interact \
--task_name interact_sep_soft \
--Kb 0.1 \
--mu 5.0 \
--reward_name compute_reward \
--model SAC
11 |
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_balance.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 10 \
3 | --tot_step 50 \
4 | --iter 10 \
5 | --trial 1 \
6 | --pop_size 40 \
7 | --sigma 2.5 \
8 | --env balancing \
9 | --Kb 100 \
10 | --mu 5.0 \
11 | --max_dist 0.001 \
12 | --reward_name compute_reward_all \
13 | --load_dir ../data/balance_state
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_bouncing.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_parameter.py \
2 | --tot_step 30 \
3 | --iter 5 \
4 | --trial 1 \
5 | --pop_size 10 \
6 | --sigma 0.2 \
7 | --env bouncing \
8 | --Kb 100 \
9 | --mu 0.5 \
10 | --traj init_traj_table
11 |
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_card.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_parameter.py \
2 | --tot_step 50 \
3 | --iter 5 \
4 | --trial 0 \
5 | --pop_size 10 \
6 | --sigma 0.1 \
7 | --env card \
8 | --Kb 1000 \
9 | --mu 1.0 \
10 | --mu_cloth 0.001 \
11 | --traj init_traj_card
12 |
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_folding.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 10 \
3 | --tot_step 80 \
4 | --iter 15 \
5 | --trial curve_7 \
6 | --pop_size 40 \
7 | --sigma 2.5 \
8 | --env folding \
9 | --Kb 400 \
10 | --mu 5.0 \
11 | --max_dist 0.001 \
12 | --reward_name compute_reward_7
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_following.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 5 \
3 | --tot_step 30 \
4 | --iter 10 \
5 | --trial following \
6 | --pop_size 40 \
7 | --sigma 2.5 \
8 | --env interact \
9 | --Kb 100 \
10 | --mu 5.0 \
11 | --dense 20000.0 \
12 | --reward_name compute_reward_1
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_forming.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 5 \
3 | --tot_step 50 \
4 | --iter 15 \
5 | --trial 1 \
6 | --pop_size 40 \
7 | --sigma 2.5 \
8 | --env forming \
9 | --Kb 200 \
10 | --mu 5.0 \
11 | --max_dist 0.001 \
12 | --target_dir ../data/forming_pos_save/cloth_pos.npy
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_lifting.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 5 \
3 | --tot_step 50 \
4 | --iter 15 \
5 | --trial 1 \
6 | --pop_size 40 \
7 | --sigma 1.0 \
8 | --env lifting \
9 | --Kb 100 \
10 | --mu 5.0 \
11 | --max_dist 0.001
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_pick_fold.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 10 \
3 | --tot_step 50 \
4 | --iter 30 \
5 | --trial 1 \
6 | --pop_size 40 \
7 | --sigma 2.5 \
8 | --env pick \
9 | --Kb 200 \
10 | --mu 5.0 \
11 | --max_dist 0.001 \
12 | --reward_name compute_reward_pick_fold
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_separate_soft.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 5 \
3 | --tot_step 30 \
4 | --iter 15 \
5 | --trial soft \
6 | --pop_size 40 \
7 | --sigma 2.5 \
8 | --env interact \
9 | --Kb 0.1 \
10 | --mu 5.0 \
11 | --dense 20000.0
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_separating.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 5 \
3 | --tot_step 30 \
4 | --iter 10 \
5 | --trial separate \
6 | --pop_size 40 \
7 | --sigma 2.0 \
8 | --env interact \
9 | --Kb 100 \
10 | --mu 5.0 \
11 | --dense 20000.0
--------------------------------------------------------------------------------
/code/scripts/run_cmaes_slide.sh:
--------------------------------------------------------------------------------
# CMA-ES parameter search for the sliding task, starting from the init_traj_slide trajectory.
python training/run_cmaes_parameter.py \
--tot_step 50 \
--iter 5 \
--trial 0 \
--pop_size 10 \
--sigma 0.1 \
--env slide \
--Kb 1000 \
--mu 1.0 \
--mu_cloth 0.001 \
--traj init_traj_slide

--------------------------------------------------------------------------------
/code/scripts/run_cmaes_throwing.sh:
--------------------------------------------------------------------------------
1 | python training/run_cmaes_all.py \
2 | --abs_step 5 \
3 | --tot_step 50 \
4 | --iter 10 \
5 | --trial 1 \
6 | --pop_size 40 \
7 | --sigma 2.5 \
8 | --env balancing \
9 | --Kb 100 \
10 | --mu 5.0 \
11 | --load_dir ../data/throwing_state \
12 | --max_dist 0.001 \
13 | --reward_name compute_reward_throwing
--------------------------------------------------------------------------------
/code/scripts/run_dp_bouncing.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_bouncing.py --l 0 --r 1 --iter 50 --tot_step 30 --lr 100000
--------------------------------------------------------------------------------
/code/scripts/run_dp_card.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_card.py --l 0 --r 1 --iter 50 --tot_step 80 --lr 20000 --Kb 1400
--------------------------------------------------------------------------------
/code/scripts/run_dp_slide.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_sliding.py --l 0 --r 1 --iter 50 --tot_step 50 --lr 0.0001 --mu 0.05
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_balancing.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_balancing.py --l 0 --r 1 --iter 400 --tot_step 50 --lr 0.00001
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_balancing_luisa.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_balancing.py --l 0 --r 1 --iter 400 --tot_step 50 --lr 0.00001 --render_option LuisaScript
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_folding.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_folding.py --l 12 --r 13 --iter 400 --tot_step 50 --lr 0.00003 --curve7 1 --curve8 -1
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_following.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_interact.py --l 0 --r 1 --iter 400 --tot_step 50 --lr 0.00001
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_forming.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_forming.py --l 0 --r 1 --tot_step 50 --iter 400 --lr 0.00001 --target_dir ../data/push_pos_save/cloth_pos.npy
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_lifting.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_lifting.py --l 0 --r 1 --iter 400 --tot_step 50 --lr 0.00001
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_pickfolding.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_pick_fold.py --l 0 --r 1 --iter 400 --tot_step 50 --lr 0.00001
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_separating.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_interact.py --l 0 --r 1 --iter 400 --tot_step 50 --lr 0.00001 --sep
--------------------------------------------------------------------------------
/code/scripts/run_trajopt_throwing.sh:
--------------------------------------------------------------------------------
1 | python training/trajopt_balancing.py --l 0 --r 1 --iter 500 --tot_step 50 --lr 0.00001 --throwing
--------------------------------------------------------------------------------
/code/task_scene/Scene_bouncing.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 |
3 | from ..engine.model_fold_offset import Cloth
4 | from ..engine.model_elastic_offset import Elastic
5 | from ..engine.model_elastic_tactile import Elastic as tactile
6 |
7 | from typing import List
8 | import taichi as ti
9 | import torch
10 | from dataclasses import dataclass
11 | import os
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | from ..engine import linalg
15 |
16 | from ..engine.sparse_solver import SparseMatrix
17 | from ..engine.BaseScene import BaseScene
18 |
vec3 = ti.types.vector(3, ti.f64)   # 3-component double-precision vector type
vec3i = ti.types.vector(3, ti.i32)  # 3-component integer vector type
21 |
@dataclass
class Body:
    """Index ranges of one body inside the scene's global vertex/face arrays."""
    v_start: ti.i32  # first vertex index
    v_end: ti.i32    # vertex range end -- TODO confirm inclusive vs exclusive
    f_start: ti.i32  # first face index
    f_end: ti.i32    # face range end
28 |
@ti.data_oriented
class Scene(BaseScene):
    """Bouncing task: a single cloth above a fully frozen elastic pad, under gravity."""

    def __init__(self, cloth_size=0.06):
        super(Scene, self).__init__(cloth_size=cloth_size, enable_gripper=False)
        self.gravity[None] = ti.Vector([0., 0., -9.8])
        self.cloths[0].k_angle[None] = 3.14

    def init_scene_parameters(self):
        # Discretization and contact constants specific to this task.
        self.dt = 2e-3
        self.h = self.dt
        self.cloth_cnt = 1
        self.elastic_cnt = 1
        self.elastic_size = [0.07]
        self.elastic_Nx = int(9)
        self.elastic_Ny = int(9)
        self.elastic_Nz = int(2)
        self.cloth_N = 15
        self.cloth_M = 15

        self.k_contact = 40000
        self.eps_contact = 0.0004
        self.eps_v = 0.01
        self.max_n_constraints = 10000
        self.damping = 1.0

    def init_all(self):
        """Place objects, build per-vertex properties, then freeze/force/visual setup."""
        self.init()
        self.init_property()
        self.set_frozen()
        self.set_ext_force()
        self.update_visual()
        # print(self.cloths[0].mass)
        # print(self.elastics[0].F_m[0])

    def init_objects(self):
        rho = 4e1

        self.cloths.append(Cloth(self.cloth_N, self.dt, self.cloth_size, self.tot_NV, rho, 0))

        # Elastic vertices are appended after all cloth vertices in the global arrays.
        self.elastic_offset = (self.cloth_N + 1) * (self.cloth_M + 1)
        tmp_tot = self.elastic_offset

        self.elastics.append(Elastic(self.dt, self.elastic_size[0], tmp_tot, self.elastic_Nx, self.elastic_Ny, self.elastic_Nz))
        tmp_tot += self.elastic_Nx * self.elastic_Ny * self.elastic_Nz

        for i in range(1, self.elastic_cnt):
            self.elastics.append(tactile(self.dt, tmp_tot, self.elastic_size[i] / 0.03))
            tmp_tot += self.elastics[i].n_verts

        self.tot_NV = tmp_tot

    def init(self):
        self.cloths[0].init(-0.03, -0.03, 0.00039)
        self.elastics[0].init(-0.035, -0.035, -0.00875)
        self.cloths[0].init_ref_angle_bridge()

    def reset_pos(self):
        # BUGFIX: the cloth height was 0.0039 here while init() uses 0.00039,
        # so resetting did not restore the initial configuration.
        self.cloths[0].init(-0.03, -0.03, 0.00039)
        self.elastics[0].init(-0.035, -0.035, -0.00875)
        self.cloths[0].init_ref_angle_bridge()

    def contact_analysis(self):
        """Rebuild the contact-pair list between every cloth and every elastic body."""
        self.nc[None] = 0
        for i in range(self.cloth_cnt):
            for j in range(self.elastic_cnt):
                mu = self.mu_cloth_elastic[None]
                self.contact_pair_analysis(self.elastics[j].body_idx, self.cloths[i].offset,
                                           self.cloths[i].offset + self.cloths[i].NV, mu)

    @ti.kernel
    def set_frozen_kernel(self):
        # Freeze all 3 DOFs of every vertex of the ground pad.
        for i in range(self.elastics[0].n_verts):
            xx = self.elastics[0].offset + i
            self.frozen[xx * 3] = 1
            self.frozen[xx * 3 + 1] = 1
            self.frozen[xx * 3 + 2] = 1

    @ti.kernel
    def compute_reward(self) -> ti.f64:
        # Sum of z over cloth rows 5 and 10 (row index = i // (M + 1)):
        # rewards lifting those two strips of the cloth.
        ret = 0.0
        for i in range(self.cloths[0].NV):
            if ti.cast(i / (self.cloths[0].M + 1), ti.i32) == 5 or ti.cast(i / (self.cloths[0].M + 1), ti.i32) == 10:
                ret += self.cloths[0].pos[i].z

        return ret

    def timestep_finish(self):
        self.update_vel()
        self.push_down_vel()
        self.update_ref_angle()
        self.update_visual()

    @ti.kernel
    def get_colors(self, colors: ti.template()):
        colors.fill(0)
        for i in range(self.cloths[0].NV):
            colors[i + self.cloths[0].offset] = ti.Vector([1, 1, 1])
--------------------------------------------------------------------------------
/code/task_scene/Scene_card.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 |
3 | from ..engine.model_fold_offset import Cloth
4 | from ..engine.model_elastic_offset import Elastic
5 | from ..engine.model_elastic_tactile import Elastic as tactile
6 |
7 | from typing import List
8 | import taichi as ti
9 | import torch
10 | from dataclasses import dataclass
11 | import os
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 |
15 | from ..engine.sparse_solver import SparseMatrix
16 | from ..engine.BaseScene import BaseScene
17 |
vec3 = ti.types.vector(3, ti.f64)   # 3-component double-precision vector type
vec3i = ti.types.vector(3, ti.i32)  # 3-component integer vector type
20 |
@dataclass
class Body:
    """Index ranges of one body inside the scene's global vertex/face arrays."""
    v_start: ti.i32  # first vertex index
    v_end: ti.i32    # vertex range end -- TODO confirm inclusive vs exclusive
    f_start: ti.i32  # first face index
    f_end: ti.i32    # face range end
27 |
@ti.data_oriented
class Scene(BaseScene):
    """Card task: three stacked cloth 'cards' above a frozen pad, manipulated by
    two side grippers and one top gripper (tactile elastics 1-3). Gravity is off."""

    def __init__(self, cloth_size=0.06):
        super(Scene, self).__init__(cloth_size=cloth_size, enable_gripper=False)
        self.gravity[None] = ti.Vector([0., 0., 0.])
        self.cloths[0].k_angle[None] = 3.14

    def init_scene_parameters(self):
        self.dt = 5e-3
        self.h = self.dt
        self.cloth_cnt = 3
        self.elastic_cnt = 4
        self.elastic_size = [0.07, 0.015, 0.015, 0.015]
        self.elastic_Nx = int(9)
        self.elastic_Ny = int(9)
        self.elastic_Nz = int(2)
        self.cloth_N = 12
        self.cloth_M = 8

        self.k_contact = 20000
        self.eps_contact = 0.0004
        self.eps_v = 0.01
        self.max_n_constraints = 10000
        self.damping = 0.95

    def init_all(self):
        """Place objects, build per-vertex properties, then freeze/force/visual setup."""
        self.init()
        self.init_property()
        self.set_frozen()
        self.set_ext_force()
        self.update_visual()
        # print(self.cloths[0].mass)
        # print(self.elastics[0].F_m[0])

    def init_objects(self):
        rho = 4e1
        for i in range(self.cloth_cnt):
            self.cloths.append(Cloth(self.cloth_N, self.dt, self.cloth_size, self.tot_NV, rho, i * (self.cloth_N + 1) * (self.cloth_M + 1), False, self.cloth_M))

        # Elastic vertices are appended after all cloth vertices in the global arrays.
        self.elastic_offset = (self.cloth_N + 1) * (self.cloth_M + 1) * self.cloth_cnt
        tmp_tot = self.elastic_offset

        self.elastics.append(Elastic(self.dt, self.elastic_size[0], tmp_tot, self.elastic_Nx, self.elastic_Ny, self.elastic_Nz))
        tmp_tot += self.elastic_Nx * self.elastic_Ny * self.elastic_Nz

        for i in range(1, self.elastic_cnt):
            self.elastics.append(tactile(self.dt, tmp_tot, self.elastic_size[i] / 0.03))
            tmp_tot += self.elastics[i].n_verts

        self.tot_NV = tmp_tot

    def init(self):
        # Three cards stacked 0.4 mm apart above the frozen pad.
        self.cloths[0].init(-0.02, -0.02, 0.01)
        self.cloths[1].init(-0.02, -0.02, 0.0104)
        self.cloths[2].init(-0.02, -0.02, 0.0108)
        self.elastics[0].init(-0.025, -0.025, -0.00875)
        self.elastics[1].init(-0.0285, 0.0, 0.01, False)
        self.elastics[2].init(0.0485, 0.0, 0.01, False)
        self.elastics[3].init(0.01, 0.0, 0.0185, True)
        pos = np.array([[-0.0285, 0.0, 0.01], [0.0485, 0.0, 0.01], [0.01, 0.0, 0.0185]])
        self.gripper.init(self, pos)
        # Side grippers are rotated +/-90 degrees about the y axis (quaternions).
        self.gripper.rot[0] = ti.Vector([ti.sqrt(2) * 0.5, 0, ti.sqrt(2) * 0.5, 0])
        self.gripper.rot[1] = ti.Vector([ti.sqrt(2) * 0.5, 0, -ti.sqrt(2) * 0.5, 0])
        self.gripper.get_rotmat()
        self.gripper.get_vert_pos()
        self.gripper.update_all(self)

    def reset_pos(self):
        # The reset configuration is exactly the initial placement; delegate to
        # init() instead of keeping a duplicated, drift-prone copy of it.
        self.init()

    def contact_analysis(self):
        """Rebuild contact pairs: adjacent cards against each other, and every card
        against every elastic body (higher friction for the upper cards)."""
        self.nc[None] = 0
        for i in range(self.cloth_cnt):
            for j in range(self.cloth_cnt):
                mu = 0.1
                if abs(i - j) == 1:  # TODO: contact relationship
                    self.contact_pair_analysis(self.cloths[i].body_idx, self.cloths[j].offset,
                                               self.cloths[j].offset + self.cloths[j].NV, mu)
                    self.contact_pair_analysis(self.cloths[j].body_idx, self.cloths[i].offset,
                                               self.cloths[i].offset + self.cloths[i].NV, mu)
        for i in range(self.cloth_cnt):
            for j in range(self.elastic_cnt):

                mu = self.mu_cloth_elastic[None]
                if i != 0:
                    mu *= 10
                self.contact_pair_analysis(self.elastics[j].body_idx, self.cloths[i].offset,
                                           self.cloths[i].offset + self.cloths[i].NV, mu)

    @ti.kernel
    def set_frozen_kernel(self):
        # Freeze the whole base pad plus the mounting faces of the three grippers.
        for i in range(self.elastics[0].n_verts):
            xx = self.elastics[0].offset + i
            self.frozen[xx * 3] = 1
            self.frozen[xx * 3 + 1] = 1
            self.frozen[xx * 3 + 2] = 1
        for i in range(self.elastics[1].n_verts):
            if self.elastics[1].is_bottom(i) or self.elastics[1].is_inner_circle(i):
                xx = self.elastics[1].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1
        for i in range(self.elastics[2].n_verts):
            if self.elastics[2].is_bottom(i) or self.elastics[2].is_inner_circle(i):
                xx = self.elastics[2].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1
        for i in range(self.elastics[3].n_verts):
            if self.elastics[3].is_bottom(i) or self.elastics[3].is_inner_circle(i):
                xx = self.elastics[3].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1

    @ti.kernel
    def compute_reward(self) -> ti.f64:
        # Reward moving the bottom card in the -x direction.
        ret = 0.0
        for i in range(self.cloths[0].NV):
            ret -= self.cloths[0].pos[i].x
        return ret

    def action(self, step, delta_pos, delta_rot):
        """Apply a gripper delta, warn on mesh inversion, and push new gripper
        positions into the global position array."""
        # self.gripper.print_plot()
        self.gripper.step_simple(delta_pos, delta_rot)
        # self.gripper.print_plot()
        self.gripper.update_bound(self)
        a1 = self.elastics[1].check_determinant()
        a2 = self.elastics[2].check_determinant()
        if not (a1 and a2):
            print("penetrate!!!!")
        # self.gripper.print_plot()
        self.pushup_property(self.pos, self.elastics[1].F_x, self.elastics[1].offset)
        self.pushup_property(self.pos, self.elastics[2].F_x, self.elastics[2].offset)

    def timestep_finish(self):
        self.update_vel()
        self.push_down_vel()
        self.update_ref_angle()
        self.update_visual()

    def get_paramters_grad(self):
        # Gather d(reward)/d(Kb) contributions from every cloth into the global buffer.
        for i in range(self.cloth_cnt):
            self.cloths[i].compute_deri_Kb()
            self.pushup_property(self.d_kb, self.cloths[i].d_kb, self.cloths[i].offset)

    @ti.kernel
    def get_colors(self, colors: ti.template()):
        colors.fill(0)
        for i in range(self.cloths[0].NV):
            colors[i + self.cloths[0].offset] = ti.Vector([1, 1, 1])
        for i in range(self.cloths[1].NV):
            colors[i + self.cloths[1].offset] = ti.Vector([0.23, 0.66, 0.9])
        for i in range(self.cloths[2].NV):
            colors[i + self.cloths[2].offset] = ti.Vector([0.33, 0.33, 0.33])
        for i in range(self.elastics[1].n_verts):
            colors[i + self.elastics[1].offset] = ti.Vector([0.22, 0.72, 0.52])  # Agent1 Color
        for i in range(self.elastics[2].n_verts):
            colors[i + self.elastics[2].offset] = ti.Vector([1, 0.334, 0.52])  # Agent2 Color
201 |
202 |
--------------------------------------------------------------------------------
/code/task_scene/Scene_forming.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 |
3 | from ..engine.model_fold_offset import Cloth
4 | from ..engine.model_elastic_offset import Elastic
5 | from ..engine.model_elastic_tactile import Elastic as tactile
6 |
7 | import taichi as ti
8 | from dataclasses import dataclass
9 | import numpy as np
10 | from ..engine.BaseScene import BaseScene
11 |
vec3 = ti.types.vector(3, ti.f64)   # 3-component double-precision vector type
vec3i = ti.types.vector(3, ti.i32)  # 3-component integer vector type
14 |
@dataclass
class Body:
    """Index ranges of one body inside the scene's global vertex/face arrays."""
    v_start: ti.i32  # first vertex index
    v_end: ti.i32    # vertex range end -- TODO confirm inclusive vs exclusive
    f_start: ti.i32  # first face index
    f_end: ti.i32    # face range end
21 |
@ti.data_oriented
class Scene(BaseScene):
    """Forming task: a pre-folded cloth on a frozen pad is pushed by one tactile
    gripper toward a target vertex configuration (gravity disabled)."""

    def __init__(self, cloth_size=0.06):
        super(Scene, self).__init__(cloth_size=cloth_size, enable_gripper=False)
        self.gravity[None] = ti.Vector([0., 0., 0.])
        print("tot nv", self.tot_NV)

    def init_scene_parameters(self):
        # Discretization and contact constants specific to this task.
        self.dt = 5e-3
        self.h = self.dt
        self.cloth_cnt = 1
        self.elastic_cnt = 2
        self.elastic_size = [0.07, 0.015]
        self.elastic_Nx = int(9)
        self.elastic_Ny = int(9)
        self.elastic_Nz = int(2)
        self.cloth_N = 15
        self.cloth_M = 7

        self.k_contact = 20000
        self.eps_contact = 0.0004
        self.eps_v = 0.01
        self.max_n_constraints = 10000
        self.damping = 1.0

    def init_all(self):
        # Place objects, build per-vertex properties, then freeze/force/visual setup.
        self.init()
        self.init_property()
        self.set_frozen()
        self.set_ext_force()
        self.update_visual()
        # print(self.cloths[0].mass)
        # print(self.elastics[0].F_m[0])

    def init_objects(self):
        rho = 4e1

        self.cloths.append(Cloth(self.cloth_N, self.dt, self.cloth_size, self.tot_NV, rho, 0, False, self.cloth_M))

        # Elastic vertices are appended after all cloth vertices in the global arrays.
        self.elastic_offset = (self.cloth_N + 1) * (self.cloth_M + 1)
        tmp_tot = self.elastic_offset

        self.elastics.append(Elastic(self.dt, self.elastic_size[0], tmp_tot, self.elastic_Nx, self.elastic_Ny, self.elastic_Nz))
        tmp_tot += self.elastic_Nx * self.elastic_Ny * self.elastic_Nz

        for i in range(1, self.elastic_cnt):
            self.elastics.append(tactile(self.dt, tmp_tot, self.elastic_size[i] / 0.03))
            tmp_tot += self.elastics[i].n_verts

        self.tot_NV = tmp_tot

    def init(self):
        half_curve_num = 3
        self.cloths[0].init_fold(-0.07, -0.02, 0.00035, half_curve_num)
        self.elastics[0].init(-0.035, -0.035, -0.00875)
        # r: fold curve radius; x: gripper x placed just behind the fold.
        # NOTE(review): the 0.86 / 0.01 / 0.00785 constants appear hand-tuned to
        # init_fold()'s geometry -- confirm against the Cloth model before changing.
        r = self.cloths[0].grid_len * (half_curve_num * 2 - 1) / 3.1415
        x = -0.07 + (7 + half_curve_num) / 16 * 0.1 - r * 0.86 + 0.01
        self.elastics[1].init(x, 0.0, 2 * r + 0.00785, True)
        print("pos:", x, 2 * r + 0.00785)
        pos = np.array([[x, 0.0, 2 * r + 0.00785]])
        self.gripper.init(self, pos)

    def reset_pos(self):
        # Same placement as init(), minus the debug print.
        half_curve_num = 3
        self.cloths[0].init_fold(-0.07, -0.02, 0.00035, half_curve_num)
        self.elastics[0].init(-0.035, -0.035, -0.00875)
        r = self.cloths[0].grid_len * (half_curve_num * 2 - 1) / 3.1415
        x = -0.07 + (7 + half_curve_num) / 16 * 0.1 - r * 0.86 + 0.01
        self.elastics[1].init(x, 0.0, 2 * r + 0.00785, True)
        pos = np.array([[x, 0.0, 2 * r + 0.00785]])
        self.gripper.init(self, pos)

    def contact_analysis(self):
        # Rebuild contact pairs in both directions between the cloth and each elastic.
        self.nc[None] = 0
        for i in range(self.cloth_cnt):
            for j in range(self.elastic_cnt):

                mu = self.mu_cloth_elastic[None]
                self.contact_pair_analysis(self.cloths[i].body_idx, self.elastics[j].offset,
                                           self.elastics[j].offset + self.elastics[j].n_verts, mu)
                self.contact_pair_analysis(self.elastics[j].body_idx, self.cloths[i].offset,
                                           self.cloths[i].offset + self.cloths[i].NV, mu)

    @ti.kernel
    def set_frozen_kernel(self):
        # Freeze the base pad, the gripper mounting faces, and the cloth's far edge row.
        for i in range(self.elastics[0].n_verts):
            xx = self.elastics[0].offset + i
            self.frozen[xx * 3] = 1
            self.frozen[xx * 3 + 1] = 1
            self.frozen[xx * 3 + 2] = 1
        for i in range(self.elastics[1].n_verts):
            if self.elastics[1].is_bottom(i) or self.elastics[1].is_inner_circle(i):
                xx = self.elastics[1].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1
        for i in range(self.cloths[0].M + 1):
            xx = self.cloths[0].offset + self.cloths[0].N * (self.cloths[0].M + 1) + i
            self.frozen[xx * 3] = 1
            self.frozen[xx * 3 + 1] = 1
            self.frozen[xx * 3 + 2] = 1

    @ti.kernel
    def compute_reward(self, target_pos: ti.types.ndarray()) -> ti.f64:
        # Negative squared distance between current and target cloth vertex positions.
        ret = 0.0
        for j, k in ti.ndrange(self.cloths[0].NV, 3):
            ret -= (self.cloths[0].pos[j][k] - target_pos[j, k]) ** 2
        # ret -= (analy_grad.pos_buffer[analy_grad.tot_timestep - 2, i + self.cloths[0].offset, j] - target_pos[
        #     i, j]) ** 2
        return ret

    def action(self, step, delta_pos, delta_rot):
        # Apply a gripper delta and push the new gripper positions into self.pos.
        # self.gripper.print_plot()
        self.gripper.step_simple(delta_pos, delta_rot)
        # self.gripper.print_plot()
        self.gripper.update_bound(self)
        self.pushup_property(self.pos, self.elastics[1].F_x, self.elastics[1].offset)

    def timestep_finish(self):
        self.update_vel()
        self.push_down_vel()
        self.update_ref_angle()
        self.update_visual()

    @ti.kernel
    def get_colors(self, colors: ti.template()):
        colors.fill(0)
        for i in range(self.cloths[0].NV):
            colors[i + self.cloths[0].offset] = ti.Vector([1, 1, 1])
        # for i in range(self.cloths[1].NV):
        #     colors[i + self.cloths[1].offset] = ti.Vector([0.5, 0.5, 0.5])
        for i in range(self.elastics[1].n_verts):
            colors[i + self.elastics[1].offset] = ti.Vector([0.22, 0.72, 0.52])  # Agent1 Color
--------------------------------------------------------------------------------
/code/task_scene/Scene_interact.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 |
3 | from ..engine.model_fold_offset import Cloth
4 | from ..engine.model_elastic_offset import Elastic
5 | from ..engine.model_elastic_tactile import Elastic as tactile
6 |
7 | from typing import List
8 | import taichi as ti
9 | import torch
10 | from dataclasses import dataclass
11 | import os
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | from ..engine.BaseScene import BaseScene
15 |
vec3 = ti.types.vector(3, ti.f64)   # 3-component double-precision vector type
vec3i = ti.types.vector(3, ti.i32)  # 3-component integer vector type
18 |
@dataclass
class Body:
    """Index ranges of one body inside the scene's global vertex/face arrays."""
    v_start: ti.i32  # first vertex index
    v_end: ti.i32    # vertex range end -- TODO confirm inclusive vs exclusive
    f_start: ti.i32  # first face index
    f_end: ti.i32    # face range end
25 |
@ti.data_oriented
class Scene(BaseScene):
    """Interact task (following/separating): a cloth and a small movable block on a
    frozen pad, manipulated by a two-fingered gripper (elastics 1 and 2)."""

    def __init__(self, cloth_size=0.06, device="cuda:0", soft=False, dense=10000.0):
        # `dense` is the density of the extra block; `soft` selects its soft variant.
        self.dense = dense
        self.soft = soft
        super(Scene, self).__init__(cloth_size=cloth_size, enable_gripper=True, device=device)
        self.gravity[None] = ti.Vector([0., 0., -9.8])
        self.cloths[0].k_angle[None] = 3.14

    def init_scene_parameters(self):

        self.dt = 5e-3
        self.h = self.dt
        self.cloth_cnt = 1
        self.elastic_cnt = 4
        self.elastic_size = [0.06, 0.015, 0.015, 0.012]
        self.elastic_Nx = int(16)
        self.elastic_Ny = int(16)
        self.elastic_Nz = int(2)
        self.cloth_N = 15
        self.extra_obj = True
        self.effector_cnt = 3

        self.k_contact = 30000
        self.eps_contact = 0.0004
        self.eps_v = 0.01
        self.max_n_constraints = 10000
        self.damping = 1.0

    def init_objects(self):
        rho = 4e1
        for i in range(self.cloth_cnt):
            self.cloths.append(Cloth(self.cloth_N, self.dt, self.cloth_size, self.tot_NV, rho, i * ((self.cloth_N + 1)**2)))

        # Elastic vertices are appended after all cloth vertices in the global arrays.
        self.elastic_offset = ((self.cloth_N + 1)**2) * self.cloth_cnt
        tmp_tot = self.elastic_offset

        self.elastics.append(Elastic(self.dt, self.elastic_size[0], tmp_tot, self.elastic_Nx, self.elastic_Ny, self.elastic_Nz))
        tmp_tot += self.elastic_Nx * self.elastic_Ny * self.elastic_Nz

        for i in range(1, self.elastic_cnt - 1):
            self.elastics.append(tactile(self.dt, tmp_tot, self.elastic_size[i] / 0.03))
            tmp_tot += self.elastics[i].n_verts

        # NOTE: the original `if self.soft: ... else: ...` had byte-identical
        # branches, so it is collapsed here; the soft/rigid difference presumably
        # comes from `dense` and the task's Kb setting -- TODO confirm.
        self.elastics.append(
            Elastic(self.dt, self.elastic_size[3], tmp_tot, 6, 6, 4, density=self.dense))
        tmp_tot += 6 * 6 * 4

        self.tot_NV = tmp_tot

    def init_all(self):
        """Place objects, build per-vertex properties, then freeze/force/visual setup."""
        self.init()
        self.init_property()
        self.set_frozen()
        self.set_ext_force()
        self.update_visual()
        # print(self.cloths[0].mass)
        # print(self.elastics[0].F_m[0])

    def init(self):
        self.cloths[0].init(-0.045, -0.03, 0.0004)
        self.elastics[0].init(-0.03, -0.03, -0.004)
        self.elastics[1].init(-0.04, 0., 0.0083, True)
        self.elastics[2].init(-0.04, 0., -0.0075, False)
        self.elastics[3].init(0.001, -0.006, 0.0008)
        pos = np.array([[-0.04, 0., 0.0004]])
        self.gripper.init(self, pos)

    def reset_pos(self):
        # Restore the initial placement (same coordinates as init()).
        self.cloths[0].init(-0.045, -0.03, 0.0004)
        self.elastics[0].init(-0.03, -0.03, -0.004)
        self.elastics[1].init(-0.04, 0., 0.0083, True)
        self.elastics[2].init(-0.04, 0., -0.0075, False)
        self.elastics[3].init(0.001, -0.006, 0.0008)
        pos = np.array([[-0.04, 0., 0.0004]])
        self.gripper.init(self, pos)

    def contact_analysis(self):
        """Rebuild contact pairs: cloth vs every elastic (lower friction against the
        pad and the block), plus block vs pad."""
        self.nc[None] = 0

        for i in range(self.cloth_cnt):
            for j in range(self.elastic_cnt):
                mu = self.mu_cloth_elastic[None]
                if j == 0 or j == 3:
                    mu = 0.2
                self.contact_pair_analysis(self.cloths[i].body_idx, self.elastics[j].offset,
                                           self.elastics[j].offset + self.elastics[j].n_verts, mu)
                self.contact_pair_analysis(self.elastics[j].body_idx, self.cloths[i].offset,
                                           self.cloths[i].offset + self.cloths[i].NV, mu)

        self.contact_pair_analysis(self.elastics[0].body_idx, self.elastics[3].offset,
                                   self.elastics[3].offset + self.elastics[3].n_verts, 0.1)
        self.contact_pair_analysis(self.elastics[3].body_idx, self.elastics[0].offset,
                                   self.elastics[0].offset + self.elastics[0].n_verts, 0.1)

    @ti.kernel
    def set_frozen_kernel(self):
        # Freeze the whole pad plus the mounting faces of both gripper fingers.
        for i in range(self.elastics[0].n_verts):
            xx = self.elastics[0].offset + i
            self.frozen[xx * 3] = 1
            self.frozen[xx * 3 + 1] = 1
            self.frozen[xx * 3 + 2] = 1
        for i in range(self.elastics[1].n_verts):
            if self.elastics[1].is_bottom(i) or self.elastics[1].is_inner_circle(i):
                xx = self.elastics[1].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1

        for i in range(self.elastics[2].n_verts):
            if self.elastics[2].is_bottom(i) or self.elastics[2].is_inner_circle(i):
                xx = self.elastics[2].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1

    @ti.kernel
    def compute_reward(self) -> ti.f64:
        # Separating objective: cloth moves -x while the block stays/moves +x
        # (block term rescaled by the vertex-count ratio 256/144).
        ret = 0.0
        for i in self.cloths[0].pos:
            ret = ret - self.cloths[0].pos[i].x
        for i in self.elastics[3].F_x:
            ret = ret + self.elastics[3].F_x[i].x * 256.0 / 144.0
        return ret

    @ti.kernel
    def compute_reward_1(self) -> ti.f64:
        # Following objective: move the block in the -x direction.
        ret = 0.0
        for i in self.elastics[3].F_x:
            ret = ret - self.elastics[3].F_x[i].x
        return ret

    def action(self, step, delta_pos, delta_rot):
        """Apply one gripper action; the first 5 steps also close the fingers."""
        # self.gripper.print_plot()
        if step < 5:
            self.gripper.step(delta_pos, delta_rot, ti.Vector([-0.0006]))
        else:
            self.gripper.step_simple(delta_pos, delta_rot)
        # self.gripper.print_plot()
        self.gripper.update_bound(self)
        # a1 = self.elastics[1].check_determinant()
        # a2 = self.elastics[2].check_determinant()
        # if not (a1 and a2):
        #     print("penetrate!!!!")
        # self.gripper.print_plot()
        self.pushup_property(self.pos, self.elastics[1].F_x, self.elastics[1].offset)
        self.pushup_property(self.pos, self.elastics[2].F_x, self.elastics[2].offset)

    def timestep_finish(self):
        self.update_vel()
        self.push_down_vel()
        self.update_ref_angle()
        self.update_visual()

    @ti.kernel
    def get_colors(self, colors: ti.template()):
        colors.fill(0)
        for i in range(self.cloths[0].NV):
            colors[i + self.cloths[0].offset] = ti.Vector([1, 1, 1])
        for i in range(self.elastics[1].n_verts):
            colors[i + self.elastics[1].offset] = ti.Vector([0.22, 0.72, 0.52])  # Agent1 Color
        for i in range(self.elastics[2].n_verts):
            colors[i + self.elastics[2].offset] = ti.Vector([1, 0.334, 0.52])  # Agent1 Color

        for i in range(self.elastics[3].n_verts):
            colors[i + self.elastics[3].offset] = ti.Vector([1, 0.334, 0.52])

    def time_step(self, f_contact, frame_idx, force_stick=True):
        """One implicit step: contact analysis, then damped Newton iterations until
        the residual is small, the Hessian loses positive-definiteness, or 50 iters."""
        self.timestep_init()

        self.calc_vn()
        f_contact(self)
        self.contact_analysis()

        # (dead local `temp_delta` from the original removed: assigned, never read)
        iter = 0
        delta = 1e5
        PD = True
        while iter < 50:
            iter += 1

            self.newton_step_init()
            # self.calc_vn()
            # self.contact_analysis()
            # split energy and residual!!!!
            self.compute_energy()
            PD = self.compute_residual_and_Hessian(False, iter, spd=True)
            if not PD:
                break
            # if iter > 20:
            #     alpha = "GD"
            #     delta = self.calc_f_norm(self.F) * 0.01
            #     self.gradient_step(delta)
            # else:
            delta, alpha = self.newton_step(iter)
            # if iter > 70:
            #     print(f"iter: {iter}, delta: {delta}, step size: {alpha}, energy {self.E[None]}")
            if delta < 1e-7:
                break
        # print(f'pass {frame_idx}, iter:', iter, "delta:", delta)

        self.timestep_finish()
237 |
238 |
--------------------------------------------------------------------------------
/code/task_scene/Scene_lifting.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 |
3 | from ..engine.model_fold_offset import Cloth
4 | from ..engine.model_elastic_offset import Elastic
5 |
6 | from typing import List
7 | import taichi as ti
8 | import torch
9 | from dataclasses import dataclass
10 | from ..engine.model_elastic_tactile import Elastic as tactile
11 | import os
12 | import numpy as np
13 | from ..engine.BaseScene import BaseScene
14 |
# Shorthand Taichi vector types used in this module.
vec3 = ti.types.vector(3, ti.f64)   # 3D double-precision vector
vec3i = ti.types.vector(3, ti.i32)  # 3D integer vector
17 |
@dataclass
class Body:
    """Index range of one body's vertices and faces within the packed
    global scene arrays.

    NOTE(review): whether the end bounds are inclusive or one-past-last is
    not visible here — confirm against BaseScene usage.
    """
    v_start: ti.i32  # first vertex index
    v_end: ti.i32    # last / one-past-last vertex index
    f_start: ti.i32  # first face index
    f_end: ti.i32    # last / one-past-last face index
24 |
@ti.data_oriented
class Scene(BaseScene):
    """Lifting task scene: one cloth sheet, one small elastic block
    (elastics[0], the object being lifted) and three tactile effectors
    (elastics[1..3]) driven by a shared gripper."""

    def __init__(self, cloth_size=0.06, device="cuda:0"):
        super(Scene, self).__init__(cloth_size=cloth_size, enable_gripper=False, device=device)
        # Bending-angle stiffness for the cloth hinge energy.
        self.cloths[0].k_angle[None] = 3.14
        print("tot node:", self.tot_NV)

    def init_scene_parameters(self):
        """Set time step, body counts/resolutions and contact constants."""
        self.dt = 5e-3
        self.h = self.dt
        self.cloth_cnt = 1
        self.elastic_cnt = 4
        # Sizes: [0] = lifted block, [1..3] = tactile effectors.
        self.elastic_size = [0.007, 0.015, 0.015, 0.015]
        self.elastic_Nx = int(5)
        self.elastic_Ny = int(5)
        self.elastic_Nz = int(5)
        self.cloth_N = 15

        self.k_contact = 500
        self.eps_contact = 0.0004
        self.eps_v = 0.01
        self.max_n_constraints = 10000
        self.damping = 1.0

    def init_objects(self):
        """Instantiate cloth and elastic bodies, assigning each a vertex
        offset into the packed global arrays; updates ``self.tot_NV``."""
        rho = 4e1
        for i in range(self.cloth_cnt):
            self.cloths.append(Cloth(self.cloth_N, self.dt, self.cloth_size, self.tot_NV, rho, i * ((self.cloth_N + 1)**2)))

        self.elastic_offset = ((self.cloth_N + 1)**2) * self.cloth_cnt
        tmp_tot = self.elastic_offset

        self.elastics.append(Elastic(self.dt, self.elastic_size[0], tmp_tot, self.elastic_Nx, self.elastic_Ny, self.elastic_Nz, 20000.0))
        tmp_tot += self.elastic_Nx * self.elastic_Ny * self.elastic_Nz

        for i in range(1, self.elastic_cnt):
            self.elastics.append(tactile(self.dt, tmp_tot, self.elastic_size[i] / 0.03))
            tmp_tot += self.elastics[i].n_verts

        self.tot_NV = tmp_tot

    def init_all(self):
        """Full scene initialization: poses, fields, frozen DOFs, forces."""
        self.init()
        self.init_property()
        self.set_frozen()
        self.set_ext_force()
        self.update_visual()
        # print(self.cloths[0].mass)
        # print(self.elastics[0].F_m[0])

    def init(self):
        """Place all bodies at their start poses and initialize the gripper
        with the three effector anchor positions."""
        self.cloths[0].init(-0.03, -0.03, 0.)
        self.elastics[0].init(-0.025, -0.005, 0.0003)
        self.elastics[1].init(0.01, 0., 0.0079, True)
        self.elastics[2].init(0., -0.015, -0.0079, False)
        self.elastics[3].init(0., 0.015, -0.0079, False)
        pos = np.array([[0.01, 0., 0.0079], [0., -0.015, -0.0079], [0., 0.015, -0.0079]])
        self.gripper.init(self, pos)

    def init_property(self):
        """Push per-body positions/velocities into the global fields and set
        gravity; only the lifted block (elastics[0]) feels gravity here."""
        for i in range(self.cloth_cnt):
            self.pushup_property(self.pos, self.cloths[i].pos, self.cloths[i].offset)
            self.pushup_property(self.vel, self.cloths[i].vel, self.cloths[i].offset)
            self.cloths[i].gravity[None] = ti.Vector([0., 0., 0.])

        self.pushup_property(self.pos, self.elastics[0].F_x, self.elastics[0].offset)
        self.pushup_property(self.vel, self.elastics[0].F_v, self.elastics[0].offset)
        self.elastics[0].gravity[None] = self.gravity[None]

        for i in range(1, self.elastic_cnt):
            self.pushup_property(self.pos, self.elastics[i].F_x, self.elastics[i].offset)
            self.pushup_property(self.vel, self.elastics[i].F_v, self.elastics[i].offset)
            self.elastics[i].gravity[None] = ti.Vector([0., 0., 0.])
        self.init_mass()
        self.init_faces()
        self.build_f_vis()

    def reset_pos(self):
        """Restore the initial poses (same layout as ``init``)."""
        self.cloths[0].init(-0.03, -0.03, 0.)
        self.elastics[0].init(-0.025, -0.005, 0.0003)
        self.elastics[1].init(0.01, 0., 0.0079, True)
        self.elastics[2].init(0., -0.015, -0.0079, False)
        self.elastics[3].init(0., 0.015, -0.0079, False)
        pos = np.array([[0.01, 0., 0.0079], [0., -0.015, -0.0079], [0., 0.015, -0.0079]])
        self.gripper.init(self, pos)

    def contact_analysis(self):
        """Collect contact constraint pairs into the global constraint set:
        adjacent cloth layers against each other, and every cloth/elastic
        pair in both directions with friction ``mu_cloth_elastic``."""
        self.nc[None] = 0
        for i in range(self.cloth_cnt):
            for j in range(self.cloth_cnt):
                if abs(i - j) == 1:  # TODO: contact relationship
                    self.contact_pair_analysis(self.cloths[i].body_idx, self.cloths[j].offset, self.cloths[j].offset + self.cloths[j].NV, 0.05)
                    self.contact_pair_analysis(self.cloths[j].body_idx, self.cloths[i].offset,
                                               self.cloths[i].offset + self.cloths[i].NV, 0.05)
        for i in range(self.cloth_cnt):
            for j in range(self.elastic_cnt):

                mu = self.mu_cloth_elastic[None]
                self.contact_pair_analysis(self.cloths[i].body_idx, self.elastics[j].offset,
                                           self.elastics[j].offset + self.elastics[j].n_verts, mu)
                self.contact_pair_analysis(self.elastics[j].body_idx, self.cloths[i].offset,
                                           self.cloths[i].offset + self.cloths[i].NV, mu)
        print("tot contact:", self.nc[None])  # NOTE(review): debug print on every step

    @ti.kernel
    def set_frozen_kernel(self):
        """Freeze all 3 DOFs of the bottom / inner-circle vertices of the
        three effectors so they only move with the gripper."""
        for i in range(self.elastics[1].n_verts):
            if self.elastics[1].is_bottom(i) or self.elastics[1].is_inner_circle(i):
                xx = self.elastics[1].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1
        for i in range(self.elastics[2].n_verts):
            if self.elastics[2].is_bottom(i) or self.elastics[2].is_inner_circle(i):
                xx = self.elastics[2].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1
        for i in range(self.elastics[3].n_verts):
            if self.elastics[3].is_bottom(i) or self.elastics[3].is_inner_circle(i):
                xx = self.elastics[3].offset + i
                self.frozen[xx * 3] = 1
                self.frozen[xx * 3 + 1] = 1
                self.frozen[xx * 3 + 2] = 1

    @ti.kernel
    def compute_reward(self) -> ti.f64:
        """Reward: negative squared deviation of every vertex of the lifted
        block from a target displacement relative to its rest position
        (the hard-coded offsets presumably encode the goal pose — confirm
        against the task definition)."""
        ret = 0.0
        for i in range(self.elastics[0].n_verts):
            ret -= (self.elastics[0].F_x[i].x - self.elastics[0].F_ox[i].x + 0.025 + 0.012) ** 2
            ret -= (self.elastics[0].F_x[i].y - self.elastics[0].F_oy[i].y + 0.005 + 0.012) ** 2 if False else (self.elastics[0].F_x[i].y - self.elastics[0].F_ox[i].y + 0.005 + 0.012) ** 2 * (-1) * (-1)
            ret -= (self.elastics[0].F_x[i].z - self.elastics[0].F_ox[i].z - 0.0003) ** 2
        return ret

    def action(self, step, delta_pos, delta_rot):
        """Apply one rigid gripper motion and sync the three effector bodies
        into the global position field."""
        # self.gripper.print_plot()
        self.gripper.step_simple(delta_pos, delta_rot)
        # self.gripper.print_plot()
        self.gripper.update_bound(self)
        # a1 = self.elastics[1].check_determinant()
        # a2 = self.elastics[2].check_determinant()
        # a3 = self.elastics[3].check_determinant()
        # if not (a1 and a2 and a3):
        #     print("penetrate!!!!")
        #     self.gripper.print_plot()
        self.pushup_property(self.pos, self.elastics[1].F_x, self.elastics[1].offset)
        self.pushup_property(self.pos, self.elastics[2].F_x, self.elastics[2].offset)
        self.pushup_property(self.pos, self.elastics[3].F_x, self.elastics[3].offset)

    def time_step(self, f_contact, frame_idx, force_stick=True):
        """Advance one frame with an implicit Newton solve (max 15 iters).

        NOTE(review): contains per-phase wall-clock timing prints — debug
        instrumentation that should probably be removed or gated for
        production runs. ``force_stick`` is unused here.
        """
        import time
        start_time = time.time()
        self.timestep_init()
        self.calc_vn()
        end_time = time.time()
        print("init time:", end_time - start_time)
        start_time = end_time

        f_contact(self)

        end_time = time.time()
        print("contact time:", end_time - start_time)
        start_time = end_time
        self.contact_analysis()
        end_time = time.time()
        print("contact analysis time:", end_time - start_time)
        start_time = end_time

        # self.save_constraints('../debug/const_%d.pt' % frame_idx)
        # print("contact_cnt", self.nc[None])
        # self.pos.copy_from(self.x_hat)
        # self.push_down_pos()
        iter = 0
        delta = 1e5
        temp_delta = 1e6
        PD = True
        while iter < 15:
            iter += 1
            # if frame_idx >= 5:
            #     self.check_differential()
            # if not self.elastics[1].check_determinant():
            #     print("not deter 1!!!")
            # if not self.elastics[2].check_determinant():
            #     print("not deter 2!!!")
            self.newton_step_init()
            # self.calc_vn()
            # self.contact_analysis()
            # split energy and residual!!!!
            self.compute_energy()
            PD = self.compute_residual_and_Hessian(False, iter, spd=True)
            temp_delta = delta
            if not PD:
                break
            # if iter > 20:
            #     alpha = "GD"
            #     delta = self.calc_f_norm(self.F) * 0.01
            #     self.gradient_step(delta)
            # else:
            delta, alpha = self.newton_step(iter)
            # print(f"iter: {iter}, delta: {delta}, step size: {alpha}, energy {self.E[None]}")
            if delta < 1e-7:
                break
        # print(f'pass {frame_idx}, iter:', iter, "delta:", delta)
        end_time = time.time()
        print("solving time:", end_time - start_time, "iter:", iter)
        start_time = end_time
        self.timestep_finish()
--------------------------------------------------------------------------------
/code/task_scene/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/code/task_scene/__init__.py
--------------------------------------------------------------------------------
/code/training/RL_eval_env.py:
--------------------------------------------------------------------------------
1 | """
2 | tactile_env.py
3 | Implemented by Elgce August, 2023
4 | TactileEnv class inherited from gym.Env
5 | Wrap implemented Contact_models like Surface_follow
6 | For use of stable-baselines3
7 | """
8 | from RL_env import Env
9 |
10 | ##################################################################
11 | # implement TactileEvalEnv,
12 | # inherit from TactileEnv
13 | # for eval usage in rl algos
14 | ##################################################################
class EvalEnv(Env):
    """Evaluation-time environment: identical to `Env` except for reward
    computation — early-stopped episodes yield a 0.0 step reward."""

    def __init__(self, sys_name, time_step, reward_name=None, load_dir=None, task_name=None, Kb=100, mu=5.0, model=None):
        super().__init__(sys_name, time_step, reward_name, load_dir, task_name, Kb, mu, model)

    def compute_rewards(self):
        """Compute the incremental reward for the current step.

        Uses the scene's default ``compute_reward`` (with target positions
        for the "forming" task) or the method named by ``self.reward_name``.

        Returns:
            float: difference between the current cumulative reward and the
            previous step's, or 0.0 when the scene reports an early stop.
        """
        if self.reward_name is None:
            if self.sys_name == "forming":
                rewards = self.sys.compute_reward(self.target_pos)
            else:
                rewards = self.sys.compute_reward()
        else:
            func = getattr(self.sys, self.reward_name)
            if callable(func):
                rewards = func()
            else:
                print(f"{self.reward_name}, not a callable function!!")
                exit(0)
        ret_rewards = rewards - self.last_reward
        self.last_reward = rewards
        if self.sys.check_early_stop(self.time_step, RL=True):
            ret_rewards = 0.0
        return ret_rewards
44 |
--------------------------------------------------------------------------------
/code/training/run_cmaes_all.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | # import torch
3 | import time
4 | # from PIL import Image
5 | import numpy as np
6 | import torch
7 | import imageio
8 | import os
9 |
10 | import random
11 | from argparse import ArgumentParser
12 | import matplotlib.pyplot as plt
13 |
# Command-line configuration for CMA-ES trajectory optimization.
parser = ArgumentParser()
parser.add_argument('--pop_size', type=int, default=8)        # CMA-ES population size
parser.add_argument('--iter', type=int, default=10)           # CMA-ES generations
parser.add_argument('--tot_step', type=int, default=60)       # simulated frames per rollout
parser.add_argument('--abs_step', type=int, default=60)       # number of optimized control knots
parser.add_argument('--sigma', type=float, default=1.0)       # initial CMA-ES step size
parser.add_argument('--trial', type=str, default="0")
parser.add_argument('--env', type=str, default="")            # task scene suffix (Scene_<env>)
parser.add_argument('--Kb', type=float, default=100.0)        # cloth bending stiffness
parser.add_argument('--mu', type=float, default=1.0)          # cloth-elastic friction
parser.add_argument('--reward_name', type=str, default=None)  # optional reward method name on the scene
parser.add_argument('--load_dir', type=str, default=None)     # optional saved state to load each rollout
parser.add_argument('--max_dist', type=float, default=0.002)  # per-step moving-distance clamp
parser.add_argument('--curve7', type=float, default=1.0)
parser.add_argument('--curve8', type=float, default=1.0)
parser.add_argument('--dense', type=float, default=10000.0)
parser.add_argument('--target_dir', type=str, default=None)   # target positions for the "forming" task
args = parser.parse_args()

# Taichi must be initialized before importing the project modules below.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')
36 |
37 | from thinshelllab.engine.geometry import projection_query
38 | from thinshelllab.engine import linalg
39 | from thinshelllab.agent.traj_opt_single import agent_trajopt
40 | import cma
41 | import importlib
42 |
43 | from thinshelllab.engine.analytic_grad_single import Grad
44 |
# Dynamically load the task scene module selected on the command line.
importlib.invalidate_caches()
Scene = importlib.import_module(f'thinshelllab.task_scene.Scene_{args.env}')

tot_timestep = args.tot_step
cloth_size=0.06
if args.env == "folding_2" or args.env == "forming":
    cloth_size = 0.1

# NOTE(review): `sys` shadows the stdlib module name throughout this script.
if args.Kb < 2:
    sys = Scene.Scene(cloth_size=cloth_size, soft=True, dense=args.dense)
else:
    if args.env == "interact":
        sys = Scene.Scene(cloth_size=cloth_size, dense=args.dense)
    else:
        sys = Scene.Scene(cloth_size=cloth_size)
    sys.cloths[0].Kb[None] = args.Kb

colors = ti.Vector.field(3, dtype=float, shape=sys.tot_NV)

sys.init_all()
sys.get_colors(colors)
sys.mu_cloth_elastic[None] = args.mu


# Off-screen renderer used to dump per-frame images of the best trajectory.
window = ti.ui.Window('surface test', res=(800, 800), vsync=True, show_window=False)
canvas = window.get_canvas()
canvas.set_background_color((0.5, 0.5, 0.5))
scene = ti.ui.Scene()
camera = ti.ui.Camera()
camera.position(-0.2, 0.2, 0.05)
camera.lookat(0, 0, 0)
camera.up(0, 0, 1)

save_path = f"../data/cmaes_traj_{args.env}_{args.trial}"
if not os.path.exists(save_path):
    os.mkdir(save_path)

# One controllable gripper per effector (paired effectors when the scene
# exposes a two-finger gripper).
gripper_cnt = sys.elastic_cnt - 1
if sys.enable_gripper:
    gripper_cnt = int((sys.effector_cnt - 1) // 2)

analy_grad = Grad(sys, tot_timestep, gripper_cnt)

# CMA-ES search space: 6 DOFs (3 translation + 3 rotation) per gripper per
# control knot; genes are centered at 5 and decoded as (gene - 5).
x0 = (args.abs_step * 6 * gripper_cnt) * [5]
sigma0 = args.sigma

es = cma.CMAEvolutionStrategy(x0, sigma0, {'popsize': args.pop_size})
agent = agent_trajopt(tot_timestep, gripper_cnt, max_moving_dist=args.max_dist)

sub_steps = int(tot_timestep / args.abs_step)
scaling = 5.0 / (sub_steps * 0.0003)         # gene units -> meters per sub-step
scaling_angle = 5.0 / (sub_steps * 0.01)     # gene units -> radians per sub-step
97 |
def evaluate(x):
    """CMA-ES objective: decode candidate `x` into a gripper trajectory,
    roll it out in the scene, and return the negated reward.

    Genes are centered at 5; each control knot is spread uniformly over
    ``sub_steps`` simulation frames. Early-stopped rollouts only earn the
    survival bonus ``stop_step / tot_timestep * 0.1``.
    """
    sys.reset()
    agent.traj.fill(0)
    if not (args.load_dir is None):
        sys.load_all(args.load_dir)
    # Integrate per-knot deltas into an absolute cumulative trajectory.
    for ii in range(args.abs_step):
        for jj in range(sub_steps):
            if ii == 0 and jj == 0:
                continue
            i = ii * sub_steps + jj
            if not (i < 5 and args.env == "interact"):
                for j in range(gripper_cnt):
                    for k in range(3):
                        agent.traj[i, j, k] = agent.traj[i - 1, j, k] + (x[ii * 6 * gripper_cnt + j * 6 + k] - 5) / sub_steps / scaling
                        agent.traj[i, j, k + 3] = agent.traj[i - 1, j, k + 3] + (x[ii * 6 * gripper_cnt + j * 6 + k + 3] - 5) / sub_steps / scaling_angle

    agent.fix_action(0.015)

    early_stop = False
    stop_step = 0
    tot_reward = 0  # NOTE(review): unused
    target = None
    if args.env == "forming":
        target = np.load(args.target_dir)
    if args.env == "balancing" or args.env == "bounce":
        analy_grad.copy_pos(sys, 0)
    for frame in range(1, tot_timestep):
        agent.get_action(frame)
        sys.action(frame, agent.delta_pos, agent.delta_rot)
        sys.time_step(projection_query, frame)
        early_stop = sys.check_early_stop(frame)
        if early_stop:
            break

        stop_step = frame + 1
        if args.env == "balancing" or args.env == "bounce":
            analy_grad.copy_pos(sys, frame)

    reward = stop_step / tot_timestep * 0.1
    if not early_stop:
        if (args.reward_name is None) or (args.env == "balancing"):
            if (args.env == "balancing"):
                # NOTE(review): with env=="balancing" and reward_name=None this
                # falls into getattr(sys, None) and raises TypeError — confirm
                # intended usage always passes a reward_name for balancing.
                if args.reward_name == "compute_reward_throwing":
                    reward += sys.compute_reward_throwing(analy_grad) + 10
                else:
                    func = getattr(sys, args.reward_name)
                    if callable(func):
                        reward += func(analy_grad) + 5
                    else:
                        print(f"{args.reward_name}, not a callable function!!")
                        exit(0)
            elif (args.env == "forming"):
                reward += sys.compute_reward(target) + 5
            elif (args.env == "bounce"):
                reward += sys.compute_reward(analy_grad) + 5
            else:
                reward += sys.compute_reward() + 5
        else:
            func = getattr(sys, args.reward_name)
            if callable(func):
                reward += func() + 5
            else:
                print(f"{args.reward_name}, not a callable function!!")
                exit(0)

    # CMA-ES minimizes, so return the negated reward.
    return -reward
164 |
tot_count = 0
plot_x = []
plot_y = []

# Main CMA-ES loop: each generation evaluates a population, logs the values,
# then replays the best-so-far solution, rendering every frame and saving a
# per-generation trajectory + GIF.
for ww in range(args.iter):
    X = es.ask()  # sample len(X) candidate solutions
    # print(len(X))
    # print(X[:5])
    tell_list = []
    for x in X:
        eva = evaluate(x)
        tell_list.append(eva)
        tot_count += 1
        plot_x.append(tot_count)
        plot_y.append(eva)
    # print("value:", tell_list)
    es.tell(X, tell_list)
    es.disp()
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, f"plot.png"))
    np.save(os.path.join(save_path, "plot_Data.npy"), np.array(plot_y))
    # print("fbest:", es.result.fbest)

    result = es.result
    x = result.xbest

    # Rebuild the trajectory from the best genome (same decoding as evaluate()).
    agent.traj.fill(0)
    for ii in range(args.abs_step):
        for jj in range(sub_steps):
            if ii == 0 and jj == 0:
                continue
            i = ii * sub_steps + jj
            if not (i < 5 and args.env == "interact"):
                for j in range(gripper_cnt):
                    for k in range(3):
                        agent.traj[i, j, k] = agent.traj[i - 1, j, k] + (
                                x[ii * 6 * gripper_cnt + j * 6 + k] - 5) / sub_steps / scaling
                        agent.traj[i, j, k + 3] = agent.traj[i - 1, j, k + 3] + (
                                x[ii * 6 * gripper_cnt + j * 6 + k + 3] - 5) / sub_steps / scaling_angle
    agent.fix_action(0.015)
    np_traj = agent.traj.to_numpy()
    np.save(os.path.join(save_path, f"traj_{ww}.npy"), np_traj)

    sys.reset()
    if not (args.load_dir is None):
        sys.load_all(args.load_dir)

    # Render the initial frame (index 0).
    scene.set_camera(camera)
    scene.ambient_light([0.8, 0.8, 0.8])
    scene.point_light((2, 2, 2), (1, 1, 1))
    # sys.cloths[0].update_visual()
    scene.mesh(sys.x32, indices=sys.f_vis,
               per_vertex_color=colors)  # , index_offset=(nf//2)*3, index_count=(nf//2)*3)
    canvas.scene(scene)
    window.save_image(os.path.join(save_path, f"{0}.png"))

    for frame in range(1, tot_timestep):
        agent.get_action(frame)
        sys.action(frame, agent.delta_pos, agent.delta_rot)
        sys.time_step(projection_query, frame)
        if sys.check_early_stop(frame, True):
            print("should stop in", frame)
        scene.set_camera(camera)
        scene.ambient_light([0.8, 0.8, 0.8])
        scene.point_light((2, 2, 2), (1, 1, 1))
        # sys.cloths[0].update_visual()
        scene.mesh(sys.x32, indices=sys.f_vis,
                   per_vertex_color=colors)  # , index_offset=(nf//2)*3, index_count=(nf//2)*3)
        canvas.scene(scene)
        window.save_image(os.path.join(save_path, f"{frame}.png"))

    # Assemble the rendered frames into an animated GIF for this generation.
    frames = []
    for j in range(tot_timestep):
        filename = os.path.join(save_path, f"{j}.png")
        frames.append(imageio.imread(filename))

    gif_name = os.path.join(save_path, f"GIF_{ww}.gif")
    imageio.mimsave(gif_name, frames, 'GIF', duration=0.02)
--------------------------------------------------------------------------------
/code/training/run_cmaes_parameter.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | # import torch
3 | import time
4 | # from PIL import Image
5 | import numpy as np
6 | import torch
7 | import imageio
8 | import os
9 |
10 | import random
11 | from argparse import ArgumentParser
12 | import matplotlib.pyplot as plt
13 |
# Command-line configuration for CMA-ES over scene *parameters* (friction /
# bending stiffness), using a fixed pre-defined trajectory.
parser = ArgumentParser()
parser.add_argument('--pop_size', type=int, default=5)       # CMA-ES population size
parser.add_argument('--iter', type=int, default=10)          # CMA-ES generations
parser.add_argument('--tot_step', type=int, default=60)      # simulated frames per rollout
parser.add_argument('--sigma', type=float, default=1.0)      # initial CMA-ES step size
parser.add_argument('--trial', type=str, default="0")
parser.add_argument('--env', type=str, default="")           # task scene suffix (Scene_<env>)
parser.add_argument('--traj', type=str, default="")          # name of the agent method that builds the fixed trajectory
parser.add_argument('--Kb', type=float, default=100.0)       # baseline cloth bending stiffness
parser.add_argument('--max_dist', type=float, default=0.002)
parser.add_argument('--mu', type=float, default=1.0)         # cloth-elastic friction
parser.add_argument('--mu_cloth', type=float, default=1.0)   # baseline cloth-cloth friction ("slide" env)
args = parser.parse_args()

# Taichi must be initialized before importing the project modules below.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')
31 |
32 | from thinshelllab.engine.geometry import projection_query
33 | from thinshelllab.engine import linalg
34 | from thinshelllab.engine.analytic_grad_single import Grad
35 | from thinshelllab.agent.traj_opt_single import agent_trajopt
36 | import cma
37 | import importlib
38 |
39 | from thinshelllab.engine.analytic_grad_single import Grad
40 |
# Dynamically load the task scene module selected on the command line.
importlib.invalidate_caches()
Scene = importlib.import_module(f'thinshelllab.task_scene.Scene_{args.env}')

tot_timestep = args.tot_step
cloth_size=0.06

# NOTE(review): `sys` shadows the stdlib module name throughout this script.
sys = Scene.Scene(cloth_size=cloth_size)
sys.cloths[0].Kb[None] = args.Kb

colors = ti.Vector.field(3, dtype=float, shape=sys.tot_NV)

sys.init_all()
sys.get_colors(colors)
sys.mu_cloth_elastic[None] = args.mu




# window = ti.ui.Window('surface test', res=(800, 800), vsync=True, show_window=False)
# canvas = window.get_canvas()
# canvas.set_background_color((0.5, 0.5, 0.5))
# scene = ti.ui.Scene()
# camera = ti.ui.Camera()
# camera.position(-0.2, 0.2, 0.05)
# camera.lookat(0, 0, 0)
# camera.up(0, 0, 1)

save_path = f"../data/cmaes_traj_{args.env}_{args.trial}"
if not os.path.exists(save_path):
    os.mkdir(save_path)

gripper_cnt = sys.elastic_cnt - 1
if sys.enable_gripper:
    gripper_cnt = int((sys.effector_cnt - 1) // 2)

# Two-dimensional search; only x[0] is consumed by evaluate().
x0 = [0, 0]
sigma0 = args.sigma

es = cma.CMAEvolutionStrategy(x0, sigma0, {'popsize': args.pop_size})
# The trajectory is fixed: built once by the agent method named in --traj.
if gripper_cnt > 0:
    agent = agent_trajopt(tot_timestep, gripper_cnt, max_moving_dist=args.max_dist)
    func = getattr(agent, args.traj)
    if callable(func):
        func()
else:
    agent = agent_trajopt(tot_timestep, 1, max_moving_dist=args.max_dist)
87 |
88 |
89 |
def evaluate(x):
    """CMA-ES objective: perturb one scene parameter by candidate ``x[0]``,
    replay the fixed trajectory, and return the negated task reward.

    For env "slide" the optimized parameter is cloth-cloth friction;
    otherwise it is the bending stiffness Kb (x[0] scaled by 200). Both are
    clamped to a small positive floor. x[1] is unused.
    """
    sys.reset()
    if args.env == "slide":
        sys.mu_cloth_cloth[None] = args.mu_cloth + x[0]
        sys.mu_cloth_cloth[None] = max(0.0001, sys.mu_cloth_cloth[None])
    else:
        sys.cloths[0].Kb[None] = args.Kb + x[0] * 200
        sys.cloths[0].Kb[None] = max(0.0001, sys.cloths[0].Kb[None])

    for frame in range(1, tot_timestep):
        if gripper_cnt > 0:
            agent.get_action(frame)
            sys.action(frame, agent.delta_pos, agent.delta_rot)
        sys.time_step(projection_query, frame)

    reward = sys.compute_reward()
    return -reward  # CMA-ES minimizes
107 |
tot_count = 0
plot_x = []
plot_y = []

# CMA-ES ask/tell loop over the scene parameter(s); logs every evaluation.
for ww in range(args.iter):
    X = es.ask()  # sample len(X) candidate solutions
    # print(len(X))
    # print(X[:5])
    tell_list = []
    for x in X:
        eva = evaluate(x)
        tell_list.append(eva)
        tot_count += 1
        plot_x.append(tot_count)
        plot_y.append(eva)
    # print("value:", tell_list)
    es.tell(X, tell_list)
    es.disp()
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, f"plot.png"))
    np.save(os.path.join(save_path, "plot_Data.npy"), np.array(plot_y))
    # print("fbest:", es.result.fbest)

result = es.result
x = result.xbest

print(x)  # best parameter perturbation found
--------------------------------------------------------------------------------
/code/training/training_env.py:
--------------------------------------------------------------------------------
1 | """
2 | multi_env.py
3 | Implemented by Elgce August, 2023
4 | Use implemented class TactileEnv to train ppo policy
5 | For use of stable-baselines3
6 | """
7 |
8 | import numpy as np
9 | import taichi as ti
10 | import os
11 |
# Run headless (EGL) — required when training on servers without a display.
off_screen = True
if off_screen:
    os.environ["PYTHON_PLATFORM"] = 'egl'
# Taichi must be initialized before the project env modules are imported.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from RL_env import Env
from RL_eval_env import EvalEnv
from stable_baselines3 import PPO, SAC
from sb3_contrib import RecurrentPPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.callbacks import EvalCallback

import torch
device_count = torch.cuda.device_count()
print(f"Number of available devices: {device_count}")

for i in range(device_count):
    print(f"Device {i}: {torch.cuda.get_device_name(i)}")
32 |
33 | ##################################################################
34 | # init taichi env & use implemented Contact class
35 | ##################################################################
36 | # if trained on servers, off_screen = True, otherwise False
37 |
38 |
39 | ##################################################################
40 |
41 |
42 | ##################################################################
43 | # A class for train taichi_tasks with ppo algorithm
44 | # num_envs: number of envs for multi-processing training
45 | # task_type: name of task to do
46 | # num_sub_steps: num of sub_steps for update iteration
47 | # num_total_step: num of total_step, maximum step length
48 | # dt: dt of the simulated env as in taichi envs
49 | # n_sensors: related to how many sensors you have (noted as n), 2*n
50 | ##################################################################
51 |
52 |
53 |
class Trainer():
    """Multi-environment RL trainer around stable-baselines3 algorithms.

    Builds a vectorized training env (`Env`) and a vectorized eval env
    (`EvalEnv`) via ``make_vec_env`` and runs ``model.learn`` with a periodic
    :class:`EvalCallback` that checkpoints the best model.
    """
    def __init__(self, num_envs, num_eval_envs, env_name, time_step, training_iterations, resume, off_screen, algo, reward_name, load_dir, task_name, Kb, mu, model):
        self.num_envs = num_envs
        self.num_eval_envs = num_eval_envs
        self.training_iterations = training_iterations
        self.tot_step = time_step
        # kwargs forwarded to every training env instance
        self.env_kwargs = {"sys_name": env_name,
                           "time_step": time_step, "reward_name": reward_name,
                           "load_dir": load_dir, "task_name": task_name, "Kb": Kb, "mu": mu, "model": model}
        self.vec_env = make_vec_env(Env, n_envs=self.num_envs, env_kwargs=self.env_kwargs)
        self.checkpoint_path = "../data/checkpoints/" + task_name + "/"
        # eval envs never receive the model handle
        self.eval_env_kwargs = {"sys_name": env_name,
                                "time_step": time_step, "reward_name": reward_name,
                                "load_dir": load_dir, "task_name": task_name, "Kb": Kb, "mu": mu, "model": None}
        self.eval_env = make_vec_env(EvalEnv, n_envs=self.num_eval_envs, env_kwargs=self.eval_env_kwargs)
        self.resume = resume
        self.off_screen = off_screen
        self.algo = algo
        # render guis as taichi envs do
        self.eval_rwd = 0
        self.model_name = model

    def train(self):
        """Create (or resume) the model and train for
        ``training_iterations * tot_step`` environment steps."""
        # BUG FIX: previously tested the module-level global `resume`, which
        # ignored the constructor argument and raises NameError when this
        # class is imported outside the __main__ block. Use the instance flag.
        if not self.resume:
            if self.model_name == "RecurrentPPO":
                self.model = self.algo("MlpLstmPolicy", self.vec_env, verbose=1, tensorboard_log=self.checkpoint_path)
            else:
                self.model = self.algo("MlpPolicy", self.vec_env, verbose=1, tensorboard_log=self.checkpoint_path)
        else:
            self.load_model()
        self.eval_callback = EvalCallback(eval_env=self.eval_env, best_model_save_path=self.checkpoint_path,
                                          log_path=self.checkpoint_path, eval_freq=self.tot_step * 50)
        self.model.learn(total_timesteps=self.training_iterations * self.tot_step,
                         callback=self.eval_callback)
        print("================= Training process has finished! =================")

    def load_model(self):
        """Load the best checkpoint saved by EvalCallback; warn if absent."""
        try:
            self.model = self.algo.load(self.checkpoint_path + "best_model")
        except FileNotFoundError as e:
            print(f"model file does not exist!: {e}")

    # Rendering is delegated to the underlying taichi envs; nothing to do here.
    def render(self):
        pass
115 |
116 |
117 | ##################################################################
118 | from argparse import ArgumentParser
119 | import matplotlib.pyplot as plt
120 |
# Command-line configuration for RL training.
parser = ArgumentParser()
parser.add_argument('--num_env', type=int, default=4)        # parallel training envs
parser.add_argument('--num_eval_envs', type=int, default=4)  # parallel eval envs
parser.add_argument('--tot_step', type=int, default=60)      # env steps per episode
parser.add_argument('--env', type=str, default="")           # task scene suffix
parser.add_argument('--Kb', type=float, default=100.0)
parser.add_argument('--mu', type=float, default=1.0)
parser.add_argument('--reward_name', type=str, default=None)
parser.add_argument('--load_dir', type=str, default=None)
parser.add_argument('--task_name', type=str, default=None)
parser.add_argument('--model', type=str, default="RecurrentPPO")  # RecurrentPPO | PPO | SAC
args = parser.parse_args()

if __name__ == "__main__":
    # choose tasks from registered tasks
    # NOTE(review): task_type / num_env / num_eval_envs below are unused —
    # the Trainer call takes its values from `args`.
    task_type = "surface_following"
    # set number of envs to train
    num_env = 2
    num_eval_envs = 2
    training_iterations = 100000

    resume = False  # whether to resume a previously trained checkpoint (True to resume)
    if args.model == "RecurrentPPO":
        algo = RecurrentPPO  # SAC
    elif args.model == "PPO":
        algo = PPO
    else:
        algo = SAC
    trainer = Trainer(num_envs=args.num_env, num_eval_envs=args.num_eval_envs, env_name=args.env, time_step=args.tot_step, training_iterations=training_iterations, resume=False,
                      off_screen=True, algo=algo, reward_name=args.reward_name, load_dir=args.load_dir, task_name=args.task_name, Kb=args.Kb, mu=args.mu, model=args.model)
    trainer.train()
152 | ##################################################################
--------------------------------------------------------------------------------
/code/training/trajopt_balancing.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | import time
3 | import numpy as np
4 | import torch
5 | import os
6 | from thinshelllab.agent.traj_opt_single import agent_trajopt
7 | from thinshelllab.optimizer.optim import Adam_single
8 | import random
9 | from argparse import ArgumentParser
10 | import matplotlib.pyplot as plt
11 |
# Command-line options for the balancing trajectory optimization.
parser = ArgumentParser()
# range of optimization trials to run: [l, r)
parser.add_argument('--l', type=int, default=0)
parser.add_argument('--r', type=int, default=5)
# gradient-descent iterations per trial and Adam learning rate
parser.add_argument('--iter', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
# simulation steps per rollout
parser.add_argument('--tot_step', type=int, default=5)
# optimize the throwing variant instead of balancing
parser.add_argument('--throwing', action="store_true", default=False)
# save (rather than load) the initial simulation state
parser.add_argument('--save', action="store_true", default=False)
parser.add_argument('--side', action="store_true", default=False)
# optional .npy trajectory to warm-start the optimization
parser.add_argument('--load_traj', type=str, default=None)
# cloth bending stiffness
parser.add_argument('--Kb', type=float, default=100)
# directory holding the pre-computed initial state
parser.add_argument('--load_state', type=str, default="../data/balance_state")
# render every `render` iterations, with the Taichi or Luisa backend
parser.add_argument('--render', type=int, default=50)
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()
27 |
# Taichi must be initialized before the scene modules below are imported,
# since they define Taichi fields/kernels at import time.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_balancing import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine.analytic_grad_single import Grad

# Mesh bookkeeping constants (M cloth pieces of N x N vertices).
# NOTE(review): these are not used further in this script — the Scene
# below builds its own mesh; kept for parity with sibling scripts.
M = 2
N = 16
k_spring = 1000
nv = M * (N ** 2)          # total vertex count
nf = M * 2 * (N - 1) ** 2  # total face count
# nv = (N**2)
# nf = 2*(N-1)**2
total_m = 0.005 * M        # total cloth mass
h = 0.005                  # simulation step size
47 |
tot_timestep = args.tot_step
# Build the balancing scene and set cloth bending stiffness.
sys = Scene(cloth_size=0.06)
sys.cloths[0].Kb[None] = args.Kb
# Number of controllable grippers: one per elastic body (minus one),
# or half the effector count when paired gripper jaws are enabled.
gripper_cnt = sys.elastic_cnt - 1
if sys.enable_gripper:
    gripper_cnt = int((sys.effector_cnt - 1) // 2)

# Analytic-gradient backend, Adam optimizer over a
# (timestep, gripper, 6-DoF) action trajectory, and the trajopt agent.
analy_grad = Grad(sys, tot_timestep, gripper_cnt)
adam = Adam_single((tot_timestep, gripper_cnt, 6), args.lr, 0.9, 0.9999, 1e-8)
agent = agent_trajopt(tot_timestep, gripper_cnt, max_moving_dist=0.001)
sys.init_all()
analy_grad.init_mass(sys)
renderer = Renderer(sys, "balancing", option=args.render_option)

now_reward = -100000  # best reward seen so far (used to checkpoint best_traj)
print("tot_NV:", sys.tot_NV)
64 |
# Outer loop over independent optimization trials.
for ww in range(args.l, args.r):
    # Each trial gets its own image/trajectory output directory.
    if args.throwing:
        save_path = f"../imgs/traj_opt_balance_throwing_{ww}"
    else:
        save_path = f"../imgs/traj_opt_balance_{ww}"
    renderer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    sys.reset()
    sys.mu_cloth_elastic[None] = 5.0
    plot_x = []
    plot_y = []

    # Optionally warm-start from a saved trajectory and clamp it to the
    # per-step action limit.
    if args.load_traj is not None:
        agent.traj.from_numpy(np.load(args.load_traj))
        agent.fix_action(0.015)

    adam.reset()
    for i in range(args.iter):
        render = i % args.render == 0
        print("iter: ", i)
        analy_grad.copy_pos(sys, 0)
        start_time = time.time()
        state_path = args.load_state
        if args.throwing:
            state_path = "../data/throwing_state"
        # Load a pre-computed initial state unless we are generating it.
        if not args.save:
            sys.load_all(state_path)

        print("pos:", sys.gripper.pos.to_numpy())
        if not os.path.exists(state_path):
            os.mkdir(state_path)

        if render:
            renderer.render("0")
        # Forward rollout: apply the agent's action each frame, step the
        # simulation, and record positions for the backward pass.
        for frame in range(1, tot_timestep):
            agent.get_action(frame)
            sys.action(frame, agent.delta_pos, agent.delta_rot)
            sys.time_step(projection_query, frame)
            analy_grad.copy_pos(sys, frame)
            if render:
                renderer.render(str(frame))
            if args.save:
                sys.save_all(state_path)

        end_time = time.time()
        print("tot_time:", end_time - start_time)
        if args.throwing:
            tot_reward = sys.compute_reward_throwing(analy_grad)
        else:
            tot_reward = sys.compute_reward_all(analy_grad)

        plot_x.append(i)
        plot_y.append(tot_reward)
        print("total_reward:", plot_y)
        # Checkpoint the best trajectory and the reward curve so far.
        if tot_reward > now_reward:
            now_reward = tot_reward
            np.save(os.path.join(save_path, "best_traj.npy"), agent.traj.to_numpy())
        np.save(os.path.join(save_path, "plot_data.npy"), np.array(plot_y))

        if render:
            renderer.end_rendering(i)

        # Backward pass: seed the task loss, then back-propagate through
        # the rollout frame by frame.
        if args.throwing:
            analy_grad.get_loss_throwing(sys)
        else:
            analy_grad.get_loss_balance(sys)

        # BUG-RISK FIX: use a separate index `j` so the outer iteration
        # counter `i` is not clobbered (matches trajopt_bouncing.py).
        for j in range(tot_timestep - 1, 0, -1):
            analy_grad.transfer_grad(j, sys, projection_query)

        analy_grad.apply_action_limit_grad(agent, 0.015)
        print("done grad")
        sys.reset()

        adam.step(agent.traj, analy_grad.gripper_grad)
        # agent.fix_action(0.015)
        analy_grad.reset()
    # Reward curve for this trial.
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, f"plot.png"))
--------------------------------------------------------------------------------
/code/training/trajopt_bouncing.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | # import torch
3 | import time
4 | # from PIL import Image
5 | import numpy as np
6 | import torch
7 | import imageio
8 | import os
9 | from thinshelllab.agent.traj_opt_single import agent_trajopt
10 | from thinshelllab.optimizer.optim import Adam_single
11 | import random
12 | from argparse import ArgumentParser
13 | import matplotlib.pyplot as plt
14 |
# Command-line options for the bouncing (Kb system-identification) task.
parser = ArgumentParser()
# range of trials to run: [l, r)
parser.add_argument('--l', type=int, default=0)
parser.add_argument('--r', type=int, default=5)
# optimization iterations and learning rate for the Kb update
parser.add_argument('--iter', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
# simulation steps per rollout
parser.add_argument('--tot_step', type=int, default=5)
# initial cloth bending stiffness (the quantity optimized below)
parser.add_argument('--Kb', type=float, default=120.0)
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()
24 |
# Taichi must be initialized before the scene modules below are imported,
# since they define Taichi fields/kernels at import time.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_bouncing import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine.analytic_grad_system import Grad

# Mesh bookkeeping constants (M cloth pieces of N x N vertices).
# NOTE(review): not used further in this script; the Scene builds its own mesh.
M = 2
N = 16
k_spring = 1000
nv = M * (N ** 2)
nf = M * 2 * (N - 1) ** 2
# nv = (N**2)
# nf = 2*(N-1)**2
total_m = 0.005 * M
h = 0.005

tot_timestep = args.tot_step
sys = Scene(cloth_size=0.06)
sys.cloths[0].Kb[None] = args.Kb  # bending stiffness: the parameter optimized below
analy_grad = Grad(sys, tot_timestep, sys.elastic_cnt - 1)
sys.init_all()
analy_grad.init_mass(sys)
renderer = Renderer(sys, "bouncing", option=args.render_option)

# NOTE(review): never updated below; kept for parity with sibling scripts.
now_reward = 0
# Outer loop over independent trials: each optimizes the cloth bending
# stiffness Kb so the simulated bounce matches the task loss.
for ww in range(args.l, args.r):
    save_path = f"../imgs/traj_opt_table_{ww}"
    # sys.init_pos = [(random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.0006]
    renderer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    sys.reset()
    sys.mu_cloth_elastic[None] = 0.5
    plot_x = []
    plot_y = []
    kb_list = []  # history of the optimized Kb values

    for i in range(args.iter):
        print("iter: ", i)
        analy_grad.copy_pos(sys, 0)
        start_time = time.time()
        renderer.render("0")
        # Forward rollout: step the simulation and record positions for
        # the backward pass.
        for frame in range(1, tot_timestep):
            print("frame:", frame)
            sys.time_step(projection_query, frame)
            analy_grad.copy_pos(sys, frame)
            renderer.render(str(frame))

        end_time = time.time()
        print("tot_time:", end_time - start_time)
        tot_reward = sys.compute_reward()

        plot_x.append(i)
        plot_y.append(tot_reward)
        np.save(os.path.join(save_path, "plot_data.npy"), np.array(plot_y))
        print("total_reward:", plot_y)
        renderer.end_rendering(i)

        # Backward pass: seed the loss, then back-propagate frame by frame.
        analy_grad.get_loss_table(sys)
        for j in range(tot_timestep - 1, 0, -1):
            analy_grad.transfer_grad(j, sys, projection_query)

        # Gradient step on Kb; the update magnitude is clamped to
        # [-100, 100] to keep steps bounded (idiomatic clamp, same
        # behavior as the previous if-chain).
        loss_grad = max(-100, min(100, analy_grad.grad_kb[None] * args.lr))
        sys.cloths[0].Kb[None] -= loss_grad
        kb_list.append(sys.cloths[0].Kb[None])
        print("done grad")
        sys.reset()

        print("prev kbs", kb_list)
        print("now kb", sys.cloths[0].Kb[None], "now grad", analy_grad.grad_kb[None] * args.lr)
        analy_grad.reset()
        args.lr *= 0.95  # decay the learning rate each iteration
    # Reward curve for this trial.
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, f"plot.png"))
--------------------------------------------------------------------------------
/code/training/trajopt_card.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | # import torch
3 | import time
4 | # from PIL import Image
5 | import numpy as np
6 | import torch
7 | import imageio
8 | import os
9 | from thinshelllab.agent.traj_opt_single import agent_trajopt
10 | from thinshelllab.optimizer.optim import Adam_single
11 | import random
12 | from argparse import ArgumentParser
13 | import matplotlib.pyplot as plt
14 |
# Command-line options for the card task (Kb system identification).
parser = ArgumentParser()
# range of trials to run: [l, r)
parser.add_argument('--l', type=int, default=0)
parser.add_argument('--r', type=int, default=5)
# optimization iterations and learning rate for the Kb update
parser.add_argument('--iter', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
# simulation steps per rollout
parser.add_argument('--tot_step', type=int, default=5)
# initial cloth bending stiffness (the quantity optimized below)
parser.add_argument('--Kb', type=float, default=1000.0)
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()
24 |
# Taichi must be initialized before the scene modules below are imported,
# since they define Taichi fields/kernels at import time.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_card import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine.analytic_grad_system import Grad

tot_timestep = args.tot_step
sys = Scene(cloth_size=0.06)
sys.cloths[0].Kb[None] = args.Kb  # bending stiffness: the parameter optimized below
analy_grad = Grad(sys, tot_timestep, sys.elastic_cnt - 1)
agent = agent_trajopt(tot_timestep, sys.elastic_cnt - 1, max_moving_dist=0.001)
sys.init_all()
analy_grad.init_mass(sys)
renderer = Renderer(sys, "card", option=args.render_option)

# NOTE(review): never updated below; kept for parity with sibling scripts.
now_reward = 0
# Outer loop over independent trials: rolls out a fixed card trajectory
# and optimizes the cloth bending stiffness Kb by gradient descent.
for ww in range(args.l, args.r):
    save_path = f"../imgs/traj_opt_card_{ww}"
    # sys.init_pos = [(random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.0006]
    renderer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    sys.reset()
    sys.mu_cloth_elastic[None] = 1.0
    plot_x = []
    plot_y = []
    kb_list = []  # history of the optimized Kb values
    # Initialize the card trajectory, clamp it to the per-step action
    # limit, and save it for reference.
    agent.init_traj_card()
    agent.fix_action(0.015)
    np.save(os.path.join(save_path, "best_traj.npy"), agent.traj.to_numpy())

    for i in range(args.iter):
        print("iter: ", i)
        analy_grad.copy_pos(sys, 0)
        obs_list = []
        action_list = []
        start_time = time.time()
        renderer.render("0")
        # Forward rollout: apply the agent's action each frame, step the
        # simulation, record positions, and dump the cloth mesh per frame.
        for frame in range(1, tot_timestep):
            print("frame:", frame)
            agent.get_action(frame)
            # sys.get_observation()
            sys.action(frame, agent.delta_pos, agent.delta_rot)
            # agent.get_action_field(frame)
            # obs_list.append(sys.observation.to_torch('cpu'))
            # action_list.append(agent.tmp_action.to_torch('cpu'))
            sys.time_step(projection_query, frame)
            analy_grad.copy_pos(sys, frame)
            renderer.render(str(frame))
            faces = sys.cloths[0].f2v.to_numpy()
            verts = sys.cloths[0].pos.to_numpy()
            np.save(os.path.join(save_path, f"faces_{frame}.npy"), faces)
            np.save(os.path.join(save_path, f"verts_{frame}.npy"), verts)
        end_time = time.time()
        print("tot_time:", end_time - start_time)
        tot_reward = sys.compute_reward()
        # if tot_reward > now_reward:
        #     now_reward = tot_reward
        #     data_path = f"../data/data_traj_opt_fold_{ww}"
        #     if not os.path.exists(data_path):
        #         os.mkdir(data_path)
        #     for frame in range(tot_timestep - 1):
        #         torch.save({
        #             'obs': obs_list[frame],
        #             'action': action_list[frame]
        #         }, os.path.join(data_path, f"data_{frame + 1}"))

        plot_x.append(i)
        plot_y.append(tot_reward)
        np.save(os.path.join(save_path, "plot_data.npy"), np.array(plot_y))
        print("total_reward:", plot_y)
        renderer.end_rendering(i)

        # Backward pass: only frames > 50 receive gradient transfer
        # (the range stops at 51 — presumably earlier frames are a
        # warm-up phase; confirm against the task design).
        analy_grad.get_loss_card(sys)

        # NOTE(review): this inner loop reuses the name `i`, clobbering
        # the outer iteration counter (harmless here, but fragile —
        # trajopt_bouncing uses `j` for the same loop).
        for i in range(tot_timestep - 1, 50, -1):
            # print("back prop step:", i)
            analy_grad.transfer_grad(i, sys, projection_query)

        # Gradient step on Kb; the learning rate is decayed each iteration.
        sys.cloths[0].Kb[None] -= analy_grad.grad_kb[None] * args.lr
        kb_list.append(sys.cloths[0].Kb[None])
        print("done grad")
        sys.reset()
        args.lr *= 0.95
        print("prev kbs", kb_list)
        print("now kb", sys.cloths[0].Kb[None], "now grad", analy_grad.grad_kb[None])

        agent.fix_action(0.015)
        analy_grad.reset()

    # Reward curve for this trial.
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, f"plot.png"))
--------------------------------------------------------------------------------
/code/training/trajopt_folding.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | # import torch
3 | import time
4 | # from PIL import Image
5 | import numpy as np
6 | import torch
7 | import imageio
8 | import os
9 | from thinshelllab.agent.traj_opt_single import agent_trajopt
10 | from thinshelllab.optimizer.optim import Adam_single
11 | import random
12 | from argparse import ArgumentParser
13 | import matplotlib.pyplot as plt
14 | from thinshelllab.engine import readfile
15 |
# Command-line options for the folding trajectory optimization.
parser = ArgumentParser()
# range of trials to run: [l, r)
parser.add_argument('--l', type=int, default=0)
parser.add_argument('--r', type=int, default=5)
# optimization iterations and Adam learning rate
parser.add_argument('--iter', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
# simulation steps per rollout
parser.add_argument('--tot_step', type=int, default=5)
# curvature targets passed to the folding reward/loss
parser.add_argument('--curve7', type=float, default=1.0)
parser.add_argument('--curve8', type=float, default=-1.0)
# optional .npy trajectory to warm-start the optimization
parser.add_argument('--load_traj', type=str, default=None)
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()
27 |
# Taichi must be initialized before the scene modules below are imported,
# since they define Taichi fields/kernels at import time.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_folding import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine.analytic_grad_single import Grad

# Mesh bookkeeping constants (M cloth pieces of N x N vertices).
# NOTE(review): not used further in this script; the Scene builds its own mesh.
M = 2
N = 16
k_spring = 1000
nv = M * (N ** 2)
nf = M * 2 * (N - 1) ** 2
# nv = (N**2)
# nf = 2*(N-1)**2
total_m = 0.005 * M
h = 0.005

tot_timestep = args.tot_step
# Larger cloth and a fixed bending stiffness for the folding task.
sys = Scene(cloth_size=0.1)
sys.cloths[0].Kb[None] = 400.0
analy_grad = Grad(sys, tot_timestep, sys.elastic_cnt - 1)
adam = Adam_single((tot_timestep, sys.elastic_cnt - 1, 6), args.lr, 0.9, 0.9999, 1e-8)
agent = agent_trajopt(args.tot_step, sys.elastic_cnt - 1, max_moving_dist=0.001)
sys.init_all()
analy_grad.init_mass(sys)
renderer = Renderer(sys, "folding", option=args.render_option)

now_reward = -100000  # best reward seen so far (used to checkpoint best_traj)
# Outer loop over independent optimization trials.
for ww in range(args.l, args.r):
    save_path = f"../imgs/traj_opt_fold_{ww}"
    # sys.init_pos = [(random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.0006]
    renderer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    sys.reset()
    sys.mu_cloth_elastic[None] = 5.0
    plot_x = []
    plot_y = []

    # Optionally warm-start from a saved trajectory.
    if args.load_traj is not None:
        agent.traj.from_numpy(np.load(args.load_traj))
        # agent.fix_action(0.015)
    adam.reset()
    for i in range(args.iter):
        render = i % 10 == 0  # render every 10th iteration
        print("iter: ", i)
        analy_grad.copy_pos(sys, 0)
        start_time = time.time()
        # Dump the initial cloth mesh once per trial.
        if i == 0:
            path = os.path.join(save_path, f"cloth_{0}.ply")
            readfile.save_cloth_mesh(sys.cloths[0], path)
        if render:
            renderer.render("0")
        # Forward rollout: apply the agent's action each frame, step the
        # simulation, and record positions for the backward pass.
        for frame in range(1, tot_timestep):
            agent.get_action(frame)
            sys.action(frame, agent.delta_pos, agent.delta_rot)
            sys.time_step(projection_query, frame)
            analy_grad.copy_pos(sys, frame)
            if render:
                renderer.render(str(frame))
            if i == 0:
                path = os.path.join(save_path, f"cloth_{frame}.ply")
                readfile.save_cloth_mesh(sys.cloths[0], path)

        end_time = time.time()
        print("tot_time:", end_time - start_time)
        tot_reward = sys.compute_reward(args.curve7, args.curve8)

        plot_x.append(i)
        plot_y.append(tot_reward)
        print("total_reward:", plot_y)
        # Checkpoint the best trajectory and the reward curve so far.
        if tot_reward > now_reward:
            now_reward = tot_reward
            np.save(os.path.join(save_path, "best_traj.npy"), agent.traj.to_numpy())
        np.save(os.path.join(save_path, "plot_data.npy"), np.array(plot_y))

        if render:
            renderer.end_rendering(i)

        # Backward pass: seed the folding loss, then back-propagate
        # through the rollout frame by frame.
        analy_grad.get_loss_fold(sys, args.curve7, args.curve8)

        # BUG-RISK FIX: use `j` so the outer iteration counter `i` is
        # not clobbered (matches trajopt_bouncing.py).
        for j in range(tot_timestep - 1, 0, -1):
            analy_grad.transfer_grad(j, sys, projection_query)
        print("done grad")
        sys.reset()
        adam.step(agent.traj, analy_grad.gripper_grad)
        agent.fix_action(0.015)  # re-clamp actions to the per-step limit
        analy_grad.reset()
    # Reward curve for this trial.
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, f"plot.png"))
143 |
--------------------------------------------------------------------------------
/code/training/trajopt_forming.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | # import torch
3 | import time
4 | # from PIL import Image
5 | import numpy as np
6 | import torch
7 | import os
8 | from thinshelllab.agent.traj_opt_single import agent_trajopt
9 | from thinshelllab.optimizer.optim import Adam_single
10 | import random
11 | from argparse import ArgumentParser
12 | import matplotlib.pyplot as plt
# Command-line options for the forming trajectory optimization.
parser = ArgumentParser()
# range of trials to run: [l, r)
parser.add_argument('--l', type=int, default=0)
parser.add_argument('--r', type=int, default=5)
# optimization iterations and Adam learning rate
parser.add_argument('--iter', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
# simulation steps per rollout
parser.add_argument('--tot_step', type=int, default=5)
# .npy file with the target cloth configuration; when omitted, the
# script records the final cloth positions instead of optimizing
parser.add_argument('--target_dir', type=str, default=None)
# optional .npy trajectory to warm-start the optimization
parser.add_argument('--load_traj', type=str, default=None)
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()
23 |
# Taichi must be initialized before the scene modules below are imported,
# since they define Taichi fields/kernels at import time.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_forming import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine import readfile
from thinshelllab.engine.analytic_grad_single import Grad

# Mesh bookkeeping constants (M cloth pieces of N x N vertices).
# NOTE(review): not used further in this script; the Scene builds its own mesh.
M = 2
N = 16
k_spring = 1000
nv = M * (N ** 2)
nf = M * 2 * (N - 1) ** 2
# nv = (N**2)
# nf = 2*(N-1)**2
total_m = 0.005 * M
h = 0.005

tot_timestep = args.tot_step
# Larger cloth and a fixed bending stiffness for the forming task.
sys = Scene(cloth_size=0.1)
sys.cloths[0].Kb[None] = 200.0
analy_grad = Grad(sys, tot_timestep, sys.elastic_cnt - 1)
adam = Adam_single((tot_timestep, sys.elastic_cnt - 1, 6), args.lr, 0.9, 0.9999, 1e-8)
agent = agent_trajopt(args.tot_step, sys.elastic_cnt - 1, max_moving_dist=0.001)
sys.init_all()
analy_grad.init_mass(sys)
renderer = Renderer(sys, "forming", option=args.render_option)

now_reward = -10000  # best reward seen so far (used to checkpoint best_traj)
# Target cloth configuration to form towards (absent in recording mode).
if args.target_dir is not None:
    target_pos = np.load(args.target_dir)
58 |
# Outer loop over independent optimization trials.
for ww in range(args.l, args.r):
    save_path = f"../imgs/traj_opt_forming_{ww}"
    # sys.init_pos = [(random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.0006]
    renderer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    sys.reset()
    sys.mu_cloth_elastic[None] = 5.0
    plot_x = []
    plot_y = []

    # Optionally warm-start from a saved trajectory and clamp it to the
    # per-step action limit.
    if args.load_traj is not None:
        agent.traj.from_numpy(np.load(args.load_traj))
        agent.fix_action(0.015)
    adam.reset()
    pos_save_dir = f"../data/forming_pos_save"
    # os.mkdir(pos_save_dir)
    for i in range(args.iter):
        render = False
        if i % 20 == 0:
            render = True
        print("iter: ", i)
        analy_grad.copy_pos(sys, 0)
        obs_list = []
        action_list = []
        start_time = time.time()
        if render:
            renderer.render("0")
        # Forward rollout: apply the agent's action each frame, step the
        # simulation, and record positions for the backward pass.
        for frame in range(1, tot_timestep):
            # print("frame:", frame)
            agent.get_action(frame)
            # sys.get_observation()
            sys.action(frame, agent.delta_pos, agent.delta_rot)
            # agent.get_action_field(frame)
            # obs_list.append(sys.observation.to_torch('cpu'))
            # action_list.append(agent.tmp_action.to_torch('cpu'))
            sys.time_step(projection_query, frame)
            # np_pos = sys.cloths[0].pos.to_numpy()
            # np.save(os.path.join(pos_save_dir, f"{frame}.npy"), np_pos)
            analy_grad.copy_pos(sys, frame)
            if render:
                renderer.render(str(frame))

        # for ii in range(4):
        #     print(ii, sys.cloths[0].pos[ii])
        # Recording mode: without a target, save the final cloth
        # configuration (used to generate the target for later runs).
        # NOTE(review): in this mode `tot_reward` and `target_pos` are
        # never defined, so the reward/backprop code below will raise a
        # NameError — presumably recording is meant to be a single pass;
        # confirm intended usage.
        if args.target_dir is None:
            np_pos = sys.cloths[0].pos.to_numpy()
            np.save(os.path.join(pos_save_dir, f"cloth_pos.npy"), np_pos)
        else:
            end_time = time.time()
            print("tot_time:", end_time - start_time)
            tot_reward = sys.compute_reward(target_pos)
        # if tot_reward > now_reward:
        #     now_reward = tot_reward
        #     data_path = f"../data/data_traj_opt_fold_{ww}"
        #     if not os.path.exists(data_path):
        #         os.mkdir(data_path)
        #     for frame in range(tot_timestep - 1):
        #         torch.save({
        #             'obs': obs_list[frame],
        #             'action': action_list[frame]
        #         }, os.path.join(data_path, f"data_{frame + 1}"))

        plot_x.append(i)
        plot_y.append(tot_reward)
        print("total_reward:", plot_y)
        # Checkpoint the best trajectory and the reward curve so far.
        if tot_reward > now_reward:
            now_reward = tot_reward
            np.save(os.path.join(save_path, "best_traj.npy"), agent.traj.to_numpy())
        np.save(os.path.join(save_path, "plot_data.npy"), np.array(plot_y))

        if render:
            renderer.end_rendering(i)

        # Backward pass: seed the push loss, then back-propagate
        # through the rollout frame by frame.
        analy_grad.get_loss_push(sys, target_pos)

        # NOTE(review): this inner loop reuses the name `i`, clobbering
        # the outer iteration counter (harmless here, but fragile —
        # trajopt_bouncing uses `j` for the same loop).
        for i in range(tot_timestep - 1, 0, -1):
            # print("back prop step:", i)
            analy_grad.transfer_grad(i, sys, projection_query)
        print("done grad")
        sys.reset()
        adam.step(agent.traj, analy_grad.gripper_grad)
        agent.fix_action(0.015)
        analy_grad.reset()
    # agent.print_traj()
    # Reward curve for this trial.
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, f"plot.png"))
--------------------------------------------------------------------------------
/code/training/trajopt_interact.py:
--------------------------------------------------------------------------------
1 | import taichi as ti
2 | import time
3 | import numpy as np
4 | import torch
5 | import imageio
6 | import os
7 | from thinshelllab.agent.traj_opt_single import agent_trajopt
8 | from thinshelllab.optimizer.optim import Adam_single
9 | import random
10 | from argparse import ArgumentParser
11 | import matplotlib.pyplot as plt
12 |
# Command-line options for the interact trajectory optimization.
parser = ArgumentParser()
# range of trials to run: [l, r)
parser.add_argument('--l', type=int, default=0)
parser.add_argument('--r', type=int, default=5)
# optimization iterations and Adam learning rate
parser.add_argument('--iter', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
# simulation steps per rollout
parser.add_argument('--tot_step', type=int, default=5)
# use the separation variant of the reward/loss
parser.add_argument('--sep', action="store_true", default=False)
# cloth bending stiffness and cloth-elastic friction coefficient
parser.add_argument('--Kb', type=float, default=100)
parser.add_argument('--mu', type=float, default=5.0)
# discount factor passed to the Adam optimizer
parser.add_argument('--discount', type=float, default=0.9)
# optional .npy trajectory to warm-start the optimization
parser.add_argument('--load_traj', type=str, default=None)
# soft-body variant and its density
parser.add_argument('--soft', action="store_true", default=False)
parser.add_argument('--dense', type=float, default=10000.0)
# render every `render` iterations, with the Taichi or Luisa backend
parser.add_argument('--render', type=int, default=10)
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()
29 |
# Taichi must be initialized before the scene modules below are imported,
# since they define Taichi fields/kernels at import time.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_interact import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine.analytic_grad_single import Grad

# Mesh bookkeeping constants (M cloth pieces of N x N vertices).
# NOTE(review): not used further in this script; the Scene builds its own mesh.
M = 2
N = 16
k_spring = 1000
nv = M * (N ** 2)
nf = M * 2 * (N - 1) ** 2
# nv = (N**2)
# nf = 2*(N-1)**2
total_m = 0.005 * M
h = 0.005

tot_timestep = args.tot_step
sys = Scene(cloth_size=0.06, soft=args.soft, dense=args.dense)
sys.cloths[0].Kb[None] = args.Kb
if args.soft:
    sys.cloths[0].Kb[None] = 0.1  # soft variant overrides the --Kb flag
# Number of controllable grippers: one per elastic body (minus one),
# or half the effector count when paired gripper jaws are enabled.
gripper_cnt = sys.elastic_cnt - 1
if sys.enable_gripper:
    gripper_cnt = int((sys.effector_cnt - 1) // 2)

# Analytic-gradient backend, Adam optimizer with discounting, and the
# trajopt agent (larger per-step motion limit than the other tasks).
analy_grad = Grad(sys, tot_timestep, gripper_cnt)
adam = Adam_single((tot_timestep, gripper_cnt, 6), args.lr, 0.9, 0.9999, 1e-8, discount=args.discount)
agent = agent_trajopt(tot_timestep, gripper_cnt, max_moving_dist=0.002)
sys.init_all()
analy_grad.init_mass(sys)
renderer = Renderer(sys, "interact", option=args.render_option)

now_reward = -100000  # best reward seen so far (used to checkpoint best_traj)
# Outer loop over independent optimization trials.
for ww in range(args.l, args.r):
    save_path = f"../imgs/traj_opt_interact_{ww}"
    # sys.init_pos = [(random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.002, (random.random() - 0.5) * 0.0006]
    renderer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    sys.reset()
    sys.mu_cloth_elastic[None] = args.mu
    plot_x = []
    plot_y = []
    # Warm-start from a saved trajectory. BUG FIX: the loaded array was
    # previously bound to an unused local and never applied to the agent;
    # every sibling trajopt script calls agent.traj.from_numpy with it.
    if args.load_traj is not None:
        agent.traj.from_numpy(np.load(args.load_traj))
        agent.fix_action(0.015)
    adam.reset()

    # Report the reward of the untouched initial state.
    if not args.sep:
        tot_reward = sys.compute_reward_1()
    else:
        tot_reward = sys.compute_reward()
    print("init reward:", tot_reward)

    for i in range(args.iter):
        render = i % args.render == 0
        print("iter: ", i)
        analy_grad.copy_pos(sys, 0)
        start_time = time.time()
        if render:
            renderer.render("0")
        # Forward rollout: apply the agent's action each frame, step the
        # simulation, and record positions for the backward pass.
        for frame in range(1, tot_timestep):
            agent.get_action(frame)
            sys.action(frame, agent.delta_pos, agent.delta_rot)
            sys.time_step(projection_query, frame)
            analy_grad.copy_pos(sys, frame)
            if render:
                renderer.render(str(frame))

        end_time = time.time()
        print("tot_time:", end_time - start_time)

        if not args.sep:
            tot_reward = sys.compute_reward_1()
        else:
            tot_reward = sys.compute_reward()

        plot_x.append(i)
        plot_y.append(tot_reward)
        print("total_reward:", plot_y)
        # Checkpoint the best trajectory and the reward curve so far.
        if tot_reward > now_reward:
            now_reward = tot_reward
            np.save(os.path.join(save_path, "best_traj.npy"), agent.traj.to_numpy())
        np.save(os.path.join(save_path, "plot_data.npy"), np.array(plot_y))
        # For the separation task, plot the first gripper's x-trajectory.
        if args.sep:
            xx = []
            yy = []
            minx = 0.0
            for a in range(tot_timestep):
                minx = ti.min(agent.traj[a, 0, 0], minx)
                xx.append(a)
                yy.append(agent.traj[a, 0, 0])
            print("min traj x:", minx)
            plt.clf()
            plt.plot(xx, yy)
            plt.savefig(os.path.join(save_path, f"move_{i}.png"))
            plt.clf()

        if render:
            renderer.end_rendering(i)

        # Backward pass: seed the task loss, then back-propagate through
        # the rollout; frames 1..5 are excluded from gradient transfer,
        # as in the original schedule.
        if not args.sep:
            analy_grad.get_loss_interact_1(sys)
        else:
            analy_grad.get_loss_interact(sys)

        # BUG-RISK FIX: use `j` so the outer iteration counter `i` is
        # not clobbered (matches trajopt_bouncing.py).
        for j in range(tot_timestep - 1, 5, -1):
            analy_grad.transfer_grad(j, sys, projection_query)
        print("done grad")
        sys.reset()
        analy_grad.apply_action_limit_grad(agent, 0.015)
        # analy_grad.apply_gripper_pos_grad_interact(agent)
        adam.step(agent.traj, analy_grad.gripper_grad)
        # agent.fix_action(0.015)
        analy_grad.reset()
    # Reward curve for this trial.
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, f"plot.png"))
--------------------------------------------------------------------------------
/code/training/trajopt_lifting.py:
--------------------------------------------------------------------------------
"""Trajectory optimization for the cloth lifting task.

For each trial in [--l, --r) the script rolls the gripper trajectory through
the differentiable simulator, scores it with the lifting reward,
backpropagates analytic gradients through every timestep, and updates the
trajectory with Adam.  The best trajectory and the reward curve are written
to ../imgs/traj_opt_lift_<trial>.
"""
import taichi as ti
import time
import numpy as np
import torch
import os
import random
from argparse import ArgumentParser
import matplotlib.pyplot as plt

# Absolute imports for consistency with the sibling trajopt scripts
# (trajopt_pick_fold.py, trajopt_silding.py), which all import via the
# installed `thinshelllab` package rather than relative paths.
from thinshelllab.agent.traj_opt_single import agent_trajopt
from thinshelllab.optimizer.optim import Adam_single

parser = ArgumentParser()
parser.add_argument('--l', type=int, default=0)  # first trial index
parser.add_argument('--r', type=int, default=5)  # one-past-last trial index
parser.add_argument('--iter', type=int, default=10)  # optimization iterations per trial
parser.add_argument('--lr', type=float, default=0.001)  # Adam learning rate
parser.add_argument('--tot_step', type=int, default=5)  # simulated timesteps per rollout
parser.add_argument('--Kb', type=float, default=100)  # cloth bending stiffness
parser.add_argument('--mu', type=float, default=1.0)  # cloth/elastic friction coefficient
parser.add_argument('--load_traj', type=str, default=None)  # optional .npy warm-start trajectory
parser.add_argument('--mode', type=str, default="grad")  # "grad": record per-frame state for backprop
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()

# Taichi must be initialized before the thinshelllab scene modules below
# allocate their fields.
ti.init(ti.gpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_lifting import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine.analytic_grad_single import Grad

# Mesh bookkeeping constants (kept for reference; the Scene builds its own mesh).
M = 2
N = 16
k_spring = 1000
nv = M * (N ** 2)  # vertex count
nf = M * 2 * (N - 1) ** 2  # face count
total_m = 0.005 * M
h = 0.005

tot_timestep = args.tot_step
sys = Scene(cloth_size=0.06)
sys.cloths[0].Kb[None] = args.Kb
analy_grad = Grad(sys, tot_timestep, sys.elastic_cnt - 1)
adam = Adam_single((tot_timestep, sys.elastic_cnt - 1, 6), args.lr, 0.9, 0.9999, 1e-8)
agent = agent_trajopt(tot_timestep, sys.elastic_cnt - 1, max_moving_dist=0.001)
sys.init_all()
analy_grad.init_mass(sys)
renderer = Renderer(sys, "lifting", option=args.render_option)

now_reward = -100000  # best reward seen so far, shared across all trials
for ww in range(args.l, args.r):
    save_path = f"../imgs/traj_opt_lift_{ww}"
    renderer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    sys.reset()
    sys.mu_cloth_elastic[None] = args.mu
    plot_x = []
    plot_y = []
    if args.load_traj is not None:
        agent.traj.from_numpy(np.load(args.load_traj))
        agent.fix_action(0.015)
    adam.reset()

    for i in range(args.iter):
        # NOTE(review): `render` is never enabled in this script, so only the
        # initial state is ever drawn.  Gate it on a CLI flag (as in
        # trajopt_pick_fold.py) to record full rollouts.
        render = False
        print("iter: ", i)
        analy_grad.copy_pos(sys, 0)
        start_time = time.time()

        renderer.render("0")
        for frame in range(1, tot_timestep):
            agent.get_action(frame)
            sys.action(frame, agent.delta_pos, agent.delta_rot)
            sys.time_step(projection_query, frame)
            if args.mode == "grad":
                # Snapshot positions so the backward pass can replay this frame.
                analy_grad.copy_pos(sys, frame)
            if render:
                renderer.render(str(frame))

        # Fixed: the timing measurement and print were duplicated here.
        end_time = time.time()
        print("tot_time:", end_time - start_time)
        tot_reward = sys.compute_reward()

        plot_x.append(i)
        plot_y.append(tot_reward)
        print("total_reward:", plot_y)
        if tot_reward > now_reward:
            # New best trajectory: persist it together with the reward curve.
            now_reward = tot_reward
            np.save(os.path.join(save_path, "best_traj.npy"), agent.traj.to_numpy())
            np.save(os.path.join(save_path, "plot_data.npy"), np.array(plot_y))

        if render:
            renderer.end_rendering(i)

        analy_grad.get_loss_lift(sys)

        # Backpropagate through time, newest frame first.  `back_step` replaces
        # the original `i`, which shadowed the outer iteration variable.
        for back_step in range(tot_timestep - 1, 0, -1):
            analy_grad.transfer_grad(back_step, sys, projection_query)
            # NOTE(review): unlike the sibling scripts this clips the action
            # gradient on every backprop step rather than once after the
            # loop -- confirm this is intentional.
            analy_grad.apply_action_limit_grad(agent, 0.015)
        print("done grad")
        sys.reset()
        adam.step(agent.traj, analy_grad.gripper_grad)
        analy_grad.reset()
    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, "plot.png"))
--------------------------------------------------------------------------------
/code/training/trajopt_pick_fold.py:
--------------------------------------------------------------------------------
"""Gradient-based trajectory optimization for the pick-and-fold task.

Each trial rolls the gripper trajectory through the differentiable
simulator, evaluates the pick-fold reward, backpropagates analytic
gradients, and takes an Adam step on the trajectory.  Results land in
../imgs/traj_opt_pick_fold_<trial>.
"""
import taichi as ti
import time
import numpy as np
import torch
import os
from thinshelllab.agent.traj_opt_single import agent_trajopt
from thinshelllab.optimizer.optim import Adam_single
import random
from argparse import ArgumentParser
import matplotlib.pyplot as plt

parser = ArgumentParser()
parser.add_argument('--l', type=int, default=0)
parser.add_argument('--r', type=int, default=5)
parser.add_argument('--iter', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--tot_step', type=int, default=5)
parser.add_argument('--load_traj', type=str, default=None)
parser.add_argument('--render', type=int, default=10)
parser.add_argument('--max_dist', type=float, default=0.001)
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()

# Taichi has to be up before the scene modules below allocate their fields.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_pick import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine.analytic_grad_single import Grad

# Mesh bookkeeping constants.
M = 2
N = 16
k_spring = 1000
nv = M * (N ** 2)
nf = M * 2 * (N - 1) ** 2
total_m = 0.005 * M
h = 0.005

tot_timestep = args.tot_step
scene = Scene(cloth_size=0.06)
scene.cloths[0].Kb[None] = 200.0
backprop = Grad(scene, tot_timestep, scene.elastic_cnt - 1)
optimizer = Adam_single((tot_timestep, scene.elastic_cnt - 1, 6), args.lr, 0.9, 0.9999, 1e-8)
gripper = agent_trajopt(tot_timestep, scene.elastic_cnt - 1, max_moving_dist=args.max_dist)
scene.init_all()
backprop.init_mass(scene)
viewer = Renderer(scene, "pick", option=args.render_option)

best_reward = -100000  # best reward seen so far, shared across all trials
for trial in range(args.l, args.r):
    save_path = f"../imgs/traj_opt_pick_fold_{trial}"
    viewer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    scene.reset()
    scene.mu_cloth_elastic[None] = 10.0
    reward_steps = []
    reward_values = []
    if args.load_traj is None:
        gripper.init_traj_pick_fold()
    else:
        gripper.traj.from_numpy(np.load(args.load_traj))
    optimizer.reset()

    for it in range(args.iter):
        print("iter: ", it)
        # Record frames only every --render iterations.
        render = it % args.render == 0
        backprop.copy_pos(scene, 0)
        obs_list = []
        action_list = []
        start_time = time.time()
        if render:
            viewer.render("0")
        for t in range(1, tot_timestep):
            gripper.get_action(t)
            scene.action(t, gripper.delta_pos, gripper.delta_rot)
            scene.time_step(projection_query, t)
            # Snapshot positions so the backward pass can replay this frame.
            backprop.copy_pos(scene, t)
            if render:
                viewer.render(str(t))

        end_time = time.time()
        print("tot_time:", end_time - start_time)
        tot_reward = scene.compute_reward_pick_fold()

        reward_steps.append(it)
        reward_values.append(tot_reward)
        print("total_reward:", reward_values)
        if tot_reward > best_reward:
            # New best trajectory: persist it together with the reward curve.
            best_reward = tot_reward
            np.save(os.path.join(save_path, "best_traj.npy"), gripper.traj.to_numpy())
            np.save(os.path.join(save_path, "plot_data.npy"), np.array(reward_values))
        if render:
            viewer.end_rendering(it)

        backprop.get_loss_pick_fold(scene)

        # Backpropagate through time, newest frame first (stops at step 9).
        for back_step in range(tot_timestep - 1, 8, -1):
            backprop.transfer_grad(back_step, scene, projection_query)
        print("done grad")
        backprop.apply_action_limit_grad(gripper, 0.015)
        scene.reset()
        optimizer.step(gripper.traj, backprop.gripper_grad)
        backprop.reset()
    plt.plot(reward_steps, reward_values)
    plt.savefig(os.path.join(save_path, "plot.png"))
--------------------------------------------------------------------------------
/code/training/trajopt_silding.py:
--------------------------------------------------------------------------------
"""System identification for the sliding task.

Unlike the other trajopt scripts this one keeps the gripper trajectory
fixed (``init_traj_slide``) and instead fits the cloth-cloth friction
coefficient: each iteration rolls the trajectory out, backpropagates the
sliding loss, and updates ``mu_cloth_cloth`` by plain gradient descent.
Reward curves are written to ../imgs/traj_opt_slide_<trial>.
"""
import taichi as ti
import time
import numpy as np
import torch
import os
import random
from argparse import ArgumentParser
import matplotlib.pyplot as plt

parser = ArgumentParser()
parser.add_argument('--l', type=int, default=0)  # first trial index
parser.add_argument('--r', type=int, default=5)  # one-past-last trial index
parser.add_argument('--iter', type=int, default=10)  # gradient-descent iterations per trial
parser.add_argument('--lr', type=float, default=0.001)  # learning rate for the friction update
parser.add_argument('--tot_step', type=int, default=5)  # simulated timesteps per rollout
parser.add_argument('--mu', type=float, default=0.001)  # initial cloth-cloth friction coefficient
parser.add_argument('--render_option', type=str, default="Taichi")
args = parser.parse_args()

# Taichi must be initialized before the thinshelllab scene modules below
# allocate their fields.
ti.init(ti.cpu, device_memory_fraction=0.5, default_fp=ti.f64, default_ip=ti.i32, fast_math=False,
        offline_cache=True, offline_cache_max_size_of_files=1024 ** 3,
        offline_cache_cleaning_policy='version')

from thinshelllab.task_scene.Scene_sliding import Scene, Body
from thinshelllab.engine.geometry import projection_query
from thinshelllab.engine.render_engine import Renderer
from thinshelllab.engine import linalg
from thinshelllab.engine.analytic_grad_system import Grad
from thinshelllab.agent.traj_opt_single import agent_trajopt

# Mesh bookkeeping constants (kept for reference; the Scene builds its own mesh).
M = 2
N = 16
k_spring = 1000
nv = M * (N ** 2)  # vertex count
nf = M * 2 * (N - 1) ** 2  # face count
total_m = 0.005 * M
h = 0.005

tot_timestep = args.tot_step
sys = Scene(cloth_size=0.06)
sys.cloths[0].Kb[None] = 1000.0
sys.mu_cloth_cloth[None] = args.mu
analy_grad = Grad(sys, tot_timestep, sys.elastic_cnt - 1)
analy_grad.count_friction_grad = True  # only the friction coefficient is fitted
analy_grad.count_kb_grad = False
agent = agent_trajopt(tot_timestep, sys.elastic_cnt - 1, max_moving_dist=0.001)
sys.init_all()
analy_grad.init_mass(sys)
renderer = Renderer(sys, "sliding", option=args.render_option)

# Fixed: removed the unused `now_reward` best-tracker (this script keeps no
# best trajectory) and the dead obs/action buffers.
for ww in range(args.l, args.r):
    save_path = f"../imgs/traj_opt_slide_{ww}"
    renderer.set_save_dir(save_path)
    print(f"Saving Path: {save_path}")

    sys.reset()
    sys.mu_cloth_elastic[None] = 1.0
    plot_x = []
    plot_y = []
    agent.init_traj_slide()
    agent.fix_action(0.015)
    mu_list = []  # friction-coefficient estimate per iteration
    for i in range(args.iter):
        print("iter: ", i)
        analy_grad.copy_pos(sys, 0)
        start_time = time.time()

        renderer.render("0")
        for frame in range(1, tot_timestep):
            agent.get_action(frame)
            sys.action(frame, agent.delta_pos, agent.delta_rot)
            sys.time_step(projection_query, frame)
            # Snapshot positions so the backward pass can replay this frame.
            analy_grad.copy_pos(sys, frame)
            renderer.render(str(frame))

        end_time = time.time()
        print("tot_time:", end_time - start_time)
        tot_reward = sys.compute_reward()

        plot_x.append(i)
        plot_y.append(tot_reward)
        np.save(os.path.join(save_path, "plot_data.npy"), np.array(plot_y))
        print("total_reward:", plot_y)
        renderer.end_rendering(i)

        analy_grad.get_loss_slide(sys)

        # Backpropagate through time, newest frame first.  `back_step`
        # replaces the original `i`, which shadowed the iteration variable.
        for back_step in range(tot_timestep - 1, 0, -1):
            analy_grad.transfer_grad(back_step, sys, projection_query)
        print("done grad")
        mu_list.append(sys.mu_cloth_cloth[None])
        print("mu: ", mu_list)
        print("friction grad:", analy_grad.grad_friction_coef[None] * args.lr)
        # NOTE(review): plain unclamped descent -- a large gradient can push
        # the coefficient negative; consider clamping to >= 0.
        sys.mu_cloth_cloth[None] -= analy_grad.grad_friction_coef[None] * args.lr
        sys.reset()
        analy_grad.reset()

    plt.plot(plot_x, plot_y)
    plt.savefig(os.path.join(save_path, "plot.png"))
--------------------------------------------------------------------------------
/data/balance_state/F_x_lower.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/F_x_lower.npy
--------------------------------------------------------------------------------
/data/balance_state/F_x_lower_world.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/F_x_lower_world.npy
--------------------------------------------------------------------------------
/data/balance_state/F_x_upper.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/F_x_upper.npy
--------------------------------------------------------------------------------
/data/balance_state/F_x_upper_world.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/F_x_upper_world.npy
--------------------------------------------------------------------------------
/data/balance_state/border_flag.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/border_flag.npy
--------------------------------------------------------------------------------
/data/balance_state/half_gripper_dist.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/half_gripper_dist.npy
--------------------------------------------------------------------------------
/data/balance_state/pos.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/pos.npy
--------------------------------------------------------------------------------
/data/balance_state/proj_dir.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/proj_dir.npy
--------------------------------------------------------------------------------
/data/balance_state/proj_flag.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/proj_flag.npy
--------------------------------------------------------------------------------
/data/balance_state/rot.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/rot.npy
--------------------------------------------------------------------------------
/data/balance_state/rotmat.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/rotmat.npy
--------------------------------------------------------------------------------
/data/balance_state/state:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/balance_state/state
--------------------------------------------------------------------------------
/data/ball.ele:
--------------------------------------------------------------------------------
1 | 295 4 0
2 | 0 95 20 21 13
3 | 1 93 25 33 34
4 | 2 94 11 10 18
5 | 3 96 35 25 34
6 | 4 96 15 95 28
7 | 5 95 5 12 13
8 | 6 89 59 66 67
9 | 7 93 27 20 26
10 | 8 98 78 97 79
11 | 9 93 26 19 25
12 | 10 96 41 91 42
13 | 11 92 54 60 61
14 | 12 86 22 31 32
15 | 13 90 75 68 74
16 | 14 88 52 45 44
17 | 15 86 37 31 43
18 | 16 94 9 10 2
19 | 17 96 33 93 86
20 | 18 99 6 95 81
21 | 19 93 24 19 18
22 | 20 97 1 88 0
23 | 21 97 65 89 88
24 | 22 93 19 12 18
25 | 23 94 17 87 18
26 | 24 99 85 95 94
27 | 25 96 27 93 26
28 | 26 92 61 69 62
29 | 27 98 75 90 74
30 | 28 88 57 64 58
31 | 29 96 25 93 34
32 | 30 97 8 2 1
33 | 31 90 69 68 75
34 | 32 86 37 43 44
35 | 33 92 62 55 54
36 | 34 99 5 95 6
37 | 35 94 87 93 18
38 | 36 86 46 39 38
39 | 37 91 39 86 46
40 | 38 95 7 81 6
41 | 39 98 81 75 74
42 | 40 97 87 94 8
43 | 41 92 63 49 55
44 | 42 86 32 39 33
45 | 43 86 44 45 37
46 | 44 90 82 75 81
47 | 45 96 34 86 39
48 | 46 59 58 89 66
49 | 47 95 15 83 84
50 | 48 93 19 20 12
51 | 49 92 53 88 52
52 | 50 88 57 30 0
53 | 51 86 32 31 38
54 | 52 97 8 94 2
55 | 53 86 37 45 38
56 | 54 96 86 91 39
57 | 55 97 1 71 64
58 | 56 86 38 31 37
59 | 57 96 39 91 40
60 | 58 88 64 71 65
61 | 59 88 43 86 30
62 | 60 86 31 30 43
63 | 61 97 1 2 77
64 | 62 93 25 24 33
65 | 63 95 84 83 82
66 | 64 96 36 35 42
67 | 65 86 33 39 34
68 | 66 97 65 88 71
69 | 67 93 86 87 85
70 | 68 88 44 43 51
71 | 69 97 71 78 72
72 | 70 98 79 74 73
73 | 71 99 94 95 5
74 | 72 92 85 90 89
75 | 73 97 88 94 87
76 | 74 96 34 39 40
77 | 75 96 36 27 26
78 | 76 96 85 91 86
79 | 77 91 42 41 49
80 | 78 95 21 93 28
81 | 79 97 2 3 77
82 | 80 97 64 88 1
83 | 81 96 50 85 15
84 | 82 98 81 90 75
85 | 83 98 74 90 89
86 | 84 95 12 20 13
87 | 85 88 85 87 86
88 | 86 92 63 90 56
89 | 87 98 74 80 81
90 | 88 92 47 53 54
91 | 89 99 89 98 90
92 | 90 96 86 93 85
93 | 91 94 87 88 85
94 | 92 92 70 63 62
95 | 93 92 54 55 47
96 | 94 92 49 91 55
97 | 95 99 89 90 85
98 | 96 96 85 95 15
99 | 97 99 80 98 79
100 | 98 92 62 54 61
101 | 99 98 72 79 73
102 | 100 95 83 90 82
103 | 101 96 34 41 35
104 | 102 92 53 91 88
105 | 103 92 52 88 59
106 | 104 97 0 87 8
107 | 105 94 8 87 9
108 | 106 94 2 10 3
109 | 107 94 85 93 87
110 | 108 96 34 93 33
111 | 109 88 57 0 64
112 | 110 96 42 49 50
113 | 111 96 26 25 35
114 | 112 96 29 15 28
115 | 113 90 56 83 15
116 | 114 98 89 97 72
117 | 115 99 4 80 79
118 | 116 90 82 83 76
119 | 117 99 81 98 80
120 | 118 96 29 36 42
121 | 119 89 74 68 73
122 | 120 86 38 45 46
123 | 121 86 38 39 32
124 | 122 99 90 98 81
125 | 123 96 35 41 42
126 | 124 97 0 88 87
127 | 125 97 4 79 78
128 | 126 88 43 30 57
129 | 127 94 17 10 9
130 | 128 99 97 98 89
131 | 129 92 47 91 53
132 | 130 88 30 87 0
133 | 131 65 89 88 58
134 | 132 7 6 95 14
135 | 133 88 51 57 58
136 | 134 91 48 41 40
137 | 135 93 28 21 27
138 | 136 92 60 89 67
139 | 137 97 1 77 71
140 | 138 97 72 89 71
141 | 139 92 53 60 54
142 | 140 90 15 85 56
143 | 141 97 71 89 65
144 | 142 95 84 7 14
145 | 143 98 73 89 72
146 | 144 93 18 87 24
147 | 145 96 40 91 41
148 | 146 99 82 95 90
149 | 147 88 64 65 58
150 | 148 92 69 90 62
151 | 149 96 33 86 34
152 | 150 92 67 89 68
153 | 151 96 29 27 36
154 | 152 99 79 97 4
155 | 153 94 8 9 2
156 | 154 94 18 10 17
157 | 155 87 33 24 23
158 | 156 99 81 95 82
159 | 157 96 28 27 29
160 | 158 99 81 80 6
161 | 159 99 6 80 5
162 | 160 97 0 8 1
163 | 161 95 14 15 84
164 | 162 97 71 88 64
165 | 163 89 66 72 73
166 | 164 96 93 95 85
167 | 165 92 63 55 62
168 | 166 98 78 79 72
169 | 167 88 43 57 51
170 | 168 91 52 53 45
171 | 169 92 50 91 49
172 | 170 99 85 97 89
173 | 171 87 0 30 8
174 | 172 87 30 86 31
175 | 173 97 3 94 4
176 | 174 89 66 65 72
177 | 175 99 82 90 81
178 | 176 96 49 91 50
179 | 177 95 12 93 20
180 | 178 92 67 68 61
181 | 179 95 12 94 93
182 | 180 91 48 40 47
183 | 181 92 56 90 85
184 | 182 92 55 91 47
185 | 183 92 62 90 70
186 | 184 92 52 60 53
187 | 185 92 50 49 63
188 | 186 97 85 94 88
189 | 187 88 58 59 51
190 | 188 87 18 17 24
191 | 189 94 18 12 11
192 | 190 94 11 5 4
193 | 191 94 3 10 11
194 | 192 92 89 90 68
195 | 193 96 50 15 29
196 | 194 88 51 59 52
197 | 195 99 79 98 97
198 | 196 90 70 83 56
199 | 197 90 68 89 74
200 | 198 65 66 89 58
201 | 199 93 24 87 33
202 | 200 95 5 13 6
203 | 201 91 49 48 55
204 | 202 90 62 69 70
205 | 203 96 42 50 29
206 | 204 95 5 94 12
207 | 205 90 75 76 69
208 | 206 93 33 87 86
209 | 207 87 33 23 32
210 | 208 95 20 93 21
211 | 209 95 13 21 14
212 | 210 87 22 30 31
213 | 211 95 84 82 7
214 | 212 97 78 3 4
215 | 213 87 22 86 32
216 | 214 96 40 41 34
217 | 215 96 28 95 93
218 | 216 91 46 86 45
219 | 217 99 4 97 94
220 | 218 91 45 53 46
221 | 219 91 40 46 47
222 | 220 90 82 76 75
223 | 221 92 56 50 63
224 | 222 95 85 90 15
225 | 223 91 55 48 47
226 | 224 94 11 12 5
227 | 225 87 24 17 23
228 | 226 92 70 90 63
229 | 227 92 68 90 69
230 | 228 92 61 68 69
231 | 229 97 2 94 3
232 | 230 96 26 35 36
233 | 231 85 56 15 50
234 | 232 92 88 91 85
235 | 233 96 50 91 85
236 | 234 96 26 93 25
237 | 235 91 85 88 86
238 | 236 93 26 20 19
239 | 237 92 59 60 52
240 | 238 87 23 16 22
241 | 239 92 56 85 50
242 | 240 92 59 89 60
243 | 241 88 64 0 1
244 | 242 97 77 78 71
245 | 243 95 82 81 7
246 | 244 89 66 73 67
247 | 245 90 56 63 70
248 | 246 95 14 21 28
249 | 247 97 77 3 78
250 | 248 99 94 97 85
251 | 249 90 76 83 70
252 | 250 90 69 76 70
253 | 251 95 93 94 85
254 | 252 89 65 71 72
255 | 253 13 14 95 6
256 | 254 93 27 21 20
257 | 255 99 90 95 85
258 | 256 91 46 53 47
259 | 257 89 67 60 59
260 | 258 87 32 86 33
261 | 259 98 74 89 73
262 | 260 97 88 89 85
263 | 261 92 85 89 88
264 | 262 92 85 91 50
265 | 263 94 18 93 12
266 | 264 87 17 9 16
267 | 265 87 16 9 8
268 | 266 87 31 86 22
269 | 267 87 23 22 32
270 | 268 91 40 39 46
271 | 269 92 61 60 67
272 | 270 94 9 87 17
273 | 271 96 42 91 49
274 | 272 94 3 11 4
275 | 273 91 86 88 45
276 | 274 93 25 19 24
277 | 275 88 86 87 30
278 | 276 88 45 86 44
279 | 277 88 51 52 44
280 | 278 95 15 90 83
281 | 279 98 79 80 74
282 | 280 95 28 15 14
283 | 281 89 73 68 67
284 | 282 92 88 89 59
285 | 283 91 49 41 48
286 | 284 99 4 94 5
287 | 285 87 23 17 16
288 | 286 99 5 80 4
289 | 287 98 72 97 78
290 | 288 87 8 30 16
291 | 289 87 16 30 22
292 | 290 91 52 88 53
293 | 291 91 45 88 52
294 | 292 96 28 93 27
295 | 293 88 89 59 58
296 | 294 88 44 86 43
297 |
--------------------------------------------------------------------------------
/data/ball.face:
--------------------------------------------------------------------------------
1 | 166 0
2 | 0 20 21 13 -1
3 | 1 25 33 34 -1
4 | 2 11 10 18 -1
5 | 3 35 25 34 -1
6 | 4 5 12 13 -1
7 | 5 59 66 67 -1
8 | 6 27 20 26 -1
9 | 7 26 19 25 -1
10 | 8 54 60 61 -1
11 | 9 22 31 32 -1
12 | 10 75 68 74 -1
13 | 11 52 45 44 -1
14 | 12 37 31 43 -1
15 | 13 9 10 2 -1
16 | 14 24 19 18 -1
17 | 15 19 12 18 -1
18 | 16 61 69 62 -1
19 | 17 57 64 58 -1
20 | 18 8 2 1 -1
21 | 19 69 68 75 -1
22 | 20 37 43 44 -1
23 | 21 62 55 54 -1
24 | 22 46 39 38 -1
25 | 23 7 81 6 -1
26 | 24 81 75 74 -1
27 | 25 63 49 55 -1
28 | 26 32 39 33 -1
29 | 27 44 45 37 -1
30 | 28 82 75 81 -1
31 | 29 59 58 66 -1
32 | 30 15 83 84 -1
33 | 31 19 20 12 -1
34 | 32 57 30 0 -1
35 | 33 32 31 38 -1
36 | 34 37 45 38 -1
37 | 35 1 71 64 -1
38 | 36 38 31 37 -1
39 | 37 64 71 65 -1
40 | 38 31 30 43 -1
41 | 39 1 2 77 -1
42 | 40 25 24 33 -1
43 | 41 84 83 82 -1
44 | 42 36 35 42 -1
45 | 43 33 39 34 -1
46 | 44 44 43 51 -1
47 | 45 71 78 72 -1
48 | 46 79 74 73 -1
49 | 47 34 39 40 -1
50 | 48 36 27 26 -1
51 | 49 42 41 49 -1
52 | 50 2 3 77 -1
53 | 51 12 20 13 -1
54 | 52 74 80 81 -1
55 | 53 47 53 54 -1
56 | 54 70 63 62 -1
57 | 55 54 55 47 -1
58 | 56 62 54 61 -1
59 | 57 72 79 73 -1
60 | 58 34 41 35 -1
61 | 59 2 10 3 -1
62 | 60 57 0 64 -1
63 | 61 42 49 50 -1
64 | 62 26 25 35 -1
65 | 63 29 15 28 -1
66 | 64 56 83 15 -1
67 | 65 4 80 79 -1
68 | 66 82 83 76 -1
69 | 67 29 36 42 -1
70 | 68 74 68 73 -1
71 | 69 38 45 46 -1
72 | 70 38 39 32 -1
73 | 71 35 41 42 -1
74 | 72 4 79 78 -1
75 | 73 43 30 57 -1
76 | 74 17 10 9 -1
77 | 75 7 6 14 -1
78 | 76 51 57 58 -1
79 | 77 48 41 40 -1
80 | 78 28 21 27 -1
81 | 79 1 77 71 -1
82 | 80 53 60 54 -1
83 | 81 84 7 14 -1
84 | 82 64 65 58 -1
85 | 83 29 27 36 -1
86 | 84 8 9 2 -1
87 | 85 18 10 17 -1
88 | 86 33 24 23 -1
89 | 87 28 27 29 -1
90 | 88 81 80 6 -1
91 | 89 6 80 5 -1
92 | 90 0 8 1 -1
93 | 91 14 15 84 -1
94 | 92 66 72 73 -1
95 | 93 63 55 62 -1
96 | 94 78 79 72 -1
97 | 95 43 57 51 -1
98 | 96 52 53 45 -1
99 | 97 0 30 8 -1
100 | 98 66 65 72 -1
101 | 99 67 68 61 -1
102 | 100 48 40 47 -1
103 | 101 52 60 53 -1
104 | 102 50 49 63 -1
105 | 103 58 59 51 -1
106 | 104 18 17 24 -1
107 | 105 18 12 11 -1
108 | 106 11 5 4 -1
109 | 107 3 10 11 -1
110 | 108 50 15 29 -1
111 | 109 51 59 52 -1
112 | 110 70 83 56 -1
113 | 111 65 66 58 -1
114 | 112 5 13 6 -1
115 | 113 49 48 55 -1
116 | 114 62 69 70 -1
117 | 115 42 50 29 -1
118 | 116 75 76 69 -1
119 | 117 33 23 32 -1
120 | 118 13 21 14 -1
121 | 119 22 30 31 -1
122 | 120 84 82 7 -1
123 | 121 78 3 4 -1
124 | 122 40 41 34 -1
125 | 123 45 53 46 -1
126 | 124 40 46 47 -1
127 | 125 82 76 75 -1
128 | 126 56 50 63 -1
129 | 127 55 48 47 -1
130 | 128 11 12 5 -1
131 | 129 24 17 23 -1
132 | 130 61 68 69 -1
133 | 131 26 35 36 -1
134 | 132 56 15 50 -1
135 | 133 26 20 19 -1
136 | 134 59 60 52 -1
137 | 135 23 16 22 -1
138 | 136 64 0 1 -1
139 | 137 77 78 71 -1
140 | 138 82 81 7 -1
141 | 139 66 73 67 -1
142 | 140 56 63 70 -1
143 | 141 14 21 28 -1
144 | 142 77 3 78 -1
145 | 143 76 83 70 -1
146 | 144 69 76 70 -1
147 | 145 65 71 72 -1
148 | 146 13 14 6 -1
149 | 147 27 21 20 -1
150 | 148 46 53 47 -1
151 | 149 67 60 59 -1
152 | 150 17 9 16 -1
153 | 151 16 9 8 -1
154 | 152 23 22 32 -1
155 | 153 40 39 46 -1
156 | 154 61 60 67 -1
157 | 155 3 11 4 -1
158 | 156 25 19 24 -1
159 | 157 51 52 44 -1
160 | 158 79 80 74 -1
161 | 159 28 15 14 -1
162 | 160 73 68 67 -1
163 | 161 49 41 48 -1
164 | 162 23 17 16 -1
165 | 163 5 80 4 -1
166 | 164 8 30 16 -1
167 | 165 16 30 22 -1
168 |
--------------------------------------------------------------------------------
/data/ball.node:
--------------------------------------------------------------------------------
1 | 100 3 0 0
2 | 0 0.00042419045348651707 9.241180669050664e-05 0.003484015818685293
3 | 1 0.0013656622031703591 -0.0007833822164684534 0.003156912513077259
4 | 2 0.002509001176804304 0.0001552697503939271 0.002421841723844409
5 | 3 0.003141906578093767 3.33890420733951e-05 0.0015274235047399998
6 | 4 0.0034250630997121334 -0.0002317293983651325 0.000531538447830826
7 | 5 0.003427582560107112 0.00021601082698907703 -0.0005118558183312416
8 | 6 0.0031309565529227257 -4.594486381392926e-05 -0.0015491247177124023
9 | 7 0.0025482396595180035 -0.00015020665887277573 -0.0023721407633274794
10 | 8 0.001564572099596262 0.0006662289379164577 0.0030909290071576834
11 | 9 0.0022125032264739275 0.0014093538047745824 0.002301450353115797
12 | 10 0.00270122941583395 0.0016016477020457387 0.0015309673035517335
13 | 11 0.0030545955523848534 0.0015792095800861716 0.0005357215413823724
14 | 12 0.0028552492149174213 0.001906170859001577 -0.0005120940040796995
15 | 13 0.0027316880878061056 0.001541423611342907 -0.0015337263466790318
16 | 14 0.001993679441511631 0.000989062711596489 -0.002737642265856266
17 | 15 0.0003449304786045104 0.00027731378213502467 -0.0035067806020379066
18 | 16 0.001132684643380344 0.002232926432043314 0.002418037038296461
19 | 17 0.001540305558592081 0.0027373796328902245 0.001528572873212397
20 | 18 0.0018564274068921804 0.0028974153101444244 0.0005320324562489986
21 | 19 0.0015012773219496012 0.0030852037016302347 -0.0005169399082660675
22 | 20 0.001597391557879746 0.0027112578973174095 -0.00151849037501961
23 | 21 0.001400334993377328 0.002202628180384636 -0.0023159098345786333
24 | 22 -0.00015092373359948397 0.0025402498431503773 0.002383544808253646
25 | 23 -3.771677438635379e-05 0.0031380325090140104 0.001534274430014193
26 | 24 0.0001587409497005865 0.0034372403752058744 0.0005320173222571611
27 | 25 -0.00022623503173235804 0.003426840528845787 -0.0005124802119098604
28 | 26 2.7116308046970516e-05 0.003145623952150345 -0.0015199375338852406
29 | 27 0.00015267462003976107 0.002528740558773279 -0.002403099089860916
30 | 28 0.0007102965610101819 0.0016948863631114364 -0.0029813728760927916
31 | 29 -0.0007209120667539537 0.0014441382372751832 -0.0031700492836534977
32 | 30 1.9349005015101284e-05 0.0013379496522247791 0.0033037944231182337
33 | 31 -0.0013652164489030838 0.0019125686958432198 0.0026142895221710205
34 | 32 -0.0015999932074919343 0.002682721009477973 0.0015554721467196941
35 | 33 -0.0015734841581434011 0.0030594258569180965 0.0005272503476589918
36 | 34 -0.0018619770416989923 0.0028908017557114363 -0.0005266899825073779
37 | 35 -0.0015423521399497986 0.0027378657832741737 -0.001526385429315269
38 | 36 -0.0011681132018566132 0.002321127336472273 -0.002329424023628235
39 | 37 -0.002337825484573841 0.00109494396019727 0.00234149768948555
40 | 38 -0.0027351994067430496 0.0015148753300309181 0.0015600845217704773
41 | 39 -0.002861838787794113 0.0019005301874130964 0.0005173941026441753
42 | 40 -0.003080892376601696 0.001510791713371873 -0.0005279274191707373
43 | 41 -0.0027034347876906395 0.0016001235926523805 -0.0015283717075362802
44 | 42 -0.0020753517746925354 0.001405108254402876 -0.002419079188257456
45 | 43 -0.0016757589764893055 0.00046783554716967046 0.003060741350054741
46 | 44 -0.00258523877710104 -0.0001028939732350409 0.0023424578830599785
47 | 45 -0.0031400728039443493 -3.65790183423087e-05 0.00153083645273
48 | 46 -0.00343597074970603 0.00015837654063943774 0.0005312079447321594
49 | 47 -0.003422023728489876 -0.00022105721291154623 -0.0005303109064698219
50 | 48 -0.0031588897109031677 7.527395791839808e-05 -0.0014823166420683265
51 | 49 -0.002563891001045704 4.2797513742698357e-05 -0.002367220353335142
52 | 50 -0.0014528802130371332 0.00012255937326699495 -0.0032020392827689648
53 | 51 -0.001956057734787464 -0.001220343983732164 0.0026692862156778574
54 | 52 -0.0026806090027093887 -0.0015970512758940458 0.0015615375014021993
55 | 53 -0.003056418150663376 -0.0015807042364031076 0.0005329859559424222
56 | 54 -0.0028414190746843815 -0.0019194543128833175 -0.0005430865567177534
57 | 55 -0.002705071121454239 -0.0013550346484407783 -0.0017748837126418948
58 | 56 -0.0004800620663445443 -0.0011955848895013332 -0.003302200697362423
59 | 57 -0.0007680894923396409 -0.0004636093508452177 0.0034010778181254864
60 | 58 -0.0009353061905130744 -0.0020952923223376274 0.0026652105152606964
61 | 59 -0.0014997124671936035 -0.0027157259173691273 0.001594329602085054
62 | 60 -0.0018641751958057284 -0.0028904390055686235 0.0005294322036206722
63 | 61 -0.0015716225607320666 -0.003060247516259551 -0.0005247195367701352
64 | 62 -0.001609797589480877 -0.002664964646100998 -0.0015724163968116045
65 | 63 -0.001607889193110168 -0.0015788647579029202 -0.0027451880741864443
66 | 64 0.00020219743601046503 -0.001483140978962183 0.0031957686878740788
67 | 65 0.00019671316840685904 -0.0025615228805691004 0.0023527040611952543
68 | 66 5.0284306780667976e-05 -0.003124859416857362 0.0015620277263224125
69 | 67 -0.00024505879264324903 -0.003422452136874199 0.0005259890458546579
70 | 68 0.00015833633369766176 -0.003436826402321458 -0.000527228054124862
71 | 69 -3.504346386762336e-05 -0.0031406544148921967 -0.0015288478462025523
72 | 70 -0.00014700184692628682 -0.002447315026074648 -0.0024644711520522833
73 | 71 0.0013984678080305457 -0.0020868293941020966 0.002413772977888584
74 | 72 0.001601043390110135 -0.0027032848447561264 0.0015279492363333702
75 | 73 0.0015107684303075075 -0.00308049819432199 0.0005329177947714925
76 | 74 0.001901043695397675 -0.002862073015421629 -0.0005144772585481405
77 | 75 0.001542037003673613 -0.002739283489063382 -0.001525248633697629
78 | 76 0.0011866625864058733 -0.002315487712621689 -0.002324865199625492
79 | 77 0.0023177186958491802 -0.001175187760964036 0.0023278158623725176
80 | 78 0.002736153081059456 -0.0015431565698236227 0.0015286526177078485
81 | 79 0.0028886012732982635 -0.0018638274632394314 0.0005295962910167873
82 | 80 0.003058504080399871 -0.0015741251409053802 -0.0005208472139202058
83 | 81 0.002703447360545397 -0.001599001931026578 -0.001528449123725295
84 | 82 0.002081856597214937 -0.0013934775488451123 -0.002425643615424633
85 | 83 0.0008861853275448084 -0.0012809685431420803 -0.0031855101697146893
86 | 84 0.001541665755212307 -0.0002163201424991712 -0.0031545963138341904
87 | 85 0.0002775003667920828 6.689957081107423e-05 -0.00016750000941101462
88 | 86 -0.001208517816849053 0.0011651127133518457 0.001200336031615734
89 | 87 0.00033982330933213234 0.0016159305814653635 0.0011557895923033357
90 | 88 -0.0006000404246151447 -0.0006653495947830379 0.0013990423176437616
91 | 89 0.0003494513512123376 -0.001795528456568718 0.0004920496721751988
92 | 90 0.0004386160580907017 -0.0017067493172362447 -0.0010786959901452065
93 | 91 -0.0016304112505167723 0.0003254158073104918 -0.0004234965599607676
94 | 92 -0.0011720337206497788 -0.0013592932373285294 -0.0004966422566212714
95 | 93 0.00036107833147980273 0.001857998431660235 -0.0005016995128244162
96 | 94 0.0016313071828335524 0.0006883144378662109 0.0006110704853199422
97 | 95 0.0015852489741519094 0.000405233382480219 -0.001136281294748187
98 | 96 -0.0009192065917886794 0.0014262610347941518 -0.0011521779233589768
99 | 97 0.0014166039181873202 -0.0007993298931978643 0.0010890995617955923
100 | 98 0.0016303493175655603 -0.0016582653624936938 5.6897784816101193e-05
101 | 99 0.0016240664990618825 -0.00037941845948807895 -0.00011297305172774941
102 |
--------------------------------------------------------------------------------
/data/ball_surface_mesh.ply:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/data/ball_surface_mesh.ply
--------------------------------------------------------------------------------
/data/tactile.face:
--------------------------------------------------------------------------------
1 | 200 0
2 | 0 0 52 50 -1
3 | 1 18 19 20 -1
4 | 2 40 58 57 -1
5 | 3 26 27 25 -1
6 | 4 13 3 42 -1
7 | 5 66 86 51 -1
8 | 6 52 66 51 -1
9 | 7 5 9 6 -1
10 | 8 29 31 30 -1
11 | 9 24 27 28 -1
12 | 10 33 31 32 -1
13 | 11 56 53 55 -1
14 | 12 14 3 13 -1
15 | 13 40 57 32 -1
16 | 14 0 34 65 -1
17 | 15 92 86 66 -1
18 | 16 20 45 16 -1
19 | 17 22 4 21 -1
20 | 18 44 3 45 -1
21 | 19 95 98 81 -1
22 | 20 72 71 77 -1
23 | 21 38 8 39 -1
24 | 22 65 34 53 -1
25 | 23 9 7 38 -1
26 | 24 83 95 81 -1
27 | 25 101 52 65 -1
28 | 26 43 44 95 -1
29 | 27 43 42 44 -1
30 | 28 50 27 0 -1
31 | 29 48 47 63 -1
32 | 30 90 82 84 -1
33 | 31 46 47 48 -1
34 | 32 64 94 91 -1
35 | 33 89 43 95 -1
36 | 34 8 61 39 -1
37 | 35 6 38 37 -1
38 | 36 81 70 83 -1
39 | 37 34 30 33 -1
40 | 38 91 94 76 -1
41 | 39 76 94 86 -1
42 | 40 23 24 17 -1
43 | 41 42 12 13 -1
44 | 42 78 72 77 -1
45 | 43 84 88 85 -1
46 | 44 39 61 60 -1
47 | 45 37 38 39 -1
48 | 46 46 97 62 -1
49 | 47 45 46 62 -1
50 | 48 36 5 6 -1
51 | 49 62 98 44 -1
52 | 50 45 62 44 -1
53 | 51 55 53 35 -1
54 | 52 28 49 2 -1
55 | 53 2 24 28 -1
56 | 54 71 74 75 -1
57 | 55 0 27 26 -1
58 | 56 22 21 25 -1
59 | 57 59 100 93 -1
60 | 58 59 60 100 -1
61 | 59 93 82 90 -1
62 | 60 63 2 64 -1
63 | 61 22 25 23 -1
64 | 62 61 68 60 -1
65 | 63 21 5 29 -1
66 | 64 13 12 7 -1
67 | 65 84 78 69 -1
68 | 66 69 88 84 -1
69 | 67 76 86 92 -1
70 | 68 80 78 82 -1
71 | 69 9 5 4 -1
72 | 70 10 11 9 -1
73 | 71 26 34 0 -1
74 | 72 89 95 83 -1
75 | 73 20 16 18 -1
76 | 74 68 61 41 -1
77 | 75 25 30 26 -1
78 | 76 2 49 64 -1
79 | 77 81 98 97 -1
80 | 78 53 34 35 -1
81 | 79 72 80 73 -1
82 | 80 75 91 76 -1
83 | 81 47 19 1 -1
84 | 82 71 75 77 -1
85 | 83 35 34 33 -1
86 | 84 14 13 11 -1
87 | 85 13 7 11 -1
88 | 86 1 19 17 -1
89 | 87 63 87 48 -1
90 | 88 97 98 62 -1
91 | 89 85 90 84 -1
92 | 90 84 82 78 -1
93 | 91 16 3 14 -1
94 | 92 89 79 96 -1
95 | 93 42 3 44 -1
96 | 94 10 4 15 -1
97 | 95 70 73 83 -1
98 | 96 82 79 80 -1
99 | 97 39 60 40 -1
100 | 98 100 96 93 -1
101 | 99 30 31 33 -1
102 | 100 100 60 68 -1
103 | 101 68 96 100 -1
104 | 102 72 73 70 -1
105 | 103 11 7 9 -1
106 | 104 21 30 25 -1
107 | 105 63 64 91 -1
108 | 106 6 9 38 -1
109 | 107 76 92 69 -1
110 | 108 35 57 55 -1
111 | 109 56 67 85 -1
112 | 110 83 79 89 -1
113 | 111 21 4 5 -1
114 | 112 12 41 8 -1
115 | 113 36 31 29 -1
116 | 114 87 74 97 -1
117 | 115 74 81 97 -1
118 | 116 64 49 94 -1
119 | 117 17 22 23 -1
120 | 118 45 3 16 -1
121 | 119 42 41 12 -1
122 | 120 28 27 50 -1
123 | 121 85 67 90 -1
124 | 122 74 71 70 -1
125 | 123 70 81 74 -1
126 | 124 29 5 36 -1
127 | 125 87 97 48 -1
128 | 126 20 19 47 -1
129 | 127 18 10 15 -1
130 | 128 90 67 58 -1
131 | 129 93 79 82 -1
132 | 130 63 91 87 -1
133 | 131 44 98 95 -1
134 | 132 46 20 47 -1
135 | 133 46 45 20 -1
136 | 134 66 52 101 -1
137 | 135 66 101 92 -1
138 | 136 38 7 8 -1
139 | 137 7 12 8 -1
140 | 138 72 70 71 -1
141 | 139 73 79 83 -1
142 | 140 96 79 93 -1
143 | 141 41 43 99 -1
144 | 142 99 43 89 -1
145 | 143 42 43 41 -1
146 | 144 77 75 76 -1
147 | 145 60 59 40 -1
148 | 146 17 15 22 -1
149 | 147 80 79 73 -1
150 | 148 69 77 76 -1
151 | 149 23 27 24 -1
152 | 150 25 27 23 -1
153 | 151 89 96 99 -1
154 | 152 32 35 33 -1
155 | 153 99 68 41 -1
156 | 154 99 96 68 -1
157 | 155 29 30 21 -1
158 | 156 78 80 72 -1
159 | 157 15 4 22 -1
160 | 158 6 37 36 -1
161 | 159 63 47 1 -1
162 | 160 65 52 0 -1
163 | 161 69 92 88 -1
164 | 162 54 65 53 -1
165 | 163 88 101 54 -1
166 | 164 101 65 54 -1
167 | 165 32 31 36 -1
168 | 166 37 32 36 -1
169 | 167 87 91 75 -1
170 | 168 17 24 1 -1
171 | 169 1 24 2 -1
172 | 170 10 9 4 -1
173 | 171 54 53 56 -1
174 | 172 88 54 85 -1
175 | 173 85 54 56 -1
176 | 174 26 30 34 -1
177 | 175 50 49 28 -1
178 | 176 94 49 51 -1
179 | 177 51 49 50 -1
180 | 178 57 67 55 -1
181 | 179 14 11 10 -1
182 | 180 55 67 56 -1
183 | 181 86 94 51 -1
184 | 182 50 52 51 -1
185 | 183 41 61 8 -1
186 | 184 58 67 57 -1
187 | 185 18 15 17 -1
188 | 186 18 16 10 -1
189 | 187 17 19 18 -1
190 | 188 48 97 46 -1
191 | 189 78 77 69 -1
192 | 190 88 92 101 -1
193 | 191 10 16 14 -1
194 | 192 75 74 87 -1
195 | 193 32 57 35 -1
196 | 194 59 58 40 -1
197 | 195 59 90 58 -1
198 | 196 93 90 59 -1
199 | 197 1 2 63 -1
200 | 198 40 32 37 -1
201 | 199 37 39 40 -1
202 |
--------------------------------------------------------------------------------
/images/teaser.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Genesis-Embodied-AI/ThinShellLab/a1cf8d0d549b130eb877482d3b1ed2aa1e3b1bbb/images/teaser.gif
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "thinshelllab"
7 | version = "0.1.0"
8 | description = "A differentiable simulation platform for thin-shell object manipulation"
9 | readme = "Readme.md"
10 | requires-python = ">=3.9"
11 | dependencies = [
12 | "cma",
13 | "cupy",
14 | "gymnasium",
15 | "h5py",
16 | "imageio",
17 | "matplotlib",
18 | "numpy",
19 | "open3d",
20 | "opencv_python",
21 | "Pillow",
22 | "sb3_contrib",
23 | "stable_baselines3",
24 | "taichi",
25 | "torch",
26 | "trimesh"
27 | ]
28 |
29 | [tool.setuptools.package-dir]
30 | thinshelllab = "code"
31 |
--------------------------------------------------------------------------------