├── .gitignore
├── LICENSE
├── README.md
├── create_dataset.py
├── data
│   └── gripper
│       ├── carpet.png
│       ├── marble.png
│       ├── marshmallow.png
│       ├── soft_experiments_softball.xml
│       ├── soft_experiments_softball_adjusted_for_2_fingers.xml
│       ├── soft_experiments_softbox_adjusted_for_2_fingers.xml
│       ├── soft_experiments_softcylinder_adjusted_for_2_fingers.xml
│       ├── soft_grip_four_fingers.xml
│       ├── soft_grip_two_fingers.xml
│       ├── soft_scene.xml
│       ├── sponge.png
│       └── tennis.png
├── environment
│   ├── __init__.py
│   ├── interface
│   │   ├── __init__.py
│   │   └── environment.py
│   └── manenv.py
├── functions
│   ├── __init__.py
│   ├── optimization.py
│   └── utils.py
├── images
│   └── real_experiments.png
├── net
│   ├── NeuralNets.py
│   ├── __init__.py
│   └── layers.py
├── playground.py
├── run_experiments.sh
├── testing_saved_model.py
└── training_cross_validate.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 | /.idea/
106 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Michał Bednarek
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Soft gripper
2 |
3 | This repository contains the code and data needed to reproduce the paper _Gaining a Sense of Touch.
4 | Physical Parameters Estimation using a Soft Gripper and Neural Networks_, published in Electronics 2021.
5 |
6 | ## Citation
7 |
8 | If you use this code in your research, please cite:
9 |
10 | ```bibtex
11 | @Article{BednarekSoftGrip,
12 | AUTHOR = {Bednarek, Michal and Kicki, Piotr and Bednarek, Jakub and Walas, Krzysztof},
13 | TITLE = {Gaining a Sense of Touch. Object Stiffness Estimation Using a Soft Gripper and Neural Networks},
14 | JOURNAL = {Electronics},
15 | VOLUME = {10},
16 | YEAR = {2021},
17 | NUMBER = {1},
18 | ARTICLE-NUMBER = {96},
19 | URL = {https://www.mdpi.com/2079-9292/10/1/96},
20 | ISSN = {2079-9292},
21 | DOI = {10.3390/electronics10010096}
22 | }
23 |
24 | ```
25 |
26 | ## Supplementary results
27 |
28 | The paper was partially used in the PhD thesis of the author of this repository.
29 | The following GitHub gist contains supplementary results for the paper:
30 | [link](https://gist.github.com/mbed92/5208a336a2d6b58a43ef6288204d70f8)
31 |
32 | The additional experiments contain a benchmark of a Fully Connected network
33 | of a size comparable to the ConvNet.
34 |
35 | Datasets: [link](https://drive.google.com/drive/folders/1fLb1GNbBVXK_LCYk2dSYM7Ob8gCudXBt?usp=sharing)
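36 | 
37 | ## Loading the data
38 | 
39 | Each pickle stores a dictionary with the keys `data` (one array of sensor readings per
40 | squeezing episode) and `stiffness` (one label per episode), as produced by `create_dataset.py`.
41 | A minimal loading sketch, assuming a downloaded file named `sim_train.pickle`:
42 | 
43 | ```python
44 | import pickle
45 | 
46 | with open("sim_train.pickle", "rb") as fp:
47 |     ds = pickle.load(fp)
48 | 
49 | print(len(ds["data"]), "episodes")
50 | print(ds["data"][0].shape)  # (time steps, sensor channels) for the first episode
51 | ```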
--------------------------------------------------------------------------------
/create_dataset.py:
--------------------------------------------------------------------------------
1 | # Author: Michał Bednarek PUT Poznan
2 | # Comment: Script for generating data from the MuJoCo simulation for deep learning
3 | # models. Data are saved as a pickle. Each sample holds the readings from one squeezing episode.
4 |
5 | import os
6 | import pickle
7 | from argparse import ArgumentParser
8 |
9 | import numpy as np
10 | from tqdm import tqdm
11 |
12 | from environment import ManEnv
13 |
14 | NUM_EPISODES = 1
15 | MAX_ITER_PER_EP = 160
16 | OPEN_CLOSE_DIV = 80
17 | START_STEP = 40
18 |
19 |
20 | def log_into_file(args):
21 | assert type(args.mujoco_model_paths) is list
22 | num_envs = len(args.mujoco_model_paths)
23 | current_env = 0
24 |
25 | env_spec = ManEnv.get_std_spec(args)
26 | env = ManEnv(**env_spec)
27 |
28 | os.makedirs(args.data_folder, exist_ok=True)
29 | path = os.path.join(args.data_folder, "{}.pickle".format(args.data_name))
30 | file = open(path, 'wb')
31 | data, stiffness = list(), list()
32 |
33 | for ep in tqdm(range(NUM_EPISODES * num_envs)):
34 |
35 | current_stiffness = env.reset()
36 | # print(current_stiffness)
37 |
38 | # start squeezing an object
39 | samples = list()
40 |
41 | for _ in range(START_STEP):
42 | readings, contact = env.step()
43 | if args.mask_contact and not contact:
44 | readings = np.zeros_like(readings)
45 | if readings is not None:
46 | samples.append(readings)
47 | env.close_hand()
48 | for i in range(MAX_ITER_PER_EP):
49 | env.render()
50 |
51 | # perform squeezing or loose a hand
52 | if i % OPEN_CLOSE_DIV == 0 and i > 0:
53 | env.toggle_grip()
54 |
55 | # gather readings and mask out data when there is no contact
56 | readings, contact = env.step()
57 | if args.mask_contact and not contact:
58 | readings = np.zeros_like(readings)
59 | if readings is not None:
60 | samples.append(readings)
61 |
62 | # add to a pickle (important to use array(), not asarray(), because it makes a copy!)
63 | samples = np.array(samples)
64 | data.append(samples)
65 | stiffness.append(current_stiffness)
66 |
67 | # change env number
68 | if (ep + 1) % NUM_EPISODES == 0 and num_envs > 1:
69 | current_env += 1
70 |             if current_env >= num_envs:
71 |                 current_env = 0
72 | env.load_env(current_env)
73 |
74 | # dump data
75 | pickle.dump({
76 | "data": data,
77 | "stiffness": stiffness
78 | }, file)
79 | file.close()
80 | print("Total number of samples: {0}".format(len(data)))
81 |
82 |
83 | if __name__ == '__main__':
84 | parser = ArgumentParser()
85 | parser.add_argument('--sim-step', type=int, default=7)
86 |     parser.add_argument('--vis', type=bool, default=True)  # note: argparse's type=bool treats any non-empty string as True
87 |     parser.add_argument('--mask-contact', default=False, action='store_true')
88 | parser.add_argument('--sim-start', type=int, default=1)
89 | parser.add_argument('--data-folder', type=str, default="./data/dataset/testing_datasets")
90 | parser.add_argument('--data-name', type=str, default="dataset_all_shapes")
91 | parser.add_argument('--mujoco-model-paths', nargs="+", required=True)
92 | args, _ = parser.parse_known_args()
93 | log_into_file(args)
94 |
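95 | # Example invocation (hypothetical output name; any MuJoCo model from data/gripper works):
96 | #   python create_dataset.py --mujoco-model-paths data/gripper/soft_experiments_softball.xml \
97 | #       --data-folder ./data/dataset/testing_datasets --data-name softball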
--------------------------------------------------------------------------------
/data/gripper/carpet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mbed92/soft-grip/12888e6a55572ea324691961c6bc35fa14942873/data/gripper/carpet.png
--------------------------------------------------------------------------------
/data/gripper/marble.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mbed92/soft-grip/12888e6a55572ea324691961c6bc35fa14942873/data/gripper/marble.png
--------------------------------------------------------------------------------
/data/gripper/marshmallow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mbed92/soft-grip/12888e6a55572ea324691961c6bc35fa14942873/data/gripper/marshmallow.png
--------------------------------------------------------------------------------
/data/gripper/soft_experiments_softball.xml:
--------------------------------------------------------------------------------
[MuJoCo XML content was not preserved in this export; refer to the file in the repository.]
--------------------------------------------------------------------------------
/data/gripper/soft_experiments_softball_adjusted_for_2_fingers.xml:
--------------------------------------------------------------------------------
[MuJoCo XML content was not preserved in this export; refer to the file in the repository.]
--------------------------------------------------------------------------------
/data/gripper/soft_experiments_softbox_adjusted_for_2_fingers.xml:
--------------------------------------------------------------------------------
[MuJoCo XML content was not preserved in this export; refer to the file in the repository.]
--------------------------------------------------------------------------------
/data/gripper/soft_experiments_softcylinder_adjusted_for_2_fingers.xml:
--------------------------------------------------------------------------------
[MuJoCo XML content was not preserved in this export; refer to the file in the repository.]
--------------------------------------------------------------------------------
/data/gripper/soft_grip_four_fingers.xml:
--------------------------------------------------------------------------------
[MuJoCo XML content was not preserved in this export; refer to the file in the repository.]
--------------------------------------------------------------------------------
/data/gripper/soft_grip_two_fingers.xml:
--------------------------------------------------------------------------------
[MuJoCo XML content was not preserved in this export; refer to the file in the repository.]
--------------------------------------------------------------------------------
/data/gripper/soft_scene.xml:
--------------------------------------------------------------------------------
[MuJoCo XML content was not preserved in this export; refer to the file in the repository.]
--------------------------------------------------------------------------------
/data/gripper/sponge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mbed92/soft-grip/12888e6a55572ea324691961c6bc35fa14942873/data/gripper/sponge.png
--------------------------------------------------------------------------------
/data/gripper/tennis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mbed92/soft-grip/12888e6a55572ea324691961c6bc35fa14942873/data/gripper/tennis.png
--------------------------------------------------------------------------------
/environment/__init__.py:
--------------------------------------------------------------------------------
1 | from .manenv import ManEnv
--------------------------------------------------------------------------------
/environment/interface/__init__.py:
--------------------------------------------------------------------------------
1 | from .environment import Env
2 |
--------------------------------------------------------------------------------
/environment/interface/environment.py:
--------------------------------------------------------------------------------
1 | class Env(object):
2 | def __init__(self, sim_start, sim_step):
3 | self.sim_start = sim_start
4 | self.sim_step = sim_step
5 |
6 | def step(self, *args):
7 | raise NotImplementedError("Not implemented")
8 |
9 | def reset(self):
10 | raise NotImplementedError("Not implemented")
11 |
--------------------------------------------------------------------------------
/environment/manenv.py:
--------------------------------------------------------------------------------
1 | from .interface import Env
2 | import mujoco_py
3 | import numpy as np
4 |
5 | DEFAULT_DAMPING = 200
6 |
7 |
8 | class ManEnv(Env):
9 | # ADJUST VARIABLES DEPENDING ON YOUR DATASET
10 |     # ids of joints used to create an object - their stiffness is randomized during experiments
11 | # joint_ids = list(range(34, 251)) # JOINT INDEXES FOR 4 FINGER GRIPPER
12 | joint_ids = list(range(11, 64)) # JOINT INDEXES FOR 2 FINGER GRIPPER
13 | tendon_ids = list(range(1))
14 |
15 |     # list of bodies that are checked for collision (partial names are enough)
16 | # finger_names = ['g11', 'g12', 'g13', 'g2'] # FINGER NAMES FOR 4 FINGER GRIPPER
17 | finger_names = ['g12', 'g2'] # FINGER NAMES FOR 2 FINGER GRIPPER
18 | obj_name = 'OBJ'
19 |
20 | def __init__(self, sim_start, sim_step, env_paths, is_vis=True):
21 | super().__init__(sim_start, sim_step)
22 |
23 | # setup environment and viewer
24 | assert len(env_paths) > 0
25 | self.is_vis = is_vis
26 | self.env_paths = env_paths
27 | scene = mujoco_py.load_model_from_path(env_paths[0])
28 | self.env = mujoco_py.MjSim(scene)
29 | if self.is_vis:
30 | self.viewer = mujoco_py.MjViewer(self.env)
31 | self.is_closing = True
32 |
33 | def load_env(self, num):
34 | if num < len(self.env_paths):
35 | new_scene = self.env_paths[num]
36 | scene = mujoco_py.load_model_from_path(new_scene)
37 | self.env = mujoco_py.MjSim(scene)
38 | if self.is_vis:
39 | self.viewer = mujoco_py.MjViewer(self.env)
40 | else:
41 |             print("Wrong environment number.")
42 |
43 | # main methods
44 | def step(self, num_steps=-1, actions=None, min_dist=0.1):
45 | if num_steps < 1:
46 | num_steps = self.sim_step
47 | try:
48 | for _ in range(num_steps):
49 | self.env.step()
50 | except mujoco_py.builder.MujocoException:
51 | self.reset()
52 |
53 |         return self.get_sensor_sensordata()  # (sensor readings, contact flag) - callers unpack both
54 |
55 | def reset(self):
56 | current_stiffness = self.set_new_stiffness()
57 | self.env.reset()
58 | self.env.forward()
59 |
60 | if self.sim_start > 0:
61 | self.step(self.sim_start)
62 |
63 | return current_stiffness
64 |
65 | def get_sensor_sensordata(self):
66 | data = self.env.data
67 |
68 | # return true when all fingers can contact an object's body
69 | is_contact_between_fingers_and_object = False
70 |         fingers_left = list(self.finger_names)  # copy, so the class-level list is not mutated
71 | for coni in range(data.ncon):
72 | contact = data.contact[coni]
73 | body1_name = self.env.model.geom_id2name(contact.geom1)
74 | body2_name = self.env.model.geom_id2name(contact.geom2)
75 | if body1_name is not None and body2_name is not None:
76 | if self.obj_name in body1_name or self.obj_name in body2_name:
77 |                     for finger_name in list(fingers_left):  # iterate over a copy while removing
78 | is_finger_contact = bool(finger_name in body1_name or finger_name in body2_name)
79 | if is_finger_contact:
80 | fingers_left.remove(finger_name)
81 | if len(fingers_left) == 0:
82 | is_contact_between_fingers_and_object = True
83 | break
84 |
85 | return np.copy(data.sensordata), is_contact_between_fingers_and_object
86 |
87 | def toggle_grip(self):
88 | if self.is_closing:
89 | self.loose_hand()
90 | else:
91 | self.close_hand()
92 |
93 | def close_hand(self):
94 | for i in range(2):
95 | self.env.data.ctrl[i] = -0.2
96 | self.is_closing = True
97 |
98 | def loose_hand(self):
99 | for i in range(2):
100 | self.env.data.ctrl[i] = 0.2
101 | self.is_closing = False
102 |
103 | def set_new_stiffness(self, range_min=300, range_max=1400):
104 | new_value = np.random.uniform(range_min, range_max)
105 | for i in self.joint_ids:
106 | self.env.model.jnt_stiffness[i] = new_value
107 | for i in self.tendon_ids:
108 | self.env.model.tendon_stiffness[i] = new_value
109 | return new_value
110 |
111 | def get_env(self):
112 | return self.env
113 |
114 | def render(self):
115 | if self.is_vis:
116 | self.viewer.render()
117 |
118 | # specs
119 | @staticmethod
120 | def get_std_spec(args):
121 | return {
122 | "sim_start": args.sim_start,
123 | "sim_step": args.sim_step,
124 | "env_paths": args.mujoco_model_paths,
125 | "is_vis": args.vis
126 | }
127 |
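128 | # Minimal usage sketch (assumes mujoco-py is installed and the model path exists):
129 | #   env = ManEnv(sim_start=1, sim_step=7, env_paths=["data/gripper/soft_experiments_softball.xml"], is_vis=False)
130 | #   stiffness = env.reset()         # randomizes joint/tendon stiffness and returns the sampled value
131 | #   readings, contact = env.step()  # sensor readings plus a finger-object contact flag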
--------------------------------------------------------------------------------
/functions/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import _optimize, create_tf_generators, _add_to_tensorboard, allow_memory_growth
2 | from .optimization import train, validate
3 |
--------------------------------------------------------------------------------
/functions/optimization.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | from .utils import _add_to_tensorboard, _optimize, _type_check
4 |
5 |
6 | def noised_modality(data):
7 | # add accelerometer noise
8 | acc, gyro = data[:, :, :6], data[:, :, 6:]
9 | acc += tf.random.normal(mean=0.0, stddev=0.7, shape=acc.get_shape(), dtype=data.dtype)
10 |
11 | # add gyro noise
12 | gyro += tf.random.normal(mean=0.0, stddev=0.06, shape=gyro.get_shape(), dtype=data.dtype)
13 |
14 | return tf.concat([acc, gyro], -1)
15 |
16 |
17 | def normalize_predictions(preds):
18 |     p = 1100.0 * tf.nn.sigmoid(preds) + 300.0  # squash into the simulated stiffness range [300, 1400]
19 | return tf.squeeze(p, -1)
20 |
21 |
22 | def train(model, writer, ds, mean, std, optimizer, previous_steps, prefix="train", add_noise=False):
23 | metrics = [
24 | tf.keras.metrics.RootMeanSquaredError(name="RootMeanSquaredError"),
25 | tf.keras.metrics.MeanAbsoluteError(name="MeanAbsoluteError"),
26 | tf.keras.metrics.MeanAbsolutePercentageError(name="MeanAbsolutePercentageError")
27 | ]
28 |
29 | loss_metric = tf.keras.metrics.Mean("Loss")
30 |
31 | for i, (x_train, y_train) in enumerate(ds):
32 | if add_noise:
33 | x_train = noised_modality(x_train)
34 |
35 | x_train, y_train = tf.cast(x_train, tf.float32), tf.cast(y_train, tf.float32)
36 |
37 | with tf.GradientTape() as tape:
38 | predictions = model((x_train - mean) / std, training=True)
39 | predictions = normalize_predictions(predictions)
40 |
41 | vars = model.trainable_variables
42 | l2_reg = tf.add_n([tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in vars]) * 0.001
43 | loss = tf.losses.mean_absolute_error(y_train, predictions) + l2_reg
44 | loss = tf.reduce_mean(loss)
45 | loss_metric.update_state(loss.numpy())
46 |
51 | _optimize(optimizer, tape, loss, model.trainable_variables)
52 |
53 | # gather stats
54 | for m in metrics:
55 | m.update_state(y_train, predictions)
56 |
57 | with writer.as_default():
58 | _add_to_tensorboard({
59 | "metrics": metrics + [loss_metric]
60 | }, previous_steps, prefix)
61 | writer.flush()
62 |
63 | for m in metrics + [loss_metric]:
64 | m.reset_states()
65 |
66 | previous_steps += 1
67 | return previous_steps
68 |
69 |
70 | def _val(model, writer, ds, mean, std, previous_steps=None, best_metric=None, prefix="validation", is_print=True):
71 | save_model = False
72 |
73 | metrics = [
74 | tf.keras.metrics.RootMeanSquaredError(name="RootMeanSquaredError"),
75 | tf.keras.metrics.MeanAbsoluteError(name="MeanAbsoluteError"),
76 | tf.keras.metrics.MeanAbsolutePercentageError(name="MeanAbsolutePercentageError")
77 | ]
78 |
79 | save_metric = tf.keras.metrics.MeanAbsolutePercentageError(name="save_metric")
80 | mae = tf.keras.metrics.MeanAbsoluteError(name="mae")
81 | loss_metric = tf.keras.metrics.Mean("Loss")
82 |
83 | for x_val, y_val in ds:
84 | x_val, y_val = tf.cast(x_val, tf.float32), tf.cast(y_val, tf.float32)
85 |
86 | predictions = model((x_val - mean) / std, training=False)
87 | predictions = normalize_predictions(predictions)
88 |
89 | loss = tf.losses.mean_squared_error(y_val, predictions)
90 | loss_metric.update_state(loss.numpy())
91 |
92 | # gather stats
93 | save_metric.update_state(y_val, predictions)
94 | mae.update_state(y_val, predictions)
95 | for m in metrics:
96 | m.update_state(y_val, predictions)
97 |
98 | if writer is not None and previous_steps is not None:
99 | with writer.as_default():
100 | _add_to_tensorboard({
101 | "metrics": metrics + [loss_metric]
102 | }, previous_steps, prefix)
103 | writer.flush()
104 | previous_steps += 1
105 |
106 | if best_metric is not None:
107 | if save_metric.result().numpy() < best_metric:
108 | save_model = True
109 | best_metric = save_metric.result().numpy()
110 |
111 | if is_print:
112 | print("Current best test result MAE/MAPE: {} / {}".format(mae.result().numpy(), best_metric))
113 |
114 | for m in metrics + [loss_metric]:
115 | m.reset_states()
116 |
117 | save_metric.reset_states()
118 | mae.reset_states()
119 |
120 | return previous_steps, best_metric, save_model
121 |
122 |
123 | def validate(model, writer, ds, mean, std, previous_steps, best_metric=None, prefix="validation"):
124 | save_model = False
125 | print("\n")
126 |
127 |     if _type_check(ds, list) and _type_check(best_metric, list):
128 | if len(best_metric) > 1:
129 | for i, (sub_ds, metric) in enumerate(zip(ds, best_metric)):
130 | _, best, _ = _val(model, None, sub_ds, mean, std, None, metric, prefix, is_print=True)
131 | best_metric[i] = best
132 | elif len(best_metric) == 1:
133 | best_metric = best_metric[0]
134 | ds = ds[0]
135 | previous_steps, best_metric, save_model = _val(model, writer, ds, mean, std, previous_steps, best_metric,
136 | prefix)
137 | best_metric = [best_metric]
138 | else:
139 | print("Watch tensorboard.")
140 | else:
141 | previous_steps, best_metric, save_model = _val(model, writer, ds, mean, std, previous_steps, best_metric,
142 | prefix)
143 |
144 | print("\n")
145 | return previous_steps, best_metric, save_model
146 |
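147 | # Typical epoch loop (mirrors training_cross_validate.py; variable names are assumptions):
148 | #   train_step = train(model, train_writer, train_ds, mean, std, optimizer, train_step, add_noise=True)
149 | #   val_step, best_metric, save_model = validate(model, val_writer, val_ds, mean, std, val_step, best_metric)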
--------------------------------------------------------------------------------
/functions/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 |
4 |
5 | def create_tf_generators(train_dataset, test_datasets, train_idx, val_idx, batch_size, real_data=None,
6 | add_real_data=None):
7 | if train_idx is not None and val_idx is not None:
8 | train_x = np.array(train_dataset["data"])[train_idx.tolist()]
9 | train_y = np.array(train_dataset["stiffness"])[train_idx.tolist()]
10 |
11 | # append real data samples if needed
12 | if add_real_data:
13 | train_x = np.concatenate([train_x, real_data["data"]], 0)
14 | train_y = np.concatenate([train_y, real_data["stiffness"]], 0)
15 |
16 | val_x = np.array(train_dataset["data"])[val_idx.tolist()]
17 | val_y = np.array(train_dataset["stiffness"])[val_idx.tolist()]
18 |
19 | else:
20 | train_x = np.array(train_dataset["data"])
21 | train_y = np.array(train_dataset["stiffness"])
22 | val_x = np.array(real_data["data"])
23 | val_y = np.array(real_data["stiffness"])
24 |
25 | print("TRAIN NUM SAMPLES IN FOLD: {}".format(train_x.shape[0]))
26 |     print("VAL NUM SAMPLES IN FOLD: {}".format(val_x.shape[0]))
27 | num_samples = train_x.shape[0]
28 |
29 | train_ds = tf.data.Dataset.from_tensor_slices((train_x, train_y)) \
30 | .shuffle(num_samples) \
31 | .batch(batch_size)
32 |
33 | val_ds = tf.data.Dataset.from_tensor_slices((val_x, val_y)).batch(batch_size)
34 |
35 | test_ds_list = list()
36 | for ds in test_datasets:
37 | tds = tf.data.Dataset.from_tensor_slices((ds["data"], ds["stiffness"])).batch(batch_size)
38 | test_ds_list.append(tds)
39 |
40 | train_mean = np.mean(train_x, axis=(0, 1), keepdims=True)
41 | train_std = np.std(train_x, axis=(0, 1), keepdims=True)
42 |
43 | return train_ds, val_ds, test_ds_list, train_mean, train_std
44 |
45 |
46 | def _add_to_tensorboard(scalars: dict, step: int, prefix: str):
47 | for key in scalars:
48 | for m in scalars[key]:
49 | tf.summary.scalar('{}/{}'.format(prefix, m.name), m.result().numpy(), step=step)
50 |
51 |
52 | def _optimize(optimizer, tape, loss, trainable_vars):
53 | gradients = tape.gradient(loss, trainable_vars)
54 | optimizer.apply_gradients(zip(gradients, trainable_vars))
55 |
56 |
57 | def allow_memory_growth():
58 | gpus = tf.config.experimental.list_physical_devices('GPU')
59 | if gpus:
60 | try:
61 | for gpu in gpus:
62 | tf.config.experimental.set_memory_growth(gpu, True)
63 | logical_gpus = tf.config.experimental.list_logical_devices('GPU')
64 | print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
65 | except RuntimeError as e:
66 | print(e)
67 |
68 |
69 | def _type_check(val, ret_type, num_elements=None):
70 | retval = False
71 | if ret_type is not None and type(val) is ret_type:
72 | if ret_type is list and num_elements is not None:
73 | if len(val) == num_elements:
74 | retval = True
75 | else:
76 | retval = False
77 | else:
78 | retval = True
79 | return retval
80 |
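81 | # Typical call (mirrors the training scripts; variable names are assumptions):
82 | #   train_ds, val_ds, test_ds_list, mean, std = create_tf_generators(
83 | #       train_dataset, [test_dataset], train_idx, val_idx, batch_size=100)
84 | # Model inputs are then standardized as (x - mean) / std before the forward pass.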
--------------------------------------------------------------------------------
/images/real_experiments.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mbed92/soft-grip/12888e6a55572ea324691961c6bc35fa14942873/images/real_experiments.png
--------------------------------------------------------------------------------
/net/NeuralNets.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | from .layers import _create_signal_network
4 |
5 |
6 | class ConvNet(tf.keras.Model):
7 |
8 | def __init__(self, batch_size):
9 | super(ConvNet, self).__init__()
10 |
11 | self.batch_size = batch_size
12 | self.conv1d, _, self.fc = _create_signal_network(batch_size,
13 | num_outputs=1,
14 | conv_filters=[128, 256, 512],
15 | conv_kernels=[3, 3, 3],
16 | conv_strides=[2, 2, 2],
17 | bilstm_units=[],
18 | fc_layers=[512, 256, 128, 64])
19 | self.pooling = tf.keras.layers.GlobalAveragePooling1D()
20 |
21 | def call(self, inputs, training=None, mask=None):
22 | inputs = tf.cast(inputs, tf.float32)
23 | x = self.conv1d(inputs, training=training)
24 | x = self.pooling(x, training=training)
25 | x = self.fc(x, training=training)
26 |
27 | return x
28 |
29 |
30 | class ConvBiLstmNet(tf.keras.Model):
31 |
32 | def __init__(self, batch_size):
33 | super(ConvBiLstmNet, self).__init__()
34 |
35 | self.batch_size = batch_size
36 | self.conv1d, self.lstm, self.fc = _create_signal_network(batch_size,
37 | num_outputs=1,
38 | conv_filters=[128, 256, 256],
39 | conv_kernels=[3, 3, 3],
40 | conv_strides=[2, 2, 2],
41 | bilstm_units=[128],
42 | fc_layers=[512, 256, 128, 64])
43 | self.pooling = tf.keras.layers.GlobalAveragePooling1D()
44 |
45 | def call(self, inputs, training=None, mask=None):
46 | inputs = tf.cast(inputs, tf.float32)
47 | x = self.conv1d(inputs, training=training)
48 | x = self.lstm(x, training=training)
49 | x = self.fc(x, training=training)
50 |
51 | return x
52 |
53 |
54 | class ConvLstmNet(tf.keras.Model):
55 |
56 | def __init__(self, batch_size):
57 | super(ConvLstmNet, self).__init__()
58 |
59 | self.batch_size = batch_size
60 | self.conv1d, _, self.fc = _create_signal_network(batch_size,
61 | num_outputs=1,
62 | conv_filters=[128, 256, 256],
63 | conv_kernels=[3, 3, 3],
64 | conv_strides=[2, 2, 2],
65 | bilstm_units=[],
66 | fc_layers=[512, 256, 128, 64])
67 |
68 | self.lstm = tf.keras.Sequential([
69 | tf.keras.layers.LSTM(128, return_sequences=True, dtype=tf.float64, dropout=0.3),
70 | tf.keras.layers.LSTM(128, return_sequences=False, dtype=tf.float64, dropout=0.3)
71 | ])
72 | self.pooling = tf.keras.layers.GlobalAveragePooling1D()
73 |
74 | def call(self, inputs, training=None, mask=None):
75 | inputs = tf.cast(inputs, tf.float32)
76 | x = self.conv1d(inputs, training=training)
77 | x = self.lstm(x, training=training)
78 | x = self.fc(x, training=training)
79 |
80 | return x
81 |
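82 | # Each model maps a (batch, time, channels) sequence of IMU readings to a single raw
83 | # output, later squashed into the stiffness range by functions.optimization.normalize_predictions.
84 | # Sketch (the input shape is an assumption):
85 | #   model = ConvNet(batch_size=100)
86 | #   y = model(tf.zeros([100, 200, 12]))  # -> shape (100, 1)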
--------------------------------------------------------------------------------
/net/__init__.py:
--------------------------------------------------------------------------------
1 | from .NeuralNets import ConvBiLstmNet, ConvLstmNet, ConvNet
2 |
--------------------------------------------------------------------------------
/net/layers.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 |
4 | def create_bidir_lstm_layer(batch_size, lstm_units, return_sequences=False, dropout=0.3):
5 | forward_layer = tf.keras.layers.LSTM(lstm_units, return_sequences=return_sequences, dtype=tf.float64,
6 | dropout=dropout)
7 | backward_layer = tf.keras.layers.LSTM(lstm_units, return_sequences=return_sequences, go_backwards=True,
8 | dropout=dropout, dtype=tf.float64)
9 | return tf.keras.layers.Bidirectional(forward_layer, backward_layer=backward_layer,
10 | input_shape=(batch_size, int(2 * lstm_units)))
11 |
12 |
13 | def _create_signal_network(batch_size, num_outputs,
14 | conv_filters: list = (64, 64),
15 | conv_kernels: list = (3, 3),
16 | conv_strides: list = (2, 2),
17 | bilstm_units: list = (64,),
18 | fc_layers: list = (64,)):
19 | assert len(conv_strides) == len(conv_kernels) == len(conv_filters)
20 |
21 | # create conv1d blocks
22 | conv_net = tf.keras.Sequential()
23 | for i, (num_filters, kernel, stride) in enumerate(zip(conv_filters, conv_kernels, conv_strides)):
24 | conv_net.add(tf.keras.layers.Conv1D(num_filters, kernel, stride, padding="SAME"))
25 |
26 | if i != len(conv_filters) - 1:
27 | conv_net.add(tf.keras.layers.BatchNormalization())
28 | conv_net.add(tf.keras.layers.Activation("relu"))
29 |
30 | # create bilstm modules
31 | lstm_net = tf.keras.Sequential()
32 | for i, unit_size in enumerate(bilstm_units):
33 | return_sequences = True
34 | if i == len(bilstm_units) - 1:
35 | return_sequences = False
36 | lstm_net.add(create_bidir_lstm_layer(batch_size, unit_size, return_sequences=return_sequences))
37 |
38 | # create output layer
39 | fc_net = tf.keras.Sequential()
40 | fc_net.add(tf.keras.layers.Flatten())
41 | for i, fc_units in enumerate(fc_layers):
42 | fc_net.add(tf.keras.layers.Dense(fc_units))
43 |
44 | if i != len(fc_layers) - 1:
45 | fc_net.add(tf.keras.layers.BatchNormalization())
46 | fc_net.add(tf.keras.layers.Activation("relu"))
47 |
48 | # add number of outputs
49 | if num_outputs is not None and num_outputs >= 1:
50 | fc_net.add(tf.keras.layers.Dense(num_outputs, None))
51 |
52 | return conv_net, lstm_net, fc_net
53 |
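54 | # The three returned tf.keras.Sequential blocks are composed by the models in
55 | # net/NeuralNets.py, roughly as fc_net(lstm_net(conv_net(x))) for a
56 | # (batch, time, channels) input tensor x.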
--------------------------------------------------------------------------------
/playground.py:
--------------------------------------------------------------------------------
1 | # Author: Michał Bednarek PUT Poznan
2 | # Comment: Helper script for validating data created from the simulation
3 |
4 | import pickle
5 |
6 | import matplotlib.pyplot as plt
7 | import numpy as np
8 |
9 | # path = "./data/dataset/final_ds/real/real_train.pickle"
10 | # paths = ["./data/dataset/final_ds/real/real_train.pickle",
11 | # "./data/dataset/final_ds/real/real_val.pickle",
12 | # "./data/dataset/final_ds/real/real_test.pickle"]
13 | #
14 | # paths = ["./data/dataset/final_ds/sim/sim_train.pickle",
15 | # "./data/dataset/final_ds/sim/sim_val.pickle"]
16 |
17 | # paths = ["./data/dataset/40_10_60/",
18 | # "./data/dataset/final_ds/real/real_val.pickle",
19 | # "./data/dataset/final_ds/real/real_test.pickle"]
20 |
21 | # path = "data/dataset/40_10_60/real_dataset_train.pickle"
22 | real = "./data/experiments/real_200_300/train_050.pickle"
23 | train = "./data/experiments/sim_all/train.pickle"
24 |
25 |
26 | def noised_modality(data, noise_mag: float = 0.2):
27 | noise = np.random.uniform(-noise_mag, noise_mag, size=data.shape)
28 | data += noise
29 | return data
30 |
31 |
32 | def compute_magnitude(samples):
33 | return np.sqrt(samples[:, :, 0] ** 2 + samples[:, :, 1] ** 2 + samples[:, :, 2] ** 2)
34 |
35 |
36 | def playground():
37 | # labels, samples = list(), list()
38 | # for path in paths:
39 | # with open(path, "rb") as fp:
40 | # ds = pickle.load(fp)
41 | # labels.append(ds["stiffness"])
42 | # samples.append(ds["data"])
43 | #
44 | # labels = np.concatenate([*labels], axis=0)
45 | # samples = np.concatenate([*samples], axis=0)
46 | #
47 | # values = np.unique(labels)
48 | # train_dataset_x, train_dataset_y = list(), list()
49 | # val_dataset_x, val_dataset_y = list(), list()
50 | # test_dataset_x, test_dataset_y = list(), list()
51 | #
52 | # for i, val in enumerate(values):
53 | # arr = np.where(labels == val, 1, 0)
54 | # idx = np.argwhere(arr == 1).flatten()
55 | #
56 | # idx_train, idx_val, idx_test = idx[:30], idx[30:40], idx[40:100]
57 | #
58 | # # samples split
59 | # x_train, y_train = samples[idx_train, :, :], labels[idx_train]
60 | # x_train[..., 0] *= -1.0
61 | # x_train[..., 2] *= -1.0
62 | # train_dataset_x.append(x_train)
63 | # train_dataset_y.append(y_train)
64 | #
65 | # x_val, y_val = samples[idx_val, :, :], labels[idx_val]
66 | # x_val[..., 0] *= -1.0
67 | # x_val[..., 2] *= -1.0
68 | # val_dataset_x.append(x_val)
69 | # val_dataset_y.append(y_val)
70 | #
71 | # x_test, y_test = samples[idx_test, :, :], labels[idx_test]
72 | # x_test[..., 0] *= -1.0
73 | # x_test[..., 2] *= -1.0
74 | # test_dataset_x.append(x_test)
75 | # test_dataset_y.append(y_test)
76 | #
77 | # print("Val: {}, num_samples: {}".format(val, arr.sum()))
78 | #
79 | # train_dataset_x = np.vstack(train_dataset_x)
80 | # train_dataset_y = np.vstack(train_dataset_y).flatten()
81 | #
82 | # val_dataset_x = np.vstack(val_dataset_x)
83 | # val_dataset_y = np.vstack(val_dataset_y).flatten()
84 | #
85 | # test_dataset_x = np.vstack(test_dataset_x)
86 | # test_dataset_y = np.vstack(test_dataset_y).flatten()
87 | #
88 | # file = open('data/dataset/40_10_60/real_dataset_train.pickle', 'wb')
89 | # pickle.dump({
90 | # "data": train_dataset_x,
91 | # "stiffness": train_dataset_y
92 | # }, file)
93 | # file.close()
94 | #
95 | # file = open('data/dataset/40_10_60/real_dataset_val.pickle', 'wb')
96 | # pickle.dump({
97 | # "data": val_dataset_x,
98 | # "stiffness": val_dataset_y
99 | # }, file)
100 | # file.close()
101 | #
102 | # file = open('data/dataset/40_10_60/real_dataset_test.pickle', 'wb')
103 | # pickle.dump({
104 | # "data": test_dataset_x,
105 | # "stiffness": test_dataset_y
106 | # }, file)
107 | # file.close()
108 |
109 | with open(train, "rb") as fp:
110 | sim_data = pickle.load(fp)
111 |
112 | with open(real, "rb") as fp:
113 | data_real = pickle.load(fp)
114 |
115 | data = {"data": np.concatenate([sim_data["data"], data_real["data"]], 0),
116 |             "stiffness": np.concatenate([sim_data["stiffness"], data_real["stiffness"]], 0)}
117 |
118 | acc1, acc2 = list(), list()
119 | w1, w2 = list(), list()
120 | m = np.mean(data["data"], axis=(0, 1), keepdims=True)
121 | s = np.std(data["data"], axis=(0, 1), keepdims=True)
122 | # data_real["data"] = (data_real["data"] - m) / s
123 |
124 |
125 | for sampl, stif in zip(data_real["data"], data_real["stiffness"]):
126 | acc1 = np.sqrt(sampl[:, 0] ** 2 + sampl[:, 1] ** 2)
127 | acc2 = np.sqrt(sampl[:, 3] ** 2 + sampl[:, 4] ** 2)
128 | w1 = np.sqrt(sampl[:, 6] ** 2 + sampl[:, 7] ** 2)
129 | w2 = np.sqrt(sampl[:, 9] ** 2 + sampl[:, 10] ** 2)
130 | mag = [acc1, acc2, w1, w2]
131 |
132 | # acc
133 | for i, signal in enumerate(mag):
134 | plt.subplot(4, 1, i + 1)
135 | plt.plot(signal, 'r')
136 |
137 | plt.show()
138 | input(stif)
139 | #
140 | # signal = np.stack([acc1, acc2], -1)
141 | # file = open('data/dataset/val_acc_only_sim.pickle', 'wb')
142 | # pickle.dump({
143 | # "data": signal,
144 | # "stiffness": data["stiffness"]
145 | # }, file)
146 | # file.close()
147 |
148 |
149 | if __name__ == '__main__':
150 | playground()
151 |
--------------------------------------------------------------------------------
/run_experiments.sh:
--------------------------------------------------------------------------------
1 | # stage 1 - pick the best architecture
2 | nohup python -u training_cross_validate.py --data-path-train data/experiments/sim_box/train.pickle --data-path-validation data/experiments/sim_box/val.pickle --data-path-test data/experiments/real_200_300/test.pickle --results data/logs/01_train_conv --model-type conv --epochs 100 > nohup.out &
3 | nohup python -u training_cross_validate.py --data-path-train data/experiments/sim_box/train.pickle --data-path-validation data/experiments/sim_box/val.pickle --data-path-test data/experiments/real_200_300/test.pickle --results data/logs/01_train_lstm_conv --model-type conv_lstm --epochs 100 > nohup.out &
4 | nohup python -u training_cross_validate.py --data-path-train data/experiments/sim_box/train.pickle --data-path-validation data/experiments/sim_box/val.pickle --data-path-test data/experiments/real_200_300/test.pickle --results data/logs/01_train_bilstm_conv --model-type conv_bilstm --epochs 100 > nohup.out &
5 |
6 | # stage 2 - verify that the ConvBiLstm can be effectively used for different shapes of squeezed objects
7 | nohup python -u training_cross_validate.py --data-path-train data/experiments/sim_all/train.pickle --data-path-test data/experiments/sim_all/softball_testing.pickle data/experiments/sim_all/softbox_testing.pickle data/experiments/sim_all/softcylinder_testing.pickle --results data/logs/02_train_conv_bilstm_all_shapes --model-type conv_bilstm --epochs 100 > nohup.out &
8 |
9 | # stage 3 - check how much real data you need in order to ensure reliable performance on real data
10 | nohup python -u training_cross_validate.py --add-validation-to-train --data-path-train data/experiments/sim_box/train.pickle --data-path-validation data/experiments/sim_box/val.pickle --data-path-test data/experiments/real_200_300/test.pickle --results data/logs/03_train_sim_test_real_with_noise --model-type conv_bilstm --epochs 100 --add-noise > nohup1.out &&
11 | nohup python -u training_cross_validate.py --add-validation-to-train --data-path-train data/experiments/sim_box/train.pickle --data-path-validation data/dataset/40_10_60/real_dataset_train_050.pickle --data-path-test data/experiments/real_200_300/test.pickle --results data/logs/03_train_sim_test_real_add_noise_050_real --model-type conv_bilstm --epochs 100 > nohup1.out &&
12 | nohup python -u training_cross_validate.py --add-validation-to-train --data-path-train data/experiments/sim_box/train.pickle --data-path-validation data/dataset/40_10_60/real_dataset_train_100.pickle --data-path-test data/experiments/real_200_300/test.pickle --results data/logs/03_train_sim_test_real_add_noise_100_real --model-type conv_bilstm --epochs 100 > nohup2.out &&
13 | nohup python -u training_cross_validate.py --add-validation-to-train --data-path-train data/experiments/sim_box/train.pickle --data-path-validation data/dataset/40_10_60/real_dataset_train_150.pickle --data-path-test data/experiments/real_200_300/test.pickle --results data/logs/03_train_sim_test_real_add_noise_150_real --model-type conv_bilstm --epochs 100 > nohup3.out &&
14 | nohup python -u training_cross_validate.py --add-validation-to-train --data-path-train data/experiments/sim_box/train.pickle --data-path-validation data/dataset/40_10_60/real_dataset_train_200.pickle --data-path-test data/experiments/real_200_300/test.pickle --results data/logs/03_train_sim_test_real_add_noise_200_real --model-type conv_bilstm --epochs 100 > nohup4.out
15 |
--------------------------------------------------------------------------------
/testing_saved_model.py:
--------------------------------------------------------------------------------
1 | # Author: Michał Bednarek PUT Poznan
2 |
3 | import os
4 | import pickle
5 | import sys
6 | from argparse import ArgumentParser
7 |
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | import tensorflow as tf
11 | from sklearn.model_selection import KFold
12 |
13 | from functions import allow_memory_growth, create_tf_generators
14 | from functions.optimization import normalize_predictions
15 | from net import ConvNet, ConvBiLstmNet
16 |
17 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
18 |
19 |
20 | def do_regression(args):
21 | with open(args.data_path_train, "rb") as fp:
22 | total_dataset = pickle.load(fp)
23 | print("TRAIN NUM SAMPLES: {}".format(len(total_dataset["data"])))
24 |
25 | validation_dataset = None
26 | if args.data_path_validation is not None:
27 | with open(args.data_path_validation, "rb") as fp:
28 | validation_dataset = pickle.load(fp)
29 | print("TO-ADD NUM SAMPLES: {}".format(len(validation_dataset["data"])))
30 |
31 | if validation_dataset is not None:
32 | total_dataset["data"] = np.concatenate([total_dataset["data"], validation_dataset["data"]], 0)
33 | total_dataset["stiffness"] = np.concatenate([total_dataset["stiffness"], validation_dataset["stiffness"]], 0)
34 |
35 | with open(args.data_path_test, "rb") as fp:
36 | test_dataset = pickle.load(fp)
37 | print("TEST NUM SAMPLES: {}".format(len(test_dataset["data"])))
38 |
39 | kf = KFold(n_splits=5, shuffle=True)
40 | for split_no, (train_idx, val_idx) in enumerate(kf.split(total_dataset["data"], total_dataset["stiffness"])):
41 |
42 | # setup model
43 | if args.model_type == "conv":
44 | model = ConvNet(args.batch_size)
45 | elif args.model_type == "lstm":
46 | raise NotImplementedError("LSTM-only model not implemented.")
47 | elif args.model_type == "conv_lstm":
48 | model = ConvBiLstmNet(args.batch_size)
49 | else:
50 | model = ConvNet(args.batch_size)
51 |
52 | # restore from checkpoint
53 | ckpt = tf.train.Checkpoint(model=model)
54 | path = tf.train.latest_checkpoint(args.restore_path)
55 | ckpt.restore(path)
56 |
57 |         _, _, test_ds, train_mean, train_std = create_tf_generators(total_dataset, [test_dataset], train_idx,
58 | val_idx, args.batch_size,
59 | real_data=validation_dataset,
60 | add_real_data=True)
61 |
62 | # start testing
63 | metrics = [
64 | tf.keras.metrics.MeanAbsoluteError(name="MeanAbsoluteError"),
65 | tf.keras.metrics.MeanAbsolutePercentageError(name="MeanAbsolutePercentageError")
66 | ]
67 |
68 | loss_metric = tf.keras.metrics.Mean("Loss")
69 |
70 | error = list()
71 |         for x_train, y_train in test_ds[0]:  # create_tf_generators returns a list of test datasets
72 |
73 | x_train, y_train = tf.cast(x_train, tf.float32), tf.cast(y_train, tf.float32)
74 | predictions = model((x_train - train_mean) / train_std, training=False)
75 | predictions = normalize_predictions(predictions)
76 | loss_metric.update_state(tf.losses.mean_absolute_error(y_train, predictions))
77 |
78 | # gather stats
79 | for m in metrics:
80 | m.update_state(y_train, predictions)
81 | if m.name == "MeanAbsolutePercentageError":
82 | error.append(m.result().numpy())
83 |
84 | # print results
85 | for m in metrics:
86 | result = m.result().numpy()
87 | print("{} : {}".format(m.name, result))
88 |
89 | # plot error/data
90 | plt.scatter(test_dataset["stiffness"], error)
91 | plt.show()
92 | break
93 |
94 |
95 | if __name__ == '__main__':
96 | parser = ArgumentParser()
97 | parser.add_argument('--data-path-train', type=str, default="data/dataset/final_ds/sim/sim_train.pickle")
98 | parser.add_argument('--data-path-validation', type=str,
99 | default="data/dataset/40_10_60/real_dataset_train_200.pickle")
100 | parser.add_argument('--data-path-test', type=str, default="data/dataset/testing_datasets/box_test.pickle")
101 | parser.add_argument('--model-type', type=str, default="conv_lstm", choices=['conv', 'lstm', 'conv_lstm'], )
102 | parser.add_argument('--restore-path', type=str,
103 | default="data/logs/sim2rel_experiments/02_train_sim_test_real_add_noise_200_real/0")
104 | parser.add_argument('--epochs', type=int, default=1)
105 | parser.add_argument('--batch-size', type=int, default=1)
106 | args, _ = parser.parse_known_args()
107 |
108 | if args.model_type not in ['conv', 'lstm', 'conv_lstm']:
109 | parser.print_help()
110 | sys.exit(1)
111 |
112 | allow_memory_growth()
113 |
114 | do_regression(args)
115 |
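116 | # Example invocation (hypothetical checkpoint directory):
117 | #   python testing_saved_model.py --model-type conv_lstm \
118 | #       --restore-path data/logs/01_train_lstm_conv/0 \
119 | #       --data-path-test data/experiments/real_200_300/test.pickle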
--------------------------------------------------------------------------------
/training_cross_validate.py:
--------------------------------------------------------------------------------
1 | # Author: Michał Bednarek PUT Poznan
2 |
3 | import os
4 | import pickle
5 | import sys
6 | from argparse import ArgumentParser
7 |
8 | import numpy as np
9 | import tensorflow as tf
10 | from sklearn.model_selection import KFold
11 | from tqdm import tqdm
12 |
13 | from functions import *
14 | from net import *
15 |
16 |
17 | def do_regression(args):
18 | os.makedirs(args.results, exist_ok=True)
19 |
20 | with open(args.data_path_train, "rb") as fp:
21 | total_dataset = pickle.load(fp)
22 | print("TRAIN NUM SAMPLES: {}".format(len(total_dataset["data"])))
23 |
24 | with open(args.data_path_validation, "rb") as fp:
25 | validation_dataset = pickle.load(fp)
26 | print("TO-ADD NUM SAMPLES: {}".format(len(validation_dataset["data"])))
27 |
28 | test_ds_list = list()
29 | for test_ds_path in args.data_path_test:
30 | with open(test_ds_path, "rb") as fp:
31 | test_dataset = pickle.load(fp)
32 | test_ds_list.append(test_dataset)
33 | print("TEST NUM SAMPLES: {}".format(len(test_dataset["data"])))
34 |
35 | # start a cross validate training
36 | kf = KFold(n_splits=args.num_splits, shuffle=True)
37 | for split_no, (train_idx, val_idx) in enumerate(kf.split(total_dataset["data"], total_dataset["stiffness"])):
38 |
39 |         # save split indexes
40 |         logs_path = os.path.join(args.results, '{}'.format(split_no))
41 |         os.makedirs(logs_path, exist_ok=True)  # create the split directory before writing into it
42 |         print("Cross-validation, split no. {}. Saving dataset sample indexes...".format(split_no))
43 |         np.savetxt(os.path.join(logs_path, "{}_split_train_data_samples.txt".format(split_no)), train_idx)
44 |         np.savetxt(os.path.join(logs_path, "{}_split_val_data_samples.txt".format(split_no)), val_idx)
45 |
46 | # setup model
47 | if args.model_type == "conv":
48 | model = ConvNet(args.batch_size)
49 | elif args.model_type == "conv_lstm":
50 | model = ConvLstmNet(args.batch_size)
51 | elif args.model_type == "conv_bilstm":
52 | model = ConvBiLstmNet(args.batch_size)
53 | else:
54 | model = ConvNet(args.batch_size)
55 | print("default ConvNet created.")
56 |
57 | # setup optimization procedure
58 | eta = tf.Variable(args.lr)
59 | eta_value = tf.keras.optimizers.schedules.ExponentialDecay(args.lr, 100, 0.99)
60 | eta.assign(eta_value(0))
61 | optimizer = tf.keras.optimizers.Adam(eta)
62 | ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
63 |
64 | # restore from checkpoint
65 | if args.restore:
66 | path = tf.train.latest_checkpoint(logs_path)
67 | ckpt.restore(path)
68 | ckpt_man = tf.train.CheckpointManager(ckpt, logs_path, max_to_keep=10)
69 |
70 | # setup writers
71 | os.makedirs(logs_path, exist_ok=True)
72 | train_writer = tf.summary.create_file_writer(logs_path + "/train")
73 | val_writer = tf.summary.create_file_writer(logs_path + "/val")
74 | test_writer = tf.summary.create_file_writer(logs_path + "/test")
75 |
76 | # create split datasets to tf generators
77 | train_ds, val_ds, test_ds, train_mean, train_std = create_tf_generators(total_dataset, test_ds_list, train_idx,
78 | val_idx, args.batch_size,
79 | real_data=validation_dataset,
80 | add_real_data=args.add_validation_to_train)
81 |
82 | # start training
83 | train_step, val_step, test_step = 0, 0, 0
84 | best_metric = [999999999.0 for _ in range(len(test_ds_list))]
85 | for _ in tqdm(range(args.epochs)):
86 | train_step = train(model, train_writer, train_ds, train_mean, train_std, optimizer, train_step,
87 | add_noise=args.add_noise)
88 | val_step, _, _ = validate(model, val_writer, val_ds, train_mean, train_std, val_step)
89 |
90 | test_step, best_metric, save_model = validate(model, test_writer, test_ds, train_mean, train_std, test_step,
91 | prefix="test", best_metric=best_metric)
92 |
93 |             # update the learning rate from the exponential-decay schedule
94 |             eta.assign(eta_value(train_step))
95 |
96 |             # save whenever the best test MAPE improves
97 | if save_model:
98 | ckpt_man.save()
99 | print("Best MAPE model saved.")
100 |
101 |
102 | if __name__ == '__main__':
103 | parser = ArgumentParser()
104 | parser.add_argument('--add-validation-to-train', default=False, action='store_true')
105 |
106 | parser.add_argument('--data-path-train', type=str, default="./data/experiments/real_200_300/test.pickle")
107 | parser.add_argument('--data-path-validation', type=str, default="./data/experiments/real_200_300/train_200.pickle")
108 |
109 | parser.add_argument('--data-path-test', nargs="+", required=True)
110 |
111 | parser.add_argument('--results', type=str, default="data/logs/real_test")
112 |
113 | parser.add_argument('--restore', default=False, action='store_true')
114 | parser.add_argument('--restore-dir', type=str, default="")
115 |
116 | parser.add_argument('--model-type', type=str, default="conv_bilstm", choices=['conv', 'conv_lstm', 'conv_bilstm'], )
117 |
118 | parser.add_argument('--epochs', type=int, default=100)
119 | parser.add_argument('--batch-size', type=int, default=100)
120 | parser.add_argument('--num-splits', type=int, default=5)
121 | parser.add_argument('--lr', type=float, default=1e-3)
122 |
123 | parser.add_argument('--add-noise', default=False, action='store_true')
124 | args, _ = parser.parse_known_args()
125 |
126 |     if args.model_type not in ['conv', 'conv_lstm', 'conv_bilstm']:
127 | parser.print_help()
128 | sys.exit(1)
129 |
130 | allow_memory_growth()
131 |
132 | print("ARGUMENTS: {}".format(args))
133 | do_regression(args)
134 |
--------------------------------------------------------------------------------