├── models
│   ├── __init__.py
│   ├── database.py
│   ├── model_3d.py
│   └── model_arm.py
├── fig
│   └── fig.png
├── configs
│   ├── c3d.txt
│   ├── arm.txt
│   ├── test.txt
│   ├── gibson.txt
│   └── config_loader.py
├── .gitmodules
├── train
│   ├── train_c3d.py
│   ├── train_gib.py
│   └── train_arm.py
├── NTFields_env.yml
├── dataprocessing
│   ├── preprocess.py
│   ├── convert_to_scaled_off.py
│   ├── voxelized_pointcloud_sampling.py
│   └── speed_sampling_gpu.py
├── test
│   ├── sample_sg.py
│   ├── gib_plan.py
│   ├── arm_plan.py
│   └── gib_stat.py
├── .gitignore
├── README.md
└── LICENSE

/models/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/fig/fig.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ruiqini/NTFields/HEAD/fig/fig.png
--------------------------------------------------------------------------------
/configs/c3d.txt:
--------------------------------------------------------------------------------
1 | exp_name = c3d
2 | data_dir = datasets/c3d
3 | input_data_glob = /*/model.obj
4 | num_dim = 3
5 | num_samples = 500000
--------------------------------------------------------------------------------
/configs/arm.txt:
--------------------------------------------------------------------------------
1 | exp_name = arm
2 | data_dir = datasets/arm
3 | input_data_glob = /*/model.obj
4 | num_dim = 6
5 | num_samples = 1000000
--------------------------------------------------------------------------------
/configs/test.txt:
--------------------------------------------------------------------------------
1 | exp_name = test
2 | data_dir = datasets/test
3 | input_data_glob = /*/model.obj
4 | num_dim = 3
5 | num_samples = 100000
--------------------------------------------------------------------------------
/configs/gibson.txt:
--------------------------------------------------------------------------------
1 | exp_name = gibson
2 | data_dir = datasets/gibson
3 | input_data_glob = /*/mesh_z_up.obj
4 | num_dim = 3
5 | num_samples = 1000000
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "bvh-distance-queries"]
2 | 	path = bvh-distance-queries
3 | 	url = https://github.com/YuliangXiu/bvh-distance-queries.git
--------------------------------------------------------------------------------
/train/train_c3d.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('.')
3 | from models import model_3d as md
4 | from os import path
5 | 
6 | 
7 | modelPath = './Experiments/C3D'
8 | dataPath = './datasets/c3d/0'
9 | 
10 | model = md.Model(modelPath, dataPath, 3,[-0.4,-0], device='cuda:0')
11 | 
12 | model.train()
13 | 
14 | 
--------------------------------------------------------------------------------
/train/train_gib.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('.')
3 | from models import model_3d as md
4 | from os import path
5 | 
6 | 
7 | modelPath = './Experiments/Gib'
8 | dataPath = './datasets/gibson/0'
9 | 
10 | model = md.Model(modelPath, dataPath, 3,[-0.25,-0.2], device='cuda:0')
11 | 
12 | model.train()
13 | 
14 | 
--------------------------------------------------------------------------------
/train/train_arm.py:
-------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append('.') 3 | from models import model_arm as md 4 | from os import path 5 | 6 | 7 | modelPath = './Experiments/4DOF' 8 | dataPath = './datasets/arm/4DOF' 9 | 10 | model = md.Model(modelPath, dataPath, 4,[-0.0,-0.0], device='cuda:0') 11 | 12 | model.train() 13 | 14 | 15 | -------------------------------------------------------------------------------- /NTFields_env.yml: -------------------------------------------------------------------------------- 1 | name: NTFields 2 | channels: 3 | - pytorch 4 | - anaconda 5 | - conda-forge 6 | - soumith 7 | - defaults 8 | dependencies: 9 | - cudatoolkit=11.3 10 | - numpy=1.22.3 11 | - numpy-base=1.22.3 12 | - python=3.9.7 13 | - pytorch=1.10.2=py3.9_cuda11.3_cudnn8.2.0_0 14 | - igl=2.2.1 15 | - pip: 16 | - open3d 17 | - pickle5 18 | - pytorch-kinematics 19 | - -e ./bvh-distance-queries 20 | - lxml 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /dataprocessing/preprocess.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | sys.path.append('.') 4 | 5 | from dataprocessing.convert_to_scaled_off import to_off 6 | from dataprocessing.speed_sampling_gpu import sample_speed 7 | import dataprocessing.voxelized_pointcloud_sampling as voxelized_pointcloud_sampling 8 | from glob import glob 9 | import configs.config_loader as cfg_loader 10 | import multiprocessing as mp 11 | from multiprocessing import Pool 12 | import numpy as np 13 | import os 14 | 15 | cfg = cfg_loader.get_config() 16 | print(cfg.data_dir) 17 | print(cfg.input_data_glob) 18 | 19 | print('Finding raw files for preprocessing.') 20 | paths = glob( "./"+cfg.data_dir + cfg.input_data_glob) 21 | print(paths) 22 | paths = sorted(paths) 23 | 24 | 25 | 26 | num_cpus = mp.cpu_count() 27 | 28 | 29 | def multiprocess(func): 30 | p = Pool(num_cpus) 31 | p.map(func, paths) 32 | p.close() 33 | p.join() 34 | 35 | print('Start scaling.') 36 | multiprocess(to_off) 37 | 38 | print('Start speed sampling.') 39 | for path in paths: 40 | sample_speed(path, cfg.num_samples, cfg.num_dim) 41 | 42 | print('Start voxelized pointcloud sampling.') 43 | voxelized_pointcloud_sampling.init(cfg) 44 | multiprocess(voxelized_pointcloud_sampling.voxelized_pointcloud_sampling) 45 | 46 | 47 | -------------------------------------------------------------------------------- /models/database.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch import Tensor 4 | from torch.autograd import Variable, grad 5 | 6 | 7 | 8 | class _numpy2dataset(torch.utils.data.Dataset): 9 | def __init__(self, points, speed, grid): 10 | # Creating identical pairs 11 | points = Variable(Tensor(points)) 12 | speed = Variable(Tensor(speed)) 13 | self.data=torch.cat((points,speed),dim=1) 14 | self.grid = Variable(Tensor(grid)) 15 | 16 | def send_device(self,device): 17 | self.data = self.data.to(device) 18 | self.grid = self.grid.to(device) 19 | 20 | 21 | def __getitem__(self, index): 22 | data = self.data[index] 23 | return data, index 24 | def __len__(self): 25 | return self.data.shape[0] 26 | 27 | def Database(PATH): 28 | 29 | try: 30 | points = np.load('{}/sampled_points.npy'.format(PATH)) 31 | speed = np.load('{}/speed.npy'.format(PATH)) 32 | occupancies = 
np.unpackbits(np.load('{}/voxelized_point_cloud_128res_20000points.npz'.format(PATH))['compressed_occupancies'])
33 |         input = np.reshape(occupancies, (128,)*3)
34 |         grid = np.array(input, dtype=np.float32)
35 |         #print(tau.min())
36 |     except FileNotFoundError:
37 |         print('Please specify a correct source path, or create a dataset')
38 |         raise
39 | 
40 |     print(points.shape,speed.shape)
41 |     print(np.shape(grid))
42 |     #print(XP.shape,YP.shape)
43 |     database = _numpy2dataset(points,speed,grid)
44 |     #database = _numpy2dataset(XP,YP)
45 |     return database
46 | 
47 | 
48 | 
49 | 
50 | 
51 | 
--------------------------------------------------------------------------------
/dataprocessing/convert_to_scaled_off.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | import multiprocessing as mp
4 | from multiprocessing import Pool
5 | import random
6 | import sys
7 | import traceback
8 | import logging
9 | import igl
10 | import numpy as np
11 | logger = logging.getLogger()
12 | logger.setLevel(logging.ERROR)
13 | 
14 | class HiddenPrints:
15 |     def __enter__(self):
16 |         self._original_stdout = sys.stdout
17 |         sys.stdout = open(os.devnull, 'w')
18 | 
19 |     def __exit__(self, exc_type, exc_val, exc_tb):
20 |         sys.stdout.close()
21 |         sys.stdout = self._original_stdout
22 | 
23 | 
24 | def to_off(path):
25 | 
26 |     file_path = os.path.dirname(path)
27 |     data_type = file_path.split('/')[2]
28 |     #print(data_type)
29 |     file_name = os.path.splitext(os.path.basename(path))[0]
30 |     output_file = os.path.join(file_path,file_name + '_scaled.off')
31 | 
32 |     if os.path.exists(output_file):
33 |         print('Exists: {}'.format(output_file))
34 |         return
35 | 
36 |     try:
37 | 
38 |         v, f = igl.read_triangle_mesh(path)
39 | 
40 |         bb_max = v.max(axis=0, keepdims=True)
41 |         bb_min = v.min(axis=0, keepdims=True)
42 |         #print(centers)
43 |         #print(bb_max-bb_min)
44 |         if data_type == 'c3d':
45 |             v/=40
46 |         elif data_type != 'arm':
47 |             centers = (bb_max+bb_min)/2.0
48 |             v = v-centers
49 |             v = v/(bb_max-bb_min)
50 | 
51 |         igl.write_triangle_mesh(output_file, v, f)
52 | 
53 |         print('Finished: {}'.format(path))
54 |     except Exception:
55 |         print('Error with {}: {}'.format(path, traceback.format_exc()))
56 | 
--------------------------------------------------------------------------------
/dataprocessing/voxelized_pointcloud_sampling.py:
--------------------------------------------------------------------------------
1 | from scipy.spatial import cKDTree as KDTree
2 | import numpy as np
3 | import os
4 | import traceback
5 | import igl
6 | import open3d as o3d
7 | 
8 | kdtree, grid_points, cfg = None, None, None
9 | def voxelized_pointcloud_sampling(path):
10 |     try:
11 | 
12 |         out_path = os.path.dirname(path)
13 |         file_name = os.path.splitext(os.path.basename(path))[0]
14 |         input_file = os.path.join(out_path,file_name + '_scaled.off')
15 |         out_file = out_path + '/voxelized_point_cloud_{}res_{}points.npz'.format(cfg.input_res, cfg.num_points)
16 | 
17 | 
18 |         if os.path.exists(out_file):
19 |             print(f'Exists: {out_file}')
20 |             return
21 | 
22 |         mesh = o3d.io.read_triangle_mesh(input_file)
23 |         pcl = mesh.sample_points_poisson_disk(cfg.num_points)
24 |         point_cloud = np.asarray(pcl.points)
25 |         pcf=np.array([[0,0,0]])
26 |         igl.write_obj(out_path + '/pc.obj', point_cloud, pcf)
27 | 
28 |         occupancies = np.zeros(len(grid_points), dtype=np.int8)
29 | 
30 |         _, idx = kdtree.query(point_cloud)
31 |         occupancies[idx] = 1
32 | 
33 |         compressed_occupancies = np.packbits(occupancies)
34 | 
35 |         np.savez(out_file, 
point_cloud=point_cloud, compressed_occupancies = compressed_occupancies, bb_min = cfg.bb_min, bb_max = cfg.bb_max, res = cfg.input_res)
36 |         print('Finished: {}'.format(path))
37 | 
38 |     except Exception:
39 |         print('Error with {}: {}'.format(path, traceback.format_exc()))
40 | 
41 | def init(cfg_param):
42 |     global kdtree, grid_points, cfg
43 |     cfg = cfg_param
44 |     grid_points = create_grid_points_from_bounds(cfg.bb_min, cfg.bb_max, cfg.input_res)
45 |     kdtree = KDTree(grid_points)
46 | 
47 | def create_grid_points_from_bounds(minimum, maximum, res):
48 |     x = np.linspace(minimum, maximum, res)
49 |     X, Y, Z = np.meshgrid(x, x, x, indexing='ij')
50 |     X = X.reshape((np.prod(X.shape),))
51 |     Y = Y.reshape((np.prod(Y.shape),))
52 |     Z = Z.reshape((np.prod(Z.shape),))
53 | 
54 |     points_list = np.column_stack((X, Y, Z))
55 |     del X, Y, Z, x
56 |     return points_list
--------------------------------------------------------------------------------
/configs/config_loader.py:
--------------------------------------------------------------------------------
1 | import configargparse
2 | import numpy as np
3 | import os
4 | 
5 | 
6 | def config_parser():
7 |     parser = configargparse.ArgumentParser()
8 | 
9 |     # Experiment Setup
10 |     parser.add_argument('--config', is_config_file=True, default='configs/shapenet_cars.txt',
11 |                         help='config file path')
12 |     parser.add_argument("--exp_name", type=str, default=None,
13 |                         help='Experiment name, used as folder name for the experiment. If left blank, a \
14 |                         name will be auto generated based on the configuration settings.')
15 |     parser.add_argument("--data_dir", type=str,
16 |                         help='input data directory')
17 |     parser.add_argument("--input_data_glob", type=str,
18 |                         help='glob expression to find raw input files')
19 |     # Training Data Parameters
20 |     parser.add_argument("--num_points", type=int, default=20000,
21 |                         help='Number of points sampled from each ground truth shape.')
22 |     parser.add_argument("--num_samples", type=int, default=1e6,
23 |                         help='Number of start-goal pairs sampled in space.')
24 |     parser.add_argument("--num_dim", type=int, default=3,
25 |                         help='Number of dimensions of the configuration space.')
26 | 
27 |     parser.add_argument("--bb_min", default=-0.5, type=float,
28 |                         help='Training and testing shapes are normalized to be in a common bounding box.\
29 |                         This value defines the min value in x,y and z for the bounding box.')
30 |     parser.add_argument("--bb_max", default=0.5, type=float,
31 |                         help='Training and testing shapes are normalized to be in a common bounding box.\
32 |                         This value defines the max value in x,y and z for the bounding box.')
33 |     parser.add_argument("--input_res", type=int, default=128,
34 |                         help='Resolution of the voxel grid that the input point cloud is \
35 |                         discretized into (input_res cells along each axis).')
36 | 
37 | 
38 |     return parser
39 | 
40 | 
41 | def get_config():
42 |     parser = config_parser()
43 |     cfg = parser.parse_args()
44 | 
45 |     return cfg
46 | 
--------------------------------------------------------------------------------
/test/sample_sg.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('.')
3 | import numpy as np
4 | #import open3d as o3d
5 | import igl
6 | 
7 | from timeit import default_timer as timer
8 | 
9 | 
10 | for gib_id in range(2):
11 | 
12 |     v, f = igl.read_triangle_mesh("datasets/gibson/"+str(gib_id)+"/mesh_z_up_scaled.off")
13 |     v=v*20
14 | 
15 |     Xmin=[-9.9,-9.9,-9.9]
16 |     Xmax=[9.9,9.9,9.9]
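    # Rejection-sample start-goal pairs: draw starts P uniformly in the box,
    # place each goal nP at a random direction dP and length rL from its
    # start, and resample any pair whose goal lands outside the box. Pairs
    # are then kept only when both endpoints lie at least 2.0 from the mesh
    # surface (signed distance) and in free space (winding number < 0).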
17 |     Xmin = np.append(Xmin,Xmin)
18 |     Xmax = np.append(Xmax,Xmax)
19 |     dim=3
20 |     numsamples = 2500*(gib_id+1)
21 |     X = np.zeros((numsamples,2*dim))
22 |     PointsOutside = np.arange(numsamples)
23 |     while len(PointsOutside) > 0:
24 |         P = np.random.rand(len(PointsOutside),dim)*(Xmax[:dim]-Xmin[:dim])[None,:] + Xmin[:dim][None,:]
25 |         dP = np.random.rand(len(PointsOutside),dim)-0.5
26 |         rL = ((np.random.rand(len(PointsOutside),1))+0.10)*np.sqrt(np.sum((Xmax-Xmin)**2))
27 |         nP = P + (dP/np.sqrt(np.sum(dP**2,axis=1))[:,np.newaxis])*rL
28 | 
29 |         X[PointsOutside,:dim] = P
30 |         X[PointsOutside,dim:] = nP
31 | 
32 |         maxs = np.any((X[:,dim:] > Xmax[:dim][None,:]),axis=1)
33 |         mins = np.any((X[:,dim:] < Xmin[:dim][None,:]),axis=1)
34 |         OutOfDomain = np.any(np.concatenate((maxs[:,None],mins[:,None]),axis=1),axis=1)
35 |         PointsOutside = np.where(OutOfDomain)[0]
36 |     print(X)
37 |     vec=X[:,0:3]-X[:,3:6]
38 | 
39 |     p0=X[:,0:3]
40 | 
41 |     d0 = igl.signed_distance(p0, v, f)[0]
42 | 
43 |     p1=X[:,3:6]
44 | 
45 |     d1 = igl.signed_distance(p1, v, f)[0]
46 | 
47 |     mask = np.logical_and(d0>2.0 , d1>2.0)
48 |     newX=X[mask,:]
49 | 
50 |     w0 = igl.winding_number(v, f, newX[:,0:3])
51 |     w1 = igl.winding_number(v, f, newX[:,3:6])
52 | 
53 |     mask1 = np.logical_and(w0<0.0 , w1<0.0)
54 |     finalX = newX[mask1,:]
55 | 
56 |     print(len(finalX))
57 |     finalX=finalX[0:500,:]
58 |     np.save("sample_sg_"+str(gib_id),finalX)
59 |     #'''
60 |     '''
61 |     pcd2 = o3d.geometry.PointCloud()
62 |     pcd2.points = o3d.utility.Vector3dVector(finalX[:,0:3])
63 | 
64 |     pcd2.paint_uniform_color([0, 0.706, 1])
65 | 
66 |     pcd = o3d.geometry.PointCloud()
67 |     pcd.points = o3d.utility.Vector3dVector(finalX[:,3:6])
68 | 
69 |     pcd.paint_uniform_color([1, 0.706, 0])
70 | 
71 |     mesh = o3d.io.read_triangle_mesh("datasets/gibson/0/mesh_z_up_scaled.off")
72 |     mesh.scale(20, center=(0,0,0))
73 |     o3d.visualization.draw_geometries([mesh,pcd, pcd2],mesh_show_wireframe=True)
74 |     '''
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 | 
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # Datasets 132 | datasets/ 133 | .npy 134 | # Experiments 135 | Experiments/ 136 | 137 | 138 | -------------------------------------------------------------------------------- /test/gib_plan.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append('.') 3 | from models import model_3d as md 4 | import torch 5 | import os 6 | import numpy as np 7 | import math 8 | import torch 9 | from torch import Tensor 10 | from torch.autograd import Variable, grad 11 | 12 | from timeit import default_timer as timer 13 | import math 14 | 15 | import igl 16 | import open3d as o3d 17 | 18 | #except: 19 | # continue 20 | our_path=[] 21 | our_time=[] 22 | our_dis=[] 23 | collision=0 24 | 25 | modelPath = './Experiments/Gib'#arona,bolton,cabin,A_test 26 | #modelPath = './Experiments/Gib_res_changelr_scale' 27 | 28 | dataPath = './datasets/gibson/'#Arona,Cabin,Bolton#filePath = './Experiments/Gibson' 29 | womodel = md.Model(modelPath, dataPath, 3,[0,0], device='cuda') 30 | 31 | 32 | 33 | womodel.load('./Experiments/Gib/Model_Epoch_01100_0.pt') 34 | 35 | 36 | 37 | path = "datasets/gibson/0/voxelized_point_cloud_128res_20000points.npz" 38 | 39 | occupancies = np.unpackbits(np.load(path)['compressed_occupancies']) 40 | input = np.reshape(occupancies, (128,)*3) 41 | grid = np.array(input, dtype=np.float32) 42 | print(np.shape(grid)) 43 | grid = torch.from_numpy(grid).to('cuda:0').float() 44 | grid = grid.unsqueeze(0) 45 | 46 | 47 | f_0, f_1 = womodel.network.env_encoder(grid) 48 | 49 | for i in range(5): 50 | 51 | start_goal = np.array([[-6,-4,-6,6,7,-2.5]]) 52 | XP=start_goal 53 | XP = Variable(Tensor(XP)).to('cuda') 54 | XP=XP/20.0 55 | 56 | #print(XP) 57 | 
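    # Bidirectional marching on the learned time field: each iteration below
    # evaluates the gradient of the predicted travel time at the current
    # (start, goal) pair and takes a fixed 0.03 step that moves both endpoints
    # toward each other, stopping once they are within 0.06 (or after 500
    # iterations). Reversing the goal-side trace and concatenating the two
    # halves yields the final path.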
dis=torch.norm(XP[:,3:6]-XP[:,0:3]) 58 | start = timer() 59 | 60 | point0=[] 61 | point1=[] 62 | 63 | point0.append(XP[:,0:3]) 64 | point1.append(XP[:,3:6]) 65 | #print(id) 66 | 67 | iter=0 68 | while dis>0.06: 69 | gradient = womodel.Gradient(XP.clone(), f_0, f_1) 70 | 71 | XP = XP + 0.03 * gradient 72 | dis=torch.norm(XP[:,3:6]-XP[:,0:3]) 73 | point0.append(XP[:,0:3]) 74 | point1.append(XP[:,3:6]) 75 | iter=iter+1 76 | if(iter>500): 77 | break 78 | 79 | end = timer() 80 | 81 | print("time",end-start) 82 | point1.reverse() 83 | point=point0+point1 84 | 85 | xyz= torch.cat(point).to('cpu').data.numpy() 86 | 87 | xyz=20*xyz 88 | 89 | 90 | pcd = o3d.geometry.PointCloud() 91 | pcd.points = o3d.utility.Vector3dVector(xyz) 92 | 93 | mesh = o3d.io.read_triangle_mesh("datasets/gibson/0/mesh_z_up_scaled.off") 94 | 95 | mesh.scale(20, center=(0,0,0)) 96 | 97 | mesh.compute_vertex_normals() 98 | 99 | pcd.paint_uniform_color([0, 0.706, 1]) 100 | 101 | 102 | o3d.visualization.draw_geometries([mesh,pcd]) 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NTFields 2 | 3 | >**NTFields: Neural Time Fields for Physics-Informed Robot Motion Planning** 4 | \ 5 | >[Ruiqi Ni](https://ruiqini.github.io/), 6 | [Ahmed H Qureshi](https://qureshiahmed.github.io/) 7 | 8 | 9 | 10 | 11 | _[Paper](https://openreview.net/forum?id=ApF0dmi1_9K) | 12 | [GitHub](https://github.com/ruiqini/NTFields) | 13 | [arXiv](https://arxiv.org/abs/2210.00120) | 14 | Published in ICLR 2023._ 15 | 16 | ## Introduction 17 | 18 | This repository is the official implementation of "NTFields: Neural Time Fields for Physics-Informed Robot Motion Planning". 
19 | 
20 | ## Installation
21 | 
22 | Clone the repository into your local machine:
23 | 
24 | ```
25 | git clone https://github.com/ruiqini/NTFields --recursive
26 | ```
27 | 
28 | Install requirements:
29 | 
30 | ```setup
31 | conda env create -f NTFields_env.yml
32 | conda activate NTFields
33 | ```
34 | 
35 | Download the datasets and pretrained models, extract them, and put `datasets/` and `Experiments/` in the repository directory:
36 | 
37 | [Datasets and pretrained model](https://drive.google.com/file/d/140W0iOJOwA-nku831mQgPIGGQmXAKtrz/view?usp=share_link)
38 | 
39 | >The repository directory should look like this:
40 | ```
41 | NTFields/
42 | ├── datasets/
43 | │   ├── arm/ # 4DOF and 6DOF robot arm, table environment
44 | │   ├── c3d/ # C3D environment
45 | │   ├── gibson/ # Gibson environment
46 | │   └── test/ # box and bunny environment
47 | ├── Experiments
48 | │   ├── 4DOF/ # pretrained model for 4DOF arm
49 | │   └── Gib/ # pretrained model for Gibson
50 | • • •
51 | • • •
52 | ```
53 | 
54 | ## Pre-processing
55 | 
56 | To prepare the Gibson data, run:
57 | 
58 | ```
59 | python dataprocessing/preprocess.py --config configs/gibson.txt
60 | ```
61 | 
62 | To prepare the arm data, run:
63 | 
64 | ```
65 | python dataprocessing/preprocess.py --config configs/arm.txt
66 | ```
67 | 
68 | ## Testing
69 | 
70 | To visualize our path in a Gibson environment, run:
71 | 
72 | ```eval
73 | python test/gib_plan.py
74 | ```
75 | 
76 | To visualize our path in the 4DOF arm environment, run:
77 | 
78 | ```eval
79 | python test/arm_plan.py
80 | ```
81 | 
82 | To sample random starts and goals in Gibson environments, run:
83 | 
84 | ```eval
85 | python test/sample_sg.py
86 | ```
87 | 
88 | To show our statistics results in Gibson environments, run:
89 | 
90 | ```eval
91 | python test/gib_stat.py
92 | ```
93 | 
94 | ## Training
95 | 
96 | To train our model in a Gibson environment, run:
97 | 
98 | ```train
99 | python train/train_gib.py
100 | ```
101 | 
102 | To train our model in the 4DOF arm environment, run:
103 | 
104 | ```train
105 | python train/train_arm.py
106 | ```
107 | 
108 | ## Citation
109 | 
110 | Please cite our paper if you find it useful in your research:
111 | 
112 | ```
113 | @inproceedings{
114 | ni2023ntfields,
115 | title={{NTF}ields: Neural Time Fields for Physics-Informed Robot Motion Planning},
116 | author={Ruiqi Ni and Ahmed H Qureshi},
117 | booktitle={International Conference on Learning Representations},
118 | year={2023},
119 | url={https://openreview.net/forum?id=ApF0dmi1_9K}
120 | }
121 | ```
122 | 
123 | ## Acknowledgement
124 | This implementation takes [EikoNet](https://github.com/Ulvetanna/EikoNet) and [NDF](https://github.com/jchibane/ndf/) as references. We thank the authors for their excellent work.
125 | 
126 | 
127 | ## License
128 | 
129 | NTFields is released under the license in the LICENSE file; see that file for the full terms.
130 | 131 | 132 | -------------------------------------------------------------------------------- /test/arm_plan.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append('.') 3 | import torch 4 | import pytorch_kinematics as pk 5 | import numpy as np 6 | 7 | import matplotlib.pylab as plt 8 | 9 | import open3d as o3d 10 | import math 11 | import torch 12 | from torch import Tensor 13 | from torch.autograd import Variable, grad 14 | 15 | from models import model_arm as md 16 | from timeit import default_timer as timer 17 | 18 | file_path = 'datasets/arm/' 19 | path_name_ = '4DOF' 20 | 21 | out_path_ = file_path+path_name_ 22 | end_effect_='Link_3' 23 | 24 | modelPath = './Experiments/4DOF' 25 | dataPath = './datasets/arm/4DOF' 26 | model = md.Model(modelPath,dataPath,4,[-0.0,-0.0],device='cuda') 27 | model.load('./Experiments/4DOF/Model_Epoch_01100.pt') 28 | 29 | path = 'datasets/arm/'+path_name_+'/voxelized_point_cloud_128res_20000points.npz' 30 | 31 | occupancies = np.unpackbits(np.load(path)['compressed_occupancies']) 32 | input = np.reshape(occupancies, (128,)*3) 33 | x = np.array(input, dtype=np.float32) 34 | print(np.shape(x)) 35 | x = torch.from_numpy(x).cuda().float() 36 | x = x.unsqueeze(0) 37 | f_0, f_1 = model.network.env_encoder(x) 38 | 39 | for ii in range(5): 40 | start_goal = [[0.2,-1.5,-0.6,-1.5, 41 | -1,1.5,0.8,-1.8]] 42 | 43 | #start_goal = [[-0.2*math.pi,0.1,0.5,1.5, 44 | # 0.3*math.pi,0.1,0.5,1.5]] 45 | 46 | #start_goal = [[1.8,-0.2,0.2,2.2, 47 | # -2.6,0.2,0.8,1.8]] 48 | 49 | scale = 1.8 * math.pi 50 | 51 | XP=start_goal 52 | XP = Variable(Tensor(XP)).to('cuda')/scale 53 | dis=torch.norm(XP[:,4:8]-XP[:,0:4]) 54 | 55 | 56 | 57 | start = timer() 58 | 59 | point0=[] 60 | point1=[] 61 | point0.append(XP[:,:4].clone()) 62 | point1.append(XP[:,4:].clone()) 63 | while dis>0.04: 64 | gradient = model.Gradient(XP.clone(), f_0, f_1) 65 | 66 | XP = XP + 0.02 * gradient 67 | dis=torch.norm(XP[:,4:8]-XP[:,0:4]) 68 | point0.append(XP[:,:4].clone()) 69 | point1.append(XP[:,4:].clone()) 70 | end = timer() 71 | print("time",end-start) 72 | 73 | point1.reverse() 74 | point=point0+point1 75 | 76 | xyz=torch.cat(point)*scale 77 | 78 | d = "cuda" if torch.cuda.is_available() else "cpu" 79 | dtype = torch.float32 80 | 81 | chain = pk.build_serial_chain_from_urdf( 82 | open(out_path_+'/'+path_name_+".urdf").read(), end_effect_) 83 | chain = chain.to(dtype=dtype, device=d) 84 | 85 | N = 1 86 | #th_batch = torch.tensor([[-0.2*math.pi,0.1,0.5,1.5,0,1], 87 | # [0.3*math.pi,0.1,0.5,1.5,1,0]]).cuda() 88 | tg_batch = chain.forward_kinematics(xyz, end_only = False) 89 | 90 | p_list=[] 91 | iter = 0 92 | pointsize = 0 93 | for tg in tg_batch: 94 | print(iter,tg) 95 | if iter>1: 96 | v = np.load(out_path_+'/meshes/collision/'+tg+'.npy') 97 | nv = np.ones((v.shape[0],4)) 98 | pointsize = pointsize+v.shape[0] 99 | 100 | nv[:,:3]=v[:,:3] 101 | m = tg_batch[tg].get_matrix() 102 | #print(m.shape) 103 | t=torch.from_numpy(nv).float().cuda() 104 | p=torch.matmul(m[:],t.T) 105 | #p=p.cpu().numpy() 106 | p = torch.permute(p, (0, 2, 1)).contiguous() 107 | #p=np.transpose(p,(0,2,1)) 108 | p_list.append(p) 109 | del m,p,t,nv, v 110 | iter = iter+1 111 | 112 | p = torch.cat(p_list, dim=1) 113 | p = torch.reshape(p,(p.shape[0]*p.shape[1],p.shape[2])).contiguous() 114 | query_points = p[:,0:3].contiguous() 115 | 116 | xyz = 0.4*query_points.detach().cpu().numpy() 117 | 118 | pcd = o3d.geometry.PointCloud() 119 | pcd.points = 
o3d.utility.Vector3dVector(xyz[:,0:3]) 120 | 121 | mesh_name = 'model_scaled.off' 122 | 123 | path = file_path + path_name_+'/' 124 | 125 | obstacle = o3d.io.read_triangle_mesh(path + mesh_name) 126 | 127 | obstacle.compute_vertex_normals() 128 | 129 | o3d.visualization.draw_geometries([obstacle,pcd]) 130 | 131 | -------------------------------------------------------------------------------- /test/gib_stat.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append('.') 3 | from models import model_3d as md 4 | import torch 5 | import os 6 | import numpy as np 7 | import math 8 | import matplotlib 9 | import matplotlib.pylab as plt 10 | import torch 11 | from torch.nn import Linear 12 | from torch import Tensor 13 | from torch.nn import MSELoss 14 | from torch.optim import SGD, Adam, RMSprop 15 | from torch.autograd import Variable, grad 16 | from torch.utils.data.sampler import SubsetRandomSampler,WeightedRandomSampler 17 | 18 | from timeit import default_timer as timer 19 | import math 20 | 21 | import igl 22 | import bvh_distance_queries 23 | 24 | our_path=[] 25 | our_time=[] 26 | our_dis=[] 27 | collision=0 28 | 29 | modelPath = './Experiments/Gib' 30 | 31 | dataPath = './datasets/gibson/' 32 | womodel = md.Model(modelPath, dataPath, 3,[0,0], device='cuda:0') 33 | 34 | 35 | for gib_id in range(2): 36 | womodel.load('./Experiments/Gib/Model_Epoch_01100_'+str(gib_id)+'.pt') 37 | 38 | womodel.network.eval() 39 | v, f = igl.read_triangle_mesh("datasets/gibson/"+str(gib_id)+"/mesh_z_up_scaled.off") 40 | print(gib_id) 41 | 42 | vertices=v*20 43 | faces=f 44 | 45 | vertices = torch.tensor(vertices, dtype=torch.float32, device='cuda:0') 46 | faces = torch.tensor(faces, dtype=torch.long, device='cuda:0') 47 | triangles = vertices[faces].unsqueeze(dim=0) 48 | 49 | path = "datasets/gibson/"+str(gib_id)+"/voxelized_point_cloud_128res_20000points.npz" 50 | 51 | occupancies = np.unpackbits(np.load(path)['compressed_occupancies']) 52 | input = np.reshape(occupancies, (128,)*3) 53 | grid = np.array(input, dtype=np.float32) 54 | print(np.shape(grid)) 55 | grid = torch.from_numpy(grid).to('cuda:0').float() 56 | grid = grid.unsqueeze(0) 57 | 58 | 59 | f_0, f_1 = womodel.network.env_encoder(grid) 60 | 61 | 62 | sg = np.load("sample_sg_"+str(gib_id)+".npy") 63 | sg = Variable(Tensor(sg)).to('cuda') 64 | 65 | coll_sg =[] 66 | for id in range(500): 67 | 68 | 69 | start_goal = sg[id,:].unsqueeze(0) 70 | 71 | XP=start_goal 72 | 73 | XP=XP/20.0 74 | 75 | dis=torch.norm(XP[:,3:6]-XP[:,0:3]) 76 | 77 | start = timer() 78 | 79 | point0=[] 80 | point1=[] 81 | 82 | point0.append(XP[:,0:3].clone()) 83 | point1.append(XP[:,3:6].clone()) 84 | #print(id) 85 | 86 | iter=0 87 | while dis>0.06: 88 | gradient = womodel.Gradient(XP.clone(), f_0, f_1) 89 | 90 | XP = XP + 0.03 * gradient 91 | dis=torch.norm(XP[:,3:6]-XP[:,0:3]) 92 | 93 | point0.append(XP[:,0:3].clone()) 94 | point1.append(XP[:,3:6].clone()) 95 | iter=iter+1 96 | if(iter>500): 97 | break 98 | 99 | end = timer() 100 | 101 | 102 | point1.reverse() 103 | point=point0+point1 104 | 105 | xyz= torch.cat(point).to('cpu').data.numpy()#np.asarray(point) 106 | 107 | 108 | xyz=20*xyz 109 | STEP = 0.001 110 | all_p=[] 111 | for j in range(xyz.shape[0]-1): 112 | ll = np.linalg.norm(xyz[j+1] - xyz[j]) 113 | all_p.append(xyz[j]) 114 | all_p.append(xyz[j+1]) 115 | for k in range(math.floor(ll/STEP)): 116 | cur_p=xyz[j]+k/math.floor(ll/STEP)*(xyz[j+1] - xyz[j]) 117 | all_p.append(cur_p) 118 | query_points = 
np.vstack(all_p) 119 | #print(query_points) 120 | 121 | #unsigned_distance = igl.signed_distance(query_points, vertices, faces)[0] 122 | query_points = torch.tensor(query_points, dtype=torch.float32, device='cuda') 123 | query_points = query_points.unsqueeze(dim=0) 124 | #print(query_points.shape) 125 | bvh = bvh_distance_queries.BVH() 126 | torch.cuda.synchronize() 127 | torch.cuda.synchronize() 128 | distances, closest_points, closest_faces, closest_bcs= bvh(triangles, query_points) 129 | torch.cuda.synchronize() 130 | #unsigned_distance = abs() 131 | #print(distances.shape) 132 | unsigned_distance = torch.sqrt(distances).squeeze() 133 | unsigned_distance = unsigned_distance.detach().cpu().numpy() 134 | 135 | if np.min(unsigned_distance)<=0.01 or iter>500 or np.max(xyz)>13 or np.min(xyz)<-13: 136 | collision+=1 137 | coll_sg.append(start_goal.to('cpu').data.numpy()) 138 | else: 139 | our_path.append(xyz) 140 | our_time.append(end - start) 141 | our_dis.append(np.min(unsigned_distance)) 142 | 143 | coll_sg=np.asarray(coll_sg) 144 | 145 | print(collision) 146 | 147 | def length(path): 148 | size=path.shape[0] 149 | l=0 150 | for i in range(size-1): 151 | l+=np.linalg.norm(path[i+1,:]-path[i,:]) 152 | return l 153 | #our_path=np.asarray(our_path) 154 | our_time=np.asarray(our_time) 155 | 156 | our_len=[] 157 | for i in range(len(our_path)): 158 | our_len.append(length(our_path[i])) 159 | our_len=np.asarray(our_len) 160 | our_dis=np.asarray(our_dis) 161 | print(np.max(our_len)) 162 | print("len p",len(our_path)) 163 | print("len t",len(our_time)) 164 | print(np.sum(our_time)/len(our_path)) 165 | print(np.sum(our_len)/len(our_path)) 166 | 167 | np.save("gib_len",our_len) 168 | np.save("gib_time",our_time) 169 | np.save("gib_dis",our_dis) 170 | 171 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | SYSTEMS AND METHODS FOR NEURAL TIME FIELDS Non-Commercial Open Source Software License 2 | 3 | 4 | 5 | Copyright © 2024 Purdue University. Developed by Ruiqi Ni & Ahmed H. Qureshi and CoRAL Lab. Purdue Research Foundation Reference Number 70169-01 6 | 7 | 8 | 9 | This Non-Commercial Open Source Software License (the "License" or “Agreement”) applies to any Software made available by its owner (the "Licensor"), and Derivative Works thereof, for use according to the following terms and conditions. 10 | 11 | 12 | 13 | For purposes of this License: 14 | 15 | 16 | 17 | “Software” means (i) the actual copy of all or any portion of code for program routines made accessible to You by Licensor pursuant to this Agreement, inclusive of backups, updates, and/or merged copies permitted hereunder or subsequently supplied by Licensor, including all or any file structures, programming instructions, user interfaces and screen formats and sequences as well as any and all documentation and instructions related to it. 18 | 19 | 20 | 21 | “Derivative Works” means any and all derivatives and/or modifications created or made by You to the Software. 22 | 23 | 24 | 25 | “You”, whether in upper or lower case, means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with you. 
For purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 26 | 27 | 28 | 29 | You acknowledge and agree: 30 | 31 | 32 | 33 | Grant of Copyright License. Licensor grants You a worldwide, royalty-free, non-exclusive license to the Software for the duration of the copyright to redistribute and use for non-commercial purposes in source and binary forms, with or without modification, provided that the following conditions are met: 34 | 35 | 36 | 37 | The Software is used solely for non-commercial purposes. It may not be used indirectly for commercial use, such as on a website that accepts advertising money for content. Non- commercial use does include use by a for-profit company in its research. For commercial use rights, contact the Office of Technology Commercialization at Purdue Research Foundation, at otcip@prf.org and inquire about \[insert software name]. 38 | 39 | 40 | 41 | Redistributions of source code must retain the above copyright notice, this list of conditions, and the disclaimer in Section 4 below. 42 | 43 | 44 | 45 | Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer in Section 4 below in the documentation and/or other materials provided with the distribution. 46 | 47 | 48 | 49 | Neither the name of Purdue Research Foundation, Purdue University, nor the names of its contributors, nor any of their trademarks or service marks, may be used, including to endorse or promote products derived from this Software, without specific prior written permission. 50 | 51 | 52 | 53 | No Implied License. Except as expressly stated herein, nothing in this License grants any license to Licensor's trademarks, copyrights, patents, trade secrets or any other intellectual property. No patent license is granted to make, use, sell, offer for sale, have made, or import embodiments of any patent claims. No license is granted to the trademarks of Licensor even if such marks are included in the Software. Nothing in this License shall be interpreted to prohibit Licensor from licensing under terms different from this License any Software that Licensor otherwise would have a right to license. 54 | 55 | 56 | 57 | Same License and Attribution in Derivative Works. Derivative Works must be released under this same license when distributing Derivative Works or the licensed Software. You must retain in the source code of any Derivative Works that You create, all copyright, patent, or trademark notices from the source code of the Software, as well as any notices of licensing and any descriptive text identified therein as an "Attribution Notice." You must cause the source code for any Derivative Works that You create to carry a prominent Attribution Notice reasonably calculated to inform recipients that You have modified the Software. 58 | 59 | 60 | 61 | DISCLAIMER OF WARRANTIES. THE SOFTWARE IS PROVIDED “AS-IS”WITHOUT WARRANTY OF ANY KIND INCLUDING ANY WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE OR PURPOSE OR OF NON-INFRINGEMENT. LICENSEE BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE SOFTWARE AND RELATED MATERIALS. 62 | 63 | 64 | 65 | Limitation of Liability. 
Under no circumstances and under no legal theory, whether in tort (including negligence), contract, or otherwise, shall the Licensor be liable to anyone for any indirect, special, incidental, or consequential damages of any character arising as a result of this License or the use of the Software including, without limitation, damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses. This limitation of liability shall not apply to the extent applicable law prohibits such limitation. 66 | 67 | 68 | 69 | Acceptance and Termination. Your act of accessing or using the Software constitutes express assent to and acceptance of the terms and conditions of this License. Nothing in this License is intended to affect copyright exceptions and limitations (including "fair use" or "fair dealing"). This License shall terminate immediately, and You may no longer exercise any of the rights granted to You by this License, upon your failure to honor the conditions in Section 1. 70 | 71 | 72 | 73 | Jurisdiction, Venue and Governing Law. Any action or suit relating to this License may be brought only in the courts of a jurisdiction wherein the Licensor resides or in which Licensor conducts its primary business, and under the laws of that jurisdiction without regard to its conflict- of-law provisions. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any use of the Software outside the scope of this License or after its termination shall be subject to the requirements and penalties of copyright or patent law in the appropriate jurisdiction. This section shall survive the termination of this License. 74 | 75 | 76 | 77 | Severability. If any provision or portion of any provision of this Agreement not essential to the purpose of this Agreement shall be held to be illegal, invalid or unenforceable by a court of competent jurisdiction, it is the intention of the You and Licensor that the remaining provisions or portions thereof shall constitute their agreement with respect to the subject matter hereof, and all such remaining provisions or portions thereof shall remain in full force and effect. To the extent legally permissible, any illegal, invalid or unenforceable provision of this Agreement shall be replaced by a valid provision which will implement the commercial purpose of the illegal, invalid or unenforceable provision. In the event that any provision essential to the purpose of this Agreement is held to be illegal, invalid or unenforceable and cannot be replaced by a valid provision which will implement the purpose of this Agreement, this Agreement and the rights granted herein shall terminate. 
78 | 79 | 80 | 81 |   82 | 83 | 84 | 85 |   86 | 87 | 88 | 89 |   90 | -------------------------------------------------------------------------------- /dataprocessing/speed_sampling_gpu.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from timeit import default_timer as timer 4 | import igl 5 | import traceback 6 | import math 7 | import torch 8 | import pytorch_kinematics as pk 9 | 10 | import bvh_distance_queries 11 | import math 12 | 13 | def arm_rand_sample_bound_points(numsamples, dim, 14 | vertices, faces,velocity_max, offset, margin, 15 | out_path_ ,path_name_,end_effect_): 16 | numsamples = int(numsamples) 17 | 18 | d = "cuda" if torch.cuda.is_available() else "cpu" 19 | dtype = torch.float32 20 | 21 | chain = pk.build_serial_chain_from_urdf( 22 | open(out_path_+'/'+path_name_+".urdf").read(), end_effect_) 23 | chain = chain.to(dtype=dtype, device=d) 24 | 25 | scale = 0.9 * math.pi/0.5 26 | #base = torch.from_numpy(np.array([[0, -0.5*np.pi, 0.0, -0.5*np.pi,0.0,0.0]])).cuda() 27 | #vertices = vertices*2.5 28 | 29 | vertices = torch.tensor(vertices, dtype=torch.float32, device='cuda') 30 | faces = torch.tensor(faces, dtype=torch.long, device='cuda') 31 | 32 | triangles = vertices[faces].unsqueeze(dim=0) 33 | 34 | print(vertices.shape) 35 | print(faces.shape) 36 | print(triangles.shape) 37 | 38 | X_list = [] 39 | Y_list = [] 40 | OutsideSize = numsamples + 10 41 | WholeSize = 0 42 | while OutsideSize > 0: 43 | P = torch.rand((2*numsamples,dim),dtype=torch.float32, device='cuda')-0.5 44 | dP = torch.rand((2*numsamples,dim),dtype=torch.float32, device='cuda')-0.5 45 | rL = (torch.rand((2*numsamples,1),dtype=torch.float32, device='cuda'))*np.sqrt(3.) 46 | nP = P + torch.nn.functional.normalize(dP,dim=1)*rL 47 | 48 | PointsInside = torch.all((nP <= 0.5),dim=1) & torch.all((nP >= -0.5),dim=1) 49 | 50 | x0 = P[PointsInside,:] 51 | x1 = nP[PointsInside,:] 52 | #print(x0.shape[0]) 53 | 54 | if(x0.shape[0]<=1): 55 | continue 56 | 57 | 58 | 59 | th_batch = scale*x0#+base 60 | whole_p = [] 61 | batch_size = 80000 62 | for batch_id in range(math.floor(x0.shape[0]/batch_size)+1): 63 | if batch_id*batch_size==x0.shape[0]: 64 | break 65 | #print(batch_id) 66 | tg_batch = chain.forward_kinematics( 67 | th_batch[batch_id*batch_size: 68 | min((batch_id+1)*batch_size,x0.shape[0]),:] 69 | , end_only = False) 70 | 71 | p_list=[] 72 | iter = 0 73 | pointsize = 0 74 | for tg in tg_batch: 75 | print(iter,tg) 76 | if iter>1: 77 | v = np.load(out_path_+'/meshes/collision/'+tg+'.npy') 78 | nv = np.ones((v.shape[0],4)) 79 | pointsize = pointsize+v.shape[0] 80 | 81 | nv[:,:3]=v[:,:3] 82 | m = tg_batch[tg].get_matrix() 83 | #print(m.shape) 84 | t=torch.from_numpy(nv).float().cuda() 85 | p=torch.matmul(m[:],t.T) 86 | #p=p.cpu().numpy() 87 | p = torch.permute(p, (0, 2, 1)).contiguous() 88 | #p=np.transpose(p,(0,2,1)) 89 | p_list.append(p) 90 | del m,p,t,nv, v 91 | iter = iter+1 92 | #print(pointsize) 93 | #p = np.concatenate(p_list,axis=1) 94 | p = torch.cat(p_list, dim=1) 95 | p = torch.reshape(p,(p.shape[0]*p.shape[1],p.shape[2])).contiguous() 96 | query_points = p[:,0:3].contiguous() 97 | query_points = 0.4*query_points.unsqueeze(dim=0) 98 | bvh = bvh_distance_queries.BVH() 99 | 100 | torch.cuda.synchronize() 101 | torch.cuda.synchronize() 102 | distance, closest_points, closest_faces, closest_bcs= bvh(triangles, query_points) 103 | torch.cuda.synchronize() 104 | 105 | distance = torch.sqrt(distance).squeeze() 106 | 107 | distance,_ = 
torch.min(torch.reshape(distance, (-1, pointsize)), dim=1) 108 | #distance = distance.detach().cpu().numpy() 109 | 110 | #print(distance.shape) 111 | whole_p.append(distance) 112 | del p, p_list, tg_batch, distance, query_points, bvh 113 | 114 | #unsigned_distance = np.concatenate(whole_p, axis=0) 115 | unsigned_distance = torch.cat(whole_p, dim=0) 116 | #print(unsigned_distance.shape) 117 | 118 | where_d = (unsigned_distance <= margin) & \ 119 | (unsigned_distance >= offset) 120 | x0 = x0[where_d] 121 | x1 = x1[where_d] 122 | y0 = unsigned_distance[where_d] 123 | 124 | #print(x1.shape[0]) 125 | if(x1.shape[0]<=1): 126 | continue 127 | 128 | #print('x1 ',x0.shape) 129 | th_batch = scale*x1#+base 130 | whole_p = [] 131 | batch_size = 80000 132 | for batch_id in range(math.floor(x1.shape[0]/batch_size)+1): 133 | if batch_id*batch_size==x1.shape[0]: 134 | break 135 | #print(batch_id) 136 | tg_batch = chain.forward_kinematics( 137 | th_batch[batch_id*batch_size: 138 | min((batch_id+1)*batch_size,x1.shape[0]),:] 139 | , end_only = False) 140 | 141 | p_list=[] 142 | iter = 0 143 | pointsize = 0 144 | for tg in tg_batch: 145 | if iter>1: 146 | #print(tg) 147 | v = np.load(out_path_+'/meshes/collision/'+tg+'.npy') 148 | nv = np.ones((v.shape[0],4)) 149 | pointsize = pointsize+v.shape[0] 150 | 151 | nv[:,:3]=v[:,:3] 152 | m = tg_batch[tg].get_matrix() 153 | #print(m.shape) 154 | t=torch.from_numpy(nv).float().cuda() 155 | p=torch.matmul(m[:],t.T) 156 | #p=p.cpu().numpy() 157 | p = torch.permute(p, (0, 2, 1)).contiguous() 158 | #p=np.transpose(p,(0,2,1)) 159 | p_list.append(p) 160 | del m,p,t,nv, v 161 | iter = iter+1 162 | 163 | p = torch.cat(p_list, dim=1) 164 | p = torch.reshape(p,(p.shape[0]*p.shape[1],p.shape[2])).contiguous() 165 | query_points = p[:,0:3].contiguous() 166 | query_points = 0.4*query_points.unsqueeze(dim=0) 167 | 168 | bvh = bvh_distance_queries.BVH() 169 | 170 | torch.cuda.synchronize() 171 | torch.cuda.synchronize() 172 | distance, closest_points, closest_faces, closest_bcs= bvh(triangles, query_points) 173 | torch.cuda.synchronize() 174 | 175 | distance = torch.sqrt(distance).squeeze() 176 | 177 | distance,_ = torch.min(torch.reshape(distance, (-1, pointsize)), dim=1) 178 | 179 | whole_p.append(distance) 180 | del p, p_list, tg_batch, distance, query_points, bvh 181 | 182 | unsigned_distance = torch.cat(whole_p, dim=0) 183 | 184 | y1 = unsigned_distance 185 | 186 | x = torch.cat((x0,x1),1) 187 | y = torch.cat((y0.unsqueeze(1),y1.unsqueeze(1)),1) 188 | 189 | X_list.append(x) 190 | Y_list.append(y) 191 | OutsideSize = OutsideSize - x.shape[0] 192 | WholeSize = WholeSize + x.shape[0] 193 | print(WholeSize) 194 | if(WholeSize > numsamples): 195 | break 196 | 197 | X = torch.cat(X_list,0)[:numsamples] 198 | Y = torch.cat(Y_list,0)[:numsamples] 199 | 200 | sampled_points = X.detach().cpu().numpy() 201 | distance = Y.detach().cpu().numpy() 202 | 203 | speed0 = velocity_max*np.clip(distance[:,0] , a_min = offset, a_max = margin)/margin 204 | speed1 = velocity_max*np.clip(distance[:,1] , a_min = offset, a_max = margin)/margin 205 | speed = np.zeros((distance.shape[0],2)) 206 | speed[:,0]=speed0 207 | speed[:,1]=speed1 208 | return sampled_points, speed 209 | 210 | def point_rand_sample_bound_points(numsamples, dim, 211 | vertices, faces,velocity_max, offset, margin): 212 | numsamples = int(numsamples) 213 | 214 | vertices = torch.tensor(vertices, dtype=torch.float32, device='cuda') 215 | faces = torch.tensor(faces, dtype=torch.long, device='cuda') 216 | triangles = 
vertices[faces].unsqueeze(dim=0) 217 | print(vertices.shape) 218 | print(faces.shape) 219 | print(triangles.shape) 220 | #X = torch.zeros((numsamples,2*dim)).cuda() 221 | #Y = torch.zeros((numsamples,2)).cuda() 222 | X_list = [] 223 | Y_list = [] 224 | OutsideSize = numsamples + 2 225 | WholeSize = 0 226 | while OutsideSize > 0: 227 | P = torch.rand((2*numsamples,dim),dtype=torch.float32, device='cuda')-0.5 228 | dP = torch.rand((2*numsamples,dim),dtype=torch.float32, device='cuda')-0.5 229 | rL = (torch.rand((2*numsamples,1),dtype=torch.float32, device='cuda'))*np.sqrt(3.) 230 | nP = P + torch.nn.functional.normalize(dP,dim=1)*rL 231 | 232 | PointsInside = torch.all((nP <= 0.5),dim=1) & torch.all((nP >= -0.5),dim=1) 233 | 234 | 235 | x0 = P[PointsInside,:] 236 | x1 = nP[PointsInside,:] 237 | 238 | print(x0.shape[0]) 239 | if(x0.shape[0]<=1): 240 | continue 241 | #print(len(PointsOutside)) 242 | query_points = x0 243 | query_points = query_points.unsqueeze(dim=0) 244 | #print(query_points.shape) 245 | bvh = bvh_distance_queries.BVH() 246 | torch.cuda.synchronize() 247 | torch.cuda.synchronize() 248 | distances, closest_points, closest_faces, closest_bcs= bvh(triangles, query_points) 249 | torch.cuda.synchronize() 250 | unsigned_distance = torch.sqrt(distances).squeeze() 251 | 252 | where_d = (unsigned_distance <= margin) & \ 253 | (unsigned_distance >= offset) 254 | x0 = x0[where_d] 255 | x1 = x1[where_d] 256 | y0 = unsigned_distance[where_d] 257 | 258 | if(x1.shape[0]<=1): 259 | continue 260 | 261 | query_points = x1 262 | query_points = query_points.unsqueeze(dim=0) 263 | bvh = bvh_distance_queries.BVH() 264 | torch.cuda.synchronize() 265 | torch.cuda.synchronize() 266 | distances, closest_points, closest_faces, closest_bcs= bvh(triangles, query_points) 267 | torch.cuda.synchronize() 268 | #unsigned_distance = abs() 269 | y1 = torch.sqrt(distances).squeeze() 270 | 271 | x = torch.cat((x0,x1),1) 272 | y = torch.cat((y0.unsqueeze(1),y1.unsqueeze(1)),1) 273 | 274 | X_list.append(x) 275 | Y_list.append(y) 276 | OutsideSize = OutsideSize - x.shape[0] 277 | WholeSize = WholeSize + x.shape[0] 278 | if(WholeSize > numsamples): 279 | break 280 | 281 | 282 | 283 | X = torch.cat(X_list,0)[:numsamples] 284 | Y = torch.cat(Y_list,0)[:numsamples] 285 | 286 | sampled_points = X.detach().cpu().numpy() 287 | distance = Y.detach().cpu().numpy() 288 | speed0 = velocity_max*np.clip(distance[:,0] , a_min = offset, a_max = margin)/margin 289 | speed1 = velocity_max*np.clip(distance[:,1] , a_min = offset, a_max = margin)/margin 290 | speed = np.zeros((distance.shape[0],2)) 291 | speed[:,0]=speed0 292 | speed[:,1]=speed1 293 | return sampled_points, speed 294 | 295 | 296 | def sample_speed(path, numsamples, dim): 297 | 298 | try: 299 | 300 | global out_path 301 | out_path = os.path.dirname(path) 302 | #print(out_path) 303 | global path_name 304 | path_name = os.path.splitext(os.path.basename(out_path))[0] 305 | #print('pp',path) 306 | global task_name 307 | task_name = out_path.split('/')[2]#os.path.splitext(os.path.basename(out_path),'/') 308 | if task_name=='arm': 309 | #dim = np.loadtxt(out_path+'/dim') 310 | global end_effect 311 | with open(out_path+'/dim') as f: 312 | iter = 0 313 | for line in f: 314 | data = line.split() 315 | if iter==0: 316 | dim = int(data[0]) 317 | else: 318 | end_effect = data[0] 319 | #print(end_effect) 320 | iter=iter+1 321 | file_name = os.path.splitext(os.path.basename(path))[0] 322 | input_file = os.path.join(out_path,file_name + '_scaled.off') 323 | out_file = out_path + 
'/sampled_points.npy' 324 | 325 | print(input_file) 326 | if os.path.exists(out_file): 327 | print(f'Exists: {out_file}') 328 | #return 329 | 330 | #out_file = out_path + '/boundary_{}_samples.npz'.format( sigma) 331 | limit = 0.5 332 | xmin=[-limit]*dim 333 | xmax=[limit]*dim 334 | velocity_max = 1 335 | 336 | if task_name=='c3d' or task_name=='test': 337 | margin = limit/5.0 338 | offset = margin/10.0 339 | elif task_name=='gibson': 340 | margin = limit/10.0 341 | offset = margin/7.0 342 | elif task_name=='arm': 343 | margin = limit/10.0 344 | offset = margin/15.0 345 | 346 | v, f = igl.read_triangle_mesh(input_file) 347 | 348 | 349 | start = timer() 350 | if task_name=='arm': 351 | sampled_points, speed = arm_rand_sample_bound_points(numsamples, dim, 352 | v, f, velocity_max, offset, margin, out_path ,path_name,end_effect) 353 | else: 354 | sampled_points, speed = point_rand_sample_bound_points(numsamples, dim, 355 | v, f, velocity_max, offset, margin) 356 | 357 | end = timer() 358 | 359 | print(end-start) 360 | 361 | np.save('{}/sampled_points'.format(out_path),sampled_points) 362 | np.save('{}/speed'.format(out_path),speed) 363 | except Exception as err: 364 | print('Error with {}: {}'.format(path, traceback.format_exc())) 365 | 366 | -------------------------------------------------------------------------------- /models/model_3d.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | import numpy as np 3 | import math 4 | import random 5 | import time 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | 10 | from torch.nn import Linear 11 | from torch import Tensor 12 | from torch.nn import Conv3d 13 | from torch.autograd import Variable, grad 14 | from torch.utils.data.sampler import SubsetRandomSampler, WeightedRandomSampler 15 | from models import database as db 16 | 17 | import matplotlib 18 | import matplotlib.pylab as plt 19 | 20 | import pickle5 as pickle 21 | 22 | from timeit import default_timer as timer 23 | 24 | class NN(torch.nn.Module): 25 | 26 | def __init__(self, device, dim):#10 27 | super(NN, self).__init__() 28 | self.dim = dim 29 | 30 | h_size = 256 #512,256 31 | fh_size = 128 32 | 33 | 34 | #3D CNN encoder 35 | self.conv_in = Conv3d(1, 16, 3, padding=1, padding_mode='zeros') # out: 256 ->m.p. 
128 36 | self.conv_0 = Conv3d(16, 32, 3, padding=1, padding_mode='zeros') # out: 128 37 | 38 | self.actvn = torch.nn.ReLU() 39 | 40 | self.maxpool = torch.nn.MaxPool3d(2) 41 | 42 | self.conv_in_bn = torch.nn.BatchNorm3d(16) 43 | self.device = device 44 | 45 | feature_size = (1 + 16 ) * 7 #+ 3 46 | 47 | displacment = 0.0222#0.0222 48 | displacments = [] 49 | 50 | displacments.append([0, 0, 0]) 51 | for x in range(3): 52 | for y in [-1, 1]: 53 | input = [0, 0, 0] 54 | input[x] = y * displacment 55 | displacments.append(input) 56 | 57 | self.displacments = torch.Tensor(displacments).to(self.device)#cuda 58 | 59 | #decoder 60 | 61 | self.scale = 10 62 | 63 | self.act = torch.nn.ELU() 64 | 65 | self.nl1=5 66 | self.nl2=7 67 | 68 | self.encoder = torch.nn.ModuleList() 69 | self.encoder1 = torch.nn.ModuleList() 70 | 71 | self.encoder.append(Linear(dim,h_size)) 72 | self.encoder1.append(Linear(dim,h_size)) 73 | 74 | for i in range(self.nl1-1): 75 | self.encoder.append(Linear(h_size, h_size)) 76 | self.encoder1.append(Linear(h_size, h_size)) 77 | 78 | self.encoder.append(Linear(h_size, h_size)) 79 | 80 | self.generator = torch.nn.ModuleList() 81 | self.generator1 = torch.nn.ModuleList() 82 | 83 | self.generator.append(Linear(2*h_size + 2*fh_size, 2*h_size)) 84 | self.generator1.append(Linear(2*h_size + 2*fh_size, 2*h_size)) 85 | 86 | for i in range(self.nl2-1): 87 | self.generator.append(Linear(2*h_size, 2*h_size)) 88 | self.generator1.append(Linear(2*h_size, 2*h_size)) 89 | 90 | self.generator.append(Linear(2*h_size,h_size)) 91 | self.generator.append(Linear(h_size,1)) 92 | 93 | self.fc_env0 = Linear(feature_size, fh_size) 94 | self.fc_env1 = Linear(fh_size, fh_size) 95 | 96 | def init_weights(self, m): 97 | 98 | if type(m) == torch.nn.Linear: 99 | stdv = (1. / math.sqrt(m.weight.size(1))/1.)*2 100 | #stdv = np.sqrt(6 / 64.) / self.T 101 | m.weight.data.uniform_(-stdv, stdv) 102 | m.bias.data.uniform_(-stdv, stdv) 103 | 104 | def env_encoder(self, x): 105 | x = x.unsqueeze(1) 106 | f_0 = x 107 | 108 | net = self.actvn(self.conv_in(x)) 109 | net = self.conv_in_bn(net) 110 | f_1 = net 111 | return f_0, f_1 112 | 113 | def env_features(self, coords, f_0, f_1): 114 | 115 | coords = coords.clone().detach().requires_grad_(False) 116 | 117 | p0=coords[:,:3] 118 | p1=coords[:,3:] 119 | 120 | size=p0.shape[0] 121 | 122 | p = torch.vstack((p0,p1)) 123 | 124 | p = torch.index_select(p, 1, torch.LongTensor([2,1,0]).to(self.device)) 125 | 126 | 127 | p=2*p 128 | 129 | p = p.unsqueeze(0) 130 | p = p.unsqueeze(1).unsqueeze(1) 131 | p = torch.cat([p + d for d in self.displacments], dim=2) 132 | 133 | #print(p.shape) 134 | feature_0 = F.grid_sample(f_0, p, mode='bilinear', padding_mode='border') 135 | feature_1 = F.grid_sample(f_1, p, mode='bilinear', padding_mode='border') 136 | 137 | 138 | features = torch.cat((feature_0, feature_1), dim=1) 139 | 140 | shape = features.shape 141 | features = torch.reshape(features, 142 | (shape[0], shape[1] * shape[3], shape[4])) 143 | #print(features.size()) 144 | features = torch.squeeze(features.transpose(1, -1)) 145 | 146 | features = self.act(self.fc_env0(features)) 147 | features = self.act(self.fc_env1(features)) 148 | 149 | features0=features[:size,:] 150 | features1=features[size:,:] 151 | 152 | return features0, features1 153 | 154 | def out(self, coords, features0, features1): 155 | 156 | coords = coords.clone().detach().requires_grad_(True) # allows to take derivative w.r.t. 
input 157 | size = coords.shape[0] 158 | x0 = coords[:,:self.dim] 159 | x1 = coords[:,self.dim:] 160 | 161 | x = torch.vstack((x0,x1)) 162 | 163 | x = self.act(self.encoder[0](x)) 164 | for ii in range(1,self.nl1): 165 | x_tmp = x 166 | x = self.act(self.encoder[ii](x)) 167 | x = self.act(self.encoder1[ii](x) + x_tmp) 168 | 169 | x = self.encoder[-1](x) 170 | 171 | x0 = x[:size,...] 172 | x1 = x[size:,...] 173 | 174 | x_0 = torch.max(x0,x1) 175 | x_1 = torch.min(x0,x1) 176 | 177 | features_0 = torch.max(features0,features1) 178 | features_1 = torch.min(features0,features1) 179 | 180 | x = torch.cat((x_0, x_1, features_0, features_1),1) 181 | 182 | x = self.act(self.generator[0](x)) 183 | 184 | for ii in range(1, self.nl2): 185 | x_tmp = x 186 | x = self.act(self.generator[ii](x)) 187 | x = self.act(self.generator1[ii](x) + x_tmp) 188 | 189 | y = self.generator[-2](x) 190 | x = self.act(y) 191 | 192 | y = self.generator[-1](x) 193 | x = torch.sigmoid(0.1*y) 194 | 195 | return x, coords 196 | 197 | def forward(self, coords, grid): 198 | coords = coords.clone().detach().requires_grad_(True) # allows to take derivative w.r.t. input 199 | f_0, f_1 = self.env_encoder(grid) 200 | feature0, feature1= self.env_features(coords, f_0, f_1) 201 | output, coords = self.out(coords, feature0, feature1) 202 | 203 | return output, coords 204 | 205 | 206 | class Model(): 207 | def __init__(self, ModelPath, DataPath, dim, pos,device='cpu'): 208 | 209 | self.Params = {} 210 | self.Params['ModelPath'] = ModelPath 211 | self.Params['DataPath'] = DataPath 212 | self.dim = dim 213 | self.pos = pos 214 | 215 | # Pass the JSON information 216 | self.Params['Device'] = device 217 | 218 | self.Params['Network'] = {} 219 | 220 | self.Params['Training'] = {} 221 | self.Params['Training']['Batch Size'] = 10000 222 | self.Params['Training']['Number of Epochs'] = 20000 223 | self.Params['Training']['Resampling Bounds'] = [0.2, 0.95] 224 | self.Params['Training']['Print Every * Epoch'] = 1 225 | self.Params['Training']['Save Every * Epoch'] = 10 226 | self.Params['Training']['Learning Rate'] = 2e-4#5e-5 227 | 228 | # Parameters to alter during training 229 | self.total_train_loss = [] 230 | 231 | def gradient(self, y, x, create_graph=True): 232 | 233 | grad_y = torch.ones_like(y) 234 | 235 | grad_x = torch.autograd.grad(y, x, grad_y, only_inputs=True, retain_graph=True, create_graph=create_graph)[0] 236 | 237 | return grad_x 238 | 239 | def Loss(self, points, features0, features1, Yobs): 240 | 241 | 242 | start=time.time() 243 | tau, Xp = self.network.out(points, features0, features1) 244 | dtau = self.gradient(tau, Xp) 245 | end=time.time() 246 | 247 | D = Xp[:,self.dim:]-Xp[:,:self.dim] 248 | 249 | T0 = torch.einsum('ij,ij->i', D, D) 250 | 251 | 252 | DT0=dtau[:,:self.dim] 253 | DT1=dtau[:,self.dim:] 254 | 255 | T01 = T0*torch.einsum('ij,ij->i', DT0, DT0) 256 | T02 = -2*tau[:,0]*torch.einsum('ij,ij->i', DT0, D) 257 | 258 | T11 = T0*torch.einsum('ij,ij->i', DT1, DT1) 259 | T12 = 2*tau[:,0]*torch.einsum('ij,ij->i', DT1, D) 260 | 261 | 262 | T3 = tau[:,0]**2 263 | 264 | S0 = (T01-T02+T3) 265 | S1 = (T11-T12+T3) 266 | 267 | sq_Ypred0 = 1/torch.sqrt(torch.sqrt(S0)/T3) 268 | sq_Ypred1 = 1/torch.sqrt(torch.sqrt(S1)/T3) 269 | 270 | 271 | sq_Yobs0=torch.sqrt(Yobs[:,0]) 272 | sq_Yobs1=torch.sqrt(Yobs[:,1]) 273 | 274 | 275 | diff = abs(1-sq_Ypred0/sq_Yobs0)+abs(1-sq_Ypred1/sq_Yobs1)+\ 276 | abs(1-sq_Yobs0/sq_Ypred0)+abs(1-sq_Yobs1/sq_Ypred1) 277 | 278 | loss_n = torch.sum(diff)/Yobs.shape[0] 279 | 280 | loss = loss_n 281 | 282 | 
return loss, loss_n, diff 283 | 284 | def train(self): 285 | 286 | 287 | 288 | self.network = NN(self.Params['Device'],self.dim) 289 | self.network.apply(self.network.init_weights) 290 | #self.network.float() 291 | self.network.to(self.Params['Device']) 292 | 293 | 294 | 295 | self.optimizer = torch.optim.AdamW( 296 | self.network.parameters(), lr=self.Params['Training']['Learning Rate'] 297 | ,weight_decay=0.1) 298 | 299 | self.dataset = db.Database(self.Params['DataPath']) 300 | 301 | 302 | dataloader = torch.utils.data.DataLoader( 303 | self.dataset, 304 | batch_size=int(self.Params['Training']['Batch Size']), 305 | num_workers = 0, 306 | shuffle=True) 307 | 308 | 309 | weights = Tensor(torch.ones(len(self.dataset))).to( 310 | torch.device(self.Params['Device'])) 311 | 312 | dists=torch.norm(self.dataset.data[:,0:3]-self.dataset.data[:,3:6],dim=1) 313 | weights = dists.max()-dists 314 | 315 | weights = torch.clamp( 316 | weights/weights.max(), self.Params['Training']['Resampling Bounds'][0], self.Params['Training']['Resampling Bounds'][1]) 317 | 318 | train_sampler_wei = WeightedRandomSampler( 319 | weights, len(weights), replacement=True) 320 | 321 | train_loader_wei = torch.utils.data.DataLoader( 322 | self.dataset, 323 | batch_size=int(self.Params['Training']['Batch Size']), 324 | sampler=train_sampler_wei 325 | ) 326 | 327 | speed = self.dataset.data[:,2*self.dim:] 328 | 329 | grid = self.dataset.grid 330 | grid = grid.to(self.Params['Device']) 331 | grid = grid.unsqueeze(0) 332 | print(speed.min()) 333 | #''' 334 | 335 | weights = Tensor(torch.ones(len(self.dataset))).to( 336 | torch.device(self.Params['Device'])) 337 | 338 | prev_diff = 1.0 339 | current_diff = 1.0 340 | #step = 1.0 341 | tt =time.time() 342 | 343 | current_state = pickle.loads(pickle.dumps(self.network.state_dict())) 344 | current_optimizer = pickle.loads(pickle.dumps(self.optimizer.state_dict())) 345 | #p=(torch.rand((5,6))-0.5).cuda() 346 | prev_state_queue = [] 347 | prev_optimizer_queue = [] 348 | 349 | self.l0 = 500 350 | 351 | self.l1 = 500 352 | 353 | for epoch in range(1, self.Params['Training']['Number of Epochs']+1): 354 | total_train_loss = 0 355 | 356 | total_diff=0 357 | 358 | 359 | self.lamb = min(1.0,max(0,(epoch-self.l0)/self.l1)) 360 | 361 | prev_state_queue.append(current_state) 362 | prev_optimizer_queue.append(current_optimizer) 363 | if(len(prev_state_queue)>5): 364 | prev_state_queue.pop(0) 365 | prev_optimizer_queue.pop(0) 366 | 367 | current_state = pickle.loads(pickle.dumps(self.network.state_dict())) 368 | current_optimizer = pickle.loads(pickle.dumps(self.optimizer.state_dict())) 369 | 370 | 371 | self.optimizer.param_groups[0]['lr'] = max(5e-4*(1-epoch/self.l0),1e-5) 372 | 373 | prev_diff = current_diff 374 | iter=0 375 | while True: 376 | total_train_loss = 0 377 | total_diff = 0 378 | 379 | for i, data in enumerate(train_loader_wei, 0):#train_loader_wei,dataloader 380 | 381 | data=data[0].to(self.Params['Device']) 382 | #data, indexbatch = data 383 | points = data[:,:2*self.dim]#.float()#.cuda() 384 | speed = data[:,2*self.dim:]#.float()#.cuda() 385 | 386 | feature0=torch.zeros((points.shape[0],128)).to(self.Params['Device']) 387 | feature1=torch.zeros((points.shape[0],128)).to(self.Params['Device']) 388 | 389 | if self.lamb > 0: 390 | f_0, f_1 = self.network.env_encoder(grid) 391 | feature0, feature1 = self.network.env_features(points, f_0, f_1) 392 | feature0 = feature0*self.lamb 393 | feature1 = feature1*self.lamb 394 | 395 | loss_value, loss_n, wv = self.Loss(points, feature0, 
feature1, speed) 396 | 397 | loss_value.backward() 398 | 399 | # Update parameters 400 | self.optimizer.step() 401 | self.optimizer.zero_grad() 402 | 403 | 404 | total_train_loss += loss_value.clone().detach() 405 | total_diff += loss_n.clone().detach() 406 | 407 | 408 | del points, speed, loss_value, loss_n, wv#,indexbatch 409 | 410 | 411 | total_train_loss /= len(dataloader)#dataloader train_loader 412 | total_diff /= len(dataloader)#dataloader train_loader 413 | 414 | current_diff = total_diff 415 | diff_ratio = current_diff/prev_diff 416 | 417 | if (diff_ratio < 1.2 and diff_ratio > 0):#1.5 418 | break 419 | else: 420 | 421 | iter+=1 422 | with torch.no_grad(): 423 | random_number = random.randint(0, min(4,epoch-1)) 424 | self.network.load_state_dict(prev_state_queue[random_number], strict=True) 425 | self.optimizer.load_state_dict(prev_optimizer_queue[random_number]) 426 | 427 | print("RepeatEpoch = {} -- Loss = {:.4e}".format( 428 | epoch, total_diff)) 429 | 430 | 431 | self.total_train_loss.append(total_train_loss) 432 | 433 | 434 | if epoch % self.Params['Training']['Print Every * Epoch'] == 0: 435 | with torch.no_grad(): 436 | print("Epoch = {} -- Loss = {:.4e}".format( 437 | epoch, total_diff.item())) 438 | 439 | if (epoch % self.Params['Training']['Save Every * Epoch'] == 0) or (epoch == self.Params['Training']['Number of Epochs']) or (epoch == 1): 440 | self.plot(epoch,total_diff.item(),grid) 441 | with torch.no_grad(): 442 | self.save(epoch=epoch, val_loss=total_diff) 443 | 444 | def save(self, epoch='', val_loss=''): 445 | ''' 446 | Saving a instance of the model 447 | ''' 448 | torch.save({'epoch': epoch, 449 | 'model_state_dict': self.network.state_dict(), 450 | 'optimizer_state_dict': self.optimizer.state_dict(), 451 | 'train_loss': self.total_train_loss, 452 | 'val_loss': self.total_train_loss}, '{}/Model_Epoch_{}_ValLoss_{:.6e}.pt'.format(self.Params['ModelPath'], str(epoch).zfill(5), val_loss)) 453 | 454 | def load(self, filepath): 455 | 456 | checkpoint = torch.load( 457 | filepath, map_location=torch.device(self.Params['Device'])) 458 | 459 | self.network = NN(self.Params['Device'],self.dim) 460 | 461 | self.network.load_state_dict(checkpoint['model_state_dict'], strict=True) 462 | self.network.to(torch.device(self.Params['Device'])) 463 | self.network.float() 464 | self.network.eval() 465 | 466 | 467 | def load_pretrained_state_dict(self, state_dict): 468 | own_state=self.state_dict 469 | 470 | 471 | def TravelTimes(self, Xp, feature0, feature1): 472 | Xp = Xp.to(torch.device(self.Params['Device'])) 473 | 474 | tau, coords = self.network.out(Xp, feature0, feature1) 475 | 476 | D = Xp[:,self.dim:]-Xp[:,:self.dim] 477 | 478 | T0 = torch.einsum('ij,ij->i', D, D) 479 | 480 | TT = torch.sqrt(T0)/tau[:, 0] 481 | 482 | del Xp, tau, T0 483 | return TT 484 | 485 | def Tau(self, Xp, feature0, feature1): 486 | Xp = Xp.to(torch.device(self.Params['Device'])) 487 | 488 | tau, coords = self.network.out(Xp, feature0, feature1) 489 | 490 | return tau 491 | 492 | def Speed(self, Xp, feature0, feature1): 493 | Xp = Xp.to(torch.device(self.Params['Device'])) 494 | 495 | tau, Xp = self.network.out(Xp, feature0, feature1) 496 | dtau = self.gradient(tau, Xp) 497 | 498 | D = Xp[:,self.dim:]-Xp[:,:self.dim] 499 | T0 = torch.einsum('ij,ij->i', D, D) 500 | 501 | DT1 = dtau[:,self.dim:] 502 | 503 | T1 = T0*torch.einsum('ij,ij->i', DT1, DT1) 504 | T2 = 2*tau[:,0]*torch.einsum('ij,ij->i', DT1, D) 505 | 506 | T3 = tau[:,0]**2 507 | 508 | S = (T1-T2+T3) 509 | 510 | Ypred = T3 / torch.sqrt(S) 511 | 
512 | del Xp, tau, dtau, T0, T1, T2, T3 513 | return Ypred 514 | 515 | def Gradient(self, Xp, f_0, f_1): 516 | Xp = Xp.to(torch.device(self.Params['Device'])) 517 | 518 | #Xp.requires_grad_() 519 | feature0, feature1 = self.network.env_features(Xp, f_0, f_1) 520 | 521 | tau, Xp = self.network.out(Xp, feature0, feature1) 522 | dtau = self.gradient(tau, Xp) 523 | 524 | 525 | D = Xp[:,self.dim:]-Xp[:,:self.dim] 526 | T0 = torch.sqrt(torch.einsum('ij,ij->i', D, D)) 527 | T3 = tau[:, 0]**2 528 | 529 | V0 = D 530 | V1 = dtau[:,self.dim:] 531 | 532 | Y1 = 1/(T0*tau[:, 0])*V0 533 | Y2 = T0/T3*V1 534 | 535 | 536 | Ypred1 = -(Y1-Y2) 537 | Spred1 = torch.norm(Ypred1) 538 | Ypred1 = 1/Spred1**2*Ypred1 539 | 540 | V0=-D 541 | V1=dtau[:,:self.dim] 542 | 543 | Y1 = 1/(T0*tau[:, 0])*V0 544 | Y2 = T0/T3*V1 545 | 546 | Ypred0 = -(Y1-Y2) 547 | Spred0 = torch.norm(Ypred0) 548 | 549 | Ypred0 = 1/Spred0**2*Ypred0 550 | 551 | return torch.cat((Ypred0, Ypred1),dim=1) 552 | 553 | def plot(self,epoch,total_train_loss, grid): 554 | limit = 0.5 555 | xmin = [-limit,-limit] 556 | xmax = [limit,limit] 557 | spacing=limit/40.0 558 | X,Y = np.meshgrid(np.arange(xmin[0],xmax[0],spacing),np.arange(xmin[1],xmax[1],spacing)) 559 | 560 | Xsrc = [0]*self.dim 561 | 562 | Xsrc[0] = self.pos[0] 563 | Xsrc[1] = self.pos[1] 564 | 565 | XP = np.zeros((len(X.flatten()),2*self.dim)) 566 | XP[:,:self.dim] = Xsrc 567 | XP[:,self.dim+0] = X.flatten() 568 | XP[:,self.dim+1] = Y.flatten() 569 | XP = Variable(Tensor(XP)).to(self.Params['Device']) 570 | 571 | feature0=torch.zeros((XP.shape[0],128)).to(self.Params['Device']) 572 | feature1=torch.zeros((XP.shape[0],128)).to(self.Params['Device']) 573 | 574 | if self.lamb > 0: 575 | f_0, f_1 = self.network.env_encoder(grid) 576 | feature0, feature1 = self.network.env_features(XP, f_0, f_1) 577 | feature0 = feature0*self.lamb 578 | feature1 = feature1*self.lamb 579 | 580 | 581 | tt = self.TravelTimes(XP,feature0, feature1) 582 | ss = self.Speed(XP,feature0, feature1)#*5 583 | tau = self.Tau(XP,feature0, feature1) 584 | 585 | TT = tt.to('cpu').data.numpy().reshape(X.shape) 586 | V = ss.to('cpu').data.numpy().reshape(X.shape) 587 | TAU = tau.to('cpu').data.numpy().reshape(X.shape) 588 | 589 | fig = plt.figure() 590 | 591 | ax = fig.add_subplot(111) 592 | quad1 = ax.pcolormesh(X,Y,V,vmin=0,vmax=1) 593 | ax.contour(X,Y,TT,np.arange(0,3,0.05), cmap='bone', linewidths=0.5)#0.25 594 | plt.colorbar(quad1,ax=ax, pad=0.1, label='Predicted Velocity') 595 | plt.savefig(self.Params['ModelPath']+"/plots"+str(epoch)+"_"+str(round(total_train_loss,4))+"_0.jpg",bbox_inches='tight') 596 | 597 | plt.close(fig) 598 | fig = plt.figure() 599 | ax = fig.add_subplot(111) 600 | quad1 = ax.pcolormesh(X,Y,TAU,vmin=0,vmax=1) 601 | ax.contour(X,Y,TT,np.arange(0,3,0.05), cmap='bone', linewidths=0.5)#0.25 602 | plt.colorbar(quad1,ax=ax, pad=0.1, label='Predicted Velocity') 603 | plt.savefig(self.Params['ModelPath']+"/tauplots"+str(epoch)+"_"+str(round(total_train_loss,4))+"_0.jpg",bbox_inches='tight') 604 | 605 | plt.close(fig) 606 | -------------------------------------------------------------------------------- /models/model_arm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | import random 4 | import time 5 | import os 6 | import torch 7 | import torch.nn.functional as F 8 | 9 | from torch.nn import Linear 10 | from torch import Tensor 11 | from torch.nn import Conv3d 12 | from torch.autograd import Variable, grad 13 | from 
torch.utils.data.sampler import SubsetRandomSampler, WeightedRandomSampler 14 | from models import database as db 15 | 16 | import matplotlib 17 | import matplotlib.pylab as plt 18 | 19 | import pickle5 as pickle 20 | 21 | from timeit import default_timer as timer 22 | 23 | import pytorch_kinematics as pk 24 | 25 | class NN(torch.nn.Module): 26 | 27 | def __init__(self, device, dim, out_path_):#10 28 | super(NN, self).__init__() 29 | self.dim = dim 30 | 31 | h_size = 256 #512,256 32 | fh_size = 128 33 | 34 | #Arm 35 | 36 | with open(out_path_+'/dim') as f: 37 | iter = 0 38 | for line in f: 39 | data = line.split() 40 | if iter==0: 41 | dim = int(data[0]) 42 | else: 43 | end_effect_ = data[0] 44 | print(end_effect_) 45 | iter=iter+1 46 | path_name_ = os.path.splitext(os.path.basename(out_path_))[0] 47 | #d = "cuda" if torch.cuda.is_available() else "cpu" 48 | dtype = torch.float32 49 | 50 | chain = pk.build_serial_chain_from_urdf( 51 | open(out_path_+'/'+path_name_+".urdf").read(), end_effect_) 52 | self.chain = chain.to(dtype=dtype, device=device) 53 | 54 | tg_batch = self.chain.forward_kinematics(torch.zeros(1,self.dim).to(device), end_only = False) 55 | 56 | self.v_list=[] 57 | iter = 0 58 | pointsize = 0 59 | for tg in tg_batch: 60 | print(iter,tg) 61 | if iter>1: 62 | v = np.load(out_path_+'/meshes/collision/'+tg+'.npy') 63 | pointsize = pointsize+v.shape[0] 64 | 65 | self.v_list.append(torch.from_numpy(v).float().cuda()) 66 | 67 | del v 68 | iter = iter+1 69 | 70 | #3D CNN encoder 71 | self.conv_in = Conv3d(1, 16, 3, padding=1, padding_mode='zeros') # out: 256 ->m.p. 128 72 | self.conv_0 = Conv3d(16, 32, 3, padding=1, padding_mode='zeros') # out: 128 73 | 74 | self.actvn = torch.nn.ReLU() 75 | 76 | self.maxpool = torch.nn.MaxPool3d(2) 77 | 78 | self.conv_in_bn = torch.nn.BatchNorm3d(16) 79 | self.device = device 80 | 81 | feature_size = (1 + 16 ) * 7 * pointsize #+ 3 82 | 83 | displacment = 0.0222#0.0222 84 | displacments = [] 85 | 86 | displacments.append([0, 0, 0]) 87 | for x in range(3): 88 | for y in [-1, 1]: 89 | input = [0, 0, 0] 90 | input[x] = y * displacment 91 | displacments.append(input) 92 | 93 | self.displacments = torch.Tensor(displacments).to(self.device)#cuda 94 | 95 | #decoder 96 | 97 | self.scale = 10 98 | 99 | self.act = torch.nn.ELU()#Softplus(beta=self.scale)#ELU,CELU 100 | 101 | self.nl1=5 102 | self.nl2=7 103 | 104 | self.encoder = torch.nn.ModuleList() 105 | self.encoder1 = torch.nn.ModuleList() 106 | #self.encoder.append(Linear(self.dim,h_size)) 107 | 108 | self.encoder.append(Linear(dim,h_size)) 109 | self.encoder1.append(Linear(dim,h_size)) 110 | 111 | for i in range(self.nl1-1): 112 | self.encoder.append(Linear(h_size, h_size)) 113 | self.encoder1.append(Linear(h_size, h_size)) 114 | 115 | self.encoder.append(Linear(h_size, h_size)) 116 | 117 | self.generator = torch.nn.ModuleList() 118 | self.generator1 = torch.nn.ModuleList() 119 | 120 | self.generator.append(Linear(2*h_size + 2*fh_size, 2*h_size)) 121 | self.generator1.append(Linear(2*h_size + 2*fh_size, 2*h_size)) 122 | 123 | for i in range(self.nl2-1): 124 | self.generator.append(Linear(2*h_size, 2*h_size)) 125 | self.generator1.append(Linear(2*h_size, 2*h_size)) 126 | 127 | self.generator.append(Linear(2*h_size,h_size)) 128 | self.generator.append(Linear(h_size,1)) 129 | 130 | self.fc_env0 = Linear(feature_size, fh_size) 131 | self.fc_env1 = Linear(fh_size, fh_size) 132 | 133 | def init_weights(self, m): 134 | 135 | if type(m) == torch.nn.Linear: 136 | stdv = (1. 
/ math.sqrt(m.weight.size(1))/1.)*2 137 | #stdv = np.sqrt(6 / 64.) / self.T 138 | m.weight.data.uniform_(-stdv, stdv) 139 | m.bias.data.uniform_(-stdv, stdv) 140 | 141 | def env_encoder(self, x): 142 | x = x.unsqueeze(1) 143 | f_0 = x 144 | 145 | net = self.actvn(self.conv_in(x)) 146 | net = self.conv_in_bn(net) 147 | f_1 = net 148 | 149 | return f_0, f_1 150 | 151 | def FK(self,theta): 152 | scale=1.8*math.pi 153 | theta = scale*theta 154 | tg_batch = self.chain.forward_kinematics(theta, end_only = False) 155 | 156 | p_list=[] 157 | iter = 0 158 | #pointsize = 0 159 | for tg in tg_batch: 160 | #print(iter,tg) 161 | if iter>1: 162 | 163 | m = tg_batch[tg].get_matrix() 164 | #print(m.shape) 165 | t=self.v_list[iter-2] 166 | p=torch.matmul(m[:],t.T) 167 | #p=p.cpu().numpy() 168 | p = torch.permute(p, (2, 0, 1)).contiguous() 169 | 170 | p_list.append(p) 171 | del m,p,t#,nv, v 172 | iter = iter+1 173 | 174 | p = torch.cat(p_list, dim=0) 175 | 176 | query_points = p[...,0:3].contiguous() 177 | #print(query_points.shape) 178 | return 0.4*query_points 179 | 180 | def env_features(self, coords, f_0, f_1): 181 | 182 | coords = coords.clone().detach().requires_grad_(False) 183 | 184 | p0=coords[:,:self.dim] 185 | p1=coords[:,self.dim:] 186 | 187 | size=p0.shape[0] 188 | 189 | p = torch.vstack((p0,p1)) 190 | 191 | p = self.FK(p) 192 | 193 | p = torch.index_select(p, 2, torch.LongTensor([2,1,0]).to(self.device)) 194 | 195 | p=2*p 196 | 197 | p = p.unsqueeze(0) 198 | p = p.unsqueeze(1) 199 | p = torch.cat([p + d for d in self.displacments], dim=2) 200 | #print(p.shape) 201 | 202 | feature_0 = F.grid_sample(f_0, p, mode='bilinear', padding_mode='border') 203 | feature_1 = F.grid_sample(f_1, p, mode='bilinear', padding_mode='border') 204 | 205 | 206 | features = torch.cat((feature_0, feature_1), dim=1) 207 | 208 | shape = features.shape 209 | features = torch.reshape(features, 210 | (shape[0], shape[1] * shape[3], shape[4])) 211 | #print(features.size()) 212 | features = torch.squeeze(features.transpose(1, -1)) 213 | 214 | features = self.act(self.fc_env0(features)) 215 | features = self.act(self.fc_env1(features)) 216 | 217 | features0=features[:size,:] 218 | features1=features[size:,:] 219 | 220 | return features0, features1 221 | 222 | def out(self, coords, features0, features1): 223 | 224 | coords = coords.clone().detach().requires_grad_(True) # allows to take derivative w.r.t. input 225 | size = coords.shape[0] 226 | x0 = coords[:,:self.dim] 227 | x1 = coords[:,self.dim:] 228 | 229 | x = torch.vstack((x0,x1)) 230 | 231 | x = self.act(self.encoder[0](x)) 232 | for ii in range(1,self.nl1): 233 | x_tmp = x 234 | x = self.act(self.encoder[ii](x)) 235 | x = self.act(self.encoder1[ii](x) + x_tmp) 236 | 237 | x = self.encoder[-1](x) 238 | 239 | x0 = x[:size,...] 240 | x1 = x[size:,...] 241 | 242 | x_0 = torch.max(x0,x1) 243 | x_1 = torch.min(x0,x1) 244 | 245 | features_0 = torch.max(features0,features1) 246 | features_1 = torch.min(features0,features1) 247 | 248 | x = torch.cat((x_0, x_1, features_0, features_1),1) 249 | 250 | x = self.act(self.generator[0](x)) 251 | 252 | for ii in range(1, self.nl2): 253 | x_tmp = x 254 | x = self.act(self.generator[ii](x)) 255 | x = self.act(self.generator1[ii](x) + x_tmp) 256 | 257 | y = self.generator[-2](x) 258 | x = self.act(y) 259 | 260 | y = self.generator[-1](x) 261 | x = torch.sigmoid(0.1*y) 262 | 263 | return x, coords 264 | 265 | def forward(self, coords, grid): 266 | coords = coords.clone().detach().requires_grad_(True) # allows to take derivative w.r.t. 
input 267 | f_0, f_1 = self.env_encoder(grid) 268 | feature0, feature1= self.env_features(coords, f_0, f_1) 269 | output, coords = self.out(coords, feature0, feature1) 270 | 271 | return output, coords 272 | 273 | 274 | class Model(): 275 | def __init__(self, ModelPath, DataPath, dim, pos,device='cpu'): 276 | 277 | self.Params = {} 278 | self.Params['ModelPath'] = ModelPath 279 | self.Params['DataPath'] = DataPath 280 | self.dim = dim 281 | self.pos = pos 282 | 283 | # Pass the JSON information 284 | self.Params['Device'] = device 285 | 286 | self.Params['Network'] = {} 287 | 288 | self.Params['Training'] = {} 289 | self.Params['Training']['Batch Size'] = 10000 290 | self.Params['Training']['Number of Epochs'] = 20000 291 | self.Params['Training']['Resampling Bounds'] = [0.2, 0.95] 292 | self.Params['Training']['Print Every * Epoch'] = 1 293 | self.Params['Training']['Save Every * Epoch'] = 10 294 | self.Params['Training']['Learning Rate'] = 2e-4#5e-5 295 | 296 | # Parameters to alter during training 297 | self.total_train_loss = [] 298 | self.total_val_loss = [] 299 | 300 | def gradient(self, y, x, create_graph=True): 301 | 302 | grad_y = torch.ones_like(y) 303 | 304 | grad_x = torch.autograd.grad(y, x, grad_y, only_inputs=True, retain_graph=True, create_graph=create_graph)[0] 305 | 306 | return grad_x 307 | 308 | def Loss(self, points, features0, features1, Yobs): 309 | 310 | 311 | start=time.time() 312 | tau, Xp = self.network.out(points, features0, features1) 313 | dtau = self.gradient(tau, Xp) 314 | end=time.time() 315 | 316 | D = Xp[:,self.dim:]-Xp[:,:self.dim] 317 | 318 | T0 = torch.einsum('ij,ij->i', D, D)#torch.norm(D, p=2, dim =1)**2 319 | 320 | 321 | DT0=dtau[:,:self.dim] 322 | DT1=dtau[:,self.dim:] 323 | 324 | T01 = T0*torch.einsum('ij,ij->i', DT0, DT0) 325 | T02 = -2*tau[:,0]*torch.einsum('ij,ij->i', DT0, D) 326 | 327 | T11 = T0*torch.einsum('ij,ij->i', DT1, DT1) 328 | T12 = 2*tau[:,0]*torch.einsum('ij,ij->i', DT1, D) 329 | 330 | 331 | T3 = tau[:,0]**2 332 | 333 | S0 = (T01-T02+T3) 334 | S1 = (T11-T12+T3) 335 | 336 | #0.001 337 | sq_Ypred0 = 1/torch.sqrt(torch.sqrt(S0)/T3) 338 | sq_Ypred1 = 1/torch.sqrt(torch.sqrt(S1)/T3) 339 | 340 | 341 | sq_Yobs0=torch.sqrt(Yobs[:,0]) 342 | sq_Yobs1=torch.sqrt(Yobs[:,1]) 343 | 344 | 345 | diff = abs(1-sq_Ypred0/sq_Yobs0)+abs(1-sq_Ypred1/sq_Yobs1)+\ 346 | abs(1-sq_Yobs0/sq_Ypred0)+abs(1-sq_Yobs1/sq_Ypred1) 347 | 348 | loss_n = torch.sum(diff)/Yobs.shape[0] 349 | 350 | loss = loss_n 351 | 352 | return loss, loss_n, diff 353 | 354 | def train(self): 355 | 356 | self.network = NN(self.Params['Device'],self.dim,self.Params['DataPath']) 357 | self.network.apply(self.network.init_weights) 358 | 359 | self.network.to(self.Params['Device']) 360 | 361 | self.optimizer = torch.optim.AdamW( 362 | self.network.parameters(), lr=self.Params['Training']['Learning Rate'] 363 | ,weight_decay=0.1) 364 | 365 | self.dataset = db.Database(self.Params['DataPath']) 366 | 367 | 368 | dataloader = torch.utils.data.DataLoader( 369 | self.dataset, 370 | batch_size=int(self.Params['Training']['Batch Size']), 371 | num_workers = 0, 372 | shuffle=True) 373 | 374 | weights = Tensor(torch.ones(len(self.dataset))).to( 375 | torch.device(self.Params['Device'])) 376 | 377 | dists=torch.norm(self.dataset.data[:,0:3]-self.dataset.data[:,3:6],dim=1) 378 | weights = dists.max()-dists 379 | 380 | weights = torch.clamp( 381 | weights/weights.max(), self.Params['Training']['Resampling Bounds'][0], self.Params['Training']['Resampling Bounds'][1]) 382 | 383 | train_sampler_wei = 
WeightedRandomSampler( 384 | weights, len(weights), replacement=True) 385 | 386 | train_loader_wei = torch.utils.data.DataLoader( 387 | self.dataset, 388 | batch_size=int(self.Params['Training']['Batch Size']), 389 | sampler=train_sampler_wei 390 | ) 391 | 392 | speed = self.dataset.data[:,2*self.dim:] 393 | 394 | grid = self.dataset.grid 395 | grid = grid.to(self.Params['Device']) 396 | grid = grid.unsqueeze(0) 397 | print(speed.min()) 398 | #''' 399 | 400 | weights = Tensor(torch.ones(len(self.dataset))).to( 401 | torch.device(self.Params['Device'])) 402 | 403 | prev_diff = 1.0 404 | current_diff = 1.0 405 | 406 | 407 | current_state = pickle.loads(pickle.dumps(self.network.state_dict())) 408 | current_optimizer = pickle.loads(pickle.dumps(self.optimizer.state_dict())) 409 | 410 | prev_state_queue = [] 411 | prev_optimizer_queue = [] 412 | 413 | self.l0 = 500 414 | 415 | self.l1 = 500 416 | 417 | for epoch in range(1, self.Params['Training']['Number of Epochs']+1): 418 | 419 | total_train_loss = 0 420 | total_diff=0 421 | 422 | 423 | self.lamb = min(1.0,max(0,(epoch-self.l0)/self.l1)) 424 | 425 | 426 | 427 | prev_state_queue.append(current_state) 428 | prev_optimizer_queue.append(current_optimizer) 429 | if(len(prev_state_queue)>5): 430 | prev_state_queue.pop(0) 431 | prev_optimizer_queue.pop(0) 432 | 433 | current_state = pickle.loads(pickle.dumps(self.network.state_dict())) 434 | current_optimizer = pickle.loads(pickle.dumps(self.optimizer.state_dict())) 435 | 436 | 437 | self.optimizer.param_groups[0]['lr'] = max(1e-4*(1-epoch/self.l0),1e-5) 438 | 439 | prev_diff = current_diff 440 | iter=0 441 | while True: 442 | total_train_loss = 0 443 | total_diff = 0 444 | #for i in range(10): 445 | for i, data in enumerate(train_loader_wei, 0):#train_loader_wei,dataloader 446 | 447 | data=data[0].to(self.Params['Device']) 448 | #data, indexbatch = data 449 | points = data[:,:2*self.dim]#.float()#.cuda() 450 | speed = data[:,2*self.dim:]#.float()#.cuda() 451 | 452 | feature0=torch.zeros((points.shape[0],128)).to(self.Params['Device']) 453 | feature1=torch.zeros((points.shape[0],128)).to(self.Params['Device']) 454 | 455 | if self.lamb > 0: 456 | f_0, f_1 = self.network.env_encoder(grid) 457 | feature0, feature1 = self.network.env_features(points, f_0, f_1) 458 | feature0 = feature0*self.lamb 459 | feature1 = feature1*self.lamb 460 | 461 | loss_value, loss_n, wv = self.Loss(points, feature0, feature1, speed) 462 | 463 | loss_value.backward() 464 | 465 | # Update parameters 466 | self.optimizer.step() 467 | self.optimizer.zero_grad() 468 | 469 | 470 | total_train_loss += loss_value.clone().detach() 471 | total_diff += loss_n.clone().detach() 472 | 473 | 474 | del points, speed, loss_value, loss_n, wv#,indexbatch 475 | 476 | total_train_loss /= len(dataloader)#dataloader train_loader 477 | total_diff /= len(dataloader)#dataloader train_loader 478 | 479 | current_diff = total_diff 480 | diff_ratio = current_diff/prev_diff 481 | if (diff_ratio < 2.0 and diff_ratio > 0):#1.5 482 | break 483 | else: 484 | 485 | iter+=1 486 | with torch.no_grad(): 487 | random_number = random.randint(0, min(4,epoch-1)) 488 | self.network.load_state_dict(prev_state_queue[random_number], strict=True) 489 | self.optimizer.load_state_dict(prev_optimizer_queue[random_number]) 490 | print("RepeatEpoch = {} -- Loss = {:.4e}".format( 491 | epoch, total_diff)) 492 | 493 | 494 | self.total_train_loss.append(total_train_loss) 495 | 496 | 497 | if epoch % self.Params['Training']['Print Every * Epoch'] == 0: 498 | with 
torch.no_grad(): 499 | print("Epoch = {} -- Loss = {:.4e}".format( 500 | epoch, total_diff.item())) 501 | 502 | if (epoch % self.Params['Training']['Save Every * Epoch'] == 0) or (epoch == self.Params['Training']['Number of Epochs']) or (epoch == 1): 503 | self.plot(epoch,total_diff.item(),grid) 504 | with torch.no_grad(): 505 | self.save(epoch=epoch, val_loss=total_diff) 506 | 507 | def save(self, epoch='', val_loss=''): 508 | ''' 509 | Saving a instance of the model 510 | ''' 511 | torch.save({'epoch': epoch, 512 | 'model_state_dict': self.network.state_dict(), 513 | 'optimizer_state_dict': self.optimizer.state_dict(), 514 | 'train_loss': self.total_train_loss, 515 | 'val_loss': self.total_train_loss}, '{}/Model_Epoch_{}_ValLoss_{:.6e}.pt'.format(self.Params['ModelPath'], str(epoch).zfill(5), val_loss)) 516 | 517 | def load(self, filepath): 518 | 519 | checkpoint = torch.load( 520 | filepath, map_location=torch.device(self.Params['Device'])) 521 | 522 | self.network = NN(self.Params['Device'],self.dim,self.Params['DataPath']) 523 | 524 | self.network.load_state_dict(checkpoint['model_state_dict'], strict=True) 525 | self.network.to(torch.device(self.Params['Device'])) 526 | self.network.float() 527 | self.network.eval() 528 | 529 | 530 | def load_pretrained_state_dict(self, state_dict): 531 | own_state=self.state_dict 532 | 533 | 534 | def TravelTimes(self, Xp, feature0, feature1): 535 | 536 | Xp = Xp.to(torch.device(self.Params['Device'])) 537 | 538 | 539 | tau, coords = self.network.out(Xp, feature0, feature1) 540 | 541 | D = Xp[:,self.dim:]-Xp[:,:self.dim] 542 | 543 | T0 = torch.einsum('ij,ij->i', D, D) 544 | 545 | TT = torch.sqrt(T0)/tau[:, 0] 546 | 547 | del Xp, tau, T0 548 | return TT 549 | 550 | def Tau(self, Xp, feature0, feature1): 551 | Xp = Xp.to(torch.device(self.Params['Device'])) 552 | 553 | tau, coords = self.network.out(Xp, feature0, feature1) 554 | 555 | return tau 556 | 557 | def Speed(self, Xp, feature0, feature1): 558 | Xp = Xp.to(torch.device(self.Params['Device'])) 559 | 560 | tau, Xp = self.network.out(Xp, feature0, feature1) 561 | dtau = self.gradient(tau, Xp) 562 | 563 | D = Xp[:,self.dim:]-Xp[:,:self.dim] 564 | T0 = torch.einsum('ij,ij->i', D, D) 565 | 566 | DT1 = dtau[:,self.dim:] 567 | 568 | T1 = T0*torch.einsum('ij,ij->i', DT1, DT1) 569 | T2 = 2*tau[:,0]*torch.einsum('ij,ij->i', DT1, D) 570 | 571 | T3 = tau[:,0]**2 572 | 573 | S = (T1-T2+T3) 574 | 575 | Ypred = T3 / torch.sqrt(S) 576 | 577 | del Xp, tau, dtau, T0, T1, T2, T3 578 | return Ypred 579 | 580 | def Gradient(self, Xp, f_0, f_1): 581 | Xp = Xp.to(torch.device(self.Params['Device'])) 582 | 583 | #Xp.requires_grad_() 584 | feature0, feature1 = self.network.env_features(Xp, f_0, f_1) 585 | 586 | tau, Xp = self.network.out(Xp, feature0, feature1) 587 | dtau = self.gradient(tau, Xp) 588 | 589 | 590 | D = Xp[:,self.dim:]-Xp[:,:self.dim] 591 | T0 = torch.sqrt(torch.einsum('ij,ij->i', D, D)) 592 | T3 = tau[:, 0]**2 593 | 594 | V0 = D 595 | V1 = dtau[:,self.dim:] 596 | 597 | Y1 = 1/(T0*tau[:, 0])*V0 598 | Y2 = T0/T3*V1 599 | 600 | 601 | Ypred1 = -(Y1-Y2) 602 | Spred1 = torch.norm(Ypred1) 603 | Ypred1 = 1/Spred1**2*Ypred1 604 | 605 | V0=-D 606 | V1=dtau[:,:self.dim] 607 | 608 | Y1 = 1/(T0*tau[:, 0])*V0 609 | Y2 = T0/T3*V1 610 | 611 | Ypred0 = -(Y1-Y2) 612 | Spred0 = torch.norm(Ypred0) 613 | 614 | Ypred0 = 1/Spred0**2*Ypred0 615 | 616 | return torch.cat((Ypred0, Ypred1),dim=1) 617 | 618 | def plot(self,epoch,total_train_loss, grid): 619 | limit = 0.5 620 | xmin = [-limit,-limit] 621 | xmax = [limit,limit] 622 | 
spacing=limit/40.0 623 | X,Y = np.meshgrid(np.arange(xmin[0],xmax[0],spacing),np.arange(xmin[1],xmax[1],spacing)) 624 | Xsrc = [0]*self.dim 625 | 626 | Xsrc[0] = self.pos[0] 627 | Xsrc[1] = self.pos[1] 628 | 629 | XP = np.zeros((len(X.flatten()),2*self.dim)) 630 | XP[:,:self.dim] = Xsrc 631 | XP[:,self.dim+0] = X.flatten() 632 | XP[:,self.dim+1] = Y.flatten() 633 | XP = Variable(Tensor(XP)).to(self.Params['Device']) 634 | 635 | feature0=torch.zeros((XP.shape[0],128)).to(self.Params['Device']) 636 | feature1=torch.zeros((XP.shape[0],128)).to(self.Params['Device']) 637 | 638 | if self.lamb > 0: 639 | f_0, f_1 = self.network.env_encoder(grid) 640 | feature0, feature1 = self.network.env_features(XP, f_0, f_1) 641 | feature0 = feature0*self.lamb 642 | feature1 = feature1*self.lamb 643 | 644 | 645 | tt = self.TravelTimes(XP,feature0, feature1) 646 | ss = self.Speed(XP,feature0, feature1)#*5 647 | tau = self.Tau(XP,feature0, feature1) 648 | 649 | TT = tt.to('cpu').data.numpy().reshape(X.shape) 650 | V = ss.to('cpu').data.numpy().reshape(X.shape) 651 | TAU = tau.to('cpu').data.numpy().reshape(X.shape) 652 | 653 | fig = plt.figure() 654 | 655 | ax = fig.add_subplot(111) 656 | quad1 = ax.pcolormesh(X,Y,V,vmin=0,vmax=1) 657 | ax.contour(X,Y,TT,np.arange(0,3,0.05), cmap='bone', linewidths=0.5)#0.25 658 | plt.colorbar(quad1,ax=ax, pad=0.1, label='Predicted Velocity') 659 | plt.savefig(self.Params['ModelPath']+"/plots"+str(epoch)+"_"+str(round(total_train_loss,4))+"_0.jpg",bbox_inches='tight') 660 | 661 | plt.close(fig) 662 | fig = plt.figure() 663 | ax = fig.add_subplot(111) 664 | quad1 = ax.pcolormesh(X,Y,TAU,vmin=0,vmax=1) 665 | ax.contour(X,Y,TT,np.arange(0,3,0.05), cmap='bone', linewidths=0.5)#0.25 666 | plt.colorbar(quad1,ax=ax, pad=0.1, label='Predicted Velocity') 667 | plt.savefig(self.Params['ModelPath']+"/tauplots"+str(epoch)+"_"+str(round(total_train_loss,4))+"_0.jpg",bbox_inches='tight') 668 | 669 | plt.close(fig) 670 | --------------------------------------------------------------------------------