├── MC_object_collide.py ├── MC_self_collide.py ├── ModelingClothNewFeatures.py ├── ModelingClothRewrite.py ├── barycentric_stuff.py ├── bend_solver.py ├── collision.py ├── final_cut.py ├── function_libraries.py ├── grid_fill.py ├── new_methods_2.py ├── polyline_tools.py ├── seam_wrangler.py ├── self_collide.py ├── slices.py ├── text_from_object.py └── zips.py /MC_object_collide.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import numpy as np 3 | import bmesh 4 | import time 5 | 6 | def timer(t, name='name'): 7 | ti = bpy.context.scene.timers 8 | if name not in ti: 9 | ti[name] = 0.0 10 | ti[name] += t 11 | 12 | 13 | # universal --------------------- 14 | def revert_transforms(ob, co): 15 | """Set world coords on object. 16 | Run before setting coords to deal with object transforms 17 | if using apply_transforms()""" 18 | m = np.linalg.inv(ob.matrix_world) 19 | mat = m[:3, :3]# rotates backwards without T 20 | loc = m[:3, 3] 21 | return co @ mat + loc 22 | 23 | 24 | # universal --------------------- 25 | def revert_rotation(ob, co): 26 | """When reverting vectors such as normals we only need 27 | to rotate""" 28 | m = np.array(ob.matrix_world) 29 | mat = m[:3, :3] / np.array(ob.scale, dtype=np.float32) # rotates backwards without T 30 | #print(mat) 31 | return (co @ mat) / np.array(ob.scale, dtype=np.float32) 32 | 33 | 34 | # universal --------------------- 35 | def absolute_co(ob, co=None): 36 | """Get vert coords in world space with modifiers""" 37 | co, proxy = get_proxy_co(ob, co, return_proxy=True) 38 | m = np.array(ob.matrix_world, dtype=np.float32) 39 | mat = m[:3, :3].T # rotates backwards without T 40 | loc = m[:3, 3] 41 | return co @ mat + loc, proxy 42 | 43 | 44 | def apply_transforms(ob, co): 45 | """Get vert coords in world space""" 46 | m = np.array(ob.matrix_world, dtype=np.float32) 47 | mat = m[:3, :3].T # rotates backwards without T 48 | loc = m[:3, 3] 49 | return co @ mat + loc 50 | 51 | 52 | def select_edit_mode(sc, ob, idx, type='v', deselect=False, obm=None): 53 | """Selects verts in edit mode and updates""" 54 | 55 | if ob.data.is_editmode: 56 | if obm is None: 57 | obm = bmesh.from_edit_mesh(ob.data) 58 | obm.verts.ensure_lookup_table() 59 | 60 | if type == 'v': 61 | x = obm.verts 62 | if type == 'f': 63 | x = obm.faces 64 | if type == 'e': 65 | x = obm.edges 66 | 67 | if deselect: 68 | for i in x: 69 | i.select = False 70 | 71 | for i in idx: 72 | sc.select_counter[i] += 1 73 | x[i].select = True 74 | 75 | if obm is None: 76 | bmesh.update_edit_mesh(ob.data) 77 | #bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) 78 | 79 | 80 | def bmesh_proxy(ob): 81 | """Get a bmesh contating modifier effects""" 82 | dg = bpy.context.evaluated_depsgraph_get() 83 | prox = ob.evaluated_get(dg) 84 | proxy = prox.to_mesh() 85 | obm = bmesh.new() 86 | obm.from_mesh(proxy) 87 | return obm 88 | 89 | 90 | def get_proxy_co(ob, co=None, proxy=None, return_proxy=False, return_normals=False): 91 | """Gets co with modifiers like cloth""" 92 | if proxy is None: 93 | dg = bpy.context.evaluated_depsgraph_get() 94 | prox = ob.evaluated_get(dg) 95 | proxy = prox.to_mesh() 96 | 97 | if co is None: 98 | vc = len(proxy.vertices) 99 | co = np.empty((vc, 3), dtype=np.float32) 100 | 101 | proxy.vertices.foreach_get('co', co.ravel()) 102 | if return_proxy: 103 | return co, proxy 104 | 105 | proxy.to_mesh_clear() 106 | #ob.to_mesh_clear() 107 | return co 108 | 109 | 110 | def get_edges(ob, fake=False): 111 | """Edge indexing for self collision""" 112 | if fake: 113 | c = len(ob.data.vertices) 114 | ed = np.empty((c, 2), dtype=np.int32) 115 | idx = np.arange(c * 2, dtype=np.int32) 116 | ed[:, 0] = idx[:c] 117 | ed[:, 1] = idx[c:] 118 | return ed 119 | 120 | ed = np.empty((len(ob.data.edges), 2), dtype=np.int32) 121 | ob.data.edges.foreach_get('vertices', ed.ravel()) 122 | return ed 123 | 124 | 125 | def get_faces(ob): 126 | """Only works on triangle mesh.""" 127 | fa = np.empty((len(ob.data.polygons), 3), dtype=np.int32) 128 | ob.data.polygons.foreach_get('vertices', fa.ravel()) 129 | return fa 130 | 131 | 132 | def get_tridex(ob, tobm=None): 133 | """Return an index for viewing the verts as triangles""" 134 | free = True 135 | if ob.data.is_editmode: 136 | ob.update_from_editmode() 137 | if tobm is None: 138 | tobm = bmesh.new() 139 | tobm.from_mesh(ob.data) 140 | free = True 141 | bmesh.ops.triangulate(tobm, faces=tobm.faces[:]) 142 | tridex = np.array([[v.index for v in f.verts] for f in tobm.faces], dtype=np.int32) 143 | if free: 144 | tobm.free() 145 | return tridex 146 | 147 | 148 | def inside_triangles(tris, points, margin=0.0):#, cross_vecs): 149 | """Checks if points are inside triangles""" 150 | origins = tris[:, 0] 151 | cross_vecs = tris[:, 1:] - origins[:, None] 152 | 153 | v2 = points - origins 154 | 155 | # --------- 156 | v0 = cross_vecs[:,0] 157 | v1 = cross_vecs[:,1] 158 | 159 | d00_d11 = np.einsum('ijk,ijk->ij', cross_vecs, cross_vecs) 160 | d00 = d00_d11[:,0] 161 | d11 = d00_d11[:,1] 162 | d01 = np.einsum('ij,ij->i', v0, v1) 163 | d02 = np.einsum('ij,ij->i', v0, v2) 164 | d12 = np.einsum('ij,ij->i', v1, v2) 165 | 166 | div = 1 / (d00 * d11 - d01 * d01) 167 | u = (d11 * d02 - d01 * d12) * div 168 | v = (d00 * d12 - d01 * d02) * div 169 | 170 | w = 1 - (u+v) 171 | # !!!! needs some thought 172 | margin = 0.0 173 | # !!!! ================== 174 | weights = np.array([w, u, v]).T 175 | check = (u > margin) & (v > margin) & (w > margin) 176 | 177 | return check, weights 178 | 179 | 180 | def b2(sc, cloth): 181 | print('runing b2') 182 | if len(sc.big_boxes) == 0: 183 | print("ran out") 184 | return 185 | 186 | boxes = [] 187 | for oct in sc.big_boxes: 188 | t = oct[0] 189 | e = oct[1] 190 | b = oct[2] 191 | 192 | tfull, efull, bounds = octree_et(sc, margin=0.0, idx=t, eidx=e, bounds=b, cloth=cloth) 193 | 194 | for i in range(len(tfull)): 195 | t = tfull[i] 196 | e = efull[i] 197 | bmin = bounds[0][i] 198 | bmax = bounds[1][i] 199 | 200 | if (t.shape[0] < sc.box_max) | (e.shape[0] < sc.box_max): 201 | sc.small_boxes.append([t, e]) 202 | else: 203 | boxes.append([t, e, [bmin, bmax]]) 204 | sc.big_boxes = boxes 205 | 206 | 207 | def generate_bounds(minc, maxc, margin): 208 | """from a min corner and a max corner 209 | generate the min and max corner of 8 boxes""" 210 | 211 | diag = (maxc - minc) / 2 212 | mid = minc + diag 213 | mins = np.zeros((8,3), dtype=np.float32) 214 | maxs = np.zeros((8,3), dtype=np.float32) 215 | 216 | # blf 217 | mins[0] = minc 218 | maxs[0] = mid 219 | # brf 220 | mins[1] = minc 221 | mins[1][0] += diag[0] 222 | maxs[1] = mid 223 | maxs[1][0] += diag[0] 224 | # blb 225 | mins[2] = minc 226 | mins[2][1] += diag[1] 227 | maxs[2] = mid 228 | maxs[2][1] += diag[1] 229 | # brb 230 | mins[3] = mins[2] 231 | mins[3][0] += diag[0] 232 | maxs[3] = maxs[2] 233 | maxs[3][0] += diag[0] 234 | # tlf 235 | mins[4] = mins[0] 236 | mins[4][2] += diag[2] 237 | maxs[4] = maxs[0] 238 | maxs[4][2] += diag[2] 239 | # trf 240 | mins[5] = mins[1] 241 | mins[5][2] += diag[2] 242 | maxs[5] = maxs[1] 243 | maxs[5][2] += diag[2] 244 | # tlb 245 | mins[6] = mins[2] 246 | mins[6][2] += diag[2] 247 | maxs[6] = maxs[2] 248 | maxs[6][2] += diag[2] 249 | # trb 250 | mins[7] = mins[3] 251 | mins[7][2] += diag[2] 252 | maxs[7] = maxs[3] 253 | maxs[7][2] += diag[2] 254 | 255 | return mid, [mins, maxs] 256 | 257 | 258 | def octree_et(sc, margin, idx=None, eidx=None, bounds=None, cloth=None): 259 | """Adaptive octree. Good for finding doubles or broad 260 | phase collision culling. et does edges and tris. 261 | Also groups edges in boxes.""" # first box is based on bounds so first box could be any shape rectangle 262 | 263 | T = time.time() 264 | margin = sc.M # might be faster than >=, <= 265 | 266 | co = cloth.oc_co 267 | 268 | if bounds is None: 269 | b_min = np.min(co, axis=0) 270 | b_max = np.max(co, axis=0) 271 | else: 272 | b_min, b_max = bounds[0], bounds[1] 273 | 274 | #eco = co[sc.ed[eidx].ravel()] 275 | #b_min = np.min(eco, axis=0) 276 | #b_max = np.max(eco, axis=0) 277 | 278 | # bounds_8 is for use on the next iteration. 279 | mid, bounds_8 = generate_bounds(b_min, b_max, margin) 280 | 281 | #mid = b_min + ((b_max - b_min) / 2) 282 | mid_ = mid + margin 283 | _mid = mid - margin 284 | 285 | x_, y_, z_ = mid_[0], mid_[1], mid_[2] 286 | _x, _y, _z = _mid[0], _mid[1], _mid[2] 287 | 288 | # tris 289 | xmax = sc.txmax 290 | xmin = sc.txmin 291 | 292 | ymax = sc.tymax 293 | ymin = sc.tymin 294 | 295 | zmax = sc.tzmax 296 | zmin = sc.tzmin 297 | 298 | # edges 299 | exmin = sc.exmin 300 | eymin = sc.eymin 301 | ezmin = sc.ezmin 302 | 303 | exmax = sc.exmax 304 | eymax = sc.eymax 305 | ezmax = sc.ezmax 306 | 307 | # l = left, r = right, f = front, b = back, u = up, d = down 308 | if idx is None: 309 | idx = cloth.oc_indexer 310 | if eidx is None: 311 | eidx = cloth.oc_eidx 312 | 313 | idx = np.array(idx, dtype=np.int32) 314 | eidx = np.array(eidx, dtype=np.int32) 315 | 316 | # ------------------------------- 317 | B = xmin[idx] < x_# + margin 318 | il = idx[B] 319 | 320 | B = xmax[idx] > _x# - margin 321 | ir = idx[B] 322 | 323 | # edges 324 | eB = exmin[eidx] < x_# + margin 325 | eil = eidx[eB] 326 | 327 | eB = exmax[eidx] > _x# - margin 328 | eir = eidx[eB] 329 | 330 | # ------------------------------ 331 | B = ymax[il] > _y# - margin 332 | ilf = il[B] 333 | 334 | B = ymin[il] < y_# + margin 335 | ilb = il[B] 336 | 337 | B = ymax[ir] > _y# - margin 338 | irf = ir[B] 339 | 340 | B = ymin[ir] < y_# + margin 341 | irb = ir[B] 342 | 343 | # edges 344 | eB = eymax[eil] > _y# - margin 345 | eilf = eil[eB] 346 | 347 | eB = eymin[eil] < y_# + margin 348 | eilb = eil[eB] 349 | 350 | eB = eymax[eir] > _y# - margin 351 | eirf = eir[eB] 352 | 353 | eB = eymin[eir] < y_# + margin 354 | eirb = eir[eB] 355 | 356 | # ------------------------------ 357 | B = zmax[ilf] > _z# - margin 358 | ilfu = ilf[B] 359 | B = zmin[ilf] < z_# + margin 360 | ilfd = ilf[B] 361 | 362 | B = zmax[ilb] > _z# - margin 363 | ilbu = ilb[B] 364 | B = zmin[ilb] < z_# + margin 365 | ilbd = ilb[B] 366 | 367 | B = zmax[irf] > _z# - margin 368 | irfu = irf[B] 369 | B = zmin[irf] < z_# + margin 370 | irfd = irf[B] 371 | 372 | B = zmax[irb] > _z# - margin 373 | irbu = irb[B] 374 | B = zmin[irb] < z_# + margin 375 | irbd = irb[B] 376 | 377 | # edges 378 | eB = ezmax[eilf] > _z# - margin 379 | eilfu = eilf[eB] 380 | eB = ezmin[eilf] < z_# + margin 381 | eilfd = eilf[eB] 382 | 383 | eB = ezmax[eilb] > _z# - margin 384 | eilbu = eilb[eB] 385 | eB = ezmin[eilb] < z_# + margin 386 | eilbd = eilb[eB] 387 | 388 | eB = ezmax[eirf] > _z# - margin 389 | eirfu = eirf[eB] 390 | eB = ezmin[eirf] < z_# + margin 391 | eirfd = eirf[eB] 392 | 393 | eB = ezmax[eirb] > _z# - margin 394 | eirbu = eirb[eB] 395 | eB = ezmin[eirb] < z_# + margin 396 | eirbd = eirb[eB] 397 | 398 | boxes = [ilbd, irbd, ilfd, irfd, ilbu, irbu, ilfu, irfu] 399 | eboxes = [eilbd, eirbd, eilfd, eirfd, eilbu, eirbu, eilfu, eirfu] 400 | 401 | bbool = np.array([i.shape[0] > 0 for i in boxes]) 402 | ebool = np.array([i.shape[0] > 0 for i in eboxes]) 403 | both = bbool & ebool 404 | 405 | full = np.array(boxes, dtype=np.object)[both] 406 | efull = np.array(eboxes, dtype=np.object)[both] 407 | 408 | return full, efull, [bounds_8[0][both], bounds_8[1][both]] 409 | 410 | 411 | def self_collisions_7(sc, margin=0.1, cloth=None): 412 | 413 | T = time.time() 414 | 415 | tx = sc.tris[:, :, 0] 416 | ty = sc.tris[:, :, 1] 417 | tz = sc.tris[:, :, 2] 418 | 419 | txmax = np.max(tx, axis=1) + margin 420 | txmin = np.min(tx, axis=1) - margin 421 | 422 | tymax = np.max(ty, axis=1) + margin 423 | tymin = np.min(ty, axis=1) - margin 424 | 425 | tzmax = np.max(tz, axis=1) + margin 426 | tzmin = np.min(tz, axis=1) - margin 427 | 428 | sc.txmax = txmax 429 | sc.txmin = txmin 430 | 431 | sc.tymax = tymax 432 | sc.tymin = tymin 433 | 434 | sc.tzmax = tzmax 435 | sc.tzmin = tzmin 436 | 437 | # edge bounds: 438 | ex = sc.edges[:, :, 0] 439 | ey = sc.edges[:, :, 1] 440 | ez = sc.edges[:, :, 2] 441 | 442 | sc.exmin = np.min(ex, axis=1) - margin 443 | sc.eymin = np.min(ey, axis=1) - margin 444 | sc.ezmin = np.min(ez, axis=1) - margin 445 | 446 | sc.exmax = np.max(ex, axis=1) + margin 447 | sc.eymax = np.max(ey, axis=1) + margin 448 | sc.ezmax = np.max(ez, axis=1) + margin 449 | 450 | #timer(time.time()-T, "self col 5") 451 | # !!! can do something like check the octree to make sure the boxes are smaller 452 | # to know if we hit a weird case where we're no longer getting fewer in boxes 453 | 454 | tfull, efull, bounds = octree_et(sc, margin=0.0, cloth=cloth) 455 | 456 | T = time.time() 457 | for i in range(len(tfull)): 458 | t = tfull[i] 459 | e = efull[i] 460 | bmin = bounds[0][i] 461 | bmax = bounds[1][i] 462 | 463 | if (t.shape[0] < sc.box_max) | (e.shape[0] < sc.box_max): 464 | sc.small_boxes.append([t, e]) 465 | else: 466 | sc.big_boxes.append([t, e, [bmin, bmax]]) # using a dictionary or class might be faster !!! 467 | # !!! instead of passing bounds could figure out the min and max in the tree every time 468 | # we divide. So divide the left and right for example then get the new bounds for 469 | # each side and so on... 470 | 471 | #timer(time.time()-T, 'sort boxes') 472 | T = time.time() 473 | 474 | limit = 3 475 | count = 0 476 | #sc.report = True 477 | while len(sc.big_boxes) > 0: 478 | b2(sc, cloth) 479 | if sc.report: 480 | print("recursion level:", count) 481 | if count > limit: 482 | for b in sc.big_boxes: 483 | sc.small_boxes.append(b) 484 | break 485 | count += 1 486 | 487 | 488 | #timer(time.time()-T, 'b2') 489 | #if sc.report: 490 | if 0: 491 | print(len(sc.big_boxes), "how many big boxes") 492 | print(len(sc.small_boxes), "how many small boxes") 493 | 494 | for en, b in enumerate(sc.small_boxes): 495 | trs = np.array(b[0], dtype=np.int32) 496 | ed = np.array(b[1], dtype=np.int32) # can't figure out why this becomes an object array sometimes... 497 | 498 | if ed.shape[0] == 0: 499 | continue 500 | 501 | tris = sc.tris[trs] 502 | eds = sc.edges[ed] 503 | 504 | 505 | # detect link faces and broadcast 506 | #nlf_0 = cloth.sc_edges[ed][:, 0] == cloth.total_tridex[trs][:, :, None] 507 | #ab = np.any(nlf_0, axis=1) 508 | 509 | rse = np.tile(ed, trs.shape[0]) 510 | rse.shape = (trs.shape[0], ed.shape[0]) 511 | rst = np.repeat(trs, ed.shape[0]) 512 | rst.shape = (trs.shape[0], ed.shape[0]) 513 | 514 | re = rse#[~ab] # repeated edges with link faces removed 515 | rt = rst#[~ab] # repeated triangles to match above edges 516 | 517 | in_x = txmax[rt] > sc.exmin[re] 518 | rt, re = rt[in_x], re[in_x] 519 | 520 | in_x2 = txmin[rt] < sc.exmax[re] 521 | rt, re = rt[in_x2], re[in_x2] 522 | 523 | in_y = tymax[rt] > sc.eymin[re] 524 | rt, re = rt[in_y], re[in_y] 525 | 526 | in_y2 = tymin[rt] < sc.eymax[re] 527 | rt, re = rt[in_y2], re[in_y2] 528 | 529 | in_z = tzmin[rt] < sc.ezmax[re] 530 | rt, re = rt[in_z], re[in_z] 531 | 532 | in_z2 = tzmax[rt] > sc.ezmin[re] 533 | rt, re = rt[in_z2], re[in_z2] 534 | 535 | #timer(time.time()-T, 'edge bounds') 536 | 537 | T = time.time() 538 | 539 | if rt.shape[0] > 0: 540 | 541 | sc.ees += re.tolist() 542 | sc.trs += rt.tolist() 543 | 544 | 545 | def ray_check(sc, ed, trs, cloth): 546 | 547 | 548 | # ed is a list object so we convert it for indexing the points 549 | # trs indexes the tris 550 | edidx = np.array(ed, dtype=np.int32) 551 | 552 | # e is the start co and current co of the cloth paird in Nx2x3 553 | e = sc.edges[ed] 554 | 555 | t = sc.tris[trs] 556 | 557 | start_co = e[:, 0] 558 | co = e[:, 1] 559 | 560 | ori = t[:, 3] 561 | t1 = t[:, 4] - ori 562 | t2 = t[:, 5] - ori 563 | 564 | norms = np.cross(t1, t2) 565 | un = norms / np.sqrt(np.einsum('ij,ij->i', norms, norms))[:, None] 566 | 567 | vecs = co - ori 568 | dots = np.einsum('ij,ij->i', vecs, un) 569 | 570 | switch = dots < 0 571 | 572 | check, weights = inside_triangles(t[:, :3][switch], co[switch], margin= -sc.M) 573 | start_check, start_weights = inside_triangles(t[:, :3][switch], start_co[switch], margin= -sc.M) 574 | travel = un[switch][check] * -dots[switch][check][:, None] 575 | 576 | weight_plot = t[:, 3:][switch][check] * start_weights[check][:, :, None] 577 | 578 | loc = np.sum(weight_plot, axis=1) 579 | 580 | pcols = edidx[switch][check] 581 | cco = sc.fco[pcols] 582 | pl_move = loc - cco 583 | 584 | fr = cloth.object_friction # put in a static friction method !!! when the force is greater than a length it pulls otherwise it sticks. 585 | move = (travel * (1 - fr)) + (pl_move * fr) 586 | rev = revert_rotation(cloth.ob, move) 587 | cloth.co[pcols] += rev 588 | 589 | 590 | class ObjectCollide(): 591 | name = "oc" 592 | 593 | def __init__(self, cloth): 594 | 595 | tris_six = cloth.oc_tris_six# = np.empty((total_tridex.shape[0], 6, 3), dtype=np.float32) 596 | 597 | ob = cloth.ob 598 | ### tris_six = cloth.tris_six 599 | 600 | sco = apply_transforms(ob, cloth.select_start) 601 | fco = apply_transforms(ob, cloth.co) 602 | self.fco = fco 603 | 604 | cloth.oc_co[:cloth.v_count] = sco 605 | #cloth.oc_co[:cloth.v_count] = fco 606 | cloth.oc_co[cloth.v_count:] = fco 607 | 608 | tris_six[:, :3] = cloth.last_co[cloth.total_tridex] 609 | tris_six[:, 3:] = cloth.total_co[cloth.total_tridex] 610 | 611 | # ----------------------- 612 | 613 | self.box_max = cloth.ob.MC_props.sc_box_max 614 | 615 | self.M = cloth.OM 616 | self.force = cloth.ob.MC_props.self_collide_force 617 | self.tris = tris_six 618 | self.edges = cloth.oc_co[cloth.sc_edges] 619 | self.big_boxes = [] # boxes that still need to be divided 620 | self.small_boxes = [] # finished boxes less than the maximum box size 621 | 622 | # debug stuff 623 | self.sel = False 624 | #self.sel = True 625 | self.report = False 626 | #self.report = True 627 | if self.report: 628 | self.select_counter = np.zeros(cloth.sc_eidx.shape[0], dtype=np.int32) 629 | if self.sel: 630 | if self.ob.data.is_editmode: 631 | self.obm = bmesh.from_edit_mesh(self.ob.data) 632 | else: 633 | self.obm = bmesh.new() 634 | self.obm.from_mesh(self.ob.data) 635 | self.obm.edges.ensure_lookup_table() 636 | self.obm.verts.ensure_lookup_table() 637 | self.obm.faces.ensure_lookup_table() 638 | 639 | self.trs = [] 640 | self.ees = [] 641 | 642 | 643 | def detect_collisions(cloth): 644 | 645 | sc = ObjectCollide(cloth) 646 | 647 | #return 648 | t = time.time() 649 | 650 | self_collisions_7(sc, sc.M, cloth) 651 | 652 | ray_check(sc, sc.ees, sc.trs, cloth) 653 | 654 | if sc.report: 655 | print(sc.box_max, "box max") 656 | print(np.sum(sc.select_counter > 1), ": In too many boxes") 657 | print(np.max(sc.select_counter), "max times and edge was selected") 658 | print(time.time() - t) 659 | 660 | if sc.sel: 661 | if ob.data.is_editmode: 662 | bmesh.update_edit_mesh(ob.data) 663 | 664 | ob.data.update() 665 | -------------------------------------------------------------------------------- /MC_self_collide.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import numpy as np 3 | import bmesh 4 | import time 5 | 6 | def timer(t, name='name'): 7 | ti = bpy.context.scene.timers 8 | if name not in ti: 9 | ti[name] = 0.0 10 | ti[name] += t 11 | 12 | 13 | def select_edit_mode(sc, ob, idx, type='v', deselect=False, obm=None): 14 | """Selects verts in edit mode and updates""" 15 | 16 | if ob.data.is_editmode: 17 | if obm is None: 18 | obm = bmesh.from_edit_mesh(ob.data) 19 | obm.verts.ensure_lookup_table() 20 | 21 | if type == 'v': 22 | x = obm.verts 23 | if type == 'f': 24 | x = obm.faces 25 | if type == 'e': 26 | x = obm.edges 27 | 28 | if deselect: 29 | for i in x: 30 | i.select = False 31 | 32 | for i in idx: 33 | sc.select_counter[i] += 1 34 | x[i].select = True 35 | 36 | if obm is None: 37 | bmesh.update_edit_mesh(ob.data) 38 | #bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) 39 | 40 | 41 | def bmesh_proxy(ob): 42 | """Get a bmesh contating modifier effects""" 43 | dg = bpy.context.evaluated_depsgraph_get() 44 | prox = ob.evaluated_get(dg) 45 | proxy = prox.to_mesh() 46 | obm = bmesh.new() 47 | obm.from_mesh(proxy) 48 | return obm 49 | 50 | 51 | def get_proxy_co(ob, co=None, proxy=None): 52 | """Gets co with modifiers like cloth""" 53 | if proxy is None: 54 | dg = bpy.context.evaluated_depsgraph_get() 55 | prox = ob.evaluated_get(dg) 56 | proxy = prox.to_mesh() 57 | 58 | if co is None: 59 | vc = len(proxy.vertices) 60 | co = np.empty((vc, 3), dtype=np.float32) 61 | 62 | proxy.vertices.foreach_get('co', co.ravel()) 63 | ob.to_mesh_clear() 64 | return co 65 | 66 | 67 | def get_edges(ob, fake=False): 68 | """Edge indexing for self collision""" 69 | if fake: 70 | c = len(ob.data.vertices) 71 | ed = np.empty((c, 2), dtype=np.int32) 72 | idx = np.arange(c * 2, dtype=np.int32) 73 | ed[:, 0] = idx[:c] 74 | ed[:, 1] = idx[c:] 75 | return ed 76 | 77 | ed = np.empty((len(ob.data.edges), 2), dtype=np.int32) 78 | ob.data.edges.foreach_get('vertices', ed.ravel()) 79 | return ed 80 | 81 | 82 | def get_faces(ob): 83 | """Only works on triangle mesh.""" 84 | fa = np.empty((len(ob.data.polygons), 3), dtype=np.int32) 85 | ob.data.polygons.foreach_get('vertices', fa.ravel()) 86 | return fa 87 | 88 | 89 | def get_tridex(ob, tobm=None): 90 | """Return an index for viewing the verts as triangles""" 91 | free = True 92 | if ob.data.is_editmode: 93 | ob.update_from_editmode() 94 | if tobm is None: 95 | tobm = bmesh.new() 96 | tobm.from_mesh(ob.data) 97 | free = True 98 | bmesh.ops.triangulate(tobm, faces=tobm.faces[:]) 99 | tridex = np.array([[v.index for v in f.verts] for f in tobm.faces], dtype=np.int32) 100 | if free: 101 | tobm.free() 102 | return tridex 103 | 104 | 105 | def inside_triangles(tris, points, margin=0.0):#, cross_vecs): 106 | """Checks if points are inside triangles""" 107 | origins = tris[:, 0] 108 | cross_vecs = tris[:, 1:] - origins[:, None] 109 | 110 | v2 = points - origins 111 | 112 | # --------- 113 | v0 = cross_vecs[:,0] 114 | v1 = cross_vecs[:,1] 115 | 116 | d00_d11 = np.einsum('ijk,ijk->ij', cross_vecs, cross_vecs) 117 | d00 = d00_d11[:,0] 118 | d11 = d00_d11[:,1] 119 | d01 = np.einsum('ij,ij->i', v0, v1) 120 | d02 = np.einsum('ij,ij->i', v0, v2) 121 | d12 = np.einsum('ij,ij->i', v1, v2) 122 | 123 | div = 1 / (d00 * d11 - d01 * d01) 124 | u = (d11 * d02 - d01 * d12) * div 125 | v = (d00 * d12 - d01 * d02) * div 126 | 127 | w = 1 - (u+v) 128 | # !!!! needs some thought 129 | #margin = 0.0 130 | # !!!! ================== 131 | weights = np.array([w, u, v]).T 132 | check = (u > margin) & (v > margin) & (w > margin) 133 | 134 | return check, weights 135 | 136 | 137 | def b2(sc, cloth): 138 | 139 | 140 | if len(sc.big_boxes) == 0: 141 | print("ran out") 142 | return 143 | 144 | boxes = [] 145 | for oct in sc.big_boxes: 146 | t = oct[0] 147 | e = oct[1] 148 | b = oct[2] 149 | 150 | tfull, efull, bounds = octree_et(sc, margin=0.0, idx=t, eidx=e, bounds=b, cloth=cloth) 151 | 152 | for i in range(len(tfull)): 153 | t = tfull[i] 154 | e = efull[i] 155 | bmin = bounds[0][i] 156 | bmax = bounds[1][i] 157 | 158 | if (t.shape[0] < sc.box_max) | (e.shape[0] < sc.box_max): 159 | sc.small_boxes.append([t, e]) 160 | else: 161 | boxes.append([t, e, [bmin, bmax]]) 162 | sc.big_boxes = boxes 163 | 164 | 165 | def generate_bounds(minc, maxc, margin): 166 | """from a min corner and a max corner 167 | generate the min and max corner of 8 boxes""" 168 | 169 | diag = (maxc - minc) / 2 170 | mid = minc + diag 171 | mins = np.zeros((8,3), dtype=np.float32) 172 | maxs = np.zeros((8,3), dtype=np.float32) 173 | 174 | # blf 175 | mins[0] = minc 176 | maxs[0] = mid 177 | # brf 178 | mins[1] = minc 179 | mins[1][0] += diag[0] 180 | maxs[1] = mid 181 | maxs[1][0] += diag[0] 182 | # blb 183 | mins[2] = minc 184 | mins[2][1] += diag[1] 185 | maxs[2] = mid 186 | maxs[2][1] += diag[1] 187 | # brb 188 | mins[3] = mins[2] 189 | mins[3][0] += diag[0] 190 | maxs[3] = maxs[2] 191 | maxs[3][0] += diag[0] 192 | # tlf 193 | mins[4] = mins[0] 194 | mins[4][2] += diag[2] 195 | maxs[4] = maxs[0] 196 | maxs[4][2] += diag[2] 197 | # trf 198 | mins[5] = mins[1] 199 | mins[5][2] += diag[2] 200 | maxs[5] = maxs[1] 201 | maxs[5][2] += diag[2] 202 | # tlb 203 | mins[6] = mins[2] 204 | mins[6][2] += diag[2] 205 | maxs[6] = maxs[2] 206 | maxs[6][2] += diag[2] 207 | # trb 208 | mins[7] = mins[3] 209 | mins[7][2] += diag[2] 210 | maxs[7] = maxs[3] 211 | maxs[7][2] += diag[2] 212 | 213 | return mid, [mins, maxs] 214 | 215 | 216 | def octree_et(sc, margin, idx=None, eidx=None, bounds=None, cloth=None): 217 | """Adaptive octree. Good for finding doubles or broad 218 | phase collision culling. et does edges and tris. 219 | Also groups edges in boxes.""" # first box is based on bounds so first box could be any shape rectangle 220 | 221 | T = time.time() 222 | margin = sc.M # might be faster than >=, <= 223 | 224 | co = cloth.sc_co 225 | 226 | if bounds is None: 227 | b_min = np.min(co, axis=0) 228 | b_max = np.max(co, axis=0) 229 | else: 230 | b_min, b_max = bounds[0], bounds[1] 231 | 232 | #eco = co[sc.ed[eidx].ravel()] 233 | #b_min = np.min(eco, axis=0) 234 | #b_max = np.max(eco, axis=0) 235 | 236 | # bounds_8 is for use on the next iteration. 237 | mid, bounds_8 = generate_bounds(b_min, b_max, margin) 238 | 239 | #mid = b_min + ((b_max - b_min) / 2) 240 | mid_ = mid + margin 241 | _mid = mid - margin 242 | 243 | x_, y_, z_ = mid_[0], mid_[1], mid_[2] 244 | _x, _y, _z = _mid[0], _mid[1], _mid[2] 245 | 246 | # tris 247 | xmax = sc.txmax 248 | xmin = sc.txmin 249 | 250 | ymax = sc.tymax 251 | ymin = sc.tymin 252 | 253 | zmax = sc.tzmax 254 | zmin = sc.tzmin 255 | 256 | # edges 257 | exmin = sc.exmin 258 | eymin = sc.eymin 259 | ezmin = sc.ezmin 260 | 261 | exmax = sc.exmax 262 | eymax = sc.eymax 263 | ezmax = sc.ezmax 264 | 265 | # l = left, r = right, f = front, b = back, u = up, d = down 266 | if idx is None: 267 | idx = cloth.sc_indexer 268 | if eidx is None: 269 | eidx = cloth.sc_eidx 270 | 271 | idx = np.array(idx, dtype=np.int32) 272 | eidx = np.array(eidx, dtype=np.int32) 273 | 274 | # ------------------------------- 275 | B = xmin[idx] < x_# + margin 276 | il = idx[B] 277 | 278 | B = xmax[idx] > _x# - margin 279 | ir = idx[B] 280 | 281 | # edges 282 | eB = exmin[eidx] < x_# + margin 283 | eil = eidx[eB] 284 | 285 | eB = exmax[eidx] > _x# - margin 286 | eir = eidx[eB] 287 | 288 | # ------------------------------ 289 | B = ymax[il] > _y# - margin 290 | ilf = il[B] 291 | 292 | B = ymin[il] < y_# + margin 293 | ilb = il[B] 294 | 295 | B = ymax[ir] > _y# - margin 296 | irf = ir[B] 297 | 298 | B = ymin[ir] < y_# + margin 299 | irb = ir[B] 300 | 301 | # edges 302 | eB = eymax[eil] > _y# - margin 303 | eilf = eil[eB] 304 | 305 | eB = eymin[eil] < y_# + margin 306 | eilb = eil[eB] 307 | 308 | eB = eymax[eir] > _y# - margin 309 | eirf = eir[eB] 310 | 311 | eB = eymin[eir] < y_# + margin 312 | eirb = eir[eB] 313 | 314 | # ------------------------------ 315 | B = zmax[ilf] > _z# - margin 316 | ilfu = ilf[B] 317 | B = zmin[ilf] < z_# + margin 318 | ilfd = ilf[B] 319 | 320 | B = zmax[ilb] > _z# - margin 321 | ilbu = ilb[B] 322 | B = zmin[ilb] < z_# + margin 323 | ilbd = ilb[B] 324 | 325 | B = zmax[irf] > _z# - margin 326 | irfu = irf[B] 327 | B = zmin[irf] < z_# + margin 328 | irfd = irf[B] 329 | 330 | B = zmax[irb] > _z# - margin 331 | irbu = irb[B] 332 | B = zmin[irb] < z_# + margin 333 | irbd = irb[B] 334 | 335 | # edges 336 | eB = ezmax[eilf] > _z# - margin 337 | eilfu = eilf[eB] 338 | eB = ezmin[eilf] < z_# + margin 339 | eilfd = eilf[eB] 340 | 341 | eB = ezmax[eilb] > _z# - margin 342 | eilbu = eilb[eB] 343 | eB = ezmin[eilb] < z_# + margin 344 | eilbd = eilb[eB] 345 | 346 | eB = ezmax[eirf] > _z# - margin 347 | eirfu = eirf[eB] 348 | eB = ezmin[eirf] < z_# + margin 349 | eirfd = eirf[eB] 350 | 351 | eB = ezmax[eirb] > _z# - margin 352 | eirbu = eirb[eB] 353 | eB = ezmin[eirb] < z_# + margin 354 | eirbd = eirb[eB] 355 | 356 | boxes = [ilbd, irbd, ilfd, irfd, ilbu, irbu, ilfu, irfu] 357 | eboxes = [eilbd, eirbd, eilfd, eirfd, eilbu, eirbu, eilfu, eirfu] 358 | 359 | bbool = np.array([i.shape[0] > 0 for i in boxes]) 360 | ebool = np.array([i.shape[0] > 0 for i in eboxes]) 361 | both = bbool & ebool 362 | 363 | full = np.array(boxes, dtype=np.object)[both] 364 | efull = np.array(eboxes, dtype=np.object)[both] 365 | 366 | return full, efull, [bounds_8[0][both], bounds_8[1][both]] 367 | 368 | 369 | def self_collisions_7(sc, margin=0.1, cloth=None): 370 | 371 | T = time.time() 372 | 373 | tx = sc.tris[:, :, 0] 374 | ty = sc.tris[:, :, 1] 375 | tz = sc.tris[:, :, 2] 376 | 377 | txmax = np.max(tx, axis=1) + margin 378 | txmin = np.min(tx, axis=1) - margin 379 | 380 | tymax = np.max(ty, axis=1) + margin 381 | tymin = np.min(ty, axis=1) - margin 382 | 383 | tzmax = np.max(tz, axis=1) + margin 384 | tzmin = np.min(tz, axis=1) - margin 385 | 386 | sc.txmax = txmax 387 | sc.txmin = txmin 388 | 389 | sc.tymax = tymax 390 | sc.tymin = tymin 391 | 392 | sc.tzmax = tzmax 393 | sc.tzmin = tzmin 394 | 395 | # edge bounds: 396 | ex = sc.edges[:, :, 0] 397 | ey = sc.edges[:, :, 1] 398 | ez = sc.edges[:, :, 2] 399 | 400 | sc.exmin = np.min(ex, axis=1) - margin 401 | sc.eymin = np.min(ey, axis=1) - margin 402 | sc.ezmin = np.min(ez, axis=1) - margin 403 | 404 | sc.exmax = np.max(ex, axis=1) + margin 405 | sc.eymax = np.max(ey, axis=1) + margin 406 | sc.ezmax = np.max(ez, axis=1) + margin 407 | 408 | #timer(time.time()-T, "self col 5") 409 | # !!! can do something like check the octree to make sure the boxes are smaller 410 | # to know if we hit a weird case where we're no longer getting fewer in boxes 411 | 412 | tfull, efull, bounds = octree_et(sc, margin=0.0, cloth=cloth) 413 | 414 | T = time.time() 415 | for i in range(len(tfull)): 416 | t = tfull[i] 417 | e = efull[i] 418 | bmin = bounds[0][i] 419 | bmax = bounds[1][i] 420 | 421 | if (t.shape[0] < sc.box_max) | (e.shape[0] < sc.box_max): 422 | sc.small_boxes.append([t, e]) 423 | else: 424 | sc.big_boxes.append([t, e, [bmin, bmax]]) # using a dictionary or class might be faster !!! 425 | # !!! instead of passing bounds could figure out the min and max in the tree every time 426 | # we divide. So divide the left and right for example then get the new bounds for 427 | # each side and so on... 428 | 429 | #timer(time.time()-T, 'sort boxes') 430 | #T = time.time() 431 | 432 | limit = 3 433 | count = 0 434 | while len(sc.big_boxes) > 0: 435 | b2(sc, cloth) 436 | if sc.report: 437 | print("recursion level:", count) 438 | if count > limit: 439 | for b in sc.big_boxes: 440 | sc.small_boxes.append(b) 441 | break 442 | count += 1 443 | 444 | #timer(time.time()-T, 'b2') 445 | #if sc.report: 446 | if 0: 447 | print(len(sc.big_boxes), "how many big boxes") 448 | print(len(sc.small_boxes), "how many small boxes") 449 | 450 | for en, b in enumerate(sc.small_boxes): 451 | trs = np.array(b[0], dtype=np.int32) 452 | ed = np.array(b[1], dtype=np.int32) # can't figure out why this becomes an object array sometimes... 453 | 454 | if ed.shape[0] == 0: 455 | continue 456 | 457 | tris = sc.tris[trs] 458 | eds = sc.edges[ed] 459 | 460 | # detect link faces and broadcast 461 | nlf_0 = cloth.sc_edges[ed][:, 0] == cloth.tridex[trs][:, :, None] 462 | ab = np.any(nlf_0, axis=1) 463 | 464 | rse = np.tile(ed, trs.shape[0]) 465 | rse.shape = (trs.shape[0], ed.shape[0]) 466 | rst = np.repeat(trs, ed.shape[0]) 467 | rst.shape = (trs.shape[0], ed.shape[0]) 468 | 469 | re = rse[~ab] # repeated edges with link faces removed 470 | rt = rst[~ab] # repeated triangles to match above edges 471 | 472 | in_x = txmax[rt] > sc.exmin[re] 473 | rt, re = rt[in_x], re[in_x] 474 | 475 | in_x2 = txmin[rt] < sc.exmax[re] 476 | rt, re = rt[in_x2], re[in_x2] 477 | 478 | in_y = tymax[rt] > sc.eymin[re] 479 | rt, re = rt[in_y], re[in_y] 480 | 481 | in_y2 = tymin[rt] < sc.eymax[re] 482 | rt, re = rt[in_y2], re[in_y2] 483 | 484 | in_z = tzmin[rt] < sc.ezmax[re] 485 | rt, re = rt[in_z], re[in_z] 486 | 487 | in_z2 = tzmax[rt] > sc.ezmin[re] 488 | rt, re = rt[in_z2], re[in_z2] 489 | 490 | #timer(time.time()-T, 'edge bounds') 491 | 492 | T = time.time() 493 | 494 | if rt.shape[0] > 0: 495 | 496 | sc.ees += re.tolist() 497 | sc.trs += rt.tolist() 498 | 499 | def ray_check_obj(sc, ed, trs, cloth): 500 | 501 | 502 | # ed is a list object so we convert it for indexing the points 503 | # trs indexes the tris 504 | edidx = np.array(ed, dtype=np.int32) 505 | 506 | # e is the start co and current co of the cloth paird in Nx2x3 507 | e = sc.edges[ed] 508 | 509 | t = sc.tris[trs] 510 | 511 | start_co = e[:, 0] 512 | co = e[:, 1] 513 | 514 | ori = t[:, 3] 515 | t1 = t[:, 4] - ori 516 | t2 = t[:, 5] - ori 517 | 518 | norms = np.cross(t1, t2) 519 | un = norms / np.sqrt(np.einsum('ij,ij->i', norms, norms))[:, None] 520 | 521 | vecs = co - ori 522 | dots = np.einsum('ij,ij->i', vecs, un) 523 | 524 | switch = dots < 0 525 | 526 | check, weights = inside_triangles(t[:, :3][switch], co[switch], margin= -sc.M) 527 | start_check, start_weights = inside_triangles(t[:, :3][switch], start_co[switch], margin= -sc.M) 528 | travel = un[switch][check] * -dots[switch][check][:, None] 529 | 530 | weight_plot = t[:, 3:][switch][check] * start_weights[check][:, :, None] 531 | 532 | loc = np.sum(weight_plot, axis=1) 533 | 534 | pcols = edidx[switch][check] 535 | cco = sc.fco[pcols] 536 | pl_move = loc - cco 537 | 538 | fr = cloth.ob.MC_props.sc_friction 539 | move = (travel * (1 - fr)) + (pl_move * fr) 540 | #rev = revert_rotation(cloth.ob, move) 541 | cloth.co[pcols] += move * .5 542 | 543 | 544 | def ray_check_oc(sc, ed, trs, cloth): 545 | 546 | eidx = np.array(ed, dtype=np.int32) 547 | tidx = np.array(trs, dtype=np.int32) 548 | 549 | e = sc.edges[eidx] 550 | t = sc.tris[tidx] 551 | 552 | start_co = e[:, 0] 553 | co = e[:, 1] 554 | 555 | M = cloth.ob.MC_props.self_collide_margin 556 | 557 | start_ori = t[:, 0] 558 | st1 = t[:, 1] - start_ori 559 | st2 = t[:, 2] - start_ori 560 | start_norms = np.cross(st1, st2) 561 | u_start_norms = start_norms / np.sqrt(np.einsum('ij,ij->i', start_norms, start_norms))[:, None] 562 | start_vecs = start_co - start_ori 563 | start_dots = np.einsum('ij,ij->i', start_vecs, u_start_norms) 564 | 565 | 566 | # normals from cloth.co (not from select_start) 567 | ori = t[:, 3] 568 | t1 = t[:, 4] - ori 569 | t2 = t[:, 5] - ori 570 | norms = np.cross(t1, t2) 571 | un = norms / np.sqrt(np.einsum('ij,ij->i', norms, norms))[:, None] 572 | 573 | vecs = co - ori 574 | dots = np.einsum('ij,ij->i', vecs, un) 575 | 576 | switch = np.sign(dots * start_dots) 577 | direction = np.sign(dots) 578 | abs_dots = np.abs(dots) 579 | 580 | # !!! if a point has switched sides, direction has to be reversed !!! 581 | direction *= switch 582 | in_margin = (abs_dots <= M) | (switch == -1) 583 | 584 | 585 | 586 | 587 | check, weights = inside_triangles(t[:, 3:][in_margin], co[in_margin], margin= -0.1) 588 | start_check, start_weights = inside_triangles(t[:, :3][in_margin][check], start_co[in_margin][check], margin= 0.0) 589 | 590 | weight_plot = t[:, 3:][in_margin][check] * start_weights[:, :, None] 591 | if False: # using start weight 592 | weight_plot = t[:, 3:][in_margin][check] * start_weights[:, :, None] 593 | if False: # trying loc with start normals... 594 | loc = np.sum(weight_plot, axis=1) + ((un[in_margin][check] * M) * direction[in_margin][check][:, None]) 595 | loc = np.sum(weight_plot, axis=1) + ((u_start_norms[in_margin][check] * M) * direction[in_margin][check][:, None]) 596 | 597 | co_idx = eidx[in_margin][check] 598 | 599 | if False: # start norms (seems to make no difference...) 600 | travel = -(un[in_margin][check] * dots[in_margin][check][:, None]) + ((un[in_margin][check] * M) * direction[in_margin][check][:, None]) 601 | travel = -(u_start_norms[in_margin][check] * dots[in_margin][check][:, None]) + ((u_start_norms[in_margin][check] * M) * direction[in_margin][check][:, None]) 602 | #start_check, start_weights = inside_triangles(t[:, :3][in_margin][check], co[in_margin][check], margin= -0.1) 603 | #move = cloth.co[co_idx] - start_co_loc 604 | 605 | #now in theory I can use the weights from start tris 606 | 607 | 608 | if False: # moving tris away 609 | 610 | travel *= 0.5 611 | tridex = cloth.tridex[tidx[in_margin][check]] 612 | cloth.co[tridex] -= travel[:, None] 613 | 614 | fr = cloth.ob.MC_props.sc_friction 615 | if fr == 0: 616 | 617 | 618 | cloth.co[co_idx] += travel 619 | print('zero friction') 620 | return 621 | 622 | # could try managing the velocity instead of all this 623 | # add.at crap for the friction... So like reduce the vel when self collide 624 | # happens. 625 | 626 | 627 | pl_move = loc - cloth.co[co_idx] 628 | 629 | 630 | uni = np.unique(co_idx, return_counts=True, return_inverse=True) 631 | div = uni[2][uni[1]] 632 | div[div > 1] *= 2 633 | #pl_move /= div[:, None] 634 | 635 | if False: # moving tris away 636 | 637 | move *= 0.5 638 | tridex = cloth.tridex[tidx[in_margin][check]] 639 | cloth.co[tridex] -= move[:, None] 640 | 641 | f_zeros = np.zeros((uni[0].shape[0], 3), dtype=np.float32) 642 | zeros = np.zeros((uni[0].shape[0], 3), dtype=np.float32) 643 | 644 | np.add.at(f_zeros, uni[1], pl_move/div[:, None]) 645 | np.add.at(zeros, uni[1], travel/div[:, None]) 646 | 647 | 648 | move = (zeros * (1 - fr)) + (f_zeros * fr) 649 | #move = (travel * (1 - fr)) + (pl_move * fr) 650 | 651 | 652 | 653 | cloth.co[uni[0]] += move 654 | 655 | 656 | 657 | 658 | #cloth.velocity[co_idx] *= 0 659 | #cloth.velocity[tridex] *= 0 660 | 661 | #move = (un[in_margin][check] * dots[in_margin][check][:, None]) + (un[in_margin][check] * M) * direction[in_margin][check][:, None] 662 | 663 | #move = (un[in_margin][check] * M) * direction[in_margin][check][:, None] 664 | #cloth.co[co_idx] -= move 665 | 666 | #print(co_idx) 667 | #cloth.co[co_idx] += un[in_margin][check] 668 | 669 | 670 | 671 | 672 | 673 | 674 | class SelfCollide(): 675 | name = "sc" 676 | 677 | def __init__(self, cloth): 678 | 679 | # ----------------------- 680 | ob = cloth.ob 681 | tris_six = cloth.tris_six 682 | 683 | tridex = cloth.tridex 684 | 685 | cloth.sc_co[:cloth.v_count] = cloth.select_start 686 | cloth.sc_co[cloth.v_count:] = cloth.co 687 | self.fco = cloth.co 688 | 689 | tris_six[:, :3] = cloth.select_start[cloth.tridex] 690 | tris_six[:, 3:] = cloth.co[cloth.tridex] 691 | 692 | M = cloth.ob.MC_props.self_collide_margin 693 | cloth.surface_offset_tris[:, 0] = (cloth.co - (cloth.sc_normals * M))[cloth.tridex] 694 | cloth.surface_offset_tris[:, 1] = (cloth.co + (cloth.sc_normals * M))[cloth.tridex] 695 | # ----------------------- 696 | 697 | self.has_col = False 698 | 699 | #self.indexer = cloth.sc_indexer 700 | 701 | self.box_max = cloth.ob.MC_props.sc_box_max 702 | 703 | self.M = cloth.ob.MC_props.self_collide_margin 704 | self.force = cloth.ob.MC_props.self_collide_force 705 | 706 | self.tris = tris_six 707 | self.edges = cloth.sc_co[cloth.sc_edges] 708 | self.big_boxes = [] # boxes that still need to be divided 709 | self.small_boxes = [] # finished boxes less than the maximum box size 710 | 711 | # debug stuff 712 | self.sel = False 713 | #self.sel = True 714 | self.report = False 715 | #self.report = True 716 | if self.report: 717 | self.select_counter = np.zeros(cloth.sc_eidx.shape[0], dtype=np.int32) 718 | if self.sel: 719 | if self.ob.data.is_editmode: 720 | self.obm = bmesh.from_edit_mesh(self.ob.data) 721 | else: 722 | self.obm = bmesh.new() 723 | self.obm.from_mesh(self.ob.data) 724 | self.obm.edges.ensure_lookup_table() 725 | self.obm.verts.ensure_lookup_table() 726 | self.obm.faces.ensure_lookup_table() 727 | 728 | # store sets of edge and tris to check 729 | #self.trs = np.empty((0), dtype=np.int32) 730 | #self.ees = np.empty((0), dtype=np.int32) 731 | self.trs = [] 732 | self.ees = [] 733 | 734 | 735 | def detect_collisions(cloth): 736 | 737 | sc = SelfCollide(cloth) 738 | t = time.time() 739 | 740 | self_collisions_7(sc, sc.M, cloth) 741 | 742 | ray_check_oc(sc, sc.ees, sc.trs, cloth) 743 | 744 | if sc.report: 745 | print(sc.box_max, "box max") 746 | print(np.sum(sc.select_counter > 1), ": In too many boxes") 747 | print(np.max(sc.select_counter), "max times and edge was selected") 748 | print(time.time() - t) 749 | 750 | if sc.sel: 751 | if ob.data.is_editmode: 752 | bmesh.update_edit_mesh(ob.data) 753 | 754 | ob.data.update() 755 | 756 | 757 | ''' 758 | 759 | 760 | 761 | ''' 762 | 763 | 764 | 765 | ''' 766 | Might want to look into skipping 767 | the bounds check. Just use smaller boxes 768 | and go straight to check every pair for 769 | which side of tri... 770 | 771 | so I group the tris in sets of 6. 772 | the start and end tri. 773 | I get the bounds from that. 774 | So bounding boxes will be around 775 | 776 | a tri is like Nx3x3 777 | a moving tri is like Nx6x3 778 | I still get the bounds from axis 1 779 | 780 | I can use the start and end of each point 781 | just like an edge. Should be able 782 | to set up phony edge indexing and 783 | use the exact same system. 784 | Should work all the way until raycast. 785 | 786 | once we get to raycast... 787 | were dealing with edges that are the 788 | start and end of moving points 789 | and two tris that ar the start and 790 | end of a moving tri. 791 | I can check what side Im on 792 | in the beginning by getting the 793 | dot of the point origin and the normal 794 | for the start edge and the start tri... 795 | If I get what side the end tri and end 796 | point are on and its different I could 797 | then do a bary check... 798 | 799 | Should I bary check both points? 800 | drop to plane with first tri? 801 | drop to plane with second tri? 802 | bary check without dropping to plane? 803 | check 1st tri without dropping to plane? 804 | check 2nd tri without dropping to plane? 805 | 806 | ''' 807 | -------------------------------------------------------------------------------- /ModelingClothNewFeatures.py: -------------------------------------------------------------------------------- 1 | """ New Features: """ 2 | # someone requested a feature to let the settings apply to all selected cloth objects. It would have to be in a preference setting. 3 | # so probably a user preferences section is in order. The preferences need to save to the addon 4 | # directory so when blender loads a new file the settings come in. 5 | 6 | # pause button using space bar or something with modal grab 7 | # cache file option 8 | # awesome sew doesn't care how many verts (target a location based on a percentage of the edge) 9 | # Could I pull the normals off a normal map and add them to the bend springs for adding wrinkles? 10 | # For adding wrinkles maybe I can come up with a clever way to put them into the bend springs. 11 | # Could even create a paint modal tool that expands the source where the user paints to create custom wrinkles. 12 | # Wrinkle brush could take into account stroke direction, or could grow in all directions. 13 | # Crease brush would be different making wrinkles more like what you would need to iron out. 14 | 15 | # could have a button to switch modes from animate to model. Model mode would turn down velocity and such. 16 | # could even have stored user settings 17 | 18 | 19 | # Target: 20 | # Could make the cloth always read from the source shape key 21 | # and just update target changes to the source shape. 22 | 23 | # Bugs (not bunny) 24 | # Don't currently have a way to update settings on duplicated objects. 25 | # !! could load a separate timer that both updates cloth objects when 26 | # loading a saved file and updates duplicates that have cloth properties 27 | # I think it would load whenever blender reopens if the module is registered 28 | # I'll have to see if I need to regen springs or if the global variable is overwritten every time 29 | 30 | 31 | """ create my own pin function that writes to the vertex group 32 | so that I don't have to pop in and out of edit mode """ 33 | 34 | """ create my own button to switch between source and current """ 35 | 36 | """ can't currently run more than one object at a time """ 37 | 38 | 39 | # -------------------------------- START save states 40 | # Save current state will let you create a shape key for each time you like your cloth settings. 41 | # It will need to respect armature or other deforms so we'll have to think about the modifier stack and so on. 42 | # maybe create the little arrows in the ui to let you move up and down through your saved state. 43 | # Name each shape key with something that let's the UI know which keys to scroll through. 44 | # !!! Will need the modeling cloth to switch to your current selected state instead of writing to the modeling cloth key!!! 45 | # Need to think about which modifiers to turn on and off here... Anything that changes the vert count has to go. 46 | # Might be able to check which modifiers have the "apply as shape" option. 47 | # Blender might have already sorted mods that change vertex counts in this way 48 | 49 | def soft_grab(cloth): 50 | """ 51 | uses various falloff curves to grab points in the cloth. 52 | As the size of the area increases the points around the selection are 53 | expanded. The distance along the surface is then measured to apply grab 54 | motion with the appropriate level of force/falloff for the distance from the selected point. 55 | Needs to work with hooks such that each hook can have a falloff setting. This way 56 | You can animate hooks to behave more like fingers pusing or pulling the cloth 57 | instead of just a single point or a selection area behaving as if rigid. 58 | """ 59 | 60 | 61 | 62 | 63 | 64 | def apply_shape(ob, modifier_name='Cloth', update_existing_key=False, keep=['Cloth'], key_name='Cloth'): 65 | """Apply modifier as shape without using bpy.ops. 66 | Does not apply modifiers. 67 | Mutes modifiers not listed in 'keep.' 68 | Using update allows writing to an existing shape_key.""" 69 | 70 | def turn_off_modifier(modifier, on_off=False): 71 | modifier.show_viewport = on_off 72 | 73 | mod_states = [mod.show_viewport for mod in ob.modifiers] 74 | [turn_off_modifier(mod, False) for mod in ob.modifiers if mod.name not in keep] 75 | 76 | dg = bpy.context.evaluated_depsgraph_get() 77 | proxy = ob.evaluated_get(dg) 78 | co = get_co(proxy) 79 | 80 | if update_existing_key: 81 | key = ob.data.shape_keys.key_blocks[key_name] 82 | else: 83 | key = new_shape_key(ob, name=key_name, arr=None, value=0) 84 | 85 | key.data.foreach_set('co', co.ravel()) 86 | 87 | for i, j in zip(mod_states, ob.modifiers): 88 | j.show_viewport = i 89 | 90 | return key 91 | # -------------------------------- END save states 92 | -------------------------------------------------------------------------------- /barycentric_stuff.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import numpy as np 3 | from numpy import newaxis as nax 4 | import bmesh 5 | import time 6 | 7 | def cpoe(p, e1, e2): 8 | ev = e2 - e1 9 | pv = p - e1 10 | d = np.dot(ev, pv) / np.dot(ev,ev) 11 | cp = e1 + ev * d 12 | return cp, d 13 | 14 | 15 | def inside_tri_wa(tris, points, norms=None): 16 | """use weighted average to check inside tri. 17 | Might be able to modify to work with more than 18 | three points for surface follow to more than three""" 19 | 20 | # drop points to planes 21 | origins = tris[:, 0] 22 | cross_vecs = origins[:, nax] - tris[:, 1:] 23 | if norms is None: 24 | norms = np.cross(cross_vecs[:,0], cross_vecs[:, 1]) 25 | pv = points - origins 26 | d = np.einsum('ij,ij->i', norms, pv) / np.einsum('ij,ij->i', norms, norms) 27 | cpv = norms * d[:, nax] 28 | on_p = points - cpv 29 | 30 | # get weights 31 | weights = np.linalg.solve(np.swapaxes(tris, 1,2), on_p) 32 | inside = np.any(weights < 0, axis=1) 33 | 34 | 35 | def inside_triangles(tris, points, check=True): 36 | 37 | origins = tris[:, 0] 38 | cross_vecs = tris[:, 1:] - origins[:, nax] 39 | v2 = points - origins 40 | 41 | # --------- 42 | v0 = cross_vecs[:,0] 43 | v1 = cross_vecs[:,1] 44 | 45 | d00_d11 = np.einsum('ijk,ijk->ij', cross_vecs, cross_vecs) 46 | d00 = d00_d11[:,0] 47 | d11 = d00_d11[:,1] 48 | d01 = np.einsum('ij,ij->i', v0, v1) 49 | d02 = np.einsum('ij,ij->i', v0, v2) 50 | d12 = np.einsum('ij,ij->i', v1, v2) 51 | 52 | div = 1 / (d00 * d11 - d01 * d01) 53 | u = (d11 * d02 - d01 * d12) * div 54 | v = (d00 * d12 - d01 * d02) * div 55 | 56 | weights = np.array([1 - (u+v), u, v, ]) 57 | if not check: 58 | return weights.T 59 | 60 | check = np.all(weights > 0, axis=0) 61 | # check if bitwise is faster when using lots of tris 62 | if False: 63 | check = (u > 0) & (v > 0) & (u + v < 1) 64 | 65 | return weights.T, check 66 | 67 | 68 | def closest_point_mesh(obm, edit_obj, target): 69 | """Uses bmesh method to get CPM""" 70 | context = bpy.context 71 | scene = context.scene 72 | 73 | me = edit_obj.data 74 | mesh_objects = [target] 75 | bm = bmesh.new() 76 | 77 | smwi = target.matrix_world.inverted() 78 | bm.from_mesh(me) 79 | bm.verts.ensure_lookup_table() 80 | vert = bm.verts[0] 81 | for vert in bm.verts: 82 | v1 = edit_obj.matrix_world @ vert.co # global face median 83 | local_pos = smwi @ v1 # face cent in target local space 84 | 85 | (hit, loc, norm, face_index) = target.closest_point_on_mesh(local_pos) 86 | if hit: 87 | v2 = target.matrix_world @ loc 88 | bpy.ops.object.empty_add(location=v2) 89 | #print(target.name, (v2 - v1).length) 90 | #print(face_index) 91 | bm.clear() 92 | 93 | if False: 94 | ob = bpy.context.object 95 | target = bpy.data.objects['target'] 96 | obm = bmesh.new() 97 | obm.from_mesh(ob.data) 98 | print(obm.verts) 99 | closest_point_mesh(obm, ob, target) 100 | 101 | 102 | 103 | tri = bpy.data.objects['tri'] 104 | tri2 = bpy.data.objects['tri2'] 105 | tri3 = bpy.data.objects['t3'] 106 | tri4 = bpy.data.objects['t4'] 107 | e = bpy.data.objects['e'] 108 | e2 = bpy.data.objects['e2'] 109 | 110 | 111 | def wab(point, tri): 112 | print() 113 | print() 114 | co = np.array([v.co for v in tri.data.vertices]) 115 | co2 = np.array([v.co for v in tri2.data.vertices]) 116 | co3 = np.array([v.co for v in tri3.data.vertices]) 117 | co4 = np.array([v.co for v in tri4.data.vertices]) 118 | # it's going to be each vector * something... 119 | 120 | weightss = np.array([7, 1, 1]) 121 | 122 | norm = weightss/np.sum(weightss) 123 | 124 | balance = co * norm[:, nax] 125 | bpy.data.objects['eee'].location = np.sum(balance, axis=0) 126 | eco = np.array(e.location) 127 | e2co = np.array(e2.location) 128 | 129 | 130 | tris = np.array([co, co2, co3, co4]) 131 | points = np.array([e2co, e2co, eco, eco]) 132 | 133 | #inside_tri_wa(tris2, points2, norms=None) 134 | weights, check = inside_triangles(tris, points) 135 | 136 | bpy.data.objects['s'].location = np.sum(tris[0] * weights[0][:, nax], axis=0) 137 | 138 | 139 | 140 | def get_weights(ob, group): 141 | idx = ob.vertex_groups[group].index 142 | w = np.array([v.groups[idx].weight for v in ob.data.vertices]) 143 | 144 | 145 | 146 | 147 | def apply_weights(ob, points, weights, tris, normals): 148 | """ 149 | ob: the blender object 150 | points: a bool array of the points we're moving 151 | weights: barycentric weigts 152 | tris: the faces we parented to 153 | normals: the distance from those faces 154 | """ 155 | 156 | 157 | 158 | 159 | 160 | # divide the verts that need to follow into a vertex group 161 | # This way we can pull from the blender object. 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | wab(e, tri) 170 | 171 | def get_co_with_modifiers(ob, types=[], names=[], include_mesh=False): 172 | """Get the coordinates of modifiers with 173 | specific modifiers turned on or off. 174 | List mods by type or name. 175 | This lets you turn off all mods of a type 176 | or just turn off by name.""" 177 | 178 | debug = True 179 | if debug: 180 | # verify modifier names and types 181 | mod_types = [mod.type for mod in ob.modifiers] 182 | mod_names = [mod.name for mod in ob.modifiers] 183 | # if the arg names ar not right return 184 | type_check = np.all(np.in1d(types, mod_types)) 185 | name_check = np.all(np.in1d(names, mod_names)) 186 | 187 | if not (type_check & name_check): 188 | print("!!! Warning. Mods not listed correctly !!!") 189 | print("!!! Warning. Mods not listed correctly !!!") 190 | return 191 | 192 | # save mod states for reset 193 | mod_states = [mod.show_viewport for mod in ob.modifiers] 194 | 195 | def turn_off_modifier(modifier): 196 | modifier.show_viewport = False 197 | 198 | [turn_off_modifier(mod) for mod in ob.modifiers if mod.name in names] 199 | [turn_off_modifier(mod) for mod in ob.modifiers if mod.type in types] 200 | 201 | # get the coordinates with the current modifier state 202 | dg = bpy.context.evaluated_depsgraph_get() 203 | proxy = ob.evaluated_get(dg) 204 | co = get_co(proxy) 205 | 206 | for i, j in zip(mod_states, ob.modifiers): 207 | j.show_viewport = i 208 | 209 | if include_mesh: 210 | return co, proxy.data 211 | 212 | return co 213 | 214 | 215 | 216 | 217 | 218 | so first I get a selection set by doing something like if i.select 219 | -------------------------------------------------------------------------------- /bend_solver.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import bmesh 3 | import numpy as np 4 | 5 | 6 | # universal --------------------- 7 | def get_obm(ob): 8 | """gets bmesh in editmode or object mode""" 9 | ob.update_from_editmode() 10 | obm = bmesh.new() 11 | obm.from_mesh(ob.data) 12 | return obm 13 | 14 | 15 | # universal --------------------- 16 | def get_bary_weights(tris, points): 17 | """Find barycentric weights for triangles. 18 | Tris is a Nx3x3 set of triangle coords. 19 | points is the same N in Nx3 coords""" 20 | origins = tris[:, 0] 21 | cross_vecs = tris[:, 1:] - origins[:, None] 22 | v2 = points - origins 23 | 24 | # --------- 25 | v0 = cross_vecs[:,0] 26 | v1 = cross_vecs[:,1] 27 | 28 | d00_d11 = np.einsum('ijk,ijk->ij', cross_vecs, cross_vecs) 29 | d00 = d00_d11[:,0] 30 | d11 = d00_d11[:,1] 31 | d01 = np.einsum('ij,ij->i', v0, v1) 32 | d02 = np.einsum('ij,ij->i', v0, v2) 33 | d12 = np.einsum('ij,ij->i', v1, v2) 34 | 35 | div = 1 / (d00 * d11 - d01 * d01) 36 | u = (d11 * d02 - d01 * d12) * div 37 | v = (d00 * d12 - d01 * d02) * div 38 | 39 | weights = np.array([1 - (u+v), u, v, ]).T 40 | return weights 41 | 42 | 43 | # universal --------------------- 44 | def get_co_shape(ob, key=None, ar=None): 45 | """Get vertex coords from a shape key""" 46 | if ar is not None: 47 | ob.data.shape_keys.key_blocks[key].data.foreach_get('co', ar.ravel()) 48 | return ar 49 | c = len(ob.data.vertices) 50 | ar = np.empty((c, 3), dtype=np.float32) 51 | ob.data.shape_keys.key_blocks[key].data.foreach_get('co', ar.ravel()) 52 | return ar 53 | 54 | 55 | # universal --------------------- 56 | def get_poly_centers(ob, co, data=None): 57 | """Get poly centers. Data is meant 58 | to be built the first time then 59 | passed in. (dynamic)""" 60 | 61 | if data is not None: 62 | data[0][:] = 0 63 | np.add.at(data[0], data[2], co[data[3]]) 64 | data[0] /= data[1][:, None] 65 | return data[0] 66 | 67 | pc = len(ob.data.polygons) 68 | pidex = np.hstack([[v for v in p.vertices] for p in ob.data.polygons]) 69 | 70 | div = [len(p.vertices) for p in ob.data.polygons] 71 | 72 | indexer = [] 73 | for i, j in enumerate(div): 74 | indexer += [i] * j 75 | div = np.array(div, dtype=np.float32) 76 | 77 | centers = np.zeros((pc, 3), dtype=np.float32) 78 | 79 | np.add.at(centers, indexer, co[pidex]) 80 | centers /= div[:, None] 81 | 82 | return [centers, div, indexer, pidex] 83 | 84 | 85 | # universal --------------------- 86 | def pairs_idx(ar): 87 | """Eliminates duplicates and mirror duplicates. 88 | for example, [1,4], [4,1] or duplicate occurrences of [1,4] 89 | Returns ar (array) and the index that removes the duplicates.""" 90 | # no idea how this works (probably sorcery) but it's really fast 91 | a = np.sort(ar, axis=1) # because it only sorts on the second acess the index still matches other arrays. 92 | #x = np.random.rand(a.shape[1]) 93 | x = np.linspace(1, 2, num=a.shape[1]) 94 | y = a @ x 95 | unique, index = np.unique(y, return_index=True) 96 | return a[index], index 97 | # # 98 | # # 99 | # --------------------end universal---------------------- # 100 | 101 | 102 | # precalculated ------------------------ 103 | def get_j_surface_offset(cloth): 104 | """Get the vecs to move the plotted 105 | wieghts off the surface.""" 106 | 107 | ax = cloth.j_axis_vecs 108 | ce = cloth.j_ce_vecs # has the faces swapped so the normal corresponds to the other side of the axis 109 | cross = np.cross(ax, ce) 110 | 111 | cloth.j_normals = cross / np.sqrt(np.einsum('ij,ij->i', cross, cross))[:, None] 112 | cloth.plot_normals = cloth.j_normals[cloth.j_tiler] 113 | 114 | cloth.plot_vecs = cloth.sco[cloth.swap_jpv] - cloth.j_plot 115 | cloth.plot_dots = np.einsum('ij,ij->i', cloth.plot_normals, cloth.plot_vecs)[:, None] 116 | 117 | 118 | # dynamic ------------------------------ 119 | def measure_linear_bend(cloth): 120 | """Takes a set of coords and an edge idx and measures segments""" 121 | l = cloth.sp_ls # left side of the springs (Full moved takes the place of the right side) 122 | v = cloth.full_moved - cloth.co[l] 123 | d = np.einsum("ij ,ij->i", v, v) 124 | return v, d, np.sqrt(d) 125 | 126 | 127 | # dynamic ------------------------------ 128 | def get_eq_tri_tips(cloth, co, centers, skip=False): 129 | """Slide the centers of each face along 130 | the axis until it's in the middle for 131 | using as a triangle. (dynamic)""" 132 | 133 | skip = True # set to false to use eq tris. 134 | if skip: # skip will test if it really makes any difference to move the tris to the center 135 | cloth.j_axis_vecs = co[cloth.stacked_edv[:,1]] - co[cloth.stacked_edv[:,0]] 136 | cloth.j_tips = centers[cloth.stacked_faces] 137 | cloth.j_ce_vecs = centers[cloth.stacked_faces] - co[cloth.stacked_edv[:,0]] 138 | return cloth.j_tips, cloth.j_axis_vecs, cloth.j_ce_vecs 139 | 140 | # creates tris from center and middle of edge. 141 | # Not sure if it makes any difference... 142 | j_axis_vecs = co[cloth.stacked_edv[:,1]] - co[cloth.stacked_edv[:,0]] 143 | j_axis_dots = np.einsum('ij,ij->i', j_axis_vecs, j_axis_vecs) 144 | j_ce_vecs = centers[cloth.stacked_faces] - co[cloth.stacked_edv[:,0]] 145 | cloth.swap_ce_vecs = centers[cloth.swap_faces] - co[cloth.stacked_edv[:,0]] 146 | j_cea_dots = np.einsum('ij,ij->i', j_axis_vecs, j_ce_vecs) 147 | 148 | j_div = j_cea_dots / j_axis_dots 149 | j_spit = j_axis_vecs * j_div[:,None] 150 | 151 | j_cpoe = co[cloth.stacked_edv[:,0]] + j_spit 152 | jt1 = centers[cloth.stacked_faces] - j_cpoe 153 | j_mid = co[cloth.stacked_edv[:,0]] + (j_axis_vecs * 0.5) 154 | 155 | cloth.j_tips = j_mid + jt1 156 | cloth.j_axis_vecs = j_axis_vecs 157 | cloth.j_ce_vecs = j_ce_vecs 158 | # --------------------- 159 | return cloth.j_tips, cloth.j_axis_vecs, cloth.j_ce_vecs 160 | 161 | 162 | # precalculated ------------------------ 163 | def eq_bend_data(cloth): 164 | """Generates face pairs around axis edges. 165 | Supports edges with 2-N connected faces. 166 | Can use internal structures this way.""" 167 | ob = cloth.ob 168 | obm = get_obm(ob) 169 | sco = cloth.sco 170 | 171 | # eliminate sew edges and outer edges: 172 | ed = [e for e in obm.edges if len(e.link_faces) > 1] 173 | 174 | # remove based on zero weight vertex group 175 | 176 | ''' 177 | if we're using it in modeling cloth we want 178 | a vertex group to define bend stiffness. 179 | This means everything will be in the group 180 | when it's turned to a cloth object 181 | It's default weight will be zero 182 | We have to check for a weight of more than 183 | zero to know if we should include it 184 | in the edges. 185 | 186 | So: 187 | 188 | 189 | ''' 190 | 191 | 192 | 193 | 194 | first_row = [] 195 | e_tiled = [] 196 | f_ls = [] 197 | f_rs = [] 198 | for e in ed: 199 | ls = [] 200 | for f in e.link_faces: 201 | otf = [lf for lf in e.link_faces if lf != f] 202 | for lf in otf: 203 | f_ls += [f.index] 204 | f_rs += [lf.index] 205 | e_tiled += [e.index] 206 | 207 | shape1 = len(f_ls) 208 | paired = np.empty((shape1, 2), dtype=np.int32) 209 | paired[:, 0] = f_ls 210 | paired[:, 1] = f_rs 211 | 212 | # faces grouped left and right 213 | cloth.face_pairs, idx = pairs_idx(paired) 214 | cloth.stacked_faces = cloth.face_pairs.T.ravel() 215 | jfps = cloth.stacked_faces.shape[0] 216 | 217 | # swap so we get wieghts from tris opposite axis 218 | cloth.swap_faces = np.empty(jfps, dtype=np.int32) 219 | cloth.swap_faces[:jfps//2] = cloth.face_pairs[:, 1] 220 | cloth.swap_faces[jfps//2:] = cloth.face_pairs[:, 0] 221 | 222 | # remove duplicate pairs so edges match face pairs 223 | tiled_edges = np.array(e_tiled)[idx] 224 | 225 | # v1 and v2 for each face pair (twice as many faces because each pair shares an edge) 226 | obm.edges.ensure_lookup_table() 227 | cloth.edv = np.array([[obm.edges[e].verts[0].index, 228 | obm.edges[e].verts[1].index] 229 | for e in tiled_edges], dtype=np.int32) 230 | 231 | shape = cloth.edv.shape[0] 232 | cloth.stacked_edv = np.tile(cloth.edv.ravel(), 2) 233 | cloth.stacked_edv.shape = (shape * 2, 2) 234 | 235 | 236 | def get_poly_vert_tilers(cloth): 237 | """Get an index to tile the left and right sides. 238 | ls and rs is based on the left and right sides of 239 | the face pairs.""" 240 | 241 | cloth.swap_jpv = [] 242 | cloth.jpv_full = [] 243 | ob = cloth.ob 244 | 245 | cloth.ab_faces = [] 246 | cloth.ab_edges = [] 247 | 248 | count = 0 249 | for i, j in zip(cloth.swap_faces, cloth.stacked_edv): # don't need to swap edv because both sides share the same edge 250 | 251 | pvs = [v for v in ob.data.polygons[i].vertices] 252 | nar = np.array(pvs) 253 | b1 = nar != j[0] 254 | b2 = nar != j[1] 255 | 256 | nums = np.arange(nar.shape[0]) + count 257 | cloth.ab_faces += nums[b1 & b2].tolist() 258 | cloth.ab_edges += nums[~(b1)].tolist() 259 | cloth.ab_edges += nums[~(b2)].tolist() 260 | 261 | count += nar.shape[0] 262 | r = [v for v in ob.data.polygons[i].vertices if v not in j] 263 | cloth.swap_jpv += r 264 | 265 | for i in cloth.swap_faces: 266 | r = [v for v in ob.data.polygons[i].vertices] 267 | cloth.jpv_full += r 268 | 269 | 270 | def tiled_weights(cloth): 271 | """Tile the tris with the polys for getting 272 | barycentric weights""" 273 | 274 | ob = cloth.ob 275 | face_pairs = cloth.face_pairs 276 | 277 | # counts per poly less the two in the edges 278 | cloth.full_counts = np.array([len(p.vertices) for p in ob.data.polygons], dtype=np.int32) 279 | cloth.full_div = np.array(cloth.full_counts, dtype=np.float32)[cloth.swap_faces][:, None] 280 | cloth.plot_counts = cloth.full_counts - 2 # used by plotted centers 281 | 282 | # joined: 283 | jfps = cloth.stacked_faces.shape[0] 284 | 285 | jsc = cloth.plot_counts[cloth.swap_faces] 286 | cloth.j_tiler = np.hstack([[i] * jsc[i] for i in range(jfps)]) 287 | cloth.js_tris = cloth.j_tris[cloth.j_tiler] 288 | 289 | jscf = cloth.full_counts[cloth.swap_faces] 290 | cloth.ab_tiler = np.hstack([[i] * jscf[i] for i in range(jfps)]) 291 | cloth.sp_ls = np.hstack([[v for v in cloth.ob.data.polygons[f].vertices] for f in cloth.swap_faces]) 292 | cloth.sp_rs = np.arange(cloth.sp_ls.shape[0]) 293 | 294 | 295 | def triangle_data(cloth): 296 | 297 | sco = cloth.sco 298 | edv = cloth.edv 299 | 300 | # joined tris: 301 | j_tris = np.zeros((cloth.j_tips.shape[0], 3, 3), dtype=np.float32) 302 | j_tris[:, :2] = sco[cloth.stacked_edv] 303 | j_tris[:, 2] = cloth.j_tips 304 | cloth.j_tris = j_tris 305 | #cloth.js_tris = j_tris 306 | #----------------- 307 | 308 | # get the tilers for creating tiled weights 309 | tiled_weights(cloth) 310 | 311 | jw = get_bary_weights(cloth.js_tris, sco[cloth.swap_jpv]) 312 | cloth.j_plot = np.sum(cloth.js_tris * jw[:,:,None], axis=1) 313 | get_j_surface_offset(cloth) 314 | cloth.jw = jw 315 | 316 | 317 | def linear_bend_set(cloth): 318 | 319 | cloth.bend_stretch_array = np.zeros(cloth.co.shape[0], dtype=np.float32) 320 | ab = np.array(cloth.jpv_full) 321 | 322 | springs = [] 323 | vc = len(cloth.ob.data.vertices) 324 | for i in range(vc): 325 | w = np.where(i == ab)[0] 326 | for j in w: 327 | springs.append([i,j]) 328 | 329 | cloth.linear_bend_springs = np.array(springs, dtype=np.int32) 330 | cloth.bend_v_fancy = cloth.linear_bend_springs[:,0] 331 | 332 | 333 | def ab_setup(cloth): 334 | cloth.ab_centers = np.empty((cloth.stacked_faces.shape[0], 3), dtype=np.float32) 335 | cloth.ab_coords = np.empty((len(cloth.jpv_full), 3), dtype=np.float32) 336 | 337 | 338 | def dynamic(cloth): 339 | 340 | # get centers from MC_current 341 | centers = get_poly_centers(cloth.ob, cloth.co, cloth.center_data) 342 | co = cloth.co 343 | 344 | #cloth.j_tris[:] = 0 345 | cloth.j_tris[:, :2] = co[cloth.stacked_edv] 346 | tips, ax, ce = get_eq_tri_tips(cloth, co, centers, skip=False) 347 | cloth.j_tris[:, 2] = tips 348 | 349 | jw = cloth.jw 350 | j_plot = np.sum(cloth.j_tris[cloth.j_tiler] * jw[:,:,None], axis=1) 351 | 352 | # for just flattening 353 | final_plot = j_plot 354 | flat = False 355 | if not flat: 356 | cross = np.cross(ax, ce) 357 | normals = cross / np.sqrt(np.einsum('ij,ij->i', cross, cross))[:, None] 358 | plot_normals = normals[cloth.j_tiler] 359 | final_plot = j_plot + (plot_normals * cloth.plot_dots) 360 | 361 | # get centers from plot 362 | cloth.ab_centers[:] = 0 363 | cloth.ab_centers += co[cloth.stacked_edv[:, 0]] 364 | cloth.ab_centers += co[cloth.stacked_edv[:, 1]] 365 | np.add.at(cloth.ab_centers, cloth.j_tiler, final_plot) 366 | 367 | cloth.ab_centers /= cloth.full_div 368 | 369 | c_vecs = centers[cloth.swap_faces] - cloth.ab_centers 370 | 371 | cloth.ab_coords[cloth.ab_faces] = final_plot 372 | cloth.ab_coords[cloth.ab_edges] = cloth.co[cloth.stacked_edv.ravel()] 373 | 374 | full_moved = cloth.ab_coords + c_vecs[cloth.ab_tiler] 375 | 376 | cloth.full_moved = full_moved 377 | 378 | 379 | def bend_setup(cloth): 380 | cloth.center_data = get_poly_centers(cloth.ob, cloth.sco, data=None) 381 | cloth.source_centers = np.copy(cloth.center_data[0]) # so we can overwrite the centers array when dynamic 382 | eq_bend_data(cloth) 383 | get_poly_vert_tilers(cloth) 384 | get_eq_tri_tips(cloth, cloth.sco, cloth.source_centers) 385 | triangle_data(cloth) 386 | ab_setup(cloth) 387 | linear_bend_set(cloth) 388 | 389 | 390 | def abstract_bend(cloth): 391 | dynamic(cloth) 392 | 393 | stretch = cloth.ob.MC_props.bend * .3 394 | basic_set = cloth.linear_bend_springs 395 | basic_v_fancy = cloth.sp_ls 396 | stretch_array = cloth.bend_stretch_array 397 | 398 | # (current vec, dot, length) 399 | cv, cd, cl = measure_linear_bend(cloth) # from current cloth state 400 | move_l = cl * stretch 401 | 402 | # mean method ------------------- 403 | cloth.bend_stretch_array[:] = 0.0 404 | 405 | #rock_hard_abs = np.abs(move_l) 406 | np.add.at(cloth.bend_stretch_array, basic_v_fancy, move_l) 407 | weights = move_l / cloth.bend_stretch_array[basic_v_fancy] 408 | # mean method ------------------- 409 | 410 | # apply forces ------------------ 411 | 412 | move = cv * (move_l / cl)[:,None] 413 | move *= weights[:,None] 414 | np.add.at(cloth.co, basic_v_fancy, np.nan_to_num(move)) 415 | 416 | 417 | 418 | print() 419 | print("------------------ new eq ------------------") 420 | 421 | class Cloth: 422 | pass 423 | 424 | cloth = Cloth() 425 | cloth.ob = bpy.data.objects['nn'] 426 | cloth.ob = bpy.context.object 427 | #cloth.ob = bpy.data.objects['nnn'] 428 | cloth.co = get_co_shape(cloth.ob, key='MC_current', ar=None) 429 | cloth.sco = get_co_shape(cloth.ob, key='MC_source', ar=None) 430 | 431 | bend_setup(cloth) 432 | 433 | abstract_bend(cloth) 434 | 435 | 436 | cloth.ob.data.shape_keys.key_blocks['MC_current'].data.foreach_set('co', cloth.co.ravel()) 437 | cloth.ob.data.update() 438 | -------------------------------------------------------------------------------- /collision.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Nov 24 11:39:26 2015 4 | 5 | @author: rich.colburn 6 | """ 7 | import bpy 8 | import rotate_methods 9 | import mesh_info 10 | import numpy as np 11 | 12 | def get_triangle_normals(tr_co): 13 | '''Takes N x 3 x 3 set of 3d triangles and 14 | returns non-unit normals and origins''' 15 | origins = tr_co[:,0] 16 | expanded = np.expand_dims(origins, axis=1) 17 | cross_vecs = tr_co[:,1:] - expanded 18 | return np.cross(cross_vecs[:,0], cross_vecs[:,1]), origins 19 | 20 | def get_face_normals(tri_coords): 21 | '''does the same as get_triangle_normals 22 | but I need to compare their speed''' 23 | t0 = tri_coords[:, 0] 24 | t1 = tri_coords[:, 1] 25 | t2 = tri_coords[:, 2] 26 | return np.cross(t1 - t0, t2 - t0) 27 | 28 | def edge_edge_intersect_2d(a1,a2, b1,b2, intersect=False): 29 | '''simple 2d line intersect''' 30 | da = a2-a1 31 | db = b2-b1 32 | dp = a1-b1 33 | dap = da[::-1] * np.array([1,-1]) 34 | denom = np.dot(dap, db) 35 | num = np.dot(dap, dp) 36 | scale = (num / denom) 37 | if intersect: 38 | return b1 + db * scale, (scale > 0) & (scale < 1) 39 | else: 40 | return b1 + db * scale 41 | 42 | def edges_edges_intersect_2d(a1,a2, b1,b2, intersect=False): 43 | '''simple 2d line intersect''' 44 | da = a2-a1 45 | db = b2-b1 46 | dp = a1-b1 47 | dap = da[:, ::-1] * np.array([1,-1]) 48 | denom = np.einsum('ij,ij->i', dap, db) 49 | num = np.einsum('ij,ij->i', dap, dp) 50 | scale = (num / denom) 51 | if intersect: 52 | return b1 + db * scale[:, None], (scale > 0) & (scale < 1) 53 | else: 54 | return b1 + db * scale[:, None] 55 | 56 | 57 | def edge_edges_intersect_2d(edges1, edges2, vec1, vec2): 58 | '''Vector intersecting multiple edges in 2d 59 | requires N x 2 sets of edges and xy for vec1 and vec2. 60 | Returns the locations where intersections occur inside 61 | both the line and the edges and the bool arrays for each''' 62 | e_vecs = edges2 - edges1 63 | vec = vec2 - vec1 64 | vec_ori = vec1 - edges1 65 | vec_perp = vec[[1, 0]] * np.array([1,-1]) 66 | denom = np.einsum('j,ij->i', vec_perp, e_vecs) 67 | num = np.einsum('j,ij->i', vec_perp, vec_ori) 68 | scale = (num / denom) 69 | scale_bool = (scale > 0) & (scale < 1) 70 | locations = edges1[scale_bool] + e_vecs[scale_bool] * np.expand_dims(scale[scale_bool], axis=1) 71 | dif = locations - vec1 72 | lvd = np.einsum('j,ij->i', vec, dif) 73 | d = np.dot(vec, vec) 74 | div = lvd / d 75 | check2 = (div > 0) & (div < 1) 76 | return locations[check2], scale_bool, check2 77 | 78 | def cpoe(p, e1, e2): 79 | ev = e2 - e1 80 | pv = p - e1 81 | d = np.dot(ev, pv) / np.dot(ev,ev) 82 | cp = e1 + ev * d 83 | return cp, d 84 | 85 | def barycentric_remap(p, s, t): 86 | '''Takes the position of the point relative to the first triangle 87 | and returns the point relative to the second triangle. 88 | p, s, t = point, source tri, target tri''' 89 | cp1, d1 = cpoe(p, s[0], s[1]) 90 | t1 = (t[1] - t[0]) * d1 91 | cp2, d2 = cpoe(p, s[0], s[2]) 92 | t2 = (t[2] - t[0]) * d2 93 | cp3, d3 = cpoe(p, cp1, cp2) 94 | t3 = (t2 - t1) * d3 95 | cp4, d4 = cpoe(s[0], cp1, cp2) 96 | t4 = (t2 - t1) * d4 97 | cp5, d5 = cpoe(p, s[0], cp4 ) 98 | t5 = (t1 + t4) * d5 99 | perp = (t5) - (t1 + t4) 100 | return t[0] + t1 + t3 + perp 101 | 102 | 103 | # !!! hey! I might be able to use this for edge intersections in the cloth engine. 104 | def edge_to_edge(e1, e2, e3, e4 ): 105 | '''Takes two edges defined by four vectors. 106 | Returns the two points that describe the shortest 107 | distance between the two edges. The two points comprise 108 | a segment that is orthagonal to both edges. Think of it 109 | as an edge intersect.''' 110 | v1 = e2 - e1 111 | v2 = e3 - e4 112 | v3 = e3 - e1 113 | cross = np.cross(v1, v2) 114 | d = np.dot(v3, cross) / np.dot(cross, cross) 115 | spit = cross * d 116 | cp1 = e1 + spit # spit because if you stand on cp1 and spit this is where it lands. 117 | vec2 = cp1 - e3 118 | d = np.dot(vec2, v2) / np.dot(v2, v2) 119 | nor = v2 * d 120 | cp2 = e3 + nor 121 | normal = cp1 - cp2 122 | or_vec = e1 - cp2 123 | e_dot = np.dot(normal, v1) 124 | e_n_dot = np.dot(normal, or_vec) 125 | scale = e_n_dot / e_dot 126 | p_on_p = (or_vec - v1 * scale) + cp2 127 | return p_on_p, p_on_p + spit 128 | 129 | def edge_to_edges(coords, e1, e2, edges_idx): 130 | '''Takes an edge and finds the vectors orthagonal to it 131 | and a set of edges.''' 132 | ec = coords[edges_idx] 133 | e3 = ec[:,0] 134 | e4 = ec[:,1] 135 | v1 = e2 - e1 136 | v2 = e4 - e3 137 | v3 = e3 - e1 138 | cross = np.cross(v1, v2) 139 | d = np.einsum('ij,ij->i', v3, cross) / np.einsum('ij,ij->i', cross, cross) 140 | spit = cross * np.expand_dims(d, axis=1) #spit is where your spit would land if you stood on v3 and spit 141 | cp1 = e1 + spit 142 | vec2 = cp1 - e3 143 | d2 = np.einsum('ij,ij->i', vec2, v2) / np.einsum('ij,ij->i', v2, v2) 144 | nor = v2 * np.expand_dims(d2, axis=1) 145 | cp2 = e3 + nor 146 | normal = cp1 - cp2 147 | or_vec = e1 - cp2 148 | e_dot = np.einsum('j,ij->i', v1, normal) 149 | e_n_dot = np.einsum('ij,ij->i', normal, or_vec) 150 | scale = e_n_dot / e_dot 151 | p_on_p = (or_vec - v1 * np.expand_dims(scale, axis=1)) + cp2 152 | return p_on_p, p_on_p + spit 153 | 154 | def edges_to_edges(e1, e2, e3, e4 ): 155 | '''Takes two sets of edges defined by four vectors. 156 | Returns the two points that describe the shortest 157 | distance between the two edges. The two points comprise 158 | a segment that is orthagonal to both edges. Think of it 159 | as an edge intersect.''' 160 | v1 = e2 - e1 161 | v2 = e3 - e4 162 | v3 = e3 - e1 163 | cross = np.cross(v1, v2) 164 | #d = np.dot(v3, cross) / np.dot(cross, cross) 165 | d = np.einsum('ij,ij->i',v3, cross) / np.einsum('ij,ij->i', cross, cross) 166 | spit = cross * np.expand_dims(d, axis=1) # spit because if you stand on cp1 and spit this is where it lands. 167 | cp1 = e1 + spit 168 | vec2 = cp1 - e3 169 | d2 = np.einsum('ij,ij->i', vec2, v2) / np.einsum('ij,ij->i', v2, v2) 170 | nor = v2 * np.expand_dims(d2, axis=1) 171 | cp2 = e3 + nor 172 | normal = cp1 - cp2 173 | or_vec = e1 - cp2 174 | e_dot = np.einsum('ij,ij->i', normal, v1) 175 | e_n_dot = np.einsum('ij,ij->i', normal, or_vec) 176 | scale = np.nan_to_num(e_n_dot / e_dot) 177 | p_on_p = (or_vec - v1 * np.expand_dims(scale, axis=1)) + cp2 178 | return p_on_p, p_on_p + spit 179 | 180 | def deflect_ray(e1, e2, normal, origin): 181 | '''Deflects a ray along the surface of the plane defined by the normal 182 | No Unit Vectors Required!! Normal does not requre unit vector!''' 183 | e_vec = e2 - e1 184 | e_dot = np.dot(normal, e_vec) 185 | e_n_dot = np.dot(normal, e1 - origin) 186 | scale = e_n_dot / e_dot 187 | hit = (e1 - e_vec * scale) 188 | v2 = e2 - hit 189 | d = np.dot(v2, normal) / np.dot(normal, normal) 190 | cp = hit + normal * d 191 | deflect = e2 - cp 192 | d1 = np.dot(v2, v2) 193 | d2 = np.dot(deflect, deflect) 194 | div = d1/d2 195 | swap = hit + deflect * np.sqrt(div) # * friction 196 | return swap 197 | 198 | def reflect_ray(e1, e2, normal, origin): 199 | '''plots angle of reflection 200 | No Unit Vectors Required!! Normal does not requre unit vector!''' 201 | e_vec = e2 - e1 202 | e_dot = np.dot(normal, e_vec) 203 | e_n_dot = np.dot(normal, e1 - origin) 204 | scale = e_n_dot / e_dot 205 | hit = (e1 - e_vec * scale) 206 | v2 = e2 - hit 207 | d = np.dot(v2, normal) / np.dot(normal, normal) 208 | cp = hit + normal * -d 209 | deflect = e2 - cp 210 | return cp + deflect 211 | 212 | def reflect_intersected_rays(e1, e2, origin, normal=np.array([0,0,1]), friction=1, sticky_threshold=.001, bounce=0.001): 213 | '''plots angle of reflection''' 214 | # could make friction behave more realistically by setting values below a certain level to zero 215 | e_vec = e2 - e1 216 | e_dot = np.einsum('j,ij->i', normal, e_vec) 217 | e_n_dot = np.einsum('j,ij->i', normal, e1 - origin) 218 | scale = e_n_dot / e_dot 219 | intersect = (scale < 0) & (scale > -1) # screwing with these can lock the points on the wrong side of the plane. 220 | hit = (e1[intersect] - e_vec[intersect] * np.expand_dims(scale[intersect], axis=1)) 221 | v2 = e2[intersect] - hit 222 | d = np.einsum('j,ij->i', normal, v2) / np.dot(normal, normal) 223 | cp = hit + normal * np.expand_dims(d, axis=1) 224 | bp = hit + normal * np.expand_dims(-d, axis=1) * bounce 225 | deflect = e2[intersect] - cp 226 | sticky_check = np.sqrt(np.einsum('ij,ij->i', deflect, deflect)) 227 | stuck = sticky_check < sticky_threshold 228 | deflect[stuck] = np.array([0.0,0.0,0.0]) 229 | reflect = bp + deflect * friction 230 | return reflect, intersect 231 | 232 | def deflect_intersected_rays(e1, e2, origin, normal=np.array([0,0,1]), back_check=0, friction=1): 233 | '''Deflects intersected rays along the surface of the plane defined by the normal 234 | No Unit Vectors Required!! Normal does not requre unit vector! 235 | Returns deflected locations and bool array where intersected. 236 | 'backcheck' will check backwards along the ray by the given amount''' 237 | e_vec = e2 - e1 238 | e_dot = np.einsum('j,ij->i', normal, e_vec) 239 | e_n_dot = np.einsum('j,ij->i', normal, e1 - origin) 240 | scale = e_n_dot / e_dot 241 | intersect = (scale < 0 + back_check) & (scale > -1) 242 | hit = (e1[intersect] - e_vec[intersect] * np.expand_dims(scale[intersect], axis=1)) 243 | v2 = e2[intersect] - hit 244 | d = np.einsum('j,ij->i', normal, v2) / np.dot(normal, normal) 245 | cp = hit + normal * np.expand_dims(d, axis=1) 246 | deflect = e2[intersect] - cp 247 | d1 = np.einsum('ij,ij->i', v2, v2) 248 | d2 = np.einsum('ij,ij->i', deflect, deflect) 249 | div = d1/d2 250 | new_vel = deflect * np.expand_dims(np.sqrt(div), axis=1) * friction 251 | swap = hit + new_vel 252 | return swap, intersect, new_vel 253 | 254 | def closest_point_edge(e1, e2, p): 255 | '''Returns the location of the point on the edge''' 256 | vec1 = e2 - e1 257 | vec2 = p - e1 258 | d = np.dot(vec2, vec1) / np.dot(vec1, vec1) 259 | cp = e1 + vec1 * d 260 | return cp 261 | 262 | def closest_points_edge(e1, e2, p): 263 | '''Returns the location of the points on the edge''' 264 | vec1 = e2 - e1 265 | vec2 = p - e1 266 | d = np.einsum('j,ij->i', vec1, vec2) / np.expand_dims(np.dot(vec1, vec1),axis=1) 267 | cp = e1 + vec1 * np.expand_dims(d, axis=1) 268 | return cp 269 | 270 | def closest_points_edge_no_origin(vec, p): 271 | '''Returns the location of the points on the vector starting at [0,0,0]''' 272 | d = np.einsum('j,ij->i', vec, p) / np.expand_dims(np.dot(vec, vec),axis=1) 273 | cp = vec * np.expand_dims(d, axis=1) 274 | return cp 275 | 276 | def closest_points_edges(edges, points): 277 | '''Takes groups of edges in N x N x 2 x 3 and returns 278 | the location of the points on each of the edges''' 279 | e1 = edges[:,0,:] 280 | e2 = edges[:,1,:] 281 | vec1 = e2 - e1 282 | vec2 = np.expand_dims(points, axis=1) - e1 283 | d = np.einsum('ijk, ijk->ij',vec2, vec1) / np.einsum('ijk, ijk->ij',vec1, vec1) 284 | return e1 + vec1 * np.expand_dims(d, axis=2) 285 | 286 | def closest_point_edges(point, edges, e2='empty', merged_edges=True): 287 | '''Takes groups of edges in N x N x 2 x 3 or two sets of edges 288 | matching N x 3 and returns the location of the point on each of the edges. 289 | If two sets of edges are provided e1 is edges, e2 is e2, merged_edges=False''' 290 | if merged_edges: 291 | e1 = edges[:,0,:] 292 | e2 = edges[:,1,:] 293 | else: 294 | e1 = edges 295 | vec1 = e2 - e1 296 | vec2 = point - e1 297 | d = np.einsum('ij, ij->i',vec2, vec1) / np.einsum('ij, ij->i',vec1, vec1) 298 | return e1 + vec1 * np.expand_dims(d, axis=2) 299 | 300 | def drop_points_to_plane(points, normal=np.array([0,0,1]), origin='empty'): 301 | '''Points is an N x 3 array of vectors. Normal is perpindicular to the 302 | infinite plane the points will be dropped on. returns the points on the 303 | plane. This is the foundation for scaling on a custom axis''' 304 | if len(points) > 0: 305 | if origin != 'empty': 306 | points -= origin 307 | nor_dot = np.dot(normal, normal) 308 | dots = np.einsum('j,ij->i', normal, points) 309 | scale = dots / nor_dot 310 | drop = normal * np.expand_dims(scale, axis=1) 311 | p = points - drop 312 | if origin != 'empty': 313 | p += origin 314 | return p 315 | else: 316 | return None 317 | 318 | def slide_point_to_plane(e1, e2, normal, origin, intersect=False): 319 | '''Ray is defined by e1 and e2. Find where this 320 | ray intersects the plane defined by the normal 321 | and the origin. Normal does NOT need to be unit. 322 | Returns the hit location. If intersect: returns a 323 | tuple including the hit and true or false''' 324 | e_vec = e2 - e1 325 | or_vec = e1 - origin 326 | e_dot = np.dot(normal, e_vec) 327 | e_n_dot = np.dot(normal, or_vec) 328 | scale = e_n_dot / e_dot 329 | if intersect: 330 | return (or_vec - e_vec * scale) + origin, (scale < 0) & (scale > -1) 331 | else: 332 | return (or_vec - e_vec * scale) + origin 333 | 334 | def slide_points_to_plane(e1, e2, origin, normal=np.array([0,0,1])): 335 | '''Takes the start and end of a set of edges as Nx3 vector sets 336 | Returns where they intersect the plane with a bool array for the 337 | edges that pass through the plane''' 338 | e_vecs = e2 - e1 339 | e1or = e1 - origin 340 | edge_dots = np.einsum('j,ij->i', normal, e_vecs) 341 | dots = np.einsum('j,ij->i', normal, e1or) 342 | scale = dots / edge_dots 343 | drop = (e1or - e_vecs * np.expand_dims(scale, axis=1)) + origin 344 | intersect = (scale < 0) & (scale > -1) 345 | return drop, intersect, scale 346 | 347 | def slide_points_to_planes(e1, e2, origins, normals): 348 | '''Takes the start and end of a set of edges as Nx3 vector sets 349 | Returns where they intersect the planes with a bool array for the 350 | edges that pass through the planes. Point count must match triangle 351 | count so tile them in advance.''' 352 | e_vecs = e2 - e1 353 | e1or = e1 - origins 354 | edge_dots = np.einsum('ij,ij->i', normals, e_vecs) 355 | dots = np.einsum('ij,ij->i', normals, e1or) 356 | scale = dots / edge_dots 357 | drop = (e1or - e_vecs * np.expand_dims(scale, axis=1)) + origins 358 | intersect = (scale < 0) & (scale > -1) 359 | return drop, intersect, scale 360 | 361 | def raycast_no_check(ray_start, ray_end, tri_coords): 362 | '''Ray start and end take Nx3 arrays of vectors reperesenting the 363 | start and the end of the rays (or edges). Use mesh_info.triangulate 364 | to get triangulated coords of the mesh with mesh_info.get_coords. 365 | tri_idx but store it instead of recalculating it on iterations. 366 | Returns hit locations for each ray on each triangle with an indexing 367 | array containing the index of the rays that cross the planes inside 368 | the triangles. When a ray crosses more than one triangle, the index 369 | array contains the first triangle intersected.''' 370 | tri_vecs = np.take(tri_coords, [2, 0, 1], axis=1) - tri_coords 371 | origins = tri_coords[:, 0] 372 | rays = ray_end - ray_start 373 | w = np.expand_dims(ray_start, axis=1) - origins #$$ 374 | normals = np.cross(tri_vecs[: ,0], tri_vecs[:, 1]) # if CP is there it's cheaper to get this from origin minus CP 375 | dots = np.einsum('ij,i...j->...i', rays, np.expand_dims(normals, axis=0)) 376 | factor = -np.einsum('i...j,i...j->...i', w, np.expand_dims(normals, axis=0)) / dots 377 | hit = ray_start + rays * np.expand_dims(factor, axis=2) 378 | inside = (factor > 1e-6) & (factor < 1) #each column represents a ray 379 | return hit, inside 380 | 381 | def collision_setup(ray_start, mesh): 382 | '''Used to generate the storable values needed for raycast. 383 | Currently assumes raycasting onto a static object. When it comes time 384 | to check animated objects, mesh_info.get_coords will have to be run 385 | every frame then viewed with the stored tri_idx''' 386 | tri_idx = mesh_info.triangulate(mesh) 387 | tri_coords = mesh_info.get_coords(mesh, mesh)[tri_idx] 388 | tri_count = len(tri_coords) 389 | ray_count = len(ray_start) 390 | tri_indexing = (np.repeat([np.arange(tri_count)], ray_count, axis=0)).T 391 | ray_indexing = (np.repeat([np.arange(ray_count)], tri_count, axis=0)) 392 | return tri_coords, tri_indexing, ray_indexing 393 | 394 | def pre_check(ray_start, ray_end, tri_coords): 395 | '''creates a bool array based on the absolute bounds of all the rays 396 | checked against the bounds of all the triangles. Works best with single ray. 397 | Becomes mostly uselsess on the diagonal because the box of the ray grows. 398 | Consider checking coords rotated to world axis''' 399 | rays_combined = np.append(ray_start, ray_end, axis=0) 400 | tri_min = np.min(tri_coords, axis=1) # this represents the min corner of the b_box for each tri 401 | tri_max = np.max(tri_coords, axis=1) # this represents the max corner of the b_box for each tri 402 | min = np.min(rays_combined, axis=0) 403 | max = np.max(rays_combined, axis=0) 404 | outside = ((min < tri_min) & (max < tri_min)) | ((min > tri_max) & (max > tri_max)) 405 | view = np.all(np.invert(outside), axis=1) 406 | return view 407 | 408 | def pre_check_rotate(ray_start, ray_end, tri_coords): 409 | '''Example of raycasting a single ray and rotating 410 | the mesh so the diagonal doesn't create a giant box''' 411 | v1 = ray_end - ray_start 412 | Q = rotate_methods.get_quat_2(v1[0], np.array([0, 0, 1])) 413 | tri_vecs = tri_coords - ray_start 414 | shape = tri_vecs.shape 415 | v1r = np.array([rotate_methods.rotate_around_axis(v1[0], Q)]) 416 | vecsr = rotate_methods.rotate_around_axis(tri_vecs.reshape(shape[0] * 3, 3), Q) 417 | tri_coords = vecsr.reshape(shape) 418 | rays_combined = np.append(np.array([[0,0]]), v1r[:, [0,1]], axis=0) 419 | tri_min = np.min(tri_coords[:, :, [0,1]], axis=1) # this represents the min corner of the b_box for each tri 420 | tri_max = np.max(tri_coords[:, :, [0,1]], axis=1) # this represents the max corner of the b_box for each tri 421 | min = np.min(rays_combined, axis=0) 422 | max = np.max(rays_combined, axis=0) 423 | outside = ((min - tri_min<0) & (max - tri_min<0)) | ((min - tri_max>0) & (max - tri_max>0)) 424 | view = np.all(np.invert(outside), axis=1) 425 | return view 426 | 427 | def raycast(ray_start, ray_end, tri_coords, tri_indexing, ray_indexing): 428 | '''Ray start and end take Nx3 arrays of vectors reperesenting the 429 | start and the end of the rays (or edges). Use mesh_info.triangulate 430 | to get triangulated coords of the mesh with mesh_info.get_coords[tri_idx] 431 | but store it instead of recalculating it on iterations. 432 | Returns hit locations for each ray on each triangle with an indexing 433 | array containing the index of the rays that cross the planes inside 434 | the triangles. When a ray crosses more than one triangle, the index 435 | array contains the first triangle intersected.''' 436 | if len(tri_coords)>0: 437 | tri_vecs = np.take(tri_coords, [1, 2, 0], axis=1) - tri_coords # roll the triangle coords and subtract to get the vectors 438 | origins = tri_coords[:, 0] # Point of reference for ray intersect 439 | rays = ray_end - ray_start # convert rays to vectors 440 | w = np.expand_dims(ray_start, axis=1) - origins # Draw a vector from each ray start to each origin 441 | normals = np.cross(tri_vecs[: ,0], tri_vecs[:, 1]) # Cross product used to place the ray on the plane 442 | dots = np.einsum('ij,i...j->...i', rays, np.expand_dims(normals, axis=0)) # Ray and line perpindicular to triangle 443 | factor = -np.einsum('i...j,i...j->...i', w, np.expand_dims(normals, axis=0)) / dots # How far along the ray we go to hit the plane 444 | hits = ray_start + rays * np.expand_dims(factor, axis=2) # final location of hits 445 | 446 | # Check if rays crossed planes: 447 | intersected = (factor > 0) & (factor < 1) #each column represents a ray 448 | view = tri_indexing[intersected] # fancy indexing for tiling triangles 449 | hits_culled = hits[intersected] # view from edges passing through planes 450 | 451 | # Check for inside triangle where rays crossed planes 452 | # Phase 1: closest points on triangle vectors 453 | t_vecs = tri_vecs[view] # triangles repeated to match hits_culled 454 | tri_coords_view = tri_coords[view] 455 | p_vecs = np.expand_dims(hits_culled, axis=1) - tri_coords_view 456 | t_p_dot = np.einsum('ijk, ijk->ij', t_vecs, p_vecs) 457 | t_t_dot = np.einsum('ijk, ijk->ij', t_vecs, t_vecs) 458 | scalar = t_p_dot / t_t_dot 459 | CP1 = t_vecs * np.expand_dims(scalar, axis=2) + tri_coords_view 460 | # Phase 2: closest points on vectors made from closest points 461 | CP_vecs = CP1 - np.take(tri_coords_view, [2, 0, 1], axis=1) 462 | p_vecs_rollback = np.expand_dims(hits_culled, axis=1) - np.take(tri_coords_view, [2, 0, 1], axis=1) 463 | CP_p_dot = np.einsum('ijk, ijk->ij', CP_vecs, p_vecs_rollback) 464 | CP_CP_dot = np.einsum('ijk, ijk->ij', CP_vecs, CP_vecs) 465 | scalar2 = CP_p_dot / CP_CP_dot 466 | inside = np.all(scalar2<1, axis=1) 467 | intersected[intersected] = inside # set intersected outside to False 468 | 469 | # Set all but best hits to False (indexing arrays here are a nightmare. Needs optomizing. slicing the slices allows modifying in place: coords[view[set]] not coords[view][set]) 470 | # Creates an array of zeros matching tiled triangles then uses argmax measuring the ray backwards for 471 | # furthest hit from end. 472 | hits_culled = hits[intersected] # Reset hits culled after culling points not in triangles 473 | shape = intersected.shape 474 | rows = shape[0] 475 | columns = shape[1] 476 | ray_view = ray_indexing[intersected] 477 | ray_to_hits = hits_culled - ray_end[ray_view] 478 | mags = np.einsum('ij,ij->i', ray_to_hits, ray_to_hits) 479 | zeros = np.zeros((shape)) 480 | zeros[intersected]=mags 481 | grid = np.expand_dims(np.arange(0, rows * columns, rows), axis=1) #create a grid to offset rows by number in each row 482 | argsort = (np.argsort(zeros.T,axis=1) + grid)[:,:-1].T.ravel() # offset each row by number in row to get number sequence 483 | intersected_t = intersected.T 484 | raveled = intersected_t.ravel() 485 | raveled[argsort] = False 486 | final = raveled.reshape(intersected_t.shape).T 487 | if len(ray_indexing[final])> 0: 488 | return hits[final], ray_indexing[final] 489 | else: 490 | return None 491 | else: 492 | return None 493 | -------------------------------------------------------------------------------- /final_cut.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | try: 4 | import bpy 5 | except: 6 | pass 7 | 8 | 9 | def final_cut_(existing_points, max_depth=10, min_distance=2, straightness=0.5): 10 | divisions = 16 11 | 12 | ep = existing_points 13 | 14 | # could get left and right max and add padding 15 | # 16 | ls = np.min(ep[:, 0]) - 0.0001 17 | rs = np.max(ep[:, 0]) + 0.0001 18 | 19 | spread = np.linspace(ls, rs, divisions) 20 | 21 | idx = np.searchsorted(spread, ep[:, 0]) 22 | numidx = np.arange(idx.shape[0]) 23 | nums = [] 24 | peaks = [] 25 | for i in range(1, divisions): 26 | slice = ep[idx==i] 27 | if slice.shape[0] == 0: 28 | peak = peaks[-1] 29 | nums.append(nums[-1]) 30 | peaks.append(peaks[-1]) 31 | else: 32 | peak = np.max(slice[:, 1]) 33 | peaks.append(peak) 34 | apeak = np.argmax(slice[:, 1]) 35 | num = numidx[idx==i][apeak] 36 | nums.append(num) 37 | #bool[idx==i] = True 38 | 39 | return idx, slice, nums 40 | 41 | 42 | # questions: 43 | # 1. how do we know the left and right start and 44 | # stop locations for the laser? 45 | 46 | pass 47 | 48 | 49 | 50 | 51 | 52 | a = bpy.data.objects['a'] 53 | a = bpy.data.objects['b'] 54 | a = bpy.data.objects['p'] 55 | 56 | co = np.zeros((len(a.data.vertices), 3), dtype=np.float32) 57 | a.data.vertices.foreach_get('co', co.ravel()) 58 | 59 | idx, slice, peaks = final_cut_(co[:, :2]) 60 | 61 | bool = np.zeros(idx.shape[0], dtype=np.bool) 62 | #print(idx==0) 63 | bool[peaks] = True 64 | #bool[idx==5] = True 65 | #print(slice) 66 | #for i in range(1, 12): 67 | #bool[idx==i] = True 68 | 69 | #a.data.vertices.foreach_set('select', bool) 70 | #a.data.update() 71 | 72 | 73 | def double_check(ep, lp, md, k, iters): 74 | """Checks if any edges dip below the pattern 75 | and moves them up.""" 76 | #return 77 | dists = np.zeros(lp.shape, dtype=np.float32) 78 | middle = lp[:, 0][1:-1] 79 | ls = middle - lp[:, 0][:-2] 80 | rs = lp[:, 0][2:] - middle 81 | 82 | dists[:, 0][1:-1] = ls 83 | dists[:, 1][1:-1] = rs 84 | dists[0][0] = ls[0] 85 | dists[-1][0] = rs[-1] 86 | dis = np.sort(dists, axis=0)[:, 1] 87 | x_dif = lp[:, 0][:, None] - ep[:, 0] 88 | bool = np.abs(x_dif) < dis[:, None] 89 | 90 | for i in range(lp.shape[0]): 91 | idx = np.arange(ep.shape[0])[bool[i]] 92 | ey = ep[:, 1][idx] 93 | ly = lp[i][1] - md 94 | 95 | mult = 1 96 | if iters > 10: 97 | if k > iters - 3: 98 | mult = 0 99 | #if k == iters - 1: 100 | #mult == 0 101 | 102 | if np.any(ey > ly): 103 | t = np.max(ey) 104 | lp[i][1] += ((t-ly) + md) * mult 105 | 106 | 107 | def smoothed(ar, ep, zeros, md, iters, reduce_only=False): 108 | """Reduce only based on theory that 109 | we only want to reduce vel in sharp 110 | turns not allowing vel to increase 111 | where we may still be turning.""" 112 | 113 | # get weights so sharp angles move less 114 | vecs = ar[1:] - ar[:-1] 115 | length = np.sqrt(np.einsum('ij,ij->i', vecs, vecs)) 116 | uvecs = vecs / length[:, None] 117 | d = np.einsum('ij,ij->i', uvecs[1:], uvecs[:-1]) 118 | 119 | d[d > 1] = 1 120 | d[d < 0] = 0 121 | weights = np.ones(ar.shape[0], dtype=np.float32) 122 | #weights[1:-1] = d# ** 1.5 123 | 124 | end = ar.shape[0] - 1 125 | 126 | sh = ar.shape[0] 127 | idx1 = np.empty((sh, 3), dtype=np.int32) 128 | 129 | id = np.arange(sh) 130 | 131 | idx1[:, 0] = id - 1 132 | idx1[:, 1] = id 133 | idx1[:, 2] = id + 1 134 | idx1[idx1 < 1] = 0 135 | idx1[idx1 > (sh - 1)] = sh - 1 136 | means1 = np.mean(ar[idx1], axis=1) 137 | 138 | for k in range(iters): 139 | means1 = np.mean(ar[idx1], axis=1) 140 | move = (np.array(means1) - ar) 141 | 142 | if reduce_only: 143 | #move *= weights[:, None] 144 | pass 145 | #move[move < 0] = 0.0 146 | ar[:,1] += move[:, 1] 147 | ar[zeros] = 0.0 148 | 149 | #if k < iters - 4: 150 | double_check(ep, ar, md, k, iters) 151 | 152 | return ar 153 | 154 | 155 | def pad_ends(ar, overcut=4): 156 | 157 | ls = ar[0][0] 158 | rs = ar[-1][0] 159 | 160 | safe_margin = 10 161 | overlap_size = 30 # area to go back and forth 162 | 163 | pad = 20 164 | 165 | # start = start_location 166 | start = ls - safe_margin - overlap_size - pad 167 | # end = end_location 168 | end = rs + safe_margin + overlap_size + pad 169 | 170 | ls_start = ls - safe_margin - 171 | 172 | # or: for i in range(N): 173 | # walk(i) 174 | 175 | #!!! Make args to pad the edges so like: 176 | # edge_dist = 10 # centimeters 177 | #!!! make args for number of times to go 178 | # back and forth over the edge where it curls up 179 | #!!! Put in the args for max depth and stuff... 180 | 181 | def final_cut(existing_points=co, max_depth=350, min_distance=10, straightness=0.0): 182 | "straightness must be from 0.0 - 1.0, 1.0 is a straight line" 183 | smoothing_iters = 20 184 | md = min_distance 185 | 186 | flat =.5 187 | s = flat 188 | 189 | convert = straightness * 200 190 | 191 | 192 | 193 | divisions = int((200 - convert)) + 10 194 | ep = existing_points 195 | 196 | ab_max = np.max(ep[:, 1]) 197 | bottom = ab_max - max_depth 198 | 199 | ls = np.min(ep[:, 0]) - 0.0001 200 | rs = np.max(ep[:, 0]) + 0.0001 201 | 202 | spread = np.linspace(ls, rs, divisions) 203 | 204 | f = bpy.context.scene.frame_current - 1 205 | 206 | #print(spread[f]) 207 | 208 | e = bpy.data.objects['e'] 209 | e2 = bpy.data.objects['e2'] 210 | dir = -1 211 | look = (rs - ls) / divisions 212 | drop = look * s * dir 213 | vec = np.array([look * s, drop]) 214 | 215 | idx = np.searchsorted(spread, ep[:, 0]) 216 | numidx = np.arange(idx.shape[0]) 217 | nums = [] 218 | peaks = [] 219 | vecs = [0.0] 220 | expand = 9 221 | epeaks = [] 222 | xy = [] 223 | for i in range(divisions): 224 | b = i 225 | if b > 4: 226 | b = 5 227 | slice = ep[np.in1d(idx, np.arange(i-b, i + expand))] 228 | #slice = ep[np.in1d(idx, np.arange(i, i + expand))] 229 | if slice.shape[0] == 0: 230 | peak = peaks[-1] + vecs[-1] 231 | vec = vecs[-1] 232 | if peak < bottom: 233 | peak = bottom 234 | vec = bottom - vec 235 | vecs.append(vec) 236 | #peaks.append(peak) 237 | else: 238 | peak = np.max(slice[:, 1]) + md 239 | if peak < bottom: 240 | peak = bottom 241 | #peaks.append(peak) 242 | 243 | if i > 0: 244 | dif = peak - peaks[i - 1] 245 | vec = dif * flat 246 | vecs.append(vec) 247 | peak = peaks[i - 1] + vec 248 | 249 | # edges go back and forth where it curls 250 | # number of edge passes. how wide an area. 251 | # edge width var 252 | peaks.append(peak) 253 | 254 | xy += [[spread[i], peaks[i]]] 255 | 256 | npxy = np.array(xy) 257 | zeros = np.zeros(npxy.shape[0], dtype=np.bool) 258 | sm_ar = smoothed(npxy, ep, zeros, md=min_distance, iters=smoothing_iters, reduce_only=True) 259 | 260 | back_forth = pad_ends(sm_ar) 261 | 262 | line = bpy.data.objects['l'] 263 | lco = np.zeros((len(line.data.vertices), 3), dtype=np.float32) 264 | line.data.vertices.foreach_get('co', lco.ravel()) 265 | 266 | lco[:, :2][:len(xy)] = sm_ar 267 | line.data.vertices.foreach_set('co', lco.ravel()) 268 | line.data.update() 269 | 270 | return [sm_ar.tolist()] # formatted like cut_polyline 271 | 272 | 273 | cut_polyline = final_cut() 274 | 275 | 276 | -------------------------------------------------------------------------------- /function_libraries.py: -------------------------------------------------------------------------------- 1 | ## =============================== ## 2 | ## =============================== ## 3 | # play mega tri mesh cache 4 | # !!! don't forget to delete shape keys !!! 5 | import bpy 6 | import numpy as np 7 | 8 | # for using pip in python: 9 | from pip._internal import main as pipmain 10 | pipmain(['install', 'package-name']) 11 | 12 | 13 | def make_2d_object(xy, name, offset=0.0): 14 | """Takes the 2d coords and adds the z as 0.0 15 | then makes new object or replaces data if the 16 | object with that name exists.""" 17 | import bpy 18 | xyz = np.zeros((len(xy), 3), dtype=np.float32) 19 | xyz[:, :2] = xy 20 | edges = np.empty((xyz.shape[0] - 1, 2), dtype=np.int32) 21 | edges[:, 0] = np.arange(xyz.shape[0] - 1) 22 | edges[:, 1] = np.arange(xyz.shape[0] - 1) + 1 23 | edges = edges.tolist() 24 | verts = xyz.tolist() 25 | faces = [] 26 | 27 | if name in bpy.data.objects: 28 | ob = bpy.data.objects[name] 29 | link_mesh(verts, edges, faces, name=name, ob=ob) 30 | ob.location.x = offset 31 | else: 32 | link_mesh(verts, edges, faces, name=name, offset=offset) 33 | 34 | 35 | def add_empty(name, loc, rot, size=0.05, type="SPHERE"): 36 | """Create an empty and link it 37 | to the scene""" 38 | if name in bpy.data.objects: 39 | o = bpy.data.objects[name] 40 | else: 41 | o = bpy.data.objects.new(name, None) 42 | bpy.context.scene.collection.objects.link(o) 43 | 44 | # empty_draw was replaced by empty_display 45 | o.empty_display_size = size 46 | o.empty_display_type = type 47 | 48 | o.location.xy = loc 49 | o.rotation_euler[2] = rot 50 | 51 | 52 | def redistribute_polyline(co, spacing=None, respaced=None): 53 | """Walk the points in a polygon 54 | assuming they are not spaced 55 | evenly and space them evenly. 56 | If spacing is None use average len. 57 | Respaced can be an array of distances 58 | for spacing unevenly.""" 59 | 60 | vecs = co[1:] - co[:-1] 61 | dots = np.einsum('ij,ij->i', vecs, vecs) 62 | 63 | # cull zero length 64 | booly = np.ones(co.shape[0], dtype=bool) 65 | booly[1:] = dots > 0.0 66 | co = co[booly] 67 | 68 | vecs = co[1:] - co[:-1] 69 | dots = np.einsum('ij,ij->i', vecs, vecs) 70 | 71 | lengths = np.sqrt(dots) 72 | u_vecs = vecs / lengths[:, None] 73 | av_len = np.mean(lengths) 74 | total_len = np.sum(lengths) 75 | 76 | if spacing is None: 77 | spacing = av_len 78 | 79 | # get the number of points 80 | point_count = int(total_len // spacing) 81 | 82 | if respaced is None: 83 | respaced = np.linspace(0.0, total_len, point_count) 84 | 85 | cumulated = np.cumsum(lengths) 86 | with_zero = np.zeros(cumulated.shape[0] + 1, dtype=np.float32) 87 | with_zero[1:] = cumulated 88 | 89 | idxer = np.arange(with_zero.shape[0]) 90 | hits = [] 91 | for i in range(respaced.shape[0] - 1): 92 | hit = respaced[i] >= with_zero 93 | idx = idxer[hit][-1] 94 | 95 | dif = respaced[i] - with_zero[idx] 96 | crawl = co[idx] + (u_vecs[idx] * dif) 97 | hits.append(crawl) 98 | 99 | return hits + [co[-1]] 100 | 101 | 102 | class Bezier(): 103 | def TwoPoints(t, P1, P2): 104 | """ 105 | Returns a point between P1 and P2, parametised by t. 106 | """ 107 | 108 | Q1 = (1 - t) * P1 + t * P2 109 | return Q1 110 | 111 | def Points(t, points): 112 | """ 113 | Returns a list of points interpolated by the Bezier process 114 | """ 115 | newpoints = [] 116 | for i1 in range(0, len(points) - 1): 117 | #for i1 in range(0, len(points)): 118 | newpoints += [Bezier.TwoPoints(t, points[i1], points[i1 + 1])] 119 | return newpoints 120 | 121 | def Point(t, points): 122 | """ 123 | Returns a point interpolated by the Bezier process 124 | """ 125 | newpoints = points 126 | while len(newpoints) > 1: 127 | newpoints = Bezier.Points(t, newpoints) 128 | return newpoints[0] 129 | 130 | def Curve(t_values, points): 131 | """ 132 | Returns a point interpolated by the Bezier process 133 | """ 134 | 135 | curve = np.array([[0.0] * len(points[0])]) 136 | for t in t_values: 137 | curve = np.append(curve, [Bezier.Point(t, points)], axis=0) 138 | 139 | curve = np.delete(curve, 0, 0) 140 | 141 | return curve 142 | 143 | # example 144 | bez_test = False 145 | if bez_test: 146 | t_points = np.arange(0, 2, 0.1) 147 | test = np.array([[0, 5], [4, 10], [8, 10]])#, [4, 0], [6, 0], [10, 5]]) # can be lots of points 148 | test_set_1 = Bezier.Curve(t_points, test) 149 | 150 | 151 | def percent_solve(result, decimal): 152 | """Find the value needed for 153 | the end result given a percentage. 154 | Example: if I want 100 and 20 percent 155 | is going to be taken: solve(100, .2).""" 156 | mult = 1 / decimal # 5 here 157 | div = result / (mult - 1) 158 | return div * mult 159 | 160 | 161 | def get_linked(obm, idx, op=None): 162 | """put in the index of a vert. Get everything 163 | linked just like 'select_linked_pick()'""" 164 | vboos = np.zeros(len(obm.verts), dtype=np.bool) 165 | cvs = [obm.verts[idx]] 166 | escape = False 167 | while not escape: 168 | new = [] 169 | for v in cvs: 170 | if not vboos[v.index]: 171 | vboos[v.index] = True 172 | lv = [e.other_vert(v) for e in v.link_edges] 173 | culled = [v for v in lv if not vboos[v.index]] 174 | new += culled 175 | cvs = new 176 | if len(cvs) == 0: 177 | escape = True 178 | idxer = np.arange(len(obm.verts))[vboos] 179 | if op == "DELETE": 180 | verts = [obm.verts[i] for i in idxer] 181 | bmesh.ops.delete(obm, geom=verts) 182 | return idxer 183 | 184 | 185 | def map_ranges(r1b, r1t, r2b, r2t, val): 186 | """Find the value of range 1 where 187 | it maps to range two. 188 | r1b: range 1 bottom 189 | r1t: range 1 top""" 190 | 191 | dif1 = r1t - r1b 192 | dif2 = r2t - r2b 193 | vd = val - r1b 194 | dv1 = vd / dif1 195 | 196 | return dif2 * dv1 + r2b 197 | 198 | 199 | #---------------------------------------------- 200 | def spread_array(ar=None, steps=6): 201 | """Create interpolated points 202 | between points in an array 203 | of vectors.""" 204 | v = ar[1:] - ar[:-1] 205 | div = v / (steps + 1) 206 | if len(ar.shape) == 1: 207 | new_ar = np.zeros(ar.shape[0] * (steps + 1)) 208 | else: 209 | new_ar = np.zeros((ar.shape[0] * (steps + 1), ar.shape[1])) 210 | new_ar[::steps + 1] = ar 211 | for i in range(steps): 212 | ph = ar[:-1] + (div * (i + 1)) 213 | new_ar[:-(steps +1)][i+1::steps+1] = ph 214 | 215 | return new_ar[: -steps] 216 | 217 | xy = np.arange(4) 218 | ar = np.arange(8) 219 | ar.shape = (4,2) 220 | ar[:,0] = xy 221 | ar[:,1] = xy 222 | 223 | new_ar = spread_array(ar) 224 | 225 | print('------ new ------') 226 | print(new_ar) 227 | #---------------------------------------------- 228 | 229 | 230 | 231 | def get_proxy_co(ob, co=None, proxy=None): 232 | """Gets co with modifiers like cloth""" 233 | if proxy is None: 234 | dg = bpy.context.evaluated_depsgraph_get() 235 | prox = ob.evaluated_get(dg) 236 | proxy = prox.to_mesh() 237 | 238 | if co is None: 239 | vc = len(proxy.vertices) 240 | co = np.empty((vc, 3), dtype=np.float32) 241 | 242 | proxy.vertices.foreach_get('co', co.ravel()) 243 | ob.to_mesh_clear() 244 | return co 245 | 246 | 247 | def np_co_to_text(ob, co, rw='w'): 248 | """Read or write cache file""" 249 | name = ob.name + 'cache.npy' 250 | 251 | if rw == 'w': 252 | if name not in bpy.data.texts: 253 | bpy.data.texts.new(name) 254 | 255 | txt = bpy.data.texts[name] 256 | np.savetxt(txt, co) 257 | 258 | return 259 | 260 | vc = len(ob.data.vertices) 261 | txt = bpy.data.texts[name].as_string() 262 | frame = bpy.context.scene.frame_current 263 | start = (frame -1) * vc * 3 264 | 265 | co = np.fromstring(txt, sep='\n')[start: start + (vc * 3)] 266 | co.shape = (co.shape[0]//3, 3) 267 | 268 | ob.data.vertices.foreach_set('co', co.ravel()) 269 | ob.data.update() 270 | 271 | 272 | def play_internal_cache(scene): 273 | ob = bpy.data.objects['Cube'] 274 | np_co_to_text(ob, co=None, rw='r') 275 | 276 | 277 | bpy.app.handlers.frame_change_post.append(play_internal_cache) 278 | ## =============================== ## 279 | ## =============================== ## 280 | 281 | 282 | def read_flap_target(): 283 | import bpy 284 | import json 285 | ob = bpy.data.objects['g8193'] 286 | 287 | file = bpy.data.texts['flap_ptrs.json'] 288 | slices = json.loads(file.as_string()) 289 | 290 | for k, v in slices.items(): 291 | for ve in v: 292 | ob.data.vertices[ve].select = True 293 | 294 | ob.data.update() 295 | #error 296 | 297 | def bmesh_proxy(ob): 298 | """Get a bmesh contating modifier effects""" 299 | dg = bpy.context.evaluated_depsgraph_get() 300 | prox = ob.evaluated_get(dg) 301 | proxy = prox.to_mesh() 302 | obm = bmesh.new() 303 | obm.from_mesh(proxy) 304 | return obm 305 | 306 | 307 | def select_edit_mode(sc, ob, idx, type='v', deselect=False, obm=None): 308 | """Selects verts in edit mode and updates""" 309 | 310 | if ob.data.is_editmode: 311 | if obm is None: 312 | obm = bmesh.from_edit_mesh(ob.data) 313 | obm.verts.ensure_lookup_table() 314 | 315 | if type == 'v': 316 | x = obm.verts 317 | if type == 'f': 318 | x = obm.faces 319 | if type == 'e': 320 | x = obm.edges 321 | 322 | if deselect: 323 | for i in x: 324 | i.select = False 325 | 326 | for i in idx: 327 | sc.select_counter[i] += 1 328 | x[i].select = True 329 | 330 | if obm is None: 331 | bmesh.update_edit_mesh(ob.data) 332 | #bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) 333 | 334 | 335 | # ---------------------four functions below------------------------------- << 336 | def eliminate_duplicate_pairs(ar): 337 | """Eliminates duplicates and mirror duplicates. 338 | for example, [1,4], [4,1] or duplicate occurrences of [1,4] 339 | Returns an Nx2 array.""" 340 | # no idea how this works (probably sorcery) but it's really fast 341 | a = np.sort(ar, axis=1) 342 | x = np.random.rand(a.shape[1]) 343 | y = a @ x 344 | unique, index = np.unique(y, return_index=True) 345 | return a[index] 346 | # or 347 | strs = [str(e) + str(t) for e, t in zip(a[:,0], a[:,1])] 348 | uni = np.unique(strs, return_index=True)[1] 349 | 350 | 351 | 352 | def tree(co, margin=0.001, _idx=None): 353 | 354 | # could test dividing up the world instead of dividing boxes 355 | b_min = np.min(co, axis=0) 356 | b_max = np.max(co, axis=0) 357 | mid = b_min + ((b_max - b_min) / 2) 358 | 359 | bpy.data.objects['a'].location = mid 360 | # l = left, r = right, f = front, b = back, u = up, d = down 361 | idx = np.arange(co.shape[0], dtype=np.int32) 362 | boxes = [] 363 | 364 | # ------------------------------- 365 | B = co[:,0] < mid[0] + margin 366 | il = idx[B] 367 | 368 | B = co[:,0] > mid[0] - margin 369 | ir = idx[B] 370 | 371 | # ------------------------------ 372 | cil = co[:,1][il] 373 | B = cil > mid[1] - margin 374 | ilf = il[B] 375 | 376 | B = cil < mid[1] + margin 377 | ilb = il[B] 378 | 379 | cir = co[:,1][ir] 380 | B = cir > mid[1] - margin 381 | irf = ir[B] 382 | 383 | B = cir < mid[1] + margin 384 | irb = ir[B] 385 | 386 | # ------------------------------ 387 | cilf = co[:,2][ilf] 388 | B = cilf > mid[2] - margin 389 | ilfu = ilf[B] 390 | B = cilf < mid[2] + margin 391 | ilfd = ilf[B] 392 | 393 | cilb = co[:,2][ilb] 394 | B = cilb > mid[2] - margin 395 | ilbu = ilb[B] 396 | B = cilb < mid[2] + margin 397 | ilbd = ilb[B] 398 | 399 | cirf = co[:,2][irf] 400 | B = cirf > mid[2] - margin 401 | irfu = irf[B] 402 | B = cirf < mid[2] + margin 403 | irfd = irf[B] 404 | 405 | cirb = co[:,2][irb] 406 | B = cirb > mid[2] - margin 407 | irbu = irb[B] 408 | B = cirb < mid[2] + margin 409 | irbd = irb[B] 410 | 411 | if _idx is None: 412 | boxes = [ilfu, ilfd, ilbu, ilbd, irfu, irfd, irbu, irbd] 413 | doubles = [i for i in boxes if i.shape[0] > 1] 414 | #return #[i.tolist() for i in boxes] 415 | return doubles 416 | 417 | boxes = [_idx[ilfu], 418 | _idx[ilfd], 419 | _idx[ilbu], 420 | _idx[ilbd], 421 | _idx[irfu], 422 | _idx[irfd], 423 | _idx[irbu], 424 | _idx[irbd] 425 | ] 426 | 427 | doubles = [i for i in boxes if i.shape[0] > 1] 428 | return doubles 429 | 430 | 431 | def branches(co, margin): 432 | """Subsets of trees""" 433 | boxes = [] 434 | b1 = tree(co, margin=margin) 435 | for i in b1: 436 | b2 = tree(co[i], margin=margin, _idx=i) 437 | for j in b2: 438 | b3 = tree(co[j], margin=margin, _idx=j) 439 | boxes += b3 440 | 441 | return boxes 442 | 443 | 444 | def find_doubles(ob, margin=0.001): 445 | """Finds verts whose distance from each 446 | other is less than the margin. 447 | Returns an Nx2 numpy arry of close pairs.""" 448 | 449 | vc = len(ob.data.vertices) 450 | co = np.empty((vc, 3), dtype=np.float32) 451 | ob.data.vertices.foreach_get('co', co.ravel()) 452 | 453 | boxes = branches(co, margin) 454 | dubs = [] 455 | m = margin ** 2 456 | 457 | for bz in boxes: 458 | if bz.shape[0] > 0: 459 | c = co[bz] 460 | b_vecs = c[:, None] - c 461 | d = np.einsum('ijk,ijk->ij', b_vecs, b_vecs) 462 | agw = np.argwhere(d <= m) 463 | cull = agw[:, 0] == agw[:, 1] 464 | agwc = agw[~cull] 465 | if agwc.shape[0] > 0: 466 | 467 | dubs += bz[agwc].tolist() 468 | 469 | return eliminate_duplicate_pairs(np.array(dubs)) 470 | # ------------------------four functions above---------------------------- >> 471 | 472 | 473 | def merge_verts(ob, margin=0.001, obm=None): 474 | 475 | if obm is None: 476 | obm = bmesh.new() 477 | obm.from_mesh(ob.data) 478 | 479 | bmesh.ops.remove_doubles(obm, verts=obm.verts, dist=margin) 480 | obm.to_mesh(ob.data) 481 | 482 | ob.data.update() 483 | obm.clear() 484 | obm.free() 485 | 486 | 487 | def get_ordered_loop(poly_line, edges=None): 488 | """Takes a bunch of verts and gives the order 489 | based on connected edges. 490 | Or, takes an edge array of vertex indices 491 | and gives the vertex order.""" 492 | 493 | if edges is not None: 494 | v = edges[0][0] 495 | le = edges[np.any(v == edges, axis=1)] 496 | if len(le) != 2: 497 | print("requires a continuous loop of edges") 498 | return 499 | 500 | ordered = [v] 501 | for i in range(len(poly_line.data.vertices)): 502 | #le = v.link_edges 503 | le = edges[np.any(v == edges, axis=1)] 504 | if len(le) != 2: 505 | print("requires a continuous loop of edges") 506 | break 507 | 508 | ot1 = le[0][le[0] != v] 509 | ot2 = le[1][le[1] != v] 510 | v = ot1 511 | if ot1 in ordered[-2:]: 512 | v = ot2 513 | if v == ordered[0]: 514 | break 515 | 516 | ordered += [v[0]] 517 | return ordered 518 | 519 | obm = get_bmesh(poly_line, refresh=True) 520 | v = obm.edges[0].verts[0] 521 | le = v.link_edges 522 | 523 | if len(le) != 2: 524 | print("requires a continuous loop of edges") 525 | return 526 | 527 | ordered = [v.index] 528 | for i in range(len(poly_line.data.vertices)): 529 | le = v.link_edges 530 | 531 | if len(le) != 2: 532 | print("requires a continuous loop of edges") 533 | break 534 | 535 | ot1 = le[0].other_vert(v) 536 | ot2 = le[1].other_vert(v) 537 | v = ot1 538 | if ot1.index in ordered[-2:]: 539 | v = ot2 540 | if v.index == ordered[0]: 541 | break 542 | 543 | ordered += [v.index] 544 | 545 | return ordered 546 | 547 | 548 | def read_python_script(name=None): 549 | import bpy 550 | import inspect 551 | import pathlib 552 | """When this runs it makes a copy of this script 553 | and saves it to the blend file as a text""" 554 | 555 | p_ = pathlib.Path(inspect.getfile(inspect.currentframe())) 556 | py = p_.parts[-1] 557 | p = p_.parent.parent.joinpath(py) 558 | try: 559 | o = open(p) 560 | except: 561 | p = p_.parent.joinpath(py) # linux or p1 (not sure why this is happening in p1) 562 | o = open(p) 563 | 564 | if name is None: 565 | name = 'new_' + py 566 | 567 | new = bpy.data.texts.new(name) 568 | 569 | r = o.read() 570 | new.write(r) 571 | 572 | 573 | def cross_from_tris(tris): 574 | origins = tris[:, 0] 575 | vecs = tris[:, 1:] - origins[:, nax] 576 | cross = np.cross(vecs[:, 0], vecs[:, 1]) 577 | return cross 578 | 579 | 580 | def distance_along_normal(tris, points): 581 | """Return the distance along the cross 582 | product and the distance along normalized 583 | cross product""" 584 | origins = tris[:, 0] 585 | cross_vecs = tris[:, 1:] - origins[:, nax] 586 | v2 = points - origins 587 | 588 | cross = np.cross(cross_vecs[:,0], cross_vecs[:,1]) 589 | d_v2_c = np.einsum('ij,ij->i', v2, cross) 590 | d_v2_v2 = np.einsum('ij,ij->i', cross, cross) 591 | div = d_v2_c / d_v2_v2 592 | 593 | U_cross = cross / np.sqrt(d_v2_v2)[:, None] 594 | U_d = np.einsum('ij,ij->i', v2, U_cross) 595 | 596 | return div, U_d# for normalized 597 | 598 | 599 | def connect_panels(self, s_norm_val=1.0, offset_steps=0, correct_rotation=True, reverse=False): 600 | """Offset steps is an int that allows backing up or moving forward 601 | the given number of edges. 602 | correct_rotation checks that the two arrays are paired correctly. 603 | reverse flips the direction of both arrays.""" 604 | 605 | #========================================== 606 | if False: 607 | Bobj = self.garment.Bobj 608 | left_zipper_vert_ptrs = self.left_panel.get_connection_bmesh_vert_ptrs() 609 | right_zipper_vert_ptrs = self.right_panel.get_connection_bmesh_vert_ptrs() 610 | 611 | Bobj = bpy.context.object 612 | left_zipper_vert_ptrs = np.arange(48, 60) 613 | right_zipper_vert_ptrs = np.arange(36, 48) 614 | #========================================== 615 | 616 | 617 | if correct_rotation: 618 | # If first vert in left should pair with last vert in right 619 | # or just assume we need to correct: right_zipper_vert_ptrs = right_zipper_vert_ptrs[::-1] 620 | v_first_l = Bobj.data.vertices[left_zipper_vert_ptrs[0]].co 621 | v_first_r = Bobj.data.vertices[right_zipper_vert_ptrs[0]].co 622 | v_last_r = Bobj.data.vertices[right_zipper_vert_ptrs[-1]].co 623 | vec1 = v_first_l - v_first_r 624 | vec2 = v_first_l - v_last_r 625 | l1 = vec1 @ vec1 626 | l2 = vec2 @ vec2 627 | # print(l2, l1,"are we there yet???") 628 | 629 | if l2 < l1: 630 | 631 | right_zipper_vert_ptrs = right_zipper_vert_ptrs[::-1] 632 | 633 | if reverse: 634 | right_zipper_vert_ptrs = right_zipper_vert_ptrs[::-1] 635 | left_zipper_vert_ptrs = left_zipper_vert_ptrs[::-1] 636 | 637 | obm = get_bmesh(Bobj) 638 | obm.verts.ensure_lookup_table() 639 | 640 | # get the total length 641 | 642 | co = np.array([obm.verts[v].co for v in left_zipper_vert_ptrs]) 643 | vecs = co[1:] - co[:-1] 644 | l = np.sqrt(np.einsum("ij ,ij->i", vecs, vecs)) 645 | sums = np.cumsum(np.nan_to_num(l/np.sum(l))) 646 | 647 | bool = s_norm_val < sums 648 | indexer = np.where(bool)[0] 649 | removing = False 650 | 651 | # print(np.abs(offset_steps), bool.shape[0]) 652 | 653 | stop = None 654 | if indexer.shape[0] == 0: 655 | stop = -1 # fill them all 656 | if offset_steps < 0: 657 | if np.abs(offset_steps) <= bool.shape[0]: 658 | stop = left_zipper_vert_ptrs[offset_steps -1] 659 | else: 660 | removing =True 661 | else: 662 | with_offset = indexer[0] + offset_steps 663 | set_stop = True 664 | 665 | if with_offset > indexer[-1]: 666 | stop = -1 667 | set_stop = False 668 | if with_offset < 0: 669 | removing = True 670 | set_stop = False 671 | if set_stop: 672 | stop = left_zipper_vert_ptrs[with_offset] 673 | 674 | if np.all(bool): 675 | removing = True 676 | for v1, v2 in zip(left_zipper_vert_ptrs, right_zipper_vert_ptrs): 677 | le = [e for e in obm.verts[v1].link_edges if e.other_vert(obm.verts[v1]).index == v2] 678 | existing = len(le) == 1 679 | 680 | if (removing & existing): 681 | obm.edges.remove(le[0]) 682 | 683 | if not existing: 684 | if not removing: 685 | obm.edges.new([obm.verts[v1],obm.verts[v2]]) 686 | 687 | if v1 == stop: 688 | removing = True 689 | 690 | obm.to_mesh(Bobj.data) 691 | Bobj.data.update() 692 | 693 | 694 | def apply_shape(ob, modifier_name='Cloth', update_existing_key=False, keep=['Cloth'], key_name='Cloth'): 695 | """Apply modifier as shape without using bpy.ops. 696 | Does not apply modifiers. 697 | Mutes modifiers not listed in 'keep.' 698 | Using update allows writing to an existing shape_key.""" 699 | 700 | def turn_off_modifier(modifier, on_off=False): 701 | modifier.show_viewport = on_off 702 | 703 | mod_states = [mod.show_viewport for mod in ob.modifiers] 704 | [turn_off_modifier(mod, False) for mod in ob.modifiers if mod.name not in keep] 705 | 706 | dg = bpy.context.evaluated_depsgraph_get() 707 | proxy = ob.evaluated_get(dg) 708 | co = get_co(proxy) 709 | 710 | if update_existing_key: 711 | key = ob.data.shape_keys.key_blocks[key_name] 712 | else: 713 | key = new_shape_key(ob, name=key_name, arr=None, value=0) 714 | 715 | key.data.foreach_set('co', co.ravel()) 716 | 717 | for i, j in zip(mod_states, ob.modifiers): 718 | j.show_viewport = i 719 | 720 | return key 721 | 722 | 723 | def matrix_from_custom_orientation(): 724 | """For using custom orientations as a matrix transform""" 725 | import bpy 726 | from bpy import context 727 | import mathutils 728 | #Get the matrix of the transform orientation called 'name' 729 | custom_matrix = bpy.context.scene.orientations['name'].matrix 730 | #Copy the matrix to resize it from 3x3 matrix to 4x4 matrix 731 | custom_matrix_4 = custom_matrix.copy() 732 | custom_matrix_4.resize_4x4() 733 | #Set the matrix of the active object to match the resized matrix 734 | bpy.context.active_object.matrix_world = custom_matrix_4 735 | 736 | 737 | def verts_in_group(ob, name='Group'): 738 | """Returns np array of indices for vertices in the group""" 739 | ob.update_from_editmode() # in case someone has assigned verts in editmode 740 | idx = ob.vertex_groups[name].index 741 | idxer = np.arange(len(ob.data.vertices)) 742 | this = [[j.group for j in v.groups if j.group == idx] for v in ob.data.vertices] 743 | idxs = [i for i in idxer if len(this[i]) > 0] 744 | return np.array(idxs) 745 | 746 | 747 | def save_data(name='saved_data.py', var='some_variable', data={'key': [1,2,3]}, overwrite=True): 748 | """Saves a dictionary as a variable in a python file 749 | as a blender internal text file. Can later import 750 | module and call all data as global variables.""" 751 | if name not in bpy.data.texts: 752 | bpy.data.texts.new(name) 753 | 754 | data_text = bpy.data.texts[name] 755 | 756 | m = json.dumps(data, sort_keys=True, indent=2) 757 | 758 | if overwrite: 759 | data_text.from_string(var + ' = ' + m) 760 | return 761 | 762 | # can also just add to the text 763 | data_text.cursor_set(-1, character=-1) # in case someone moves the cursor 764 | data_text.write(var + ' = ' + m) 765 | 766 | data_text.cursor_set(-1, character=-1) # add the new line or we can't read it as a python module 767 | data_text.write('\n') 768 | 769 | 770 | # to save an external text in a blend file 771 | def save_text_in_blend_file(path, file_name='my_text.py'): 772 | """Run this then save the blend file. 773 | file_name is the key blender uses to store the file: 774 | bpy.data.texts[file_name]""" 775 | t = bpy.data.texts.new(file_name) 776 | read = open(path).read() 777 | t.write(read) 778 | 779 | 780 | # to import the text as a module from within the blend file 781 | def get_internal_text_as_module(filename, key): 782 | """Load a module and return a dictionary from 783 | that module.""" 784 | module = bpy.data.texts[filename].as_module() 785 | return module.points[key] 786 | 787 | 788 | def get_co(ob): 789 | """Returns Nx3 numpy array of vertex coords as float32""" 790 | v_count = len(ob.data.vertices) 791 | co = np.empty(v_count * 3, dtype=np.float32) 792 | ob.data.vertices.foreach_get('co', co) 793 | co.shape = (v_count, 3) 794 | return co 795 | 796 | 797 | def new_shape_key(ob, name, arr=None, value=1): 798 | """Create a new shape key, set it's coordinates 799 | and set it's value""" 800 | new_key = ob.shape_key_add(name=name) 801 | new_key.value = value 802 | if arr is not None: 803 | new_key.data.foreach_set('co', arr.ravel()) 804 | return new_key 805 | 806 | 807 | def get_verts_in_group(ob, name): 808 | """Returns the indices of the verts that belong to the group""" 809 | idx = ob.vertex_groups[name].index 810 | vg = [v.index for v in ob.data.vertices if idx in [vg.group for vg in v.groups]] 811 | return np.array(vg) 812 | 813 | 814 | # -------- group debug ----------------# 815 | def new_shape_key(ob, name, arr=None, value=1): 816 | """Create a new shape key, set it's coordinates 817 | and set it's value""" 818 | new_key = ob.shape_key_add(name=name) 819 | new_key.value = value 820 | if arr is not None: 821 | new_key.data.foreach_set('co', arr.ravel()) 822 | return new_key 823 | 824 | # -------- group debug ----------------# 825 | def link_mesh(verts, edges=[], faces=[], name='!!! Debug Mesh !!!'): 826 | """Generate and link a new object from pydata""" 827 | mesh = bpy.data.meshes.new(name) 828 | mesh.from_pydata(verts, edges, faces) 829 | mesh.update() 830 | mesh_ob = bpy.data.objects.new(name, mesh) 831 | bpy.context.collection.objects.link(mesh_ob) 832 | return mesh_ob 833 | 834 | # -------- group debug ----------------# 835 | def create_debug_mesh(numpy_coords=[np.array([[1,2,3]]), np.array([[4,5,6]])], 836 | shape_keys=['Basis', 'key_1']): 837 | """Use a list of sets of numpy coords and matching list of shape key names. 838 | Creates a mesh point cloud with shape keys for each numpy coords set. 839 | !!! Adds this objet to the blend file !!!""" 840 | key_count = len(shape_keys) 841 | ob = link_mesh(numpy_coords[0]) 842 | keys = ob.data.shape_keys 843 | 844 | for i in range(key_count): 845 | new_shape_key(ob, shape_keys[i], numpy_coords[i], value=0) 846 | 847 | ob.data.update() 848 | 849 | 850 | def offset_face_indices(faces=[]): 851 | 852 | # this is probably better: (the bool is the verts you are keeping) 853 | #vidx_offset = np.cumsum(~data['vert_bool']) 854 | #verts = co[data['vert_bool']] 855 | #lens_polys = np.arange(len(ob.data.polygons))[data['face_bool']] 856 | #faces = [[p - vidx_offset[p] for p in ob.data.polygons[f].vertices] for f in lens_polys] 857 | 858 | """Sorts the original face vert indices 859 | for a new mesh from subset.""" 860 | # Example: face[n].verts = [[20, 10, 30], [10, 30, 100]] 861 | # Converts to [[1, 0, 2], [0, 2, 3]] 862 | 863 | def add(c): 864 | c['a'] += 1 865 | return c['a'] 866 | 867 | flat = np.hstack(faces) 868 | idx = np.unique(flat, return_inverse=True)[1] 869 | c = {'a': -1} 870 | new_idx = [[idx[add(c)] for j in i] for i in faces] 871 | 872 | 873 | # get depsgraph co with various modifiers turned off 874 | def get_co_with_modifiers(ob, types=[], names=[], include_mesh=False): 875 | """Get the coordinates of modifiers with 876 | specific modifiers turned on or off. 877 | List mods by type or name. 878 | This lets you turn off all mods of a type 879 | or just turn off by name.""" 880 | 881 | debug = True 882 | if debug: 883 | # verify modifier names and types 884 | mod_types = [mod.type for mod in ob.modifiers] 885 | mod_names = [mod.name for mod in ob.modifiers] 886 | # if the arg names ar not right return 887 | type_check = np.all(np.in1d(types, mod_types)) 888 | name_check = np.all(np.in1d(names, mod_names)) 889 | 890 | if not (type_check & name_check): 891 | print("!!! Warning. Mods not listed correctly !!!") 892 | print("!!! Warning. Mods not listed correctly !!!") 893 | return 894 | 895 | # save mod states for reset 896 | mod_states = [mod.show_viewport for mod in ob.modifiers] 897 | 898 | def turn_off_modifier(modifier): 899 | modifier.show_viewport = False 900 | 901 | [turn_off_modifier(mod) for mod in ob.modifiers if mod.name in names] 902 | [turn_off_modifier(mod) for mod in ob.modifiers if mod.type in types] 903 | 904 | # get the coordinates with the current modifier state 905 | dg = bpy.context.evaluated_depsgraph_get() 906 | proxy = ob.evaluated_get(dg) 907 | co = get_co(proxy) 908 | 909 | for i, j in zip(mod_states, ob.modifiers): 910 | j.show_viewport = i 911 | 912 | if include_mesh: 913 | return co, proxy.data 914 | 915 | return co 916 | 917 | 918 | def dots(a,b): 919 | #N x 3 - N x 3 920 | x = np.einsum('ij,ij->i', a, b) 921 | #3 - N x 3 922 | y = np.einsum('j,ij->i', a, b) # more simply b @ n 923 | #N x 3 - N x N x 3 924 | z = np.einsum('ij,ikj->ik', a, b) 925 | #N x N x 3 - N x N x 3 926 | w = np.einsum('ijk,ijk->ij', a, b) 927 | #N x 2 x 3 - N x 2 x 3 928 | a = np.einsum('ijk,ijk->ij', a, b) 929 | 930 | #N x N x 3 - 3 x 3 931 | z = np.einsum('ikj,ij->ik', ori_vecs, ori) 932 | 933 | #N x 2 x 3 - N x 3 934 | np.einsum('ij, ikj->ik', axis_vecs, po_vecs) 935 | 936 | #mismatched N x 3 - N2 x 3 with broadcasting so that the end result is tiled 937 | mismatched = np.einsum('ij,i...j->...i', a, np.expand_dims(b, axis=0)) 938 | # 4,3,3 - 4,2,3 with broadcasting 939 | mismatched_2 = np.einsum('ijk,ij...k->i...j', a, np.expand_dims(b, axis=1)) 940 | return x,y,z,w,a, mismatched, mismatched_2 941 | 942 | 943 | -------------------------------------------------------------------------------- /grid_fill.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import bmesh 3 | import numpy as np 4 | from numpy import newaxis as nax 5 | 6 | 7 | def get_co(mesh): 8 | v_count = len(mesh.vertices) 9 | co = np.zeros(v_count * 3, dtype=np.float32) 10 | mesh.vertices.foreach_get('co', co) 11 | co.shape = (v_count, 3) 12 | return co 13 | 14 | 15 | def get_volume_data(volume_object): 16 | co = get_co(volume_object.data) 17 | min_corner = np.min(co, axis=0) 18 | max_corner = np.max(co, axis=0) 19 | return min_corner, max_corner 20 | 21 | 22 | def create_array(volume_object, resolution, override=False): 23 | min, max = get_volume_data(volume_object) 24 | dimensions = max - min 25 | counts = np.array(dimensions//np.array(resolution), dtype=np.int32) 26 | 27 | x_lin = np.linspace(min[0], max[0], counts[0]) 28 | y_lin = np.linspace(min[1], max[1], counts[1]) 29 | z_lin = np.linspace(min[2], max[2], counts[2]) 30 | 31 | # can use it in 2d space or fill a 3d volume 32 | if override: 33 | counts[2] = 1 34 | z_lin = np.array([min[2]]) 35 | 36 | x1 = np.tile(x_lin, counts[1]) 37 | x2 = np.tile(x1, counts[2]) 38 | 39 | y1 = np.tile(y_lin, counts[0]) 40 | y2 = np.tile(y1, counts[2]) 41 | y2.shape = (counts[0], counts[2] * counts[1]) 42 | y2 = y2.T.ravel() 43 | 44 | z1 = np.tile(z_lin, counts[0]) 45 | z2 = np.tile(z1, counts[1]) 46 | z2.shape = (counts[0], counts[1], counts[2]) 47 | z2 = z2.T.ravel() 48 | 49 | total = np.product(counts) 50 | vec_array = np.zeros(total * 3, dtype=np.float32) 51 | vec_array.shape = (total, 3) 52 | 53 | vec_array[:,0] = x2 54 | vec_array[:,1] = y2 55 | vec_array[:,2] = z2 56 | 57 | return vec_array 58 | 59 | 60 | def create_point_mesh(vec_array): 61 | mesh = bpy.data.meshes.new("points") 62 | mesh.from_pydata(vec_array, [], []) 63 | mesh.update() 64 | 65 | points = bpy.data.objects.new("points", mesh) 66 | bpy.context.collection.objects.link(points) 67 | 68 | 69 | def append_values_to_array(vec_array, val): 70 | new_array = np.zeros(vec_array.shape[0] * 4, dtype=np.float32) 71 | new_array.shape = (vec_array.shape[0], 4) 72 | new_array[:, :3] = vec_array 73 | new_array[:, 3] = val 74 | return new_array 75 | 76 | # end volume functions ------------------------------------ 77 | 78 | 79 | # begin edge loop functions ------------------------------- 80 | 81 | def get_proxy_eidx(ob, eval=True): 82 | """Get the edge indices as an Nx2 numpy array 83 | for the object with modifiers.""" 84 | evob = ob 85 | if eval: 86 | dg = bpy.context.evaluated_depsgraph_get() 87 | evob = ob.evaluated_get(dg) 88 | e_count = len(evob.data.edges) 89 | eidx = np.zeros(e_count * 2, dtype=np.int32) 90 | evob.data.edges.foreach_get('vertices', eidx) 91 | eidx.shape = (e_count, 2) 92 | return eidx 93 | 94 | 95 | def get_proxy_co(ob, co=None, eval=True): 96 | """Get coordiates with modifiers added. 97 | Can supply the array to avoid allocating the memory""" 98 | evob = ob 99 | if eval: 100 | dg = bpy.context.evaluated_depsgraph_get() 101 | evob = ob.evaluated_get(dg) 102 | dg = bpy.context.evaluated_depsgraph_get() 103 | evob = ob.evaluated_get(dg) 104 | v_count = len(evob.data.vertices) 105 | if co is None: 106 | co = np.zeros(v_count * 3, dtype=np.float32) 107 | evob.data.vertices.foreach_get('co', co) 108 | co.shape = (v_count, 3) 109 | return co 110 | 111 | 112 | def get_bmesh(ob): 113 | obm = bmesh.new() 114 | if ob.mode == 'OBJECT': 115 | obm.from_mesh(ob.data) 116 | elif ob.mode == 'EDIT': 117 | obm = bmesh.from_edit_mesh(ob.data) 118 | return obm 119 | 120 | 121 | def slide_points_to_plane(e1, e2, origin, normal=np.array([0,0,1])): 122 | '''Takes the start and end of a set of edges as Nx3 vector sets 123 | Returns where they intersect the plane with a bool array for the 124 | edges that pass through the plane''' 125 | e_vecs = e2 - e1 126 | e1or = e1 - origin 127 | edge_dots = np.einsum('j,ij->i', normal, e_vecs) 128 | dots = np.einsum('j,ij->i', normal, e1or) 129 | scale = dots / edge_dots 130 | drop = (e1or - e_vecs * np.expand_dims(scale, axis=1)) + origin 131 | intersect = (scale < 0) & (scale > -1) 132 | return drop, intersect, scale 133 | 134 | 135 | def measure_angle_at_each_vert(grid): 136 | """Provide mesh and anlge limit in degrees. 137 | Returns the indices of verts that are sharper than limit""" 138 | 139 | limit = np.cos(grid.angle_limit * (np.pi/180)) 140 | eidx = grid.eidx 141 | co = grid.co 142 | order = grid.order 143 | 144 | ls = np.roll(order, 1) 145 | rs = np.roll(order, -1) 146 | 147 | v1 = co[ls] - grid.co_order 148 | v2 = co[rs] - grid.co_order 149 | 150 | # use the vecs pointing away from each vertex later 151 | grid.vls = v1 152 | grid.vrs = v2 153 | 154 | ls_dots = np.einsum('ij, ij->i', v1, v1) 155 | rs_dots = np.einsum('ij, ij->i', v2, v2) 156 | 157 | uv1 = v1 / np.sqrt(ls_dots)[:, nax] 158 | uv2 = v2 / np.sqrt(rs_dots)[:, nax] 159 | 160 | # used by a bunch of other functions later 161 | grid.uvls = uv1 162 | grid.uvrs = uv2 163 | 164 | angle = np.einsum('ij, ij->i', uv1, uv2) 165 | sharps = angle > -limit 166 | 167 | return order[sharps] 168 | 169 | 170 | def loop_order(ob): 171 | """takes an object consisting of a single loop of edges and gives the order""" 172 | obm = get_bmesh(ob) 173 | obm.edges.ensure_lookup_table() 174 | obm.verts.ensure_lookup_table() 175 | 176 | v = obm.verts[0] 177 | new_v = v.link_edges[0].other_vert(v) 178 | order = [0] 179 | last_v = v 180 | 181 | for i in range(len(obm.verts) - 1): 182 | other_vert = np.array([ed.other_vert(v).index for ed in v.link_edges]) 183 | not_v = other_vert[other_vert != last_v.index][0] 184 | order.append(not_v) 185 | last_v = v 186 | v = obm.verts[not_v] 187 | 188 | return np.array(order) 189 | 190 | 191 | def get_segments(grid): 192 | """Generate a list of segments between sharp edges""" 193 | ob = grid.ob 194 | sharps = grid.sharps 195 | 196 | obm = get_bmesh(ob) 197 | obm.edges.ensure_lookup_table() 198 | obm.verts.ensure_lookup_table() 199 | 200 | # in case there are no sharps 201 | if len(sharps) == 0: 202 | sharps = np.array([0]) 203 | 204 | count = 0 205 | # start with the first sharp: 206 | sharp = sharps[0] 207 | v = obm.verts[sharp] 208 | other_verts = np.array([ed.other_vert(v).index for ed in v.link_edges]) 209 | move = obm.verts[other_verts[0]] 210 | 211 | seg = [sharp] 212 | segs = [] 213 | for i in range(len(sharps)): 214 | while True: 215 | if move.index in sharps: 216 | seg.append(move.index) 217 | segs.append(seg) 218 | seg = [] 219 | 220 | seg.append(move.index) 221 | other_verts = np.array([ed.other_vert(move).index for ed in move.link_edges]) 222 | new = other_verts[other_verts != v.index][0] 223 | v = move 224 | move = obm.verts[new] 225 | 226 | if move.index == sharp: 227 | seg.append(move.index) 228 | segs.append(seg) 229 | return segs 230 | 231 | count +=1 232 | 233 | if count > grid.v_count: 234 | print("ACK!!!!!! We almost got stuck in an infinite loop! Oh the humanity!") 235 | return segs 236 | 237 | 238 | def get_seg_length(grid, seg): 239 | """returns the total length of a set 240 | of points that are in linear order""" 241 | co = grid.co 242 | vecs = co[seg[1:]] - co[seg[:-1]] 243 | grid.seg_vecs.append(vecs) # might as well save this for later 244 | seg_length = np.sqrt(np.einsum('ij, ij->i', vecs, vecs)) 245 | grid.seg_lengths.append(seg_length) # saving this also 246 | total_length = np.sum(np.sqrt(np.einsum('ij, ij->i', vecs, vecs))) 247 | 248 | return total_length 249 | 250 | 251 | def generate_perimeter(grid, current): 252 | """Place points around perimeter""" 253 | 254 | # get the length of each segments 255 | seg_lengths = np.array([get_seg_length(grid, s) for s in grid.segments]) 256 | 257 | grid.point_counts = seg_lengths // grid.size 258 | 259 | # have to decide where to transition between splitting 260 | # in half and doing just one point 261 | grid.point_counts[grid.size / seg_lengths > 0.5] = 0 262 | 263 | 264 | grid.spacing = seg_lengths / grid.point_counts 265 | 266 | #current = 19 267 | 268 | # doing one example: 269 | seg = grid.segments[current] 270 | seg_len = grid.seg_lengths[current] 271 | 272 | # add the first point in the segment (second one gets added next time) 273 | seg_sets = np.empty((0,3), dtype=np.float32) 274 | # 275 | seg_sets = move_point_on_path(grid, current, seg_sets) 276 | 277 | return seg_sets 278 | 279 | 280 | def move_point_on_path(grid, idx, seg_sets): 281 | """Walk the points until we are at the distance 282 | we space them""" 283 | 284 | co = grid.co 285 | seg = grid.segments[idx] 286 | lengths = grid.seg_lengths[idx] 287 | spacing = grid.spacing[idx] 288 | vecs = grid.seg_vecs[idx] 289 | count = grid.point_counts[idx] 290 | 291 | seg_co_set = [co[seg[0]]] # the last one will be filled in by the first one next time. 292 | if count == 0: 293 | return seg_co_set 294 | 295 | growing_length = 0 296 | len_idx = 0 297 | build = spacing 298 | 299 | counter = 0 300 | for x in range(int(count) - 1): 301 | growing_length = 0 302 | len_idx = 0 303 | counter += 1 304 | while growing_length < spacing: 305 | growing_length += lengths[len_idx] 306 | len_idx += 1 307 | 308 | # back up to the last point now 309 | len_idx -= 1 310 | growing_length -= lengths[len_idx] 311 | point = co[len_idx] 312 | 313 | # move from the past point along the last vector until we 314 | # hit the proper spacing 315 | end_offset = spacing - growing_length 316 | last_dif = lengths[len_idx] # !!!!!!!!!!!!!!!!!!!!!!!!!!!! 317 | along_last = end_offset / last_dif 318 | 319 | move = vecs[len_idx] 320 | 321 | loc = co[seg[len_idx]] + move * along_last 322 | 323 | seg_co_set.append(loc) 324 | 325 | # start from the beginning because it's easier 326 | spacing += build 327 | 328 | # join to master array: 329 | seg_sets = np.append(seg_sets, seg_co_set, axis=0) 330 | 331 | return seg_sets 332 | 333 | 334 | def get_direction(grid, angle_limit=45): 335 | """Get the direction to move towards the inside 336 | of the shape at each vert around the edges.""" 337 | 338 | print("--------------------------------------") 339 | print() 340 | 341 | #Z = np.array([0, 0, 1], dtype=np.float32) 342 | 343 | # get average normal of whole shape 344 | norms = np.cross(grid.vls, grid.vrs) 345 | av_norm = np.sum(norms, axis=0) 346 | 347 | # this normal gets dropped into the gengrid class 348 | grid.av_norm = av_norm 349 | 350 | # plot it to an empty to test 351 | bpy.data.objects['ee'].location = av_norm 352 | 353 | # use the angle at each vert to determine how to plot points 354 | # If more than 45 we 355 | # get the angle at each 356 | 357 | # doing the infinite plane thing 358 | # get the perp from the two vectors around each vert 359 | perp = grid.uvls - grid.uvrs 360 | 361 | 362 | 363 | 364 | # check all the edges to see if they pass through the infinite 365 | # plane defined by that perp vec with the point as the origin. 366 | 367 | # find the point of intersection for edges that pass through the plane 368 | 369 | 370 | 371 | 372 | 373 | """ 374 | Weird thought while praying... 375 | get a vec that would go between the unit vecs of the left 376 | and right side at each point. 377 | Use that to define an infinite plane originating at the point 378 | or just do a closest point on edge calc to find points that are close by 379 | 380 | For points where the ls and rs are parallel, the direction could be 381 | where the nearest point intersects the infinite plane. 382 | 383 | Could smooth at each iteration 384 | Could use somthing like the cloth engine to pull points 385 | towards a target connected edge length. 386 | """ 387 | 388 | 389 | # For now... Fill with points 390 | 391 | # get bounds 392 | 393 | 394 | 395 | 396 | # get vecs pointing away from each vert 397 | 398 | 399 | 400 | 401 | # if the cross is zero use the z to get the perp 402 | # if 403 | 404 | 405 | 406 | # get bounding box of entire shape 407 | 408 | 409 | # create a point outside of that for 2d intersection check 410 | 411 | 412 | # 413 | 414 | # if the two vecs are paralell the cross will be zero 415 | # If I'm planning to use UV mapping I can use the z axis 416 | 417 | 418 | def gengrid_angle(grid): 419 | """Provide mesh and anlge limit in degrees. 420 | Returns the indices of verts that are sharper than limit""" 421 | 422 | limit = np.cos(grid.angle_limit * (np.pi/180)) 423 | eidx = grid.eidx 424 | co = grid.co 425 | 426 | ls = np.roll(co, 1, axis=0) 427 | rs = np.roll(co, -1, axis=0) 428 | 429 | v1 = ls - co 430 | v2 = rs - co 431 | 432 | # use the vecs pointing away from each vertex later 433 | grid.vls = v1 434 | grid.vrs = v2 435 | 436 | ls_dots = np.einsum('ij, ij->i', v1, v1) 437 | rs_dots = np.einsum('ij, ij->i', v2, v2) 438 | 439 | uv1 = v1 / np.sqrt(ls_dots)[:, nax] 440 | uv2 = v2 / np.sqrt(rs_dots)[:, nax] 441 | 442 | # used by a bunch of other functions later 443 | grid.uvls = uv1 444 | grid.uvrs = uv2 445 | 446 | angle = np.einsum('ij, ij->i', uv1, uv2) 447 | sharps = angle > -limit 448 | 449 | return sharps 450 | 451 | 452 | def mag_set(mag, v2): 453 | '''Applys the magnitude of v1 to v2''' 454 | d1 = mag ** 2 455 | d2 = v2 @ v2 456 | div = d1/d2 457 | return v2 * np.sqrt(div) 458 | 459 | 460 | def gen_grid_project(grid): 461 | 462 | new_points = np.empty((0,3), dtype=np.float32) 463 | 464 | #idx = 6 # edge and origin matches 465 | 466 | co = grid.co 467 | shape = co.shape[0] 468 | 469 | e1 = co[grid.eidx[:, 0]] 470 | e2 = co[grid.eidx[:, 1]] 471 | 472 | perps = grid.uvls - grid.uvrs 473 | 474 | # iterate throuhg all points: 475 | for idx in range(shape): 476 | # remove edges connected to point if needed 477 | plane = slide_points_to_plane(e1, e2, co[idx], perps[idx]) 478 | # !!! might work better to do cpoe back to origial perp 479 | # vec with vec centered on the origin. 480 | inter = plane[0][plane[1]] 481 | print(inter, "this is the intersections") 482 | 483 | # eliminate points that are outside of the shape (not sure how to do that with a 3d grid...) 484 | 485 | 486 | # find the closest intersection for getting the vec direction 487 | vecs = inter - co[idx] 488 | dots = np.einsum('ij, ij->i', vecs, vecs) 489 | min = np.argmin(dots) 490 | vec = vecs[min] 491 | plot = co[idx] + mag_set(grid.size, vec) 492 | new_points = np.append(new_points, plot[nax], axis=0) 493 | 494 | bpy.data.objects['se'].location = inter[0] 495 | #bpy.data.objects['se2'].location = inter[1] 496 | 497 | 498 | return new_points 499 | 500 | #print(np.arange(shape)[plane[1]]) 501 | #print(inter, "this is intersections") 502 | 503 | 504 | 505 | 506 | 507 | class Grid(object): 508 | """The outline we start with""" 509 | pass 510 | 511 | 512 | class GenGrid(object): 513 | """The generated grid""" 514 | pass 515 | 516 | 517 | def main(ob): 518 | # walk around the edges and and plot evenly spaced points 519 | # respecting the sharpness of the angle 520 | M = ob.matrix_world.copy() 521 | obl = ob.location 522 | obr = ob.rotation_quaternion 523 | obre = ob.rotation_euler 524 | obs = ob.scale 525 | 526 | dg = bpy.context.evaluated_depsgraph_get() 527 | evob = ob.evaluated_get(dg) 528 | 529 | grid = Grid() 530 | grid.ob = evob 531 | grid.co = get_proxy_co(evob, None, eval=False) 532 | grid.angle_limit = 30 533 | grid.eidx = get_proxy_eidx(evob, eval=False) 534 | grid.order = loop_order(evob) 535 | grid.co_order = grid.co[grid.order] 536 | grid.sharps = measure_angle_at_each_vert(grid) 537 | grid.segments = get_segments(grid) 538 | grid.size = 0.03 539 | grid.seg_vecs = [] # gets filled by the function below 540 | grid.seg_lengths = [] # gets filled by the function below 541 | iters = len(grid.segments) 542 | 543 | # generated grid 544 | gengrid = GenGrid() 545 | #gengrid.av_norm = grid.av_norm 546 | gengrid.co = np.empty((0,3), dtype=np.float32) 547 | gengrid.angle_limit = 45 548 | gengrid.size = grid.size 549 | 550 | # create points for every segment between sharps -------------------- 551 | for i in range(iters): 552 | x = generate_perimeter(grid, i) 553 | gengrid.co = np.append(gengrid.co, x, axis=0) 554 | # ------------------------------------------------------------------- 555 | 556 | # create edges 557 | e_count = gengrid.co.shape[0] 558 | e1 = np.arange(e_count, dtype=np.int32) 559 | e2 = np.roll(e1, -1) 560 | gen_edges = np.append(e1[:,nax], e2[:,nax], axis=1) 561 | gengrid.eidx = gen_edges 562 | # get gengrid angles 563 | gengrid.angles = gengrid_angle(gengrid) 564 | 565 | if False: 566 | project = gen_grid_project(gengrid) 567 | gengrid.co = np.append(gengrid.co, project, axis=0) 568 | # create the grid --------------------------------------------------- 569 | # creates a grid of points. Currently not used 570 | grid_fill = True 571 | grid_fill = False 572 | if grid_fill: 573 | resolution = np.array([grid.size, grid.size, 1], dtype = np.float32) 574 | vec_array = create_array(ob, resolution, override=True) 575 | gengrid.co = np.append(gengrid.co, vec_array, axis=0) 576 | #create_point_mesh(vec_array) 577 | # create the grid --------------------------------------------------- 578 | 579 | # begin filling in the grid 580 | 581 | get_direction(grid) 582 | 583 | # temporary return !!! 584 | #return 585 | 586 | 587 | create = False 588 | create = True 589 | if create: 590 | mesh = bpy.data.meshes.new('gen_grid') 591 | #print(v_locs) 592 | #for i in x_edges: 593 | #print(i, "this is x edges-----------") 594 | 595 | mesh.from_pydata(gengrid.co.tolist(), edges=gen_edges.tolist(), faces=[]) 596 | #mesh.from_pydata(v_locs, edges=[[0,1],[1,2]], faces=[]) 597 | mesh.update() 598 | 599 | #if False: 600 | grid_ob = bpy.data.objects.new('gen_grid', mesh) 601 | bpy.context.collection.objects.link(grid_ob) 602 | 603 | #M = ob.matrix_world.to_translation() 604 | grid.ob.matrix_world = ob.matrix_world 605 | 606 | grid_ob.location = obl 607 | grid_ob.rotation_quaternion = obr 608 | grid_ob.rotation_euler = obre 609 | grid_ob.scale = obs 610 | 611 | 612 | 613 | #ob = bpy.data.objects['spaced'] 614 | #ob = bpy.data.objects['start_grid'] 615 | #ob = bpy.data.objects['Circle'] 616 | ob = bpy.context.object 617 | main(ob) 618 | 619 | 620 | 621 | # notes: 622 | """ 623 | Probably need to have a distance threshold for 624 | the minimum length between sharps so I don't end up 625 | with a million little tiny segments if someone does 626 | something like a zigzag (might be working with procedural 627 | geometry that has those kinds of artifacts.) 628 | """ 629 | 630 | # features: 631 | # using it on a crappy topo terrain for example you could collide or shrinkwrap 632 | # to the crappy topo and smooth 633 | 634 | # by uv unrwapping like with the uv shape tool you could grid fill a 3d curve 635 | # could use the option of non-manifold edges so you could grid fill if there 636 | # is already a fill. 637 | # need to be able to convert a real curve to a grid 638 | # make the grid on top as a separate object. 639 | 640 | # one possible approach, would be to project all the way to 641 | # the edge that intersects and plot points close to the 642 | # grid size. 643 | 644 | # use the infinite plane thing to know which way to project 645 | # towards the inside of the grid. 646 | # Project these verts then form a row of faces. 647 | # Now we can do a uv shape key (can only unwrap faces) 648 | 649 | # the angle of the edges seems irrelevant because 650 | # it could be a sharp angle but the plotted point 651 | # would be perp to both because of where the plane 652 | # intersects another edge. Like in the tennis ball 653 | # pattern in 3d space. 654 | # Maybe just do a distance check for nearby points 655 | 656 | # There could be more than one solution for the direction 657 | # to plot points based on the infinite plane 658 | # Could use a nearest intersection or compare nearby projections 659 | # or projections where there is one clear choice based on distance 660 | 661 | """features """ 662 | # !!! For something like text which already has faces 663 | # could do a non-manifold in edge mode to get perimeter 664 | # or just pull out the verts that are part of edges with 665 | # between zero and one link face... 666 | 667 | # !!! add an edge smooth feature that respects sharps not smoothing them 668 | # There is an edge smooth feature in one of the dental tools 669 | -------------------------------------------------------------------------------- /new_methods_2.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import numpy as np 3 | from numpy import newaxis as nax 4 | 5 | 6 | def get_uv_index_from_3d(ob): 7 | """Creates a two dimensional including where each vert 8 | occurs in the uv layers. Second dimension is N size so not numpy""" 9 | # figure out every index where the 3d verts occur in the uv maps 10 | obm = get_bmesh(ob) 11 | obm.verts.ensure_lookup_table() 12 | obm.faces.ensure_lookup_table() 13 | 14 | # currently works on a selected set of verts 15 | selected_verts = get_selected_verts(ob) 16 | sel_idx = np.arange(selected_verts.shape[0])[selected_verts] 17 | 18 | indexed_sum = [] 19 | cum_sum = 0 20 | for i in obm.faces: 21 | indexed_sum.append(cum_sum) 22 | cum_sum += len(i.verts) 23 | 24 | v_sets = [] 25 | for i in sel_idx: 26 | uv = [] 27 | for f in obm.verts[i].link_faces: 28 | vidx = np.array([v.index for v in f.verts]) 29 | idx = (np.arange(vidx.shape[0])[vidx == i])[0] 30 | uv.append(idx + indexed_sum[f.index]) 31 | v_sets.append(uv) 32 | # v_sets is now the uv index of each vert wherever it occurs in the uv map 33 | # v_sets[0] is vert zero and [uv[5], uv[390], uv[25]] or something like that 34 | # v_sets[0] looks like [5, 390, 16] 35 | return v_sets 36 | 37 | 38 | def grid_sample(ob, box_count=10, offset=0.00001): 39 | """divide mesh into grid and sample from each segment. 40 | offset prevents boxes from excluding any verts""" 41 | co = get_co(ob) 42 | 43 | # get bounding box corners 44 | min = np.min(co, axis=0) 45 | max = np.max(co, axis=0) 46 | 47 | # box count is based on largest dimension 48 | dimensions = max - min 49 | largest_dimension = np.max(dimensions) 50 | box_size = largest_dimension / box_count 51 | 52 | # get box count for each axis 53 | xyz_count = dimensions // box_size # number of boxes on each axis 54 | 55 | # number of boxes on each axis: 56 | box_dimensions = dimensions / xyz_count # each box is this size 57 | 58 | line_end = max - box_dimensions # we back up one from the last value 59 | 60 | x_line = np.linspace(min[0], line_end[0], num=xyz_count[0], dtype=np.float32) 61 | y_line = np.linspace(min[1], line_end[1], num=xyz_count[1], dtype=np.float32) 62 | z_line = np.linspace(min[2], line_end[2], num=xyz_count[2], dtype=np.float32) 63 | 64 | idxer = np.arange(co.shape[0]) 65 | 66 | # get x bools 67 | x_grid = co[:, 0] - x_line[:,nax] 68 | x_bools = (x_grid + offset > 0) & (x_grid - offset < box_dimensions[0]) 69 | cull_x_bools = x_bools[np.any(x_bools, axis=1)] # eliminate grid sections with nothing 70 | xb = cull_x_bools 71 | 72 | x_idx = np.tile(idxer, (xyz_count[0], 1)) 73 | 74 | samples = [] 75 | 76 | for boo in xb: 77 | xidx = idxer[boo] 78 | y_grid = co[boo][:, 1] - y_line[:,nax] 79 | y_bools = (y_grid + offset > 0) & (y_grid - offset < box_dimensions[1]) 80 | cull_y_bools = y_bools[np.any(y_bools, axis=1)] # eliminate grid sections with nothing 81 | yb = cull_y_bools 82 | for yboo in yb: 83 | yidx = xidx[yboo] 84 | z_grid = co[yidx][:, 2] - z_line[:,nax] 85 | z_bools = (z_grid + offset > 0) & (z_grid - offset < box_dimensions[2]) 86 | cull_z_bools = z_bools[np.any(z_bools, axis=1)] # eliminate grid sections with nothing 87 | zb = cull_z_bools 88 | for zboo in zb: 89 | samples.append(yidx[zboo][0]) 90 | 91 | # !!! to use this for collisions !!!: 92 | if False: 93 | samples.extend(yidx[zboo]) 94 | 95 | return np.unique(samples) # if offset is zero we don't need unique... return samples 96 | 97 | 98 | def edge_to_edge(e1, e2, e3, e4 ): 99 | """Takes two edges defined by four vectors. 100 | Returns the two points that describe the shortest 101 | distance between the two edges. The two points comprise 102 | a segment that is orthagonal to both edges.""" 103 | v1 = e2 - e1 104 | v2 = e3 - e4 105 | v3 = e3 - e1 106 | cross = np.cross(v1, v2) 107 | d = (v3 @ cross) / (cross @ cross) 108 | spit = cross * d # spit because if you stand on cp1 and spit this is where it lands. 109 | cp1 = e1 + spit 110 | vec2 = cp1 - e3 111 | d = (vec2 @ v2) / (v2 @ v2) 112 | nor = v2 * d 113 | cp2 = e3 + nor 114 | normal = cp1 - cp2 115 | or_vec = e1 - cp2 116 | e_dot = normal @ v1 117 | e_n_dot = normal @ or_vec 118 | scale = e_n_dot / e_dot 119 | p_on_p = (or_vec - v1 * scale) + cp2 120 | return p_on_p, p_on_p + spit 121 | 122 | 123 | def curve_gen(scalars, type=0, height=1): 124 | """Takes points between zero and 1 and plots them on a curve""" 125 | if type == 0: # smooth middle 126 | mid = scalars ** 2 127 | mid_flip = (-scalars + 1) ** 2 128 | return mid * mid_flip * 16 * height 129 | 130 | if type == 1: # half circle 131 | return np.sqrt(scalars) * np.sqrt(-scalars + 1) 132 | 133 | if type == 2: # smooth bottom to top 134 | reverse = -scalars + 1 135 | c1 = (scalars ** 2) * reverse 136 | c2 = (-reverse ** 2 + 1) * scalars 137 | smooth = c1 + c2 138 | return smooth 139 | 140 | if type == 3: # smooth top to bottom flip 141 | reverse = -scalars + 1 142 | c1 = (scalars ** 2) * reverse 143 | c2 = (-reverse ** 2 + 1) * scalars 144 | smooth = c1 + c2 145 | return -smooth + 1 146 | 147 | if type == 4: # 1/4 circle top left 148 | return np.sqrt(-scalars + 2) * np.sqrt(scalars) 149 | 150 | if type == 5: #1/4 circle bottom right 151 | x = np.sqrt(-scalars + 1) * np.sqrt(scalars + 1) 152 | return -x + 1 153 | 154 | if type == 6: #1/4 circle bottom left 155 | return -(np.sqrt(-scalars + 2) * np.sqrt(scalars)) + 1 156 | 157 | if type == 7: #1/4 circle top right 158 | x = np.sqrt(-scalars + 1) * np.sqrt(scalars + 1) 159 | return x 160 | 161 | 162 | def get_selected_poly_verts(ob): 163 | """returns a list of lists of verts in each selected polygon. 164 | Works in any mode.""" 165 | if ob.type != "MESH": 166 | return [] 167 | 168 | if ob.mode == 'EDIT': 169 | bm = bmesh.from_edit_mesh(ob.data) 170 | return [[v.index for v in f.verts] for f in bm.faces if f.select] 171 | 172 | return [[i for i in p.vertices] for p in ob.data.polygons if p.select] 173 | 174 | 175 | def get_poly_verts(ob): 176 | """returns a list of lists of verts in each polygon. 177 | Works in any mode.""" 178 | if ob.type != "MESH": 179 | return [] 180 | 181 | if ob.mode == 'EDIT': 182 | bm = bmesh.from_edit_mesh(ob.data) 183 | return [[v.index for v in f.verts] for f in bm.faces] 184 | 185 | return [[i for i in p.vertices] for p in ob.data.polygons] 186 | 187 | 188 | def get_eidx(): 189 | ec = len(ob.data.edges) 190 | ed = np.zeros(ec * 2, dtype=np.int32) 191 | ob.data.edges.foreach_get('vertices', ed) 192 | ed.shape = (ec, 2) 193 | return ed 194 | 195 | 196 | def select_all(ob, select=False): 197 | """Fast select/deselect in object mode""" 198 | atts = [ob.data.vertices, ob.data.edges, ob.data.polygons] 199 | fun = np.zeros 200 | if select: 201 | fun = np.ones 202 | for att in atts: 203 | c = len(att) 204 | arr = fun(c, dtype=np.bool) 205 | att.foreach_set('select', arr) 206 | 207 | 208 | def hide_all(ob, hide=False): 209 | """Fast hide/unhide in object mode""" 210 | atts = [ob.data.vertices, ob.data.edges, ob.data.polygons] 211 | fun = np.zeros 212 | if hide: 213 | fun = np.ones 214 | for att in atts: 215 | c = len(att) 216 | arr = fun(c, dtype=np.bool) 217 | att.foreach_set('hide', arr) 218 | 219 | 220 | def coincident_points(group_a, group_b, threshold=.0001, inverse=True): 221 | """finds the index of points in group a that match the location of at 222 | least one point in group b. Returns the inverse by default: points that have no match 223 | returns a bool array matching the first dimension of group_a""" 224 | x = group_b - group_a[:, nax] 225 | dist = np.einsum('ijk, ijk->ij', x, x) 226 | min_dist = np.min(dist, axis=1) 227 | if inverse: 228 | return min_dist > threshold 229 | return min_dist < threshold 230 | 231 | 232 | def remove_doubles(group, threshold=.0001): 233 | """finds coincident points and returns a bool array eliminating all but the first 234 | occurance of the coincident points""" 235 | x = group - group[:, nax] 236 | dist = np.einsum('ijk, ijk->ij', x, x) 237 | pairs = dist < threshold 238 | doubles = np.sum(pairs, axis=0) > 1 239 | idx = np.arange(len(group))[doubles] 240 | all_true = np.ones(len(group), dtype=np.bool) 241 | for i in idx: 242 | this = np.all((group[i] - group[idx]) == 0, axis=1) 243 | all_true[idx[this][1:]] = False 244 | return all_true 245 | 246 | 247 | def get_quat(rad, axis): 248 | u_axis = axis / np.sqrt(axis @ axis) 249 | theta = (rad * 0.5) 250 | w = np.cos(theta) 251 | q_axis = u_axis * np.sin(theta) 252 | return w, q_axis 253 | 254 | 255 | def get_quat_2(v1, v2, rot_mag=1, convert=True, axis=None): 256 | """Returns the quaternion that will rotate the object based on two vectors. 257 | If axis is provided: it can be non-unit. The quaternion will rotate around the axis 258 | until v1 lines up with v2 on the axis 259 | To rotate part way rot mag can be a value between -1 and 1. 260 | For specific angles, rot_mag can be the cosine of the angle in radians (haven't tested this theory)""" 261 | if convert: # if vectors are non-unit 262 | v1 = v1 / np.sqrt(v1 @ v1) 263 | v2 = v2 / np.sqrt(v2 @ v2) 264 | if axis is None: 265 | mid = v1 + v2 * rot_mag # multiply for slerping 266 | Umid = mid / np.sqrt(mid @ mid) 267 | w = Umid @ v1 268 | xyz = np.cross(v1, Umid) 269 | return w, xyz 270 | vc1 = np.cross(axis, v1) 271 | vc2 = np.cross(axis, v2) 272 | v1 = vc1 / np.sqrt(vc1 @ vc1) 273 | v2 = vc2 / np.sqrt(vc2 @ vc2) 274 | mid = v1 + v2 * rot_mag # multiply for slerping 275 | Umid = mid / np.sqrt(mid @ mid) 276 | w = Umid @ v1 277 | xyz = np.cross(v1, Umid) 278 | return w, xyz 279 | 280 | 281 | def get_quat_from_perp_vecs(v1, v2): 282 | x = np.array([1, 0, 0]) 283 | z = np.array([0, 0, 1]) 284 | uv1 = v1 / np.sqrt(v1 @ v1) 285 | norm = np.cross(v1, v2) 286 | uv3 = norm / np.sqrt(norm @ norm) 287 | w1, axis1 = get_quat_2(uv3, z) 288 | rot = q_rotate(uv1, w1, axis1) 289 | w2, axis2 = get_quat_2(rot, x) 290 | n_w, n_xyz = quaternion_add(-w1, axis1, -w2, axis2) 291 | 292 | 293 | def q_rotate(co, w, axis): 294 | """Takes an N x 3 numpy array and returns that array rotated around 295 | the axis by the angle in radians w. (standard quaternion)""" 296 | move1 = np.cross(axis, co) 297 | move2 = np.cross(axis, move1) 298 | move1 *= w 299 | return co + (move1 + move2) * 2 300 | 301 | 302 | def quaternion_subtract(w1, v1, w2, v2): 303 | """Get the quaternion that rotates one object to another""" 304 | w = w1 * w2 - np.dot(v1, v2) 305 | v = w1 * v2 + w2 * v1 + np.cross(v1, v2) 306 | return w, -v 307 | 308 | 309 | # -------------------------------------------->>> 310 | 311 | def get_co(ob, arr=None, key=None): # key 312 | """Returns vertex coords as N x 3""" 313 | c = len(ob.data.vertices) 314 | if arr is None: 315 | arr = np.zeros(c * 3, dtype=np.float32) 316 | if key is not None: 317 | ob.data.shape_keys.key_blocks[key].data.foreach_get('co', arr.ravel()) 318 | arr.shape = (c, 3) 319 | return arr 320 | ob.data.vertices.foreach_get('co', arr.ravel()) 321 | arr.shape = (c, 3) 322 | return arr 323 | 324 | 325 | def get_proxy_co(ob, arr): 326 | """Returns vertex coords with modifier effects as N x 3""" 327 | me = ob.to_mesh(bpy.context.scene, True, 'PREVIEW') 328 | c = len(me.vertices) 329 | me.vertices.foreach_get('co', arr.ravel()) 330 | bpy.data.meshes.remove(me) 331 | arr.shape = (c, 3) 332 | return arr 333 | 334 | 335 | def apply_transforms(ob, co=None): 336 | """Get vert coords in world space""" 337 | if co is None: 338 | co = get_co(ob) 339 | m = np.array(ob.matrix_world) 340 | mat = m[:3, :3].T # rotates backwards without T 341 | loc = m[:3, 3] 342 | return co @ mat + loc 343 | 344 | 345 | def revert_transforms(ob, co): 346 | """Set world coords on object. 347 | Run before setting coords to deal with object transforms 348 | if using apply_transforms()""" 349 | m = np.linalg.inv(ob.matrix_world) 350 | mat = m[:3, :3].T # rotates backwards without T 351 | loc = m[:3, 3] 352 | return co @ mat + loc 353 | 354 | 355 | def closest_points_edge(vec, origin, p): 356 | '''Returns the location of the point on the edge''' 357 | vec2 = p - origin 358 | d = np.einsum('j,ij->i', vec, vec2) / (vec @ vec) 359 | cp = origin + vec * d[:, nax] 360 | return cp, d 361 | 362 | 363 | def cp_scalar(vec, origin, p, unitize=False): 364 | '''Returns the dot that would put the point on the edge. 365 | Useful for sorting the order of verts if they were 366 | projected to the closest point on the edge''' 367 | vec2 = p - origin 368 | if unitize: 369 | vec2 = vec2 / np.sqrt(np.einsum('ij,ij->i', vec2, vec2))[:, nax] 370 | d = np.einsum('j,ij->i', vec, vec2) 371 | return d 372 | 373 | 374 | def in_line_bounds(vec, origin, p): 375 | '''Returns returns a bool array indicating if points 376 | are in the range of the start and end of a vector''' 377 | vec2 = p - origin 378 | d = np.einsum('j,ij->i', vec, vec2) 379 | vd = vec @ vec 380 | bool = (d > 0) & (d < vd) 381 | return bool 382 | 383 | 384 | def loop_order(ob): 385 | """takes an object consisting of a single loop of edges and gives the order""" 386 | obm = get_bmesh(ob) 387 | obm.edges.ensure_lookup_table() 388 | e = obm.edges[0] 389 | v = e.verts[0] 390 | order = [] 391 | for i in range(len(obm.edges)): 392 | other = e.other_vert(v) 393 | order.append(other.index) 394 | e = [ed for ed in v.link_edges if ed != e][0] 395 | v = [ve for ve in e.verts if ve != other][0] 396 | return order 397 | 398 | 399 | def circular_order(co, v1, v2, center=None, edges=False, convex=False, normal=None): 400 | """Return an array that indexes the points in circular order. 401 | v1 and v2 must be perpindicular and their normal defines the axis. 402 | if edges is True, return the edges to connect the points""" 403 | if co.shape[0] is 0: 404 | return 405 | #if center is None: 406 | center = np.mean(co, axis=0) 407 | if convex: 408 | center_vecs = co - center 409 | center_dots = np.einsum('ij,ij->i', center_vecs, center_vecs) 410 | max = np.argmax(center_dots) 411 | out_vec = center_vecs[max] 412 | cross = np.cross(out_vec, normal) 413 | con_set = [max] 414 | point = max 415 | 416 | for i in range(co.shape[0]): 417 | spread = co - co[point] 418 | h = np.einsum('ij,ij->i', spread, spread) 419 | Uspread = np.nan_to_num(spread / np.sqrt(h)[:, nax]) 420 | dots = np.einsum('j,ij->i', cross, Uspread) 421 | new = np.argmax(dots) 422 | if new == point: 423 | new = np.argsort(dots)[-2] # for when the point gets narcissistic and finds itself 424 | if new == max: 425 | break 426 | 427 | con_set.append(new) 428 | cross = co[new] - co[point] 429 | point = new 430 | 431 | idxer = np.arange(len(con_set)) 432 | eidx = np.append([idxer],[np.roll(idxer, -1)], 0).T 433 | 434 | return con_set, eidx, center 435 | 436 | count = co.shape[0] 437 | idxer = np.arange(count) 438 | on_p1, center_vecs = cp_scalar(v1, center, co, False, True) # x_vec off center 439 | pos_x = on_p1 > 0 440 | co_pos = co[pos_x] 441 | co_neg = co[-pos_x] 442 | p_on_p2 = cp_scalar(v2, center, co_pos, True) 443 | n_on_p2 = cp_scalar(v2, center, co_neg, True) 444 | p_y_sort = np.argsort(p_on_p2) 445 | n_y_sort = np.argsort(n_on_p2) 446 | order = np.append(idxer[pos_x][p_y_sort], idxer[-pos_x][n_y_sort][::-1]) 447 | 448 | idxer = np.arange(len(order)) 449 | eidx = np.append([idxer],[np.roll(idxer, -1)], 0).T 450 | 451 | return order, eidx 452 | 453 | 454 | # Can get a speedup reusing the existing array 455 | def get_att(att, name, dim2=None, vecs=None, dtype=None, proxy=None, shape=None): 456 | """Returns a numpy array full of data with given shape 457 | att: (example) ob.data.vertices 458 | name: string in foreach_get (example) 'co' 459 | dim2: final shape of the array (example) co would be 3 for 3d vectors 460 | vecs: include the existing array speedup (example) ob_co 461 | dtype: numpy data type (example) np.float32 462 | proxy: object. uses modifier effects (example) ob 463 | shape: include for arrays with 3 dimensions 464 | If proxy is used it must be a mesh object.""" 465 | if proxy is not None: 466 | data = proxy.to_mesh(bpy.context.scene, True, 'PREVIEW') 467 | att = data.vertices 468 | dim1 = len(att) 469 | if vecs is None: 470 | vecs = np.zeros(dim1 * dim2, dtype=dtype) 471 | att.foreach_get(name, vecs.ravel()) 472 | bpy.data.meshes.remove(data) 473 | vecs.shape = (dim1, dim2) 474 | return vecs 475 | 476 | dim1 = len(att) 477 | if vecs is None: 478 | vecs = np.zeros(dim1 * dim2, dtype=dtype) 479 | att.foreach_get(name, vecs.ravel()) 480 | if shape is None: 481 | vecs.shape = (dim1, dim2) 482 | return vecs 483 | vecs.shape = (dim1, shape[0], shape[1]) 484 | return vecs 485 | 486 | 487 | def set_att(vecs, att, name): 488 | att.foreach_set(name, vecs.ravel()) 489 | -------------------------------------------------------------------------------- /self_collide.py: -------------------------------------------------------------------------------- 1 | try: 2 | import bpy 3 | import bmesh 4 | except ImportEroor: 5 | pass 6 | 7 | import numpy as np 8 | import time 9 | import json 10 | 11 | 12 | def timer(t, name='name'): 13 | ti = bpy.context.scene.timers 14 | if name not in ti: 15 | ti[name] = 0.0 16 | ti[name] += t 17 | 18 | 19 | def select_edit_mode(sc, ob, idx, type='v', deselect=False, obm=None): 20 | """Selects verts in edit mode and updates""" 21 | 22 | if ob.data.is_editmode: 23 | if obm is None: 24 | obm = bmesh.from_edit_mesh(ob.data) 25 | obm.verts.ensure_lookup_table() 26 | 27 | if type == 'v': 28 | x = obm.verts 29 | if type == 'f': 30 | x = obm.faces 31 | if type == 'e': 32 | x = obm.edges 33 | 34 | if deselect: 35 | for i in x: 36 | i.select = False 37 | 38 | for i in idx: 39 | sc.select_counter[i] += 1 40 | x[i].select = True 41 | 42 | if obm is None: 43 | bmesh.update_edit_mesh(ob.data) 44 | #bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) 45 | 46 | 47 | def bmesh_proxy(ob): 48 | """Get a bmesh contating modifier effects""" 49 | dg = bpy.context.evaluated_depsgraph_get() 50 | prox = ob.evaluated_get(dg) 51 | proxy = prox.to_mesh() 52 | obm = bmesh.new() 53 | obm.from_mesh(proxy) 54 | return obm 55 | 56 | 57 | def get_co_key(ob, key): 58 | k = ob.data.shape_keys.key_blocks[key] 59 | co = np.empty((len(ob.data.vertices), 3), dtype=np.float32) 60 | k.data.foreach_get('co', co.ravel()) 61 | return co 62 | 63 | 64 | def get_proxy_co(ob, co=None, proxy=None): 65 | """Gets co with modifiers like cloth""" 66 | if proxy is None: 67 | dg = bpy.context.evaluated_depsgraph_get() 68 | prox = ob.evaluated_get(dg) 69 | proxy = prox.to_mesh() 70 | 71 | if co is None: 72 | vc = len(proxy.vertices) 73 | co = np.empty((vc, 3), dtype=np.float32) 74 | 75 | proxy.vertices.foreach_get('co', co.ravel()) 76 | ob.to_mesh_clear() 77 | return co 78 | 79 | 80 | def get_edges(ob): 81 | ed = np.empty((len(ob.data.edges), 2), dtype=np.int32) 82 | ob.data.edges.foreach_get('vertices', ed.ravel()) 83 | return ed 84 | 85 | 86 | def get_faces(ob): 87 | """Only works on triangle mesh.""" 88 | fa = np.empty((len(ob.data.polygons), 3), dtype=np.int32) 89 | ob.data.polygons.foreach_get('vertices', fa.ravel()) 90 | return fa 91 | 92 | 93 | def generate_bounds(minc, maxc, margin): 94 | """from a min corner and a max corner 95 | generate the min and max corner of 8 boxes""" 96 | 97 | diag = (maxc - minc) / 2 98 | mid = minc + diag 99 | mins = np.zeros((8,3), dtype=np.float32) 100 | maxs = np.zeros((8,3), dtype=np.float32) 101 | 102 | # blf 103 | mins[0] = minc 104 | maxs[0] = mid 105 | # brf 106 | mins[1] = minc 107 | mins[1][0] += diag[0] 108 | maxs[1] = mid 109 | maxs[1][0] += diag[0] 110 | # blb 111 | mins[2] = minc 112 | mins[2][1] += diag[1] 113 | maxs[2] = mid 114 | maxs[2][1] += diag[1] 115 | # brb 116 | mins[3] = mins[2] 117 | mins[3][0] += diag[0] 118 | maxs[3] = maxs[2] 119 | maxs[3][0] += diag[0] 120 | # tlf 121 | mins[4] = mins[0] 122 | mins[4][2] += diag[2] 123 | maxs[4] = maxs[0] 124 | maxs[4][2] += diag[2] 125 | # trf 126 | mins[5] = mins[1] 127 | mins[5][2] += diag[2] 128 | maxs[5] = maxs[1] 129 | maxs[5][2] += diag[2] 130 | # tlb 131 | mins[6] = mins[2] 132 | mins[6][2] += diag[2] 133 | maxs[6] = maxs[2] 134 | maxs[6][2] += diag[2] 135 | # trb 136 | mins[7] = mins[3] 137 | mins[7][2] += diag[2] 138 | maxs[7] = maxs[3] 139 | maxs[7][2] += diag[2] 140 | 141 | return mid, [mins, maxs] 142 | 143 | 144 | # universal --------------------- 145 | def octree_et(sc, margin, idx=None, eidx=None, bounds=None): 146 | """Adaptive octree. Good for finding doubles or broad 147 | phase collision culling. et does edges and tris. 148 | Also groups edges in boxes.""" # first box is based on bounds so first box could be any shape rectangle 149 | 150 | T = time.time() 151 | margin = 0.0 # might be faster than >=, <= 152 | 153 | co = sc.co 154 | 155 | if bounds is None: 156 | b_min = np.min(co, axis=0) 157 | b_max = np.max(co, axis=0) 158 | else: 159 | b_min, b_max = bounds[0], bounds[1] 160 | 161 | #eco = co[sc.ed[eidx].ravel()] 162 | #b_min = np.min(eco, axis=0) 163 | #b_max = np.max(eco, axis=0) 164 | 165 | # bounds_8 is for use on the next iteration. 166 | mid, bounds_8 = generate_bounds(b_min, b_max, margin) 167 | 168 | #mid = b_min + ((b_max - b_min) / 2) 169 | mid_ = mid + margin 170 | _mid = mid - margin 171 | 172 | x_, y_, z_ = mid_[0], mid_[1], mid_[2] 173 | _x, _y, _z = _mid[0], _mid[1], _mid[2] 174 | 175 | # tris 176 | xmax = sc.txmax 177 | xmin = sc.txmin 178 | 179 | ymax = sc.tymax 180 | ymin = sc.tymin 181 | 182 | zmax = sc.tzmax 183 | zmin = sc.tzmin 184 | 185 | # edges 186 | exmin = sc.exmin 187 | eymin = sc.eymin 188 | ezmin = sc.ezmin 189 | 190 | exmax = sc.exmax 191 | eymax = sc.eymax 192 | ezmax = sc.ezmax 193 | 194 | # l = left, r = right, f = front, b = back, u = up, d = down 195 | if idx is None: 196 | idx = sc.tridex 197 | if eidx is None: 198 | eidx = sc.eidx 199 | 200 | # ------------------------------- 201 | B = xmin[idx] < x_# + margin 202 | il = idx[B] 203 | 204 | B = xmax[idx] > _x# - margin 205 | ir = idx[B] 206 | 207 | # edges 208 | eB = exmin[eidx] < x_# + margin 209 | eil = eidx[eB] 210 | 211 | eB = exmax[eidx] > _x# - margin 212 | eir = eidx[eB] 213 | 214 | # ------------------------------ 215 | B = ymax[il] > _y# - margin 216 | ilf = il[B] 217 | 218 | B = ymin[il] < y_# + margin 219 | ilb = il[B] 220 | 221 | B = ymax[ir] > _y# - margin 222 | irf = ir[B] 223 | 224 | B = ymin[ir] < y_# + margin 225 | irb = ir[B] 226 | 227 | # edges 228 | eB = eymax[eil] > _y# - margin 229 | eilf = eil[eB] 230 | 231 | eB = eymin[eil] < y_# + margin 232 | eilb = eil[eB] 233 | 234 | eB = eymax[eir] > _y# - margin 235 | eirf = eir[eB] 236 | 237 | eB = eymin[eir] < y_# + margin 238 | eirb = eir[eB] 239 | 240 | # ------------------------------ 241 | B = zmax[ilf] > _z# - margin 242 | ilfu = ilf[B] 243 | B = zmin[ilf] < z_# + margin 244 | ilfd = ilf[B] 245 | 246 | B = zmax[ilb] > _z# - margin 247 | ilbu = ilb[B] 248 | B = zmin[ilb] < z_# + margin 249 | ilbd = ilb[B] 250 | 251 | B = zmax[irf] > _z# - margin 252 | irfu = irf[B] 253 | B = zmin[irf] < z_# + margin 254 | irfd = irf[B] 255 | 256 | B = zmax[irb] > _z# - margin 257 | irbu = irb[B] 258 | B = zmin[irb] < z_# + margin 259 | irbd = irb[B] 260 | 261 | # edges 262 | eB = ezmax[eilf] > _z# - margin 263 | eilfu = eilf[eB] 264 | eB = ezmin[eilf] < z_# + margin 265 | eilfd = eilf[eB] 266 | 267 | eB = ezmax[eilb] > _z# - margin 268 | eilbu = eilb[eB] 269 | eB = ezmin[eilb] < z_# + margin 270 | eilbd = eilb[eB] 271 | 272 | eB = ezmax[eirf] > _z# - margin 273 | eirfu = eirf[eB] 274 | eB = ezmin[eirf] < z_# + margin 275 | eirfd = eirf[eB] 276 | 277 | eB = ezmax[eirb] > _z# - margin 278 | eirbu = eirb[eB] 279 | eB = ezmin[eirb] < z_# + margin 280 | eirbd = eirb[eB] 281 | 282 | boxes = [ilbd, irbd, ilfd, irfd, ilbu, irbu, ilfu, irfu] 283 | eboxes = [eilbd, eirbd, eilfd, eirfd, eilbu, eirbu, eilfu, eirfu] 284 | 285 | bbool = np.array([i.shape[0] > 0 for i in boxes]) 286 | ebool = np.array([i.shape[0] > 0 for i in eboxes]) 287 | both = bbool & ebool 288 | 289 | full = np.array(boxes, dtype=np.object)[both] 290 | efull = np.array(eboxes, dtype=np.object)[both] 291 | 292 | return full, efull, [bounds_8[0][both], bounds_8[1][both]] 293 | 294 | 295 | def get_link_faces(sc, e=None): 296 | """Create a set of indices for each edge of faces 297 | that are linked by the edge vert link faces. 298 | These faces cannot have the edge passing through them. 299 | !!! This group will not apply when doing live self colliions !!! 300 | !!! The points will need to be handled differently from edge segments""" 301 | 302 | if e is not None: 303 | i = sc.obm.edges[e] 304 | fa = [] 305 | for v in i.verts: 306 | for f in v.link_faces: 307 | if f.index not in fa: 308 | fa.append(f.index) 309 | return fa 310 | 311 | linked_by_edge = [] 312 | 313 | for i in sc.obm.edges: 314 | fa = [] 315 | for v in i.verts: 316 | for f in v.link_faces: 317 | if f.index not in fa: 318 | fa.append(f.index) 319 | linked_by_edge.append(fa) 320 | 321 | #timer(time.time()-T, "link faces") 322 | 323 | return linked_by_edge 324 | 325 | 326 | def point_in_tri(tri, point): 327 | """Checks if points are inside triangles""" 328 | origin = tri[0] 329 | cross_vecs = tri[1:] - origin 330 | v2 = point - origin 331 | 332 | # --------- 333 | v0 = cross_vecs[0] 334 | v1 = cross_vecs[1] 335 | 336 | #d00_d11 = np.einsum('ijk,ijk->ij', cross_vecs, cross_vecs) 337 | d00 = v0 @ v0 338 | d11 = v1 @ v1 339 | d01 = v0 @ v1 340 | d02 = v0 @ v2 341 | d12 = v1 @ v2 342 | 343 | div = 1 / (d00 * d11 - d01 * d01) 344 | u = (d11 * d02 - d01 * d12) * div 345 | v = (d00 * d12 - d01 * d02) * div 346 | 347 | #weights = np.array([1 - (u+v), u, v, ]) 348 | check = (u > 0) & (v > 0) & (u + v < 1) 349 | 350 | return check 351 | 352 | 353 | def inside_triangles(tris, points, cross_vecs): 354 | """Checks if points are inside triangles""" 355 | origins = tris[:, 0] 356 | #cross_vecs = tris[:, 1:] - origins[:, None] 357 | v2 = points - origins 358 | 359 | # --------- 360 | v0 = cross_vecs[:,0] 361 | v1 = cross_vecs[:,1] 362 | 363 | d00_d11 = np.einsum('ijk,ijk->ij', cross_vecs, cross_vecs) 364 | d00 = d00_d11[:,0] 365 | d11 = d00_d11[:,1] 366 | d01 = np.einsum('ij,ij->i', v0, v1) 367 | d02 = np.einsum('ij,ij->i', v0, v2) 368 | d12 = np.einsum('ij,ij->i', v1, v2) 369 | 370 | div = 1 / (d00 * d11 - d01 * d01) 371 | u = (d11 * d02 - d01 * d12) * div 372 | v = (d00 * d12 - d01 * d02) * div 373 | 374 | #weights = np.array([1 - (u+v), u, v, ]) 375 | check = (u > 0) & (v > 0) & (u + v < 1) 376 | 377 | return check 378 | 379 | 380 | def ray_check(sc, ed, trs): 381 | 382 | if True: # doesn't make much speed difference. Might even be slower 383 | a = np.concatenate((np.array(sc.ees)[:,None], np.array(sc.trs)[:,None]), 1) 384 | x = np.random.rand(a.shape[1]) 385 | y = a @ x 386 | unique, index = np.unique(y, return_index=True) 387 | ed = np.array(ed)[index] 388 | trs = np.array(trs)[index] 389 | 390 | e = sc.edges[ed] 391 | tris = sc.tris[trs] 392 | 393 | origins = tris[:, 0] 394 | 395 | ev0 = e[:, 0] - origins 396 | ev1 = e[:, 1] - origins 397 | 398 | cross_vecs = tris[:, 1:] - origins[:, None] 399 | tv0 = cross_vecs[:, 0] 400 | tv1 = cross_vecs[:, 1] 401 | 402 | norms = np.cross(tv0, tv1) 403 | 404 | d0d = np.einsum('ij,ij->i', norms, ev0) 405 | d0 = np.sign(d0d) 406 | d1 = np.sign(np.einsum('ij,ij->i', norms, ev1)) 407 | 408 | # check if edge verts are on opposite sides of the face 409 | in_edge = d0 != d1 410 | if np.any(in_edge): 411 | 412 | e_vec = sc.vecs[ed] 413 | 414 | e_dot = np.einsum('ij,ij->i', norms, e_vec) 415 | scale = d0d / e_dot 416 | 417 | #in_edge = (scale < 0) & (scale > -1) 418 | on_plane = (ev0 - e_vec * scale[:, None]) + origins 419 | 420 | in_tri = inside_triangles(tris[in_edge], on_plane[in_edge], cross_vecs[in_edge]) 421 | 422 | sc.has_col = False 423 | if np.any(in_tri): 424 | sc.has_col = True 425 | sc.trc = np.array(trs, dtype=np.int32)[in_edge][in_tri] 426 | sc.edc = np.array(ed, dtype=np.int32)[in_edge][in_tri] 427 | sc.on_plane = on_plane[in_edge][in_tri] 428 | sc.scale = scale[in_edge][in_tri] 429 | # selecting --------- 430 | if sc.sel: 431 | # edit mode 432 | select_edit_mode(sc, sc.ob, sc.edc, type='e', obm=sc.obm) 433 | select_edit_mode(sc, sc.ob, sc.trc, type='f', obm=sc.obm) 434 | # object mode 435 | for e in sc.edc: 436 | sc.ob.data.edges[e].select = True 437 | for t in sc.trc: 438 | #bpy.context.scene.self_collisions.append([ed, t]) 439 | sc.ob.data.polygons[t].select = True 440 | 441 | 442 | def b2(sc): 443 | 444 | if len(sc.big_boxes) == 0: 445 | print("ran out") 446 | return 447 | 448 | boxes = [] 449 | for oct in sc.big_boxes: 450 | t = oct[0] 451 | e = oct[1] 452 | b = oct[2] 453 | 454 | tfull, efull, bounds = octree_et(sc, margin=0.0, idx=t, eidx=e, bounds=b) 455 | 456 | for i in range(len(tfull)): 457 | t = tfull[i] 458 | e = efull[i] 459 | bmin = bounds[0][i] 460 | bmax = bounds[1][i] 461 | 462 | if (t.shape[0] < sc.box_max) | (e.shape[0] < sc.box_max): 463 | sc.small_boxes.append([t, e]) 464 | else: 465 | boxes.append([t, e, [bmin, bmax]]) 466 | sc.big_boxes = boxes 467 | 468 | 469 | def self_collisions_6(sc): 470 | 471 | T = time.time() 472 | 473 | tx = sc.tris[:, :, 0] 474 | ty = sc.tris[:, :, 1] 475 | tz = sc.tris[:, :, 2] 476 | 477 | txmax = np.max(tx, axis=1) 478 | txmin = np.min(tx, axis=1) 479 | 480 | tymax = np.max(ty, axis=1) 481 | tymin = np.min(ty, axis=1) 482 | 483 | tzmax = np.max(tz, axis=1) 484 | tzmin = np.min(tz, axis=1) 485 | 486 | sc.txmax = txmax 487 | sc.txmin = txmin 488 | 489 | sc.tymax = tymax 490 | sc.tymin = tymin 491 | 492 | sc.tzmax = tzmax 493 | sc.tzmin = tzmin 494 | 495 | sc.tridex = sc.indexer # will have to use bmesh tris on non-triangular mesh... 496 | 497 | # edge bounds: 498 | ex = sc.edges[:, :, 0] 499 | ey = sc.edges[:, :, 1] 500 | ez = sc.edges[:, :, 2] 501 | 502 | sc.exmin = np.min(ex, axis=1) 503 | sc.eymin = np.min(ey, axis=1) 504 | sc.ezmin = np.min(ez, axis=1) 505 | 506 | sc.exmax = np.max(ex, axis=1) 507 | sc.eymax = np.max(ey, axis=1) 508 | sc.ezmax = np.max(ez, axis=1) 509 | 510 | #sc.eidx = np.arange(sc.edges.shape[0]) 511 | 512 | timer(time.time()-T, "self col 5") 513 | # !!! can do something like check the octree to make sure the boxes are smaller 514 | # to know if we hit a weird case where we're no longer getting fewer in boxes 515 | 516 | tfull, efull, bounds = octree_et(sc, margin=0.0) 517 | 518 | T = time.time() 519 | for i in range(len(tfull)): 520 | t = tfull[i] 521 | e = efull[i] 522 | bmin = bounds[0][i] 523 | bmax = bounds[1][i] 524 | 525 | if (t.shape[0] < sc.box_max) | (e.shape[0] < sc.box_max): 526 | sc.small_boxes.append([t, e]) 527 | else: 528 | sc.big_boxes.append([t, e, [bmin, bmax]]) # using a dictionary or class might be faster !!! 529 | # !!! instead of passing bounds could figure out the min and max in the tree every time 530 | # we divide. So divide the left and right for example then get the new bounds for 531 | # each side and so on... 532 | 533 | timer(time.time()-T, 'sort boxes') 534 | T = time.time() 535 | 536 | limit = 20 537 | count = 0 538 | while len(sc.big_boxes) > 0: 539 | b2(sc) 540 | if sc.report: 541 | print("recursion level:", count) 542 | if count > limit: 543 | for b in sc.big_boxes: 544 | sc.small_boxes.append(b) 545 | break 546 | count += 1 547 | 548 | timer(time.time()-T, 'b2') 549 | if sc.report: 550 | print(len(sc.big_boxes), "how many big boxes") 551 | print(len(sc.small_boxes), "how many small boxes") 552 | 553 | for en, b in enumerate(sc.small_boxes): 554 | 555 | trs = b[0] 556 | ed = b[1] 557 | 558 | if ed.shape[0] == 0: 559 | continue 560 | 561 | tris = sc.tris[trs] 562 | eds = sc.edges[ed] 563 | 564 | # detect link faces and broadcast 565 | nlf_0 = sc.ed[ed][:, 0] == sc.fa[trs][:, :, None] 566 | nlf_1 = sc.ed[ed][:, 1] == sc.fa[trs][:, :, None] 567 | ab = np.any(nlf_0 | nlf_1, axis=1) 568 | 569 | rse = np.tile(ed, trs.shape[0]) 570 | rse.shape = (trs.shape[0], ed.shape[0]) 571 | rst = np.repeat(trs, ed.shape[0]) 572 | rst.shape = (trs.shape[0], ed.shape[0]) 573 | 574 | re = rse[~ab] # repeated edges with link faces removed 575 | rt = rst[~ab] # repeated triangles to match above edges 576 | 577 | in_x = txmax[rt] > sc.exmin[re] 578 | rt, re = rt[in_x], re[in_x] 579 | 580 | in_x2 = txmin[rt] < sc.exmax[re] 581 | rt, re = rt[in_x2], re[in_x2] 582 | 583 | in_y = tymax[rt] > sc.eymin[re] 584 | rt, re = rt[in_y], re[in_y] 585 | 586 | in_y2 = tymin[rt] < sc.eymax[re] 587 | rt, re = rt[in_y2], re[in_y2] 588 | 589 | in_z = tzmin[rt] < sc.ezmax[re] 590 | rt, re = rt[in_z], re[in_z] 591 | 592 | in_z2 = tzmax[rt] > sc.ezmin[re] 593 | rt, re = rt[in_z2], re[in_z2] 594 | 595 | timer(time.time()-T, 'edge bounds') 596 | 597 | T = time.time() 598 | 599 | if rt.shape[0] > 0: 600 | sc.ees += re.tolist() 601 | sc.trs += rt.tolist() 602 | 603 | #sc.ees = np.concatenate((sc.ees, re)) 604 | #sc.trs = np.concatenate((sc.trs, rt)) 605 | 606 | #print(sc.ees) 607 | 608 | 609 | class self_collide(): 610 | name = "sc" 611 | 612 | def __init__(self, precalc, co=None, test=False): 613 | if test: 614 | return 615 | self.has_col = False 616 | self.ob = precalc['ob'] 617 | self.ed = precalc['ed'] 618 | self.eidx = precalc['eidx'] 619 | self.fa = precalc['fa'] 620 | self.indexer = precalc['indexer'] 621 | self.box_max = precalc['box_max'] 622 | 623 | self.co = co 624 | if co is None: 625 | self.co = get_proxy_co(self.ob) 626 | self.vecs = self.co[self.ed[:, 1]] - self.co[self.ed[:, 0]] 627 | self.tris = self.co[self.fa] 628 | self.edges = self.co[self.ed] 629 | self.big_boxes = [] # boxes that still need to be divided 630 | self.small_boxes = [] # finished boxes less than the maximum box size 631 | 632 | # debug stuff 633 | self.sel = False 634 | #self.sel = True 635 | self.report = False 636 | self.report = True 637 | if self.report: 638 | self.select_counter = np.zeros(self.eidx.shape[0], dtype=np.int32) 639 | if self.sel: 640 | if self.ob.data.is_editmode: 641 | self.obm = bmesh.from_edit_mesh(self.ob.data) 642 | else: 643 | self.obm = bmesh.new() 644 | self.obm.from_mesh(self.ob.data) 645 | self.obm.edges.ensure_lookup_table() 646 | self.obm.verts.ensure_lookup_table() 647 | self.obm.faces.ensure_lookup_table() 648 | 649 | # store sets of edge and tris to check 650 | #self.trs = np.empty((0), dtype=np.int32) 651 | #self.ees = np.empty((0), dtype=np.int32) 652 | self.trs = [] 653 | self.ees = [] 654 | 655 | 656 | 657 | def detect_collisions(ob, co, cloth=None): 658 | 659 | bpy.types.Scene.timers = {} 660 | bpy.types.Scene.self_collisions = [] 661 | 662 | # precalc: 663 | 664 | precalc = {'ob': ob, 665 | 'ed': get_edges(ob), 666 | 'eidx': np.arange(len(ob.data.edges), dtype=np.int32), 667 | 'fa': get_faces(ob), 668 | 'indexer': np.arange(len(ob.data.polygons), dtype=np.int32), 669 | 'box_max': 150, 670 | } 671 | 672 | sc = self_collide(precalc, co) 673 | t = time.time() 674 | self_collisions_6(sc) 675 | ray_check(sc, sc.ees, sc.trs) 676 | 677 | # collisions: 678 | #if sc.has_col: # might not be any collisions 679 | #print(sc.edc.shape) 680 | #print(sc.trc.shape) 681 | # ----------- 682 | 683 | if sc.report: 684 | print(sc.box_max, "box max") 685 | print(np.sum(sc.select_counter > 1), ": In too many boxes") 686 | print(np.max(sc.select_counter), "max times and edge was selected") 687 | print(time.time() - t) 688 | 689 | if sc.sel: 690 | if ob.data.is_editmode: 691 | bmesh.update_edit_mesh(ob.data) 692 | 693 | ob.data.update() 694 | return sc 695 | 696 | 697 | def expand_selection(verts, steps): 698 | """Take a list of verts and find it's 699 | neighbors using bool indices from verts 700 | and tris.""" 701 | 702 | 703 | def selected(ob): 704 | if ob.data.is_editmode: 705 | obm = bmesh.from_edit_mesh(ob.data) 706 | obm.verts.ensure_lookup_table() 707 | 708 | selidx = np.array([v.index for v in obm.verts if v.select], dtype=np.int32) 709 | return selidx 710 | 711 | 712 | 713 | def which_side(v, faces, pr): 714 | """Find which direction the flap should fold. 715 | Verts on the wrong side get moved to the other side. 716 | 'v' is a vert in the flap. 717 | 'faces' are triangle indices in the flap. 718 | 'pr is the vertex pointers for the current flap 719 | as we iterate through the vertex pointers in the json file""" 720 | 721 | 722 | # for each vert: 723 | 724 | def grow_selection(verts, obm): 725 | """Stupid way of growing selection 726 | by taking link faces of everything""" 727 | grow = [] 728 | for v in verts: 729 | vert = obm.verts[v] 730 | lf = vert.link_faces 731 | for f in lf: 732 | vvv = [fv.index for fv in f.verts] 733 | grow += vvv 734 | return np.unique(grow) 735 | 736 | 737 | def shrink_selection(verts, obm): 738 | """Use link faces to find if surrounding 739 | verts are in the set. If not drop them.""" 740 | keep = [] 741 | for v in verts: 742 | vert = obm.verts[v] 743 | lf = vert.link_faces 744 | vvv = np.hstack([[fv.index for fv in f.verts] for f in lf]) 745 | 746 | keeper = True 747 | for vvvv in vvv: 748 | if vvvv not in verts: 749 | keeper = False 750 | if keeper: 751 | keep.append(v) 752 | return keep 753 | 754 | 755 | def sc_response(sc): 756 | #print(sc.verts) 757 | #print(sc.ees) 758 | #print(sc.trs) 759 | sc.avatar = [ob for ob in bpy.data.objects if ob.name.startswith('body_mannequin')][0] 760 | if sc.ob.data.is_editmode: 761 | sc.obm = bmesh.from_edit_mesh(sc.ob.data) 762 | else: 763 | sc.obm = bmesh.new() 764 | sc.obm.from_mesh(sc.ob.data) 765 | 766 | sc.obm.verts.ensure_lookup_table() 767 | sc.obm.edges.ensure_lookup_table() 768 | file = bpy.data.texts['flap_ptrs.json'] 769 | flaps = json.loads(file.as_string()) 770 | 771 | ptrs = [np.array(v) for k, v in flaps.items()] 772 | 773 | # mask data missing from slice targets 774 | sc.folded_co = get_co_key(sc.ob, 'folded') 775 | sc.flat_co = get_co_key(sc.ob, 'flat') 776 | 777 | isolate = [] 778 | 779 | for pr in ptrs: 780 | folded = np.any(sc.folded_co[pr][:, 2] != sc.flat_co[pr][:, 2]) 781 | print(folded) 782 | if folded: 783 | 784 | for v in pr: 785 | if np.all(sc.folded_co[v][:2] == sc.flat_co[v][:2]): 786 | #pass 787 | isolate.append(v) 788 | #sc.obm.verts[v].select = True 789 | 790 | 791 | #verts = grow_selection(isolate, sc.obm) 792 | #isolate = grow_selection(verts, sc.obm) 793 | isolate = shrink_selection(isolate, sc.obm) 794 | #isolate = shrink_selection(isolate, sc.obm) 795 | idx = np.arange(len(ob.data.vertices), dtype=np.int32) 796 | invert = np.delete(idx, isolate) 797 | 798 | for v in invert: 799 | sc.obm.verts[v].select = True 800 | 801 | #for v in invert: 802 | group = sc.ob.vertex_groups.new(name='mask') 803 | group.add(invert.tolist(), 1.0, 'REPLACE') 804 | 805 | mask = ob.modifiers.new(name='mask', type='MASK') 806 | mask.vertex_group = 'mask' 807 | print(np.array(isolate).shape, "isolate shape !!!!!!!!!!") 808 | 809 | return 810 | 811 | # figure out if flap folds in or out 812 | for pr in ptrs: 813 | 814 | # get boundary verts 815 | boundary_verts = [v for v in pr if sc.obm.verts[v].is_boundary] 816 | 817 | # get panel group 818 | v1 = sc.ob.data.vertices[pr[0]] 819 | grs = [g.group for g in v1.groups] 820 | panel_group = [sc.ob.vertex_groups[g].name for g in grs if sc.ob.vertex_groups[g].name.startswith('P_')][0] 821 | 822 | # get panel tris for flap panel 823 | plf = [] 824 | for v in pr: 825 | lf = sc.obm.verts[v].link_faces 826 | for f in lf: 827 | pf = True 828 | for fv in f.verts: 829 | if fv.index not in pr: 830 | pf = False 831 | if pf: 832 | if f.index not in plf: 833 | plf.append(f.index) 834 | 835 | # get vecs from boundary verts to avatar 836 | for v in boundary_verts: 837 | vco = sc.co[v] 838 | b, location, normal, index = sc.avatar.closest_point_on_mesh(vco) 839 | 840 | for v in pr: 841 | sc.obm.verts[v].select=True 842 | 843 | 844 | print(location, "location vec") 845 | 846 | if ob.data.is_editmode: 847 | bmesh.update_edit_mesh(sc.ob.data) 848 | 849 | ''' 850 | Could add to the selection 851 | to get the rest of the geomotry 852 | near the flap... 853 | Prolly only need 854 | to check the flap 855 | against the area the flap 856 | sews to and compare their 857 | normals to see if we're on the 858 | wrong side. Then flip flap 859 | to other side. Flap is almost 860 | always going to be the part 861 | that should move. 862 | 863 | once we get the direction of the 864 | fold I can use the normal 865 | of the fold to see which side 866 | of the panel Im on. I dont 867 | need the avatar normal. maybe 868 | check the the area around the fold 869 | for panel faces and check the side??? 870 | ''' 871 | 872 | 873 | return 874 | 875 | #for f in plf: 876 | #sc.obm.faces[f].select = True 877 | 878 | #for v in pr: 879 | #sc.obm.verts[v].select = True 880 | 881 | # get a vec between the closes point and 882 | # the vert. Check if that vec passes 883 | # through a face 884 | 885 | # get closest point on mesh for each boundary vert 886 | 887 | # do raycast for each vec to see if it passes 888 | # through any panl tris. 889 | 890 | # get the total number that pass through tris 891 | # and check if the majority are in or out. 892 | 893 | # Go through the panel and check all the flap verts 894 | # to see if they are in or out. 895 | 896 | # move points that are on the wrong side. 897 | 898 | # might want to only do this for detected 899 | # collisions because flaps might 900 | # turn over where they are far from the 901 | # body like sleeve ends or hood. 902 | 903 | #(could just check all the points in the flaps 904 | # any that aren't in or out that should be 905 | # can be moved to the opposite side.) 906 | # could use a similar logic on places 907 | # where panels poke through. 908 | # See if the vec between point and closest 909 | # points on avatar passes through a panel 910 | # if it passes through but it's on the wrong 911 | # side of the panel move it in. maybe??? 912 | 913 | #sc.ob.data.update() 914 | 915 | print("made it past ptrs") 916 | print("made it past ptrs") 917 | print("made it past ptrs") 918 | 919 | avatar = [ob for ob in bpy.data.objects if ob.name.startswith('body_mannequin')][0] 920 | 921 | 922 | 923 | sew = np.array([len(sc.obm.edges[e].link_faces) == 0 for e in sc.edc]) 924 | e = sc.edc[~sew] 925 | t = sc.trc[~sew] 926 | 927 | 928 | 929 | select_edit_mode(sc, sc.ob, e[[0]], type='e', obm=sc.obm) 930 | 931 | #pco = get_proxy_co(sc.ob) 932 | 933 | # working with just two for now... 934 | ve = sc.ed[e[:2]] 935 | vt = sc.fa[t[:2]] 936 | #ve_co = sc.co[ve[:2]] 937 | 938 | locs = [] 939 | for e, t in zip(ve, vt): 940 | 941 | ev1 = sc.ob.data.vertices[e[0]] 942 | ev2 = sc.ob.data.vertices[e[1]] 943 | 944 | tv1 = sc.ob.data.vertices[t[0]] 945 | 946 | evgs = [g.group for g in ev1.groups] 947 | tvgs = [g.group for g in tv1.groups] 948 | 949 | epanel = [sc.ob.vertex_groups[g].name for g in evgs if sc.ob.vertex_groups[g].name.startswith('P_')] 950 | tpanel = [sc.ob.vertex_groups[g].name for g in tvgs if sc.ob.vertex_groups[g].name.startswith('P_')] 951 | 952 | print(epanel, tpanel, "do these match???????") 953 | print(evgs) 954 | print(tvgs) 955 | 956 | match = epanel[0] == tpanel[0] 957 | print(match, 'this is match') 958 | 959 | if match: 960 | print('same panel logic') 961 | in_flap = [np.any(e[0] == p) for p in ptrs] 962 | print(np.any(in_flap)) 963 | 964 | in_flap = [np.any(e[1] == p) for p in ptrs] 965 | print(np.any(in_flap)) 966 | print() 967 | print() 968 | print() 969 | print() 970 | 971 | ''' 972 | maybe check if its in a flap? 973 | Use the flap json file 974 | If I can find it in a flap then 975 | I can go through the verts in that 976 | flap and see if they should 977 | be folded under or over based 978 | on what the majority of verts do. 979 | Do the closest point on mesh thing. 980 | get the dot of the vec 981 | get the do. 982 | could do like a raycast. 983 | see if it goes through a face 984 | if it goes through a face in the 985 | same panel I can check if the 986 | face is closer to the body or 987 | further from the body in most 988 | cases... 989 | could simplify by just checking the boundary edges... 990 | ''' 991 | 992 | else: 993 | print('different panel logic') 994 | 995 | vco = sc.co[e[0]] 996 | 997 | b, location, normal, index = avatar.closest_point_on_mesh(vco) 998 | locs.append(location) 999 | 1000 | # figure out if the edge is colliding with 1001 | # a face in the same panel or in a different one 1002 | print(locs) 1003 | 1004 | 1005 | bpy.data.objects['e1'].location = locs[0] 1006 | bpy.data.objects['e2'].location = locs[1] 1007 | 1008 | print(np.sum(sew), "sew edge count") 1009 | print(sc.obm.edges[sc.edc[0]]) 1010 | print(len(sc.edc), 'total edge count') 1011 | 1012 | 1013 | ''' 1014 | I have these two points 1015 | I have an edge that matches them 1016 | I can get closest point on mesh 1017 | first thing I need is to do is make 1018 | sure we''re not dealing with sew edges 1019 | 1020 | 1021 | identify what panels the point 1022 | and the 1023 | ''' 1024 | 1025 | 1026 | 1027 | 1028 | print() 1029 | print("new =====================================") 1030 | ob = bpy.data.objects['g8322'] 1031 | ob = bpy.data.objects['g8424'] 1032 | ob = bpy.data.objects['g8424.001'] 1033 | #ob = bpy.data.objects['Cube'] 1034 | #ob = bpy.data.objects['p'] 1035 | #ob = bpy.data.objects['a'] 1036 | #ob = bpy.data.objects['m'] 1037 | #ob = bpy.data.objects['pp'] 1038 | 1039 | # detect edges passing through faces: 1040 | if False: # finds collided edges 1041 | sc = detect_collisions(ob, None) 1042 | sc = self_collide(precalc=None, co=None, test=False) 1043 | 1044 | 1045 | # working on fold and stuff... 1046 | testing = False 1047 | if testing: 1048 | sc = self_collide(precalc=None, co=None, test=True) 1049 | sc.ob = ob 1050 | sc.co = get_proxy_co(sc.ob) 1051 | 1052 | sc_response(sc) 1053 | 1054 | 1055 | #could use my cloth engine on the collided parts. 1056 | #Could evaluate the sew edges... 1057 | #If sew edges connect panel to panel treat them as 1058 | #virtual springs or faces. 1059 | 1060 | #Maybe expand the selection and run mc cloth with bend stiff 1061 | #Will have to check collisions on everything that moves so on 1062 | #edges and verts that are part of the expanded selection around 1063 | #collisions. 1064 | #Could run smooth iters... might be better to just rund mc linear springs 1065 | #could also check against avatar to make sure body is not being penetrated. 1066 | 1067 | ''' 1068 | I could finish sewing, finish object collisions, 1069 | identify sew edges that are in the field as virtual springs, 1070 | find relationships between those edge to give them a bias spring, 1071 | create the folded shape from the flap slice target, 1072 | find relationships between the fold flap and the surface below 1073 | where topology has been altered and there are sew springs to hold 1074 | the fold (find the point between the fold and the panel where it 1075 | makes sense to create virtual spring faces with diagonals) 1076 | 1077 | 1078 | or: I could figure out a way to untangle self collisions. 1079 | 1080 | 1081 | # rules: 1082 | 1. if the edge and the face that are collided are part of the same panel 1083 | Check the direction of the normal relative to the avatar. 1084 | If the section should be fulded under the normal will 1085 | be the opposite direction. Move to the other side 1086 | of the face. 1087 | 2. if the edge and the face are different panels: 1088 | Could draw a line from points in the panels towards the closest point 1089 | on the avatar. Find the neares face in the other panel and see if its 1090 | closer to the avatar of further from it. This would gives us a layer 1091 | order for the panels in question. 1092 | In response I could move the inner points to the closest point on mesh 1093 | of the avatar. 1094 | ''' 1095 | 1096 | 1097 | def run_cloth_sim(ob, iters): 1098 | """Use high bend stiff to flatten. 1099 | Treat sew edges as sew or given length 1100 | if they connect in the field.""" 1101 | # the vert on at least one side of the edge 1102 | # will have no boundary edges. This should 1103 | # identify if it's boundary or field. 1104 | # might be able to treat field edges 1105 | # as square faces... 1106 | 1107 | # maybe find a way to treat area around 1108 | # selection as if it were pinned... 1109 | 1110 | 1111 | 1112 | 1113 | 1114 | """ 1115 | If I can get a decent self collide solver 1116 | I might be able to use the current state of the mesh 1117 | as a target and move towards that state while preventing 1118 | self collisions. Flatten the mesh first to untangle. 1119 | 1120 | The idea of being on one side of a tri, then being on 1121 | the other side of the tri. So at the start of the frame 1122 | we check what side we're on. 1123 | 1124 | At the end of the solve check what side in cloth.select_start. 1125 | check what side in cloth.co 1126 | Can treat the start and end as edges. where the cross it's like 1127 | a raycast. 1128 | if it's on the opposite side, try moving it back to on_plane + some fraction of the vec. 1129 | The EA games method was to use the barycentric weights to move 1130 | to the face instead of on_plane. 1131 | 1132 | 1133 | """ 1134 | 1135 | 1136 | 1137 | 1138 | 1139 | 1140 | def sc_response_old(sc, co, shape='MC_current', cloth=None): 1141 | 1142 | #print('-------- response --------') 1143 | #print(sc.on_plane.shape, "on_plane shape") 1144 | keys = sc.ob.data.shape_keys.key_blocks 1145 | k = keys[shape] 1146 | 1147 | 1148 | # get uniqe pairs of edges and faces 1149 | # !!! need to test that code for duplicate pairs to see if it's faster !!! 1150 | strs = [str(e) + str(t) for e, t in zip(sc.edc, sc.trc)] 1151 | uni = np.unique(strs, return_index=True)[1] 1152 | ecu = sc.edc[uni] 1153 | tcu = sc.trc[uni] 1154 | 1155 | # use scale to know which direction an edge should move 1156 | scale = sc.scale[uni] 1157 | 1158 | ze = np.zeros(scale.shape[0], dtype=np.int32) 1159 | #sig = -np.ones(scale.shape[0], dtype=np.float32) 1160 | flip = scale > -0.5 1161 | 1162 | ze[flip] = 1 1163 | #sig[flip] = 1 1164 | 1165 | side = np.take_along_axis(sc.ed[ecu], ze[:, None], 1) 1166 | 1167 | op = sc.on_plane[uni] 1168 | 1169 | tris = sc.tris[tcu] 1170 | 1171 | origins = tris[:, 0] 1172 | #print(tris[0]) 1173 | #print(origins[0]) 1174 | v1 = tris[:, 1] - tris[:, 0] 1175 | v2 = tris[:, 2] - tris[:, 0] 1176 | 1177 | norms = np.cross(v1, v2)# * sig[:, None] 1178 | unor = norms / np.sqrt(np.einsum('ij,ij->i', norms, norms))[:, None] 1179 | #print(side.shape, op.shape) 1180 | #print(sc.co[side.ravel()].shape, op.shape) 1181 | vec = sc.co[side.ravel()] - origins 1182 | dist = np.einsum('ij,ij->i', unor, vec) 1183 | move = unor * dist[:, None] 1184 | 1185 | co[side.ravel()] += (move * .2) 1186 | cloth.velocity[side.ravel()] *= 0 1187 | #cloth.velocity[side.ravel()] += (move * .2) 1188 | #sc.co[side.ravel()] += (unor * .1) 1189 | 1190 | 1191 | if False: 1192 | # find collided edges that share a point (like a single vert poking through a face) 1193 | # !!! could do a combination of this method and margin... 1194 | op = sc.on_plane[uni] 1195 | 1196 | erav = sc.ed[ecu].ravel() 1197 | 1198 | 1199 | oprav = np.repeat(op, 2, axis=0) 1200 | 1201 | uni, idx, counts = np.unique(erav, return_index=True, return_counts=True) 1202 | multiverts = uni[counts > 1] 1203 | 1204 | for v in multiverts: 1205 | ops = oprav[erav==v] 1206 | op_mean = np.mean(ops, axis=0) 1207 | 1208 | 1209 | vec = op_mean - sc.co[v] 1210 | co[v] += (vec * .2) 1211 | 1212 | #k.data.foreach_set('co', sc.co.ravel()) 1213 | #sc.ob.data.update() 1214 | 1215 | 1216 | return side 1217 | 1218 | np.core.defchararray.add(a1, a2) 1219 | 1220 | e0c = sc.ed[sc.edc][:, 0] 1221 | e1c = sc.ed[sc.edc][:, 1] 1222 | 1223 | #print(sc.scale) 1224 | 1225 | #e0 = sc.edc[:, 0] 1226 | #print(e0.shape, "e0 shape") 1227 | if False: 1228 | co[this] = sc.on_plane 1229 | #k.data.foreach_set('co', sc.co.ravel()) 1230 | #sc.ob.data.update() 1231 | #print(sc.ed.shape, "ed shape") 1232 | #print() 1233 | #sc.co[] 1234 | 1235 | -------------------------------------------------------------------------------- /slices.py: -------------------------------------------------------------------------------- 1 | try: 2 | import bpy 3 | import numpy as np 4 | import json 5 | 6 | except: 7 | pass 8 | 9 | 10 | def reset_shapes(ob): 11 | """Create shape keys if they are missing""" 12 | 13 | if ob.data.shape_keys == None: 14 | ob.shape_key_add(name='Basis') 15 | 16 | keys = ob.data.shape_keys.key_blocks 17 | if 'MC_source' not in keys: 18 | ob.shape_key_add(name='MC_source') 19 | keys['MC_source'].value=1 20 | 21 | if 'MC_current' not in keys: 22 | ob.shape_key_add(name='MC_current') 23 | keys['MC_current'].value=1 24 | keys['MC_current'].relative_key = keys['MC_source'] 25 | 26 | 27 | def get_co_shape(ob, key=None, ar=None): 28 | """Get vertex coords from a shape key""" 29 | v_count = len(ob.data.shape_keys.key_blocks[key].data) 30 | if ar is None: 31 | ar = np.empty(v_count * 3, dtype=np.float32) 32 | ob.data.shape_keys.key_blocks[key].data.foreach_get('co', ar) 33 | ar.shape = (v_count, 3) 34 | return ar 35 | 36 | 37 | def link_mesh(verts, edges=[], faces=[], name='name'): 38 | """Generate and link a new object from pydata""" 39 | mesh = bpy.data.meshes.new(name) 40 | mesh.from_pydata(verts, edges, faces) 41 | mesh.update() 42 | mesh_ob = bpy.data.objects.new(name, mesh) 43 | bpy.context.collection.objects.link(mesh_ob) 44 | return mesh_ob 45 | 46 | 47 | def create_triangles(Slice, s_count, x_off=None): 48 | """Creates equalateral triangles whose edge length 49 | is similar to the distance between slices so that 50 | the bend stifness is more stable. 51 | x_off is for debug moving the next slice over""" 52 | 53 | s = Slice.seam_sets[s_count] 54 | means = s['tri_means'] 55 | dist = np.copy(s['dst']) 56 | dist[0] = s['avd'] 57 | count = dist.shape[0] 58 | 59 | # constant values 60 | height = np.sqrt(3)/2 61 | offset = ((dist * height) * 0.5) 62 | 63 | # build tris 64 | a = np.copy(means) 65 | a[:, 0] -= (dist * .5) 66 | a[:, 1] -= offset - (offset * (1/3)) 67 | b = np.copy(a) 68 | b[:, 0] += dist 69 | c = np.copy(means) 70 | c[:, 1] += offset + (offset * (1/3)) 71 | # abc is counterclockwise starting at bottom left 72 | 73 | tri = np.zeros(count * 9) 74 | tri.shape = (count, 3, 3) 75 | tri[:, 0] += a 76 | tri[:, 1] += b 77 | tri[:, 2] += c 78 | 79 | if x_off is not None: 80 | tri[:, :, 0] += x_off 81 | 82 | return tri 83 | 84 | 85 | def create_mesh_data(Slice, s_count): 86 | """Build edge and face data for 87 | the tubes of triangles""" 88 | 89 | s = Slice.seam_sets[s_count] 90 | tri = s['tris'] 91 | count = tri.shape[0] 92 | 93 | # build edges 94 | edges = np.array([[0,1],[1,2],[2,0]]) 95 | ed = np.zeros(count * 6, dtype=np.int32) 96 | ed.shape = (count, 3, 2) 97 | ed += edges 98 | ed += np.arange(0, count * 3, 3)[:, None][:, None] 99 | 100 | # build faces 101 | faces = np.array([[0,1,4,3], [2,0,3,5], [2,1,4,5]]) 102 | fa = np.zeros((count -1) * 12, dtype=np.int32) 103 | fa.shape = (count -1, 3, 4) 104 | fa += faces 105 | fa += np.arange(0, (count -1) * 3, 3)[:, None][:, None] 106 | 107 | return ed, fa 108 | 109 | 110 | def slice_setup(Slice, testing=True): # !!! set testing to False !!! 111 | print("seam wrangler is reminding you to set slice_setup testing to False") 112 | file = bpy.data.texts['slice_targets.json'] 113 | slices = json.loads(file.as_string()) 114 | Slice.count = len(slices) 115 | 116 | ob = Slice.ob 117 | 118 | # get the name of the cloth state shape key (numbers will vary) 119 | keys = ob.data.shape_keys.key_blocks 120 | cloth_key = [i.name for i in keys if i.name.startswith("CLOTH")][0] 121 | Slice.cloth_key = cloth_key 122 | 123 | # flat shape coords 124 | flat_co = get_co_shape(ob, 'flat') 125 | Slice.flat_co = flat_co 126 | 127 | # cloth shape coords 128 | cloth_co = get_co_shape(ob, cloth_key) 129 | Slice.cloth_co = cloth_co 130 | 131 | # ------------ 132 | seam_sets = {} 133 | seam_sets['unresolved gaps'] = [] 134 | name = 0 135 | 136 | for s in slices: 137 | vp_with_nones = np.array(s['vert_ptrs']).T 138 | 139 | xys = [] 140 | vps = [] 141 | vpsN = [] # with Nones 142 | dst = [] 143 | avds = [] 144 | idxs = [] 145 | tri_means = [] 146 | av_tri_mean = [] 147 | last_idx = None 148 | last_j = None 149 | 150 | ticker = 0 151 | last_tick = 0 152 | 153 | for j in vp_with_nones: 154 | 155 | xy_with_nones = np.array(s['target_xys'], dtype=np.float32) 156 | flying_Nones = j != None 157 | 158 | vp = j[flying_Nones] 159 | xy = xy_with_nones[flying_Nones] 160 | 161 | # for testing !!! Disable !!! (already getting scaled in sims) 162 | if testing: 163 | xy *= np.array([0.1, 0.05], dtype=np.float32) 164 | # for testing !!! Disable !!! 165 | 166 | # get some triangle means (check later to make sure there is at least one) 167 | tri_mean = None 168 | if np.all(flying_Nones): 169 | tri_mean = np.mean(xy, axis=0) 170 | av_tri_mean += [tri_mean] 171 | 172 | # get distances ------------- 173 | vpc = len(j) 174 | dist = None 175 | 176 | if vpc > 0: 177 | idx = np.arange(vpc, dtype=np.int32)[flying_Nones] 178 | idxs += [idx] 179 | 180 | if last_idx is not None: 181 | in1d = np.in1d(idx, last_idx) 182 | 183 | if np.any(in1d): 184 | good = np.array(j[idx[in1d]], dtype=np.int32) 185 | last_good = np.array(last_j[idx[in1d]], dtype=np.int32) 186 | 187 | vecs = flat_co[good] - flat_co[last_good] 188 | dists = np.sqrt(np.einsum('ij,ij->i', vecs, vecs)) 189 | dist = np.mean(dists) 190 | 191 | # check if we stepped only once (for average distance) 192 | if ticker - last_tick == 1: 193 | avds += [dist] 194 | last_tick = ticker 195 | 196 | xys += [xy] 197 | vps += [vp] 198 | vpsN += [j] # with Nones 199 | dst += [dist] 200 | tri_means += [tri_mean] 201 | 202 | # ----------------- 203 | last_idx = idx 204 | last_j = j 205 | 206 | # for getting average distance from single steps 207 | ticker += 1 208 | 209 | avtm = np.mean(av_tri_mean, axis=0) 210 | # in case there are no complete sets of points in a slice 211 | if np.any(np.isnan(avtm)): 212 | av_tri_means = [] 213 | for j in vp_with_nones: 214 | 215 | xy_with_nones = np.array(s['target_xys'], dtype=np.float32) 216 | flying_Nones = j != None 217 | 218 | vp = j[flying_Nones] 219 | xy = xy_with_nones[flying_Nones] 220 | 221 | # for testing !!! Disable !!! (already getting scaled in sims) 222 | if testing: 223 | xy *= np.array([0.1, 0.05], dtype=np.float32) 224 | # for testing !!! Disable !!! 225 | 226 | # get some triangle means (check later to make sure there is at least one) 227 | tri_mean = None 228 | if np.any(flying_Nones): 229 | tri_mean = np.mean(xy, axis=0) 230 | av_tri_mean += [tri_mean] 231 | avtm = np.mean(av_tri_mean, axis=0) 232 | 233 | avd = np.mean(avds) 234 | seam_sets[name] = {'xys': xys, 235 | 'vps': vps, 236 | 'vpsN':vpsN, 237 | 'dst': dst, 238 | 'tri_means': tri_means, 239 | 'av_tri_mean':avtm, 240 | 'avd':avd, 241 | 'idx':idxs} 242 | 243 | name += 1 244 | Slice.seam_sets = seam_sets 245 | 246 | 247 | def missing_distance(Slice): 248 | """Fill in missing data. 249 | Find and deal with gaps between slices""" 250 | 251 | # Need a distance between each triangle (Some are None [wierd that that's a true statement]) 252 | ob = Slice.ob 253 | flat_co = Slice.flat_co 254 | 255 | # ------------------------- 256 | s_count = 0 257 | for i in range(Slice.count): 258 | s = Slice.seam_sets[i] 259 | count = 0 260 | 261 | # Create a state that checks for Nones between non-None distances 262 | # This way if there is a gap we get the right distance between the sections where there is a gap 263 | 264 | switch1 = False 265 | switch2 = False 266 | None_state = False 267 | last_vpN = None 268 | lidx = None 269 | 270 | for i in range(len(s['dst'])): 271 | d = s['dst'][i] 272 | 273 | if not switch1: 274 | if d is not None: 275 | switch1 = True 276 | 277 | if switch1: 278 | if d is None: 279 | switch2 = True 280 | 281 | if switch2: 282 | if d is not None: 283 | switch1 = False 284 | switch2 = False 285 | None_state = True 286 | 287 | if d is not None: 288 | if None_state: 289 | cvpN = s['vpsN'][i-1] 290 | idx = s['idx'][i-1] 291 | 292 | in1d = np.in1d(idx, lidx) 293 | if np.any(in1d): 294 | good = np.array(cvpN[idx[in1d]], dtype=np.int32) 295 | last_good = np.array(last_vpN[idx[in1d]], dtype=np.int32) 296 | 297 | vecs = flat_co[good] - flat_co[last_good] 298 | dists = np.sqrt(np.einsum('ij,ij->i', vecs, vecs)) 299 | dist = np.mean(dists) 300 | 301 | # count backwards to last good distance 302 | div = 1 303 | bc = i - 2 304 | while s['dst'][bc] is None: 305 | div += 1 306 | bc -= 1 307 | 308 | # fast forward where we just rewound 309 | for r in range(div): 310 | s['dst'][i-div + r] = dist/div 311 | 312 | print('Seam wrangler resolved gap') 313 | 314 | else: 315 | Slice.seam_sets['unresolved gaps'] += [s_count] 316 | print("Unresolved gaps in seam_wrangler") 317 | print("Might distort some seams (but probably not)") 318 | 319 | None_state = False 320 | for v in s['vps'][i]: 321 | ob.data.vertices[v].select = True 322 | 323 | last_vpN = s['vpsN'][i] 324 | lidx = s['idx'][i] 325 | 326 | count += 1 327 | 328 | # overwrite remaining Nones with avd 329 | s['dst'][0] = 0.0 330 | 331 | for i in range(len(s['dst'])): 332 | d = s['dst'][i] 333 | if d is None: 334 | s['dst'][i] = s['avd'] 335 | 336 | cum_dst = np.cumsum(s['dst']) 337 | s['cum_dst'] = cum_dst 338 | 339 | # overwrite tri mean Nones 340 | for i in range(len(s['tri_means'])): 341 | if s['tri_means'][i] is None: 342 | s['tri_means'][i] = s['av_tri_mean'] 343 | 344 | add_z = np.zeros(cum_dst.shape[0] * 3, dtype=np.float32) 345 | add_z.shape = (cum_dst.shape[0], 3) 346 | add_z[:, :2] = s['tri_means'] 347 | add_z[:, 2] = cum_dst 348 | s['tri_means'] = add_z 349 | 350 | # iterate tick ----------- 351 | s_count += 1 352 | 353 | 354 | def build_data(Slice): 355 | """Generate meshes and such""" 356 | 357 | ob = Slice.ob 358 | flat_co = Slice.flat_co 359 | cloth_co = Slice.cloth_co 360 | 361 | # ------------------------- 362 | s_count = 0 363 | for i in range(Slice.count): 364 | s = Slice.seam_sets[i] 365 | 366 | # build triangles for the mesh 367 | s['tris'] = create_triangles(Slice, s_count) 368 | 369 | ed, fa = create_mesh_data(Slice, s_count) 370 | es = ed.shape 371 | ed.shape = (es[0] * 3, 2) 372 | 373 | fs = fa.shape 374 | fa.shape = (fs[0] * 3, 4) 375 | 376 | 377 | test_count = 1 378 | if s_count == test_count: 379 | 380 | # create the mesh or merge lists to make one mesh 381 | ts = s['tris'].shape 382 | s['tris'].shape = (ts[0] * 3, 3) 383 | 384 | if "tri_" + str(s_count) not in bpy.data.objects: 385 | tri_mesh = link_mesh(s['tris'].tolist(), ed.tolist(), fa.tolist(), "tri_" + str(s_count)) 386 | reset_shapes(tri_mesh) 387 | 388 | vpm = [] 389 | 390 | for v in s['vps']: 391 | idx = np.array(v, dtype=np.int32) 392 | m = np.array([0.0, 0.0, 0.0], dtype=np.float32) 393 | 394 | if v.shape[0] > 0: 395 | m = [np.mean(cloth_co[idx], axis=0)] 396 | vpm += [m] 397 | 398 | #print(np.array(vpm)) 399 | for i in np.array(vpm): 400 | print(i) 401 | 402 | 403 | 404 | # iterate tick ----------- 405 | s_count += 1 406 | 407 | 408 | 409 | 410 | 411 | 412 | def testing(Slice): 413 | 414 | seam_sets = Slice.seam_sets 415 | 416 | id0 = 5 417 | id1 = 17 418 | 419 | if False: 420 | print(seam_sets[id1]['xys'][id0]) 421 | print(seam_sets[id1]['vps'][id0]) 422 | print(seam_sets[id1]['dst'][id0]) 423 | print(seam_sets[id1]['dst'][id0]) 424 | print(seam_sets[id1]['avd'], "this val") 425 | #print(seam_sets[id1]['dst']) 426 | #print(seam_sets[id1]['vps']) 427 | 428 | print(seam_sets[id1]['dst']) 429 | print(seam_sets[id1]['cum_dst']) 430 | ob.data.update() 431 | return 432 | for i in seam_sets[id1]['vps']: 433 | for j in i: 434 | ob.data.vertices[j].select = True 435 | ob.data.update() 436 | 437 | 438 | 439 | print() 440 | print() 441 | print() 442 | print('start ========================') 443 | print('start ========================') 444 | print('start ========================') 445 | print('start ========================') 446 | print('start ========================') 447 | 448 | class Slices(): 449 | pass 450 | 451 | def slices_main(ob): 452 | Slice = Slices() 453 | Slice.ob = ob 454 | 455 | # setup functions 456 | slice_setup(Slice) 457 | missing_distance(Slice) 458 | build_data(Slice) 459 | 460 | 461 | ob = bpy.data.objects['g6774'] 462 | slices_main(ob) 463 | 464 | 465 | 466 | 467 | 468 | # At each triangle we need to be spaced according to the 469 | # spacing between slices. 470 | # Some slices have no way to give us distances 471 | # We want to get the average of distances that are one step apart 472 | # We also want to get the right distance where slices are more 473 | # than one step apart. 474 | -------------------------------------------------------------------------------- /text_from_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import bpy 3 | 4 | def py_from_object(ob, round=3): 5 | """Writes the verts and faces to this file 6 | when in blender""" 7 | np.set_printoptions(suppress=True) 8 | vc = len(ob.data.vertices) 9 | co = np.empty((vc, 3), dtype=np.float32) 10 | ob.data.vertices.foreach_get('co', co.ravel()) 11 | r = np.round(co, round) 12 | 13 | col = [] 14 | for i in r: 15 | vco = [] 16 | for c in i: 17 | vco.append(c) 18 | col.append(vco) 19 | 20 | f = [[v for v in f.vertices] for f in ob.data.polygons] 21 | #print(col) 22 | 23 | 24 | return str(col), str(f) 25 | 26 | v, f = py_from_object(bpy.context.object) 27 | 28 | t = bpy.data.texts['py_from_object.py'] 29 | t.cursor_set(line = 34) 30 | t.write('verts = ' + v) 31 | t.cursor_set(line = 35) 32 | t.write('faces = ' + f) 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | # -------------------------- 42 | --------------------------------------------------------------------------------