├── README.md └── houdini ├── afanasy_scheduler ├── README.md ├── afanasyscheduler.py ├── afanasyscheduler_asset.png ├── afanasyscheduler_webgui.png ├── otls │ └── top_afanasyscheduler.hda └── pdgcmd.py ├── dependency_nodes └── traversenetwork.py ├── drag_drop_files ├── README.md ├── externaldragdrop.py ├── nodes_color.py └── type_extensions.py ├── scenegraphtree_dragdrop ├── README.md └── houdini20.5 │ └── scripts │ └── nodegraph │ └── dragdrop.py └── update_build ├── update_daily_build.py └── update_daily_build.pyc /README.md: -------------------------------------------------------------------------------- 1 | # python 2 | -------------------------------------------------------------------------------- /houdini/afanasy_scheduler/README.md: -------------------------------------------------------------------------------- 1 | PDG Afanasy Scheduler 2 | 3 | Defines a Afanasy scheduler implementation. This module depends on the CGRU Afanasy API, it must be run from under the wrapper Afanasy Keeper. 4 | Using this scheduler will allow to use CGRU Afanasy to schedule work items on remote machines such as a farm. 5 | 6 | The current implementation is only a base, a lot has not been done yet. 7 | 8 | Installation: 9 | 1. Copy scripts to the $HOUDINI_USER_PREF_DIR/pdg/types directory 10 | 2. Copy the asset top_afanasyscheduler.hda to the $HOUDINI_USER_PREF_DIR/otls directory 11 | 12 | -------------------------------------------------------------------------------- /houdini/afanasy_scheduler/afanasyscheduler.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import re 5 | import shutil 6 | import shlex 7 | import sys 8 | import traceback 9 | import time 10 | 11 | import pdg 12 | from pdg.scheduler import PyScheduler, evaluateParamOr, convertEnvMapToUTF8 13 | from pdg.job.callbackserver import CallbackServerMixin 14 | from pdg.utils import TickTimer, expand_vars 15 | from pdgjob import pdgcmd 16 | 17 | import af 18 | import afcommon 19 | import services.service 20 | 21 | logging.basicConfig(level = logging.DEBUG) 22 | logger = logging.getLogger(__name__) 23 | 24 | 25 | class AfanasyScheduler(CallbackServerMixin, PyScheduler): 26 | """ 27 | Scheduler implementation that interfaces with a Afanasy farm instance. 28 | """ 29 | def __init__(self, scheduler, name): 30 | PyScheduler.__init__(self, scheduler, name) 31 | CallbackServerMixin.__init__(self, False) 32 | self.active_jobs = {} 33 | self.tick_timer = None 34 | self.custom_port_range = None 35 | self.cook_id = '0' 36 | 37 | @classmethod 38 | def templateName(cls): 39 | return 'afanasyscheduler' 40 | 41 | @classmethod 42 | def templateBody(cls): 43 | return json.dumps({ 44 | "name": "afanasyscheduler", 45 | "parameters" : [ 46 | { 47 | "name" : "address", 48 | "label" : "Afanasy Server Address", 49 | "type" : "String", 50 | "size" : 1, 51 | }, 52 | { 53 | "name" : "callbackportrange", 54 | "label" : "Callback Port Range", 55 | "type" : "Integer", 56 | "size" : 2, 57 | }, 58 | { 59 | "name" : "overrideportrange", 60 | "label" : "Enable callbackportrange", 61 | "type" : "Integer", 62 | "size" : 1, 63 | }, 64 | { 65 | "name" : "localsharedroot", 66 | "label" : "Local Root Path", 67 | "type" : "String", 68 | "size" : 1, 69 | }, 70 | { 71 | "name" : "overrideremoterootpath", 72 | "label" : "", 73 | "type" : "Integer", 74 | "size" : 1, 75 | }, 76 | { 77 | "name" : "remotesharedroot", 78 | "label" : "Farm Root Path", 79 | "type" : "String", 80 | "size" : 1, 81 | }, 82 | { 83 | "name" : "hfs_linux_path", 84 | "label" : "Linux HFS Path", 85 | "type" : "String", 86 | "size" : 1, 87 | }, 88 | { 89 | "name" : "hfs_macosx_path", 90 | "label" : "macOS HFS Path", 91 | "type" : "String", 92 | "size" : 1, 93 | }, 94 | { 95 | "name" : "hfs_windows_path", 96 | "label" : "Windows HFS Path", 97 | "type" : "String", 98 | "size" : 1, 99 | }, 100 | { 101 | "name" : "hfspathuniversal", 102 | "label" : "Universal HFS Path", 103 | "type" : "String", 104 | "size" : 1, 105 | }, 106 | { 107 | "name" : "useuniversalhfs", 108 | "label" : "Use Universal HFS", 109 | "type" : "Integer", 110 | "size" : 1, 111 | } 112 | ] 113 | }) 114 | 115 | def __del__(self): 116 | if self.tick_timer: 117 | self.tick_timer.cancel() 118 | 119 | def _localsharedroot(self): 120 | """ 121 | returns the local path to sharedroot, possibly by contacting the 122 | server. Returns None on failure. 123 | """ 124 | localsharedroot = self["localsharedroot"].evaluateString() 125 | if not os.path.exists(localsharedroot): 126 | raise RuntimeError('localsharedroot file path not found: ' + localsharedroot) 127 | return localsharedroot 128 | 129 | def _updateWorkingDir(self): 130 | """ 131 | returns the full path to working dir, rooted with env var which can be interpreted by slave on farm. 132 | Local working dir is set as user provided. 133 | """ 134 | workingbase = self["pdg_workingdir"].evaluateString() 135 | if os.path.isabs(workingbase): 136 | raise RuntimeError("Relative Job Directory \'" + workingbase + "\' must be relative path!") 137 | 138 | local_wd = os.path.normpath(self["localsharedroot"].evaluateString() + "/" + workingbase) 139 | local_wd = local_wd.replace("\\", "/") 140 | if self["overrideremoterootpath"].evaluateInt() == 0: 141 | remote_wd = local_wd 142 | else: 143 | remote_wd = '{}/{}'.format(self['remotesharedroot'].evaluateString(), workingbase) 144 | self.setWorkingDir(local_wd, remote_wd) 145 | 146 | def _getHFSPath(self, platform): 147 | pth = None 148 | if self["useuniversalhfs"].evaluateInt() > 0: 149 | pth = self["hfspathuniversal"].evaluateString() 150 | elif platform.startswith('win'): 151 | pth = self["hfs_windows_path"].evaluateString() 152 | elif platform.startswith('darwin') or platform.startswith('mac'): 153 | pth = self["hfs_macosx_path"].evaluateString() 154 | elif platform.startswith('linux'): 155 | pth = self["hfs_linux_path"].evaluateString() 156 | return pth 157 | 158 | def pythonBin(self, platform): 159 | """ 160 | [virtual] Returns the path to a python executable. This executable 161 | will be used to execute generic python and is substituted in commands 162 | with the __PDG_PYTHON__ token. 163 | 164 | platform Is an identifier with the same rules as python's sys.platform. 165 | (should be 'linux*' | 'darwin' | 'win*') 166 | local True means returns the absolute path on the local file system. 167 | """ 168 | # local python can be overriden with PDG_PYTHON env var 169 | val = 'python' 170 | if platform.startswith('win'): 171 | val = '$HFS/python27/python.exe' 172 | elif platform.startswith('linux'): 173 | val = '$HFS/python/bin/python' 174 | val = os.environ.get('PDG_PYTHON') or os.path.expandvars(val) 175 | return val 176 | 177 | def hythonBin(self, platform): 178 | """ 179 | [virtual] Returns the path to a hython executable. This executable 180 | will be used to execute hython and is substituted in commands 181 | with the __PDG_HYTHON__ token. 182 | 183 | platform Is an identifier with the same rules as python's sys.platform. 184 | (should be 'linux*' | 'darwin' | 'win*') 185 | """ 186 | # local hython can be overriden with PDG_HYTHON env var 187 | val = 'hython' 188 | if platform.startswith('win'): 189 | val = '$HFS/bin/hython.exe' 190 | elif platform.startswith('linux') or platform.startswith('darwin'): 191 | val = '$HFS/bin/hython' 192 | val = os.environ.get('PDG_HYTHON') or os.path.expandvars(val) 193 | return val 194 | 195 | 196 | def workItemResultServerAddr(self): 197 | return self['address'].evaluateString() 198 | 199 | def onSchedule(self, work_item): 200 | """ 201 | onSchedule(self, pdg.PyWorkItem) -> pdg.SchedulerResult 202 | 203 | Schedules the work item, e.g. submits a job to Tractor to perform 204 | work described in the work item. Returns pdg.scheduleResult.Succeeded 205 | on success, and pdg.scheduleResult.Failed on failures. Never cooks 206 | the work item directly, and thus does not return 207 | pdg.scheduleResult.CookSucceeded unless command is empty 208 | """ 209 | if len(work_item.command) == 0: 210 | return pdg.scheduleResult.CookSucceeded 211 | try: 212 | item_name = work_item.name 213 | item_id = work_item.id 214 | node = work_item.node 215 | node_name = node.name 216 | item_command = work_item.command 217 | 218 | logger.debug('onSchedule input: {} {} {}'.format(node_name, item_name, item_command)) 219 | 220 | job_name = 'workitem_{}'.format(node_name) 221 | task_name = item_name 222 | 223 | temp_dir = self.tempDir(False) 224 | work_dir = self.workingDir(False) 225 | script_dir = self.scriptDir(False) 226 | 227 | item_command = item_command.replace("__PDG_ITEM_NAME__", item_name) 228 | item_command = item_command.replace("__PDG_SHARED_TEMP__", temp_dir) 229 | item_command = item_command.replace("__PDG_TEMP__", temp_dir) 230 | item_command = item_command.replace("__PDG_DIR__", work_dir) 231 | item_command = item_command.replace("__PDG_SCRIPTDIR__", script_dir) 232 | item_command = item_command.replace("__PDG_RESULT_SERVER__", self.workItemResultServerAddr()) 233 | item_command = item_command.replace("__PDG_PYTHON__", self.pythonBin(sys.platform)) 234 | item_command = item_command.replace("__PDG_HYTHON__", self.hythonBin(sys.platform)) 235 | 236 | cmd_argv = ' '.join(shlex.split(item_command)) 237 | 238 | if len(cmd_argv) < 2: 239 | logger.error('Could not shelx command: ' + item_command) 240 | return pdg.scheduleResult.Succeeded 241 | 242 | # Ensure directories exist and serialize the work item 243 | self.createJobDirsAndSerializeWorkItems(work_item) 244 | 245 | # Path PDGcmd file 246 | src_file = os.environ.get('HOUDINI_USER_PREF_DIR') + '/pdg/types/pdgcmd.py' 247 | dest_file = script_dir + '/' + 'pdgcmd.py' 248 | shutil.copy(src_file, dest_file) 249 | 250 | # Create Job 251 | job = af.Job(job_name) 252 | 253 | # Job Parameters 254 | job.setBranch(self['job_branch'].evaluateString()) 255 | job.setDependMask(self['depend_mask'].evaluateString()) 256 | job.setDependMaskGlobal(self['depend_mask_global'].evaluateString()) 257 | job.setPriority(self['priority'].evaluateInt()) 258 | job.setMaxRunningTasks(self['max_runtasks'].evaluateInt()) 259 | job.setMaxRunTasksPerHost(self['maxperhost'].evaluateInt()) 260 | job.setHostsMask(self['hosts_mask'].evaluateString()) 261 | job.setHostsMaskExclude(self['hosts_mask_exclude'].evaluateString()) 262 | 263 | service = 'generic' 264 | parser = 'generic' 265 | 266 | # Create a block with provided name and service type 267 | block = af.Block(job_name, service) 268 | block.setService(service) 269 | block.setParser(parser) 270 | block.setCapacity(self['capacity'].evaluateInt()) 271 | #block.setVariableCapacity(self['capacity_coefficient1'].evaluateInt(), self['capacity_coefficient2'].evaluateInt()) 272 | block.setTaskMinRunTime(self['minruntime'].evaluateInt()) 273 | block.setTaskMaxRunTime(self['maxruntime'].evaluateInt() * 3600) 274 | 275 | # Set Enviroment Task Variables 276 | block.setEnv('PDG_RESULT_SERVER', str(self.workItemResultServerAddr())) 277 | block.setEnv('PDG_ITEM_NAME', str('workitem_{}'.format(item_name))) 278 | block.setEnv('PDG_DIR', str(work_dir)) 279 | block.setEnv('PDG_TEMP', str(temp_dir)) 280 | block.setEnv('PDG_SHARED_TEMP', str(temp_dir)) 281 | block.setEnv('PDG_INDEX', str(work_item.index)) 282 | block.setEnv('PDG_INDEX4', "{:04d}".format(work_item.index)) 283 | block.setEnv('PDG_SCRIPTDIR', str(script_dir)) 284 | block.setEnv('PDG_JOBID', self.cook_id) 285 | block.setEnv('PDG_JOBID_VAR', 'PDG_JOBID') 286 | 287 | task = af.Task(task_name) 288 | task.setCommand(cmd_argv) 289 | block.tasks.append(task) 290 | 291 | job.blocks.append(block) 292 | 293 | newjid = 0 294 | logger.debug('onScheduler new job [jid=%d]:' % newjid) 295 | 296 | try: 297 | newjid = job.send() 298 | newjid = newjid[1]['id'] 299 | except Exception,err: 300 | import traceback 301 | traceback.print_exc() 302 | sys.stderr.flush() 303 | raise RuntimeError('Error creating job for ' + item_name + ':\n' + str(err)) 304 | 305 | # add to active jobs list 306 | self.active_jobs[newjid] = item_name 307 | work_item.data.setInt("afanasy_jobid", newjid, 0) 308 | 309 | return pdg.scheduleResult.Succeeded 310 | 311 | except: 312 | import traceback 313 | traceback.print_exc() 314 | sys.stderr.flush() 315 | return pdg.scheduleResult.Failed 316 | 317 | def onScheduleStatic(self, dependencies, dependents, ready_items): 318 | return 319 | 320 | def onStart(self): 321 | logger.debug("onStart") 322 | self.startCallbackServer() 323 | """ 324 | onStart(self) -> boolean 325 | 326 | [virtual] Scheduler start callback. Starts the XMLRPC server for 327 | communicating with Tractor. 328 | """ 329 | return True 330 | 331 | def onStop(self): 332 | logger.debug("onStop") 333 | """ 334 | onStop(self) -> boolean 335 | 336 | [virtual] Scheduler stop callback. Shuts down the XMLRPC server for 337 | communicating with Tractor. 338 | """ 339 | self.stopCallbackServer() 340 | self._stopSharedServers() 341 | return True 342 | 343 | def onStartCook(self, static, cook_set): 344 | """ 345 | onStartCook(self, static, cook_set) -> boolean 346 | 347 | [virtual] Cook start callback. Starts a root job for the cook session 348 | """ 349 | self.cook_id = str(int(self.cook_id) + 1) 350 | 351 | # sanity check the local shared root 352 | localsharedroot = self._localsharedroot() 353 | 354 | # update our working dir 355 | self._updateWorkingDir() 356 | 357 | file_root = self.workingDir(True) 358 | if not os.path.exists(file_root): 359 | os.makedirs(file_root) 360 | if not os.path.exists(self.tempDir(True)): 361 | os.makedirs(self.tempDir(True)) 362 | 363 | # override the listening port 364 | overrideportrange = self['overrideportrange'].evaluateInt() 365 | if overrideportrange > 0: 366 | callbackportrange = self["callbackportrange"].evaluateInt() 367 | if callbackportrange != self.custom_port_range: 368 | self.custom_port_range = callbackportrange 369 | self.stopCallbackServer() 370 | self.startCallbackServer() 371 | 372 | if not self.isCallbackServerRunning(): 373 | self.startCallbackServer() 374 | 375 | self.tick_timer = TickTimer(0.25, self.tick) 376 | self.tick_timer.start() 377 | 378 | return True 379 | 380 | def tick(self): 381 | """ 382 | Called during a cook. Checks on jobs in flight to see if 383 | any have finished. 384 | """ 385 | # create job command 386 | cmd = af.Cmd() 387 | try: 388 | # check job/task statuses and remove them if finished 389 | for id in self.active_jobs.keys(): 390 | # query job progress 391 | if cmd.getJobProgress(id) is None: 392 | work_item_name = self.active_jobs[id] 393 | self.workItemFailed(work_item_name, -1) 394 | del self.active_jobs[id] 395 | continue 396 | 397 | query = cmd.getJobProgress(id)['progress'][0] 398 | job_info = cmd.getJobInfo(id)[0] 399 | while query: 400 | if isinstance(query, dict): 401 | break 402 | query = query[0] 403 | 404 | # task state 405 | task = query['state'] 406 | 407 | logger.debug('task_state for {} = {}'.format(id, task)) 408 | if len(task) < 1: 409 | # this is a ghost-job - consider it failed 410 | work_item_name = self.active_jobs[id] 411 | self.workItemFailed(work_item_name, -1) 412 | del self.active_jobs[id] 413 | continue 414 | 415 | task_state = task.strip() 416 | 417 | if task_state == 'RUN': 418 | work_item_name = self.active_jobs[id] 419 | self.workItemStartCook(work_item_name, -1) 420 | 421 | if task_state == 'RDY RER': 422 | work_item_name = self.active_jobs[id] 423 | self.workItemFailed(work_item_name, -1) 424 | del self.active_jobs[id] 425 | continue 426 | 427 | elif task_state == 'DON' or task_state == 'SKP': 428 | statetime = job_info.get('time_started') 429 | activetime = job_info.get('time_done') 430 | cook_timedelta = float(activetime - statetime) 431 | work_item_name = self.active_jobs[id] 432 | self.workItemSucceeded(work_item_name, -1, cook_timedelta) 433 | del self.active_jobs[id] 434 | continue 435 | except: 436 | import traceback 437 | traceback.print_exc() 438 | sys.stderr.flush() 439 | return False 440 | return True 441 | 442 | def onStopCook(self, cancel): 443 | """ 444 | Callback invoked by PDG when graph cook ends. 445 | """ 446 | if self.tick_timer: 447 | self.tick_timer.cancel() 448 | self._stopSharedServers() 449 | 450 | return True 451 | 452 | def submitAsJob(self, graph_file, node_path): 453 | # we don't support cooking network 454 | logger.debug("submitAsJob({},{})".format(graph_file, node_path)) 455 | return "" 456 | 457 | def workItemSucceeded(self, name, index, cook_duration, jobid=''): 458 | """ 459 | Called by CallbackServerMixin when a workitem signals success. 460 | """ 461 | logger.debug('Job Succeeded: {}'.format(name)) 462 | self.onWorkItemSucceeded(name, index, cook_duration) 463 | 464 | def workItemFailed(self, name, index, jobid=''): 465 | """ 466 | Called by CallbackServerMixin when a workitem signals failure. 467 | """ 468 | logger.debug('Job Failed: name={}, index={}, jobid={}'.format(name, index, jobid)) 469 | self.onWorkItemFailed(name, index) 470 | 471 | def workItemCancelled(self, name, index, jobid=''): 472 | """ 473 | Called by CallbackServerMixin when a workitem signals cancelled. 474 | """ 475 | logger.debug('Job Cancelled: {}'.format(name)) 476 | self.onWorkItemCanceled(name, index) 477 | 478 | def workItemStartCook(self, name, index, jobid=''): 479 | """ 480 | Called by CallbackServerMixin when a workitem signals started. 481 | """ 482 | logger.debug('Job Start Cook: {}'.format(name)) 483 | self.onWorkItemStartCook(name, index) 484 | 485 | def workItemFileResult(self, item_name, subindex, result, tag, checksum, jobid=''): 486 | """ 487 | Called by CallbackServerMixin when a workitem signals file result data reported. 488 | """ 489 | self.onWorkItemFileResult(item_name, subindex, result, tag, checksum) 490 | 491 | def workItemSetAttribute(self, item_name, subindex, attr_name, data, jobid=''): 492 | """ 493 | Called by CallbackServerMixin when a workitem signals simple result data reported. 494 | """ 495 | self.onWorkItemSetAttribute(item_name, subindex, attr_name, data) 496 | 497 | 498 | def _stopSharedServers(self): 499 | for sharedserver_name in self.getSharedServers(): 500 | self.endSharedServer(sharedserver_name) 501 | 502 | def onSharedServerStarted(self, args): 503 | """ 504 | Called when a job has started a new sharedserver 505 | """ 506 | logger.debug("sharedserver started: {}, args = {}".format(args["name"], args)) 507 | self.setSharedServerInfo(args["name"], args) 508 | return True 509 | 510 | def endSharedServer(self, sharedserver_name): 511 | """ 512 | Called by a job or on cook end to terminate the sharedserver 513 | """ 514 | try: 515 | info = self.getSharedServerInfo(sharedserver_name) 516 | logger.debug("Killing sharedserver: " + sharedserver_name) 517 | from pdgjob.sharedserver import shutdownServer 518 | # FIXME: 519 | # at this point we need to kill the server which is running somewhere on the farm 520 | # it would be nice to do this directly with hqueue, but the server is not officially a job. 521 | # This will need to be reworked so that the onFailed/onSuccess callbacks of the top-level 522 | # job are responsible for cleaning up the server. 523 | shutdownServer(info) 524 | # Setting info to empty string removes from the scheduler internal list 525 | self.clearSharedServerInfo(sharedserver_name) 526 | except: 527 | return False 528 | return True 529 | 530 | def getLogURI(self, work_item): 531 | log_path = '{}/logs/{}.log'.format(self.tempDir(True), work_item.name) 532 | uri = 'file:///' + log_path 533 | return uri 534 | 535 | def getStatusURI(self, work_item): 536 | # no seperate status page for afanasy scheduler 537 | return '' 538 | 539 | 540 | def registerTypes(type_registry): 541 | type_registry.registerScheduler(AfanasyScheduler, label="Afanasy Scheduler") -------------------------------------------------------------------------------- /houdini/afanasy_scheduler/afanasyscheduler_asset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexwheezy/python/1509851ccf591e5123a44a7d3f2ea64688a205a4/houdini/afanasy_scheduler/afanasyscheduler_asset.png -------------------------------------------------------------------------------- /houdini/afanasy_scheduler/afanasyscheduler_webgui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexwheezy/python/1509851ccf591e5123a44a7d3f2ea64688a205a4/houdini/afanasy_scheduler/afanasyscheduler_webgui.png -------------------------------------------------------------------------------- /houdini/afanasy_scheduler/otls/top_afanasyscheduler.hda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexwheezy/python/1509851ccf591e5123a44a7d3f2ea64688a205a4/houdini/afanasy_scheduler/otls/top_afanasyscheduler.hda -------------------------------------------------------------------------------- /houdini/afanasy_scheduler/pdgcmd.py: -------------------------------------------------------------------------------- 1 | # 2 | # PROPRIETARY INFORMATION. This software is proprietary to 3 | # Side Effects Software Inc., and is not to be reproduced, 4 | # transmitted, or disclosed in any way without written permission. 5 | # 6 | # Produced by: 7 | # Side Effects Software Inc 8 | # 123 Front Street West, Suite 1401 9 | # Toronto, Ontario 10 | # Canada M5J 2M2 11 | # 416-504-9876 12 | # 13 | # NAME: pdgcmd.py ( Python ) 14 | # 15 | # COMMENTS: Utility methods for jobs that need to report back to PDG. 16 | # Not dependent on Houdini install. 17 | # 18 | 19 | import json 20 | import logging 21 | import os 22 | import socket 23 | import subprocess 24 | import sys 25 | import time 26 | import xmlrpclib 27 | import httplib 28 | import shlex 29 | 30 | logging.basicConfig(level=logging.DEBUG) 31 | 32 | # 33 | # Path Utilities 34 | 35 | def delocalizePath(local_path): 36 | """ 37 | Delocalize the given path to be rooted at __PDG_DIR__ 38 | Requires PDG_DIR env var to be present 39 | """ 40 | # de-localize the result_data path if possible 41 | # we do this by replacing the file prefix if it matches our expected env var 42 | deloc_path = local_path 43 | try: 44 | pdg_dir_local = os.environ['PDG_DIR'] 45 | # our env var value might be in terms of another env var - so expand again 46 | pdg_dir_local = os.path.expandvars(pdg_dir_local) 47 | # normalize path to forward slashes 48 | pdg_dir_local = pdg_dir_local.replace('\\','/') 49 | deloc_path = local_path.replace('\\','/') 50 | deloc_path = deloc_path.replace(pdg_dir_local, '__PDG_DIR__', 1) 51 | except KeyError: 52 | pass 53 | return deloc_path 54 | 55 | # Makes a directory if it does not exist, and is made to be safe against 56 | # directory creation happening concurrent while we're attemtping to make it 57 | def makeDirSafe(local_path): 58 | if not local_path: 59 | return 60 | 61 | try: 62 | os.makedirs(local_path) 63 | except OSError: 64 | if not os.path.isdir(local_path): 65 | raise 66 | 67 | def _substitute_scheduler_vars(data): 68 | for var in ('PDG_DIR', 'PDG_ITEM_NAME', 'PDG_TEMP', 'PDG_RESULT_SERVER', 'PDG_INDEX'): 69 | varsym = '__' + var + '__' 70 | if varsym in data: 71 | data = data.replace(varsym, os.environ[var]) 72 | return data 73 | 74 | def localizePath(deloc_path): 75 | """ 76 | Localize the given path. This means replace any __PDG* tokens and 77 | expand env vars with the values in the current environment 78 | """ 79 | loc_path = _substitute_scheduler_vars(deloc_path) 80 | loc_path = os.path.expandvars(loc_path) 81 | # support env vars defined as other env vars 82 | loc_path = os.path.expandvars(loc_path) 83 | loc_path = loc_path.replace("\\", "/") 84 | return loc_path 85 | 86 | # Callback Helper Functions. 87 | # These functions are used in task code to report status and results 88 | # to the PDG callback server 89 | # 90 | 91 | def execBatchPoll(item_name, subindex, server_addr): 92 | """ 93 | Blocks until a batch sub item can begin cooking 94 | """ 95 | s = xmlrpclib.ServerProxy('http://'+server_addr) 96 | while True: 97 | r = s.check_ready_batch(item_name, subindex) 98 | if r and int(r)==1: 99 | break 100 | time.sleep(0.5) 101 | 102 | def execItemFailed(item_name, server_addr, to_stdout = True): 103 | """ 104 | Executes an item callback directly to report when an item has failed. 105 | 106 | item_name: name of the associated workitem 107 | server_addr: callback server in format 'IP:PORT', or emptry string to ignore 108 | to_stdout: also emit status messages to stdout 109 | 110 | If there is an error connecting to the callback server an error will be printed, but no 111 | exception raised. 112 | 113 | Note: Batch items not supported. 114 | """ 115 | try: 116 | jobid = os.environ[os.environ['PDG_JOBID_VAR']] 117 | except: 118 | jobid = '' 119 | 120 | s = xmlrpclib.ServerProxy('http://' + server_addr) 121 | s.failed(item_name, jobid) 122 | 123 | def execStartCook(item_name, subindex=-1, server_addr="", to_stdout = True): 124 | """ 125 | Executes an item callback directly to report than a work item with a 126 | specific index has started cooking 127 | """ 128 | if to_stdout: 129 | print("PDG_START: {};{}".format(item_name, subindex)) 130 | 131 | try: 132 | jobid = os.environ[os.environ['PDG_JOBID_VAR']] 133 | except: 134 | jobid = '' 135 | 136 | s = xmlrpclib.ServerProxy('http://' + server_addr) 137 | if subindex >= 0: 138 | s.start_cook_batch(item_name, subindex, jobid) 139 | else: 140 | s.start_cook(item_name, jobid) 141 | 142 | def reportResultData(result_data, item_name=None, server_addr=None, 143 | result_data_tag="", subindex=-1, and_success=False, to_stdout = True, 144 | duration = 0.0, hash_code = 0): 145 | """ 146 | Reports a result to PDG via the callback server. 147 | 148 | item_name: name of the associated workitem (default $PDG_ITEM_NAME) 149 | server_addr: callback server in format 'IP:PORT' (default $PDG_RESULT_SERVER) 150 | if there is no env var it will default to stdout reporting only. 151 | result_data: result data - treated as bytes if result_data_tag is passed 152 | result_data_tag: result tag to categorize result. Eg: 'file/geo' 153 | Default is empty which means attempt to categorize using file extension. 154 | subindex: The batch subindex if this is a batch item. 155 | and_success: If True, report success in addition to result_data 156 | to_stdout: also emit status messages to stdout 157 | duration: cook time of the item in seconds, only report with and_success 158 | hash_code: hashcode for result 159 | """ 160 | if not isinstance(result_data, (list, tuple)): 161 | result_data_list = [result_data] 162 | else: 163 | result_data_list = result_data 164 | 165 | if not result_data_list: 166 | raise TypeError("result_data is invalid") 167 | if not isinstance(result_data_list[0], (bytes,bytearray,unicode)): 168 | raise TypeError("result_data must be string-like or a list of string-like") 169 | 170 | if not item_name: 171 | item_name = os.environ['PDG_ITEM_NAME'] 172 | 173 | do_socket = True 174 | if not server_addr: 175 | try: 176 | server_addr = os.environ['PDG_RESULT_SERVER'] 177 | except KeyError: 178 | do_socket = False 179 | 180 | is_filepath = result_data_tag.startswith('file') or not result_data_tag 181 | 182 | server_proxy = xmlrpclib.ServerProxy('http://' + server_addr) 183 | 184 | multicall = False 185 | proxy = server_proxy 186 | if len(result_data_list) > 1: 187 | proxy = xmlrpclib.MultiCall(server_proxy) 188 | multicall = True 189 | 190 | try: 191 | jobid = os.environ[os.environ['PDG_JOBID_VAR']] 192 | except: 193 | jobid = '' 194 | 195 | for result_data_elem in result_data_list: 196 | if is_filepath: 197 | # de-localize the result_data path if possible 198 | # we do this by replacing the file prefix if it matches our expected env var 199 | if not result_data_elem.startswith('__PDG_DIR__'): 200 | result_data_elem = delocalizePath(result_data_elem) 201 | 202 | log_dir = os.environ['PDG_SHARED_TEMP'] 203 | item_log_path = os.path.join(log_dir, 'logs', item_name).replace('\\', '/') + '.log' 204 | 205 | def open_output_file(): 206 | outf = open(item_log_path, 'w') 207 | return outf 208 | 209 | output_file = open_output_file() 210 | if to_stdout: 211 | if len(result_data_elem) > 100: 212 | print_result_data_elem = repr(result_data_elem)[0:90] + '...('+str(len(result_data_elem))+' bytes)' 213 | else: 214 | print_result_data_elem = repr(result_data_elem) 215 | print("PDG_RESULT: {};{};{};{};{}".format(item_name, subindex, print_result_data_elem, result_data_tag, hash_code)) 216 | output_file.write("PDG_RESULT: {};{};{};{};{}".format(item_name, subindex, print_result_data_elem, result_data_tag, hash_code)) 217 | if and_success: 218 | print("PDG_SUCCESS: {};{};{}".format(item_name, subindex, duration)) 219 | output_file.close() 220 | 221 | ''' 222 | if do_socket: 223 | if and_success: 224 | if subindex >= 0: 225 | proxy.success_and_result_batch(item_name, xmlrpclib.Binary(result_data_elem), 226 | result_data_tag, subindex, hash_code, duration, jobid) 227 | else: 228 | proxy.success_and_result(item_name, xmlrpclib.Binary(result_data_elem), 229 | result_data_tag, hash_code, duration, jobid) 230 | else: 231 | if subindex >= 0: 232 | proxy.result_batch(item_name, xmlrpclib.Binary(result_data_elem), 233 | result_data_tag, subindex, hash_code, jobid) 234 | else: 235 | proxy.result(item_name, xmlrpclib.Binary(result_data_elem), 236 | result_data_tag, hash_code, jobid) 237 | ''' 238 | 239 | if multicall: 240 | proxy() 241 | 242 | return True 243 | 244 | def writeAttribute(attr_name, attr_value, item_name=None, server_addr=None): 245 | """ 246 | Writes attribute data back into a work item in PDG via the callback server. 247 | 248 | item_name: name of the associated workitem (default $PDG_ITEM_NAME) 249 | server_addr: callback server in format 'IP:PORT' (default $PDG_RESULT_SERVER) 250 | if there is no env var it will default to stdout reporting only. 251 | attr_name: name of the attribute 252 | attr_value: single value or array of string/float/int data 253 | """ 254 | if not isinstance(attr_value, (list, tuple)): 255 | attr_value_list = [attr_value] 256 | else: 257 | attr_value_list = attr_value 258 | 259 | if not attr_value_list: 260 | raise TypeError("attr_value is invalid") 261 | if not isinstance(attr_value_list[0], (bytes,bytearray,unicode,int,float)): 262 | raise TypeError("result_data must be string, int or float (array)") 263 | 264 | if not item_name: 265 | item_name = os.environ['PDG_ITEM_NAME'] 266 | 267 | if not server_addr: 268 | server_addr = os.environ['PDG_RESULT_SERVER'] 269 | 270 | server_proxy = xmlrpclib.ServerProxy('http://' + server_addr) 271 | 272 | proxy = server_proxy 273 | try: 274 | jobid = os.environ[os.environ['PDG_JOBID_VAR']] 275 | except: 276 | jobid = '' 277 | 278 | print("PDG_RESULT_ATTR: {};{};{}".format(item_name, attr_name, attr_value_list)) 279 | proxy.write_attr(item_name, attr_name, attr_value_list, jobid) 280 | 281 | def reportServerStarted(servername, pid, host, port, proto_type, item_name=None, server_addr=None): 282 | """ 283 | Reports that a shared server has been started. 284 | 285 | item_name: name of the associated workitem (default $PDG_ITEM_NAME) 286 | server_addr: callback server in format 'IP:PORT' (default $PDG_RESULT_SERVER) 287 | """ 288 | sharedserver_message = { 289 | "name" : servername, 290 | "pid" : pid, 291 | "host" : host, 292 | "port" : port, 293 | "proto_type" : proto_type 294 | } 295 | 296 | if not item_name: 297 | item_name = os.environ['PDG_ITEM_NAME'] 298 | 299 | if not server_addr: 300 | server_addr = os.environ['PDG_RESULT_SERVER'] 301 | 302 | server_proxy = xmlrpclib.ServerProxy('http://' + server_addr) 303 | try: 304 | jobid = os.environ[os.environ['PDG_JOBID_VAR']] 305 | except: 306 | jobid = '' 307 | 308 | server_proxy.sharedserver_started(sharedserver_message, jobid) 309 | reportResultData(str(host), item_name=item_name, 310 | server_addr=server_addr, result_data_tag="socket/ip") 311 | reportResultData(str(port), item_name=item_name, 312 | server_addr=server_addr, result_data_tag="socket/port") 313 | 314 | def getSharedServerInfo(servername, server_addr=None): 315 | """ 316 | Returns the dict of server info 317 | """ 318 | if not server_addr: 319 | server_addr = os.environ['PDG_RESULT_SERVER'] 320 | 321 | server_proxy = xmlrpclib.ServerProxy('http://' + server_addr) 322 | return server_proxy.get_sharedserver_info(servername) 323 | 324 | def execCommand(command, toolName=None): 325 | """ 326 | Executes a command 327 | """ 328 | 329 | print "Executing command: {}".format(command) 330 | 331 | try: 332 | process = subprocess.Popen(shlex.split(command)) 333 | process.communicate() 334 | if process.returncode != 0: 335 | exit(1) 336 | except subprocess.CalledProcessError as cmd_err: 337 | print "ERROR: problem executing command {}".format(command) 338 | print cmd_err 339 | exit(1) 340 | except OSError as os_err: 341 | 342 | # OSError might be due to missing executable, if that's the 343 | # case, inform the user about it. 344 | # We could check this before trying to execute, but considering this is 345 | # the exception, I'd rather not check this every time we run the command 346 | 347 | try: 348 | import distutils.spawn 349 | 350 | executableName = command.split(' ')[0] 351 | if not distutils.spawn.find_executable(executableName): 352 | print "ERROR: could not find executable {}".format(executableName) 353 | print "Are you sure you have {} installed?".format(toolName or executableName) 354 | else: 355 | print "ERROR: problem executing command {}".format(command) 356 | print os_err 357 | except: 358 | print "ERROR: problem executing command {}".format(command) 359 | print os_err 360 | 361 | 362 | exit(1) 363 | -------------------------------------------------------------------------------- /houdini/dependency_nodes/traversenetwork.py: -------------------------------------------------------------------------------- 1 | import hou 2 | from typing import List 3 | 4 | 5 | def isSopNode(node: hou.OpNode) -> bool: 6 | """Returns True if this node is top-level and SOP level.""" 7 | return isinstance(node, hou.SopNode) and not node.isInsideLockedHDA() 8 | 9 | 10 | def traverseNetwork(node: hou.OpNode, nodes: List[hou.OpNode]): 11 | """Recursively traverses all dependencies including references, 12 | parameters and incoming connections.""" 13 | if not node: 14 | return 15 | 16 | curr_node = None 17 | dependents = set(filter(isSopNode, (*node.dependents(False), *node.references(False)))) 18 | for node in (*node.inputs(), *dependents): 19 | if node and not node in nodes: 20 | nodes.append(node) 21 | curr_node = node 22 | traverseNetwork(curr_node, nodes) 23 | 24 | 25 | def dependencyNodes(node: hou.OpNode) -> List[hou.ObjNode]: 26 | """Returns all nodes that are dependencies of the given node.""" 27 | nodes = [] 28 | for node in (node, *node.inputAncestors()): 29 | if not node in nodes: 30 | nodes.append(node) 31 | traverseNetwork(node, nodes) 32 | return nodes 33 | 34 | 35 | # Example usage 36 | color = (0.5, 0.2, 0.20) 37 | node = hou.selectedNodes()[0] 38 | for node in dependencyNodes(node): 39 | node.setColor(hou.Color(color)) 40 | -------------------------------------------------------------------------------- /houdini/drag_drop_files/README.md: -------------------------------------------------------------------------------- 1 | The script implements the ability to drag files of geometry, caches, textures, images right into the viewport. 2 | 3 | How to Install 4 | 5 | Windows: 6 | - Download scripts, unpack archive 7 | - Copy externaldragdrop.py to directory "C:\Users\user\Documents\houdini16.5\scripts" 8 | - Copy nodes_color.py, type_extensions to directory "C:\Users\alekhin\Documents\houdini16.5\scripts\python" 9 | 10 | Linux: 11 | - Download scripts, unpack 12 | - Copy externaldragdrop.py to directory "/home/user/houdini16.5/scripts" 13 | - Copy nodes_color.py, type_extensions to directory "/home/user/houdini16.5/scripts/python" -------------------------------------------------------------------------------- /houdini/drag_drop_files/externaldragdrop.py: -------------------------------------------------------------------------------- 1 | import os, re 2 | 3 | import hou 4 | from type_extensions import * 5 | from nodes_color import * 6 | 7 | 8 | position = None 9 | 10 | def cursorPosition(network): 11 | global position 12 | position = network.cursorPosition() 13 | 14 | 15 | def getNetworkEditor(): 16 | editors = [pane for pane in hou.ui.paneTabs() \ 17 | if isinstance(pane, hou.NetworkEditor) and pane.isCurrentTab()][-1] 18 | ctx = editors.pwd() 19 | type_ctx = ctx.type().childTypeCategory() 20 | return editors, ctx, type_ctx 21 | 22 | 23 | def matchTypes(extensions): 24 | return re.compile(r'({})$'.format('|'.join(re.escape(x) for x in extensions))) 25 | 26 | 27 | def baseName(filename, extensions): 28 | filepath, basename = os.path.split(filename) 29 | name = basename.split(os.extsep)[0] 30 | ext = matchTypes(extensions).search(filename).group() if matchTypes(extensions).search(filename) else '' 31 | return name, ext 32 | 33 | 34 | def copImages(network, ctx, filename): 35 | if not matchTypes(IMAGE_EXTENSIONS).search(filename): 36 | return 37 | 38 | name = baseName(filename, IMAGE_EXTENSIONS)[0] 39 | name = re.sub(r'\s+', '_', name) 40 | 41 | image = ctx.createNode('file', node_name=name) 42 | image.setColor(hou.Color(IMAGE_NODE_COLOR)) 43 | image.setPosition(position) 44 | image.parm('filename1').set(filename) 45 | 46 | 47 | def chanFiles(network, ctx, filename): 48 | if not matchTypes(CHAN_EXTENSIONS).search(filename): 49 | return 50 | 51 | name = baseName(filename, CHAN_EXTENSIONS)[0] 52 | name = re.sub(r'\s+', '_', name) 53 | 54 | chan = ctx.createNode('file', node_name=name) 55 | chan.setColor(hou.Color(CLIP_NODE_COLOR)) 56 | chan.setPosition(position) 57 | chan.parm('file').set(filename) 58 | 59 | 60 | def shopImages(network, ctx, filename): 61 | if not matchTypes(IMAGE_EXTENSIONS).search(filename): 62 | return 63 | 64 | name = baseName(filename, IMAGE_EXTENSIONS)[0] 65 | name = re.sub(r'\s+', '_', name) 66 | 67 | if ctx.type().name() == 'mat' or ctx.type().name() == 'vopmaterial': 68 | image = ctx.createNode('texture', node_name=name) 69 | image.parm('map').set(filename) 70 | 71 | elif ctx.type().name() == 'arnold_vopnet': 72 | image = ctx.createNode('image', node_name=name) 73 | image.parm('filename').set(filename) 74 | 75 | image.setColor(hou.Color(IMAGE_NODE_COLOR)) 76 | image.setPosition(position) 77 | 78 | 79 | def objGeom(network, ctx, filename): 80 | if not matchTypes(GEO_EXTENSIONS).search(filename): 81 | return 82 | 83 | name, ext = baseName(filename, GEO_EXTENSIONS) 84 | name = re.sub(r'\s+', '_', name) 85 | 86 | if ext == '.fbx': 87 | hou.hipFile.importFBX(filename) 88 | return 89 | 90 | if ext == '.ass': 91 | procedural = ctx.createNode('arnold_procedural', node_name=name.title()) 92 | procedural.setPosition(position) 93 | procedural.parm('ar_filename').set(filename) 94 | return 95 | 96 | geo = ctx.createNode('geo', node_name=name.title()) 97 | geo.setPosition(position) 98 | 99 | for child in geo.children(): 100 | child.destroy() 101 | 102 | if ext == '.abc': 103 | alembic = geo.createNode('alembic', node_name='Import_Alembic') 104 | geo.setColor(hou.Color(ALEMBIC_NODE_COLOR)) 105 | alembic.parm('fileName').set(filename) 106 | else: 107 | geometry = geo.createNode('file', node_name='Import_Geometry') 108 | geo.setColor(hou.Color(GEO_NODE_COLOR)) 109 | geometry.parm('file').set(filename) 110 | 111 | 112 | def sopGeom(network, ctx, filename): 113 | if not matchTypes(GEO_EXTENSIONS).search(filename): 114 | return 115 | 116 | name, ext = baseName(filename, GEO_EXTENSIONS) 117 | name = re.sub(r'\s+', '_', name) 118 | 119 | if ext == '.ass': 120 | procedural = ctx.createNode('arnold_asstoc', node_name=name.title()) 121 | procedural.setPosition(position) 122 | procedural.parm('ass_file').set(filename) 123 | return 124 | 125 | if ext == '.abc': 126 | alembic = ctx.createNode('alembic', node_name=name) 127 | alembic.setColor(hou.Color(ALEMBIC_NODE_COLOR)) 128 | alembic.setPosition(position) 129 | alembic.parm('fileName').set(filename) 130 | else: 131 | geometry = ctx.createNode('file', node_name=name) 132 | geometry.setColor(hou.Color(GEO_NODE_COLOR)) 133 | geometry.setPosition(position) 134 | geometry.parm('file').set(filename) 135 | 136 | 137 | def hdaAsset(network, ctx, filename): 138 | if not matchTypes(ASSET_EXTENSIONS).search(filename): 139 | return 140 | 141 | name, ext = baseName(filename, ASSET_EXTENSIONS) 142 | hou.hda.installFile(filename) 143 | 144 | 145 | def loadContents(network, ctx, type_ctx, filename): 146 | if type_ctx == hou.objNodeTypeCategory(): 147 | hdaAsset(network, ctx, filename) 148 | 149 | if type_ctx == hou.objNodeTypeCategory(): 150 | objGeom(network, ctx, filename) 151 | 152 | if type_ctx == hou.sopNodeTypeCategory(): 153 | sopGeom(network, ctx, filename) 154 | 155 | if type_ctx == hou.vopNodeTypeCategory(): 156 | shopImages(network, ctx, filename) 157 | 158 | if type_ctx == hou.chopNodeTypeCategory(): 159 | chanFiles(network, ctx, filename) 160 | 161 | if type_ctx == hou.cop2NodeTypeCategory(): 162 | copImages(network, ctx, filename) 163 | 164 | 165 | def dropAccept(filelist): 166 | # Exclude hip files 167 | if filelist and os.path.splitext(filelist[0])[1] == ".hip": 168 | return False 169 | 170 | for filename in filelist: 171 | network, ctx, type_ctx = getNetworkEditor() 172 | cursorPosition(network) # The current cursor position for creating a node 173 | loadContents(network, ctx, type_ctx, filename) 174 | 175 | return True -------------------------------------------------------------------------------- /houdini/drag_drop_files/nodes_color.py: -------------------------------------------------------------------------------- 1 | # Custom Nodes Color 2 | 3 | ALEMBIC_NODE_COLOR = (0.38, 0.38, 0.38) 4 | FBX_NODE_COLOR = (0.451, 0.369, 0.796) 5 | CLIP_NODE_COLOR = (0.518, 0.561, 0.741) 6 | GEO_NODE_COLOR = (0.38, 0.38, 0.38) 7 | IMAGE_NODE_COLOR = (0.29, 0.565, 0.886) -------------------------------------------------------------------------------- /houdini/drag_drop_files/type_extensions.py: -------------------------------------------------------------------------------- 1 | # Houdini Type Extensions 2 | 3 | GEO_EXTENSIONS = ['.abc', 4 | '.ai', 5 | '.ass', 6 | '.bgeo','.bgeo.sc','.bgeo.bz2','.bgeo.gz','.bgeo.lzma','.bgeo.sc', 7 | '.bhclassic','.bhclassic.bz2','.bhclassic.lzma','.bhclassic.gz','.bhclassic.sc','.bhclassicgz','.bhclassicsc', 8 | '.bjson','.bjson.gz','.bjson.sc','.bjsongz','.bjsonsc', 9 | '.bpoly', 10 | '.bstl', 11 | '.d', 12 | '.dxf', 13 | '.eps', 14 | '.fbx', 15 | '.geo','.geo.bz2','.geo.gz','.geo.lzma','.geo.sc','.geogz','.geosc', 16 | '.hclassic','.hclassic.bz2','.hclassic.gz','.hclassic.lzma','.hclassic.sc','.hclassicgz','.hclassicsc', 17 | '.iges', 18 | '.igs', 19 | '.json','.json.gz','.json.sc','.jsongz','.jsonsc', 20 | '.lw', 21 | '.lwp', 22 | '.obj', 23 | '.pc', 24 | '.pdb', 25 | '.ply', 26 | '.pmap', 27 | '.poly', 28 | '.rib', 29 | '.stl', 30 | '.vdb'] 31 | 32 | 33 | IMAGE_EXTENSIONS = ['.als', 34 | '.bmp', 35 | '.cin', 36 | '.dsm', 37 | '.exr', 38 | '.hdr', 39 | '.ies', 40 | '.jpeg', 41 | '.jpg', 42 | '.kdk', 43 | '.pic', 44 | '.pic.gz', 'pic.Z', 'picgz', '.piclc', 'picnc', 'picZ', 45 | '.pix', 46 | '.png', 47 | '.psb', 48 | '.psd', 49 | '.ptex', 50 | '.ptx', 51 | '.qtl', 52 | '.rat', 53 | '.rgb', 'rgba', 54 | '.rla', '.rla16', 55 | '.rlb', 'rlb16', 56 | '.sgi', 57 | '.si', 58 | '.tbf', 59 | '.tga', 60 | '.tif', 'tif16', 'tif3', 'tif32', 'tiff', 61 | '.vst', 62 | '.vtg', 63 | '.yuv'] 64 | 65 | 66 | CHAN_EXTENSIONS = ['.aif', 67 | '.aifc', 68 | '.aiff', 69 | '.bchan', 70 | '.bclip', 71 | '.bclipnc', 72 | '.chan', 73 | '.chn', 74 | '.clip', 75 | '.mp2', 76 | '.mp3', 77 | '.wav'] 78 | 79 | 80 | ASSET_EXTENSIONS = ['.hda','hdalc','hdanc', 81 | '.otl','.otllc','otlnc'] -------------------------------------------------------------------------------- /houdini/scenegraphtree_dragdrop/README.md: -------------------------------------------------------------------------------- 1 | # SceneGraphTree Drag and Drop 2 | 3 | This little script allows you to drag and drop prims from the scene graph tree into the node editor. 4 | 5 | The installation is as simple as possible. Copy this directory completely to your `HOUDINI_USER_PREF_DIR`. 6 | -------------------------------------------------------------------------------- /houdini/scenegraphtree_dragdrop/houdini20.5/scripts/nodegraph/dragdrop.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script is used to customize the handling when user drags something onto 3 | the network editor. 4 | 5 | This script must define 3 functions that will be called by the network editor. 6 | 7 | 1. dropTest 8 | 2. dropGetChoices 9 | 3. dropAccept 10 | """ 11 | 12 | import hou 13 | 14 | 15 | # Called to test whether we want to handle a drop of the source at the 16 | # specified position. The return value is a boolean tuple. The first 17 | # value indicates whether we handle the drop in the script while the 18 | # second value indicates whether to allow the default handling to take 19 | # place. 20 | # 21 | # ARGUMENTS 22 | # pane 23 | # The hou.NetworkEditor the cursor is currently over. 24 | # source 25 | # An object to query what is currently being dragged. 26 | # position 27 | # A tuple of two integers representing the pixel position 28 | # 29 | 30 | 31 | def dropTest(pane, source, position): 32 | if source.hasData(hou.qt.mimeType.usdPrimitivePath): 33 | return True, False 34 | return False, False 35 | 36 | 37 | # Called to get the handling options to present to the user. Returns 38 | # (token_list, label_list, help_list), a tuple of string lists. The 39 | # first string list represents menu tokens, the second, menu labels, 40 | # and the third, help for the corresponding menu entry. 41 | # 42 | # ARGUMENTS 43 | # pane 44 | # The hou.NetworkEditor the cursor is currently over. 45 | # source 46 | # An object to query what is currently being dragged. 47 | # position 48 | # A tuple of two integers representing the pixel position 49 | # 50 | 51 | 52 | def dropGetChoices(pane, source, position): 53 | return ["allprims"], ["All Prims"], ["All Prims"] 54 | 55 | 56 | def itemUnderCursor(pane): 57 | if not pane: 58 | return 59 | 60 | rect = pane.visibleBounds() 61 | rect_data = pane.networkItemsInBox( 62 | pane.posToScreen(rect.min()), 63 | pane.posToScreen(rect.max()), 64 | for_select=True, 65 | ) 66 | cursor_node = None 67 | if rect_data: 68 | cursor_pos = pane.cursorPosition() 69 | min_dist_to_cursor = 0.5 70 | 71 | for item, item_type, _ in rect_data: 72 | if item_type == "node": 73 | node_center = item.position() + item.size() / 2 74 | dist = node_center.distanceTo(cursor_pos) 75 | if dist < min_dist_to_cursor: 76 | min_dist_to_cursor = dist 77 | cursor_node = item 78 | 79 | return cursor_node 80 | 81 | 82 | # Called to process the handling option selected by user. Returns whether 83 | # or not the drop was successfully processed. 84 | # 85 | # ARGUMENTS 86 | # pane 87 | # The hou.NetworkEditor the cursor is currently over. 88 | # source 89 | # An object to query what is currently being dragged. 90 | # position 91 | # A tuple of two integers representing the pixel position 92 | # token 93 | # The handling option selected from the dropGetChoices() list. 94 | 95 | 96 | def dropAccept(pane, source, position, token): 97 | if isinstance(pane, hou.NetworkEditor) and isinstance(pane.pwd(), hou.LopNetwork): 98 | node = itemUnderCursor(pane) 99 | if node: 100 | primpaths = [] 101 | data = source.data(hou.qt.mimeType.usdPrimitivePath) 102 | if token == "allprims": 103 | for primpath in data: 104 | primpaths.append(primpath) 105 | else: 106 | primpaths.append(next(data)) 107 | primpattern = node.parm("primpath") or node.parm("primpattern") 108 | if primpattern: 109 | primpattern.set(" ".join(primpaths)) 110 | 111 | return True 112 | -------------------------------------------------------------------------------- /houdini/update_build/update_daily_build.py: -------------------------------------------------------------------------------- 1 | from __future__ import with_statement 2 | 3 | import sys 4 | import os 5 | import re 6 | import shutil 7 | import requests 8 | import platform 9 | 10 | # Configuration-related constants 11 | SIDEFX_OFFICIAL_URL = 'https://www.sidefx.com' 12 | SIDEFX_SIGNUP_URL = 'https://www.sidefx.com/login/' 13 | BUILD_DEVEL_URL = '/download/daily-builds/#category-devel' 14 | 15 | 16 | def download_daily_build(login, password): 17 | ''' Downloads the file from the given url and places it in 18 | specified destination folder.''' 19 | 20 | with requests.session() as client: 21 | # sets cookie 22 | client.get(SIDEFX_SIGNUP_URL) 23 | 24 | # Retrieve the CSRF token first 25 | csrftoken = client.cookies['csrftoken'] 26 | 27 | login_data = dict(username=login, 28 | password=password, 29 | csrfmiddlewaretoken=csrftoken, 30 | next=BUILD_DEVEL_URL) 31 | 32 | # Authorized on a site for access to builds 33 | request = client.post(SIDEFX_SIGNUP_URL, 34 | data=login_data, 35 | headers=dict(Referer=SIDEFX_SIGNUP_URL), 36 | timeout=None) 37 | 38 | # Find all references to assemblies 39 | build_pattern = r']\w+.\w+.\w+.\d+.)">([\w+\_.-]+)' 40 | get_builds = re.findall(build_pattern, request.text) 41 | 42 | if not get_builds: 43 | print "Builds not found. \ 44 | Please check your username and password for authorization." 45 | return 46 | 47 | # Sort the list by the most recent build 48 | unix_builds = sorted(filter(lambda build: 49 | get_platform() in build[-1], get_builds), 50 | key=lambda num: num[0]) 51 | 52 | unix_builds = filter(lambda build: re.match(r'\w+-\d+', build[-1]), unix_builds) 53 | link_build, file_build = unix_builds[-1] 54 | 55 | # Check updates new build 56 | current_build_version = check_updates() 57 | if current_build_version: 58 | build_version = re.search(r'\d{2}.\d.\d{3}', file_build).group(0) 59 | if build_version <= current_build_version: 60 | print 'The current build %s of the latest.' % build_version 61 | return 62 | 63 | build_latest_link = '%s%s%s' % (SIDEFX_OFFICIAL_URL, link_build, 'get') 64 | 65 | dst_dirname = os.path.dirname(os.path.realpath(__file__)) 66 | dst_file = '%s/%s' % (dst_dirname, file_build) 67 | 68 | with open(dst_file, 'wb') as f: 69 | # Sending a request to get the build 70 | get_save_link = client.get(build_latest_link, stream=True) 71 | if get_save_link.status_code != 200: 72 | return 73 | 74 | file_size = int(get_save_link.headers.get('content-length')) 75 | print "Downloading: %s Bytes: %s" % (file_size, file_build) 76 | 77 | if file_size is None: 78 | f.write(get_save_link.content) 79 | else: 80 | count = 0 81 | for data in get_save_link.iter_content(chunk_size=8192): 82 | count += len(data) 83 | f.write(data) 84 | done = int(50 * count / file_size) 85 | sys.stdout.write("\r[%s%s] %s%c" % ('=' * done, ' ' * (50-done), done * 2, 37)) 86 | sys.stdout.flush() 87 | f.close() 88 | 89 | 90 | def check_updates(): 91 | ''' Checking for the latest updates.''' 92 | try: 93 | current_build_version = os.environ['HOUDINI_VERSION'] 94 | except KeyError: 95 | current_build_version = None 96 | return current_build_version 97 | 98 | 99 | def get_platform(): 100 | system = platform.system().lower() 101 | if system == 'windows' or system == 'win32' or system == 'win64': 102 | return 'win' 103 | elif system == 'linux' or system == 'linux2': 104 | return 'linux' 105 | elif system == 'darwin': 106 | return 'macosx' 107 | 108 | 109 | if __name__ == "__main__": 110 | import optparse 111 | 112 | # Parse command-line arguments. 113 | usage = 'usage: %prog login password' 114 | parser = optparse.OptionParser(usage=usage) 115 | 116 | options, args = parser.parse_args() 117 | 118 | if len(args) < 2: 119 | parser.error('Both login and password must be specified.') 120 | 121 | client_login = str(args[0]) 122 | client_password = str(args[1]) 123 | 124 | download_daily_build(client_login, client_password) -------------------------------------------------------------------------------- /houdini/update_build/update_daily_build.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexwheezy/python/1509851ccf591e5123a44a7d3f2ea64688a205a4/houdini/update_build/update_daily_build.pyc --------------------------------------------------------------------------------