├── .gitignore ├── README.md ├── lmon ├── .gitignore ├── lmon │ ├── __init__.py │ ├── lmon.py │ ├── lmonbe.py │ └── lmonfe.py └── setup.py ├── mrnet ├── .gitignore ├── mrnet_module.cpp ├── mrnetbind.h ├── mrnetbind.py └── setup.py └── pgdb ├── .gitignore ├── INSTALL ├── LICENSE ├── README.md ├── TODO.org ├── build-load-file.sh ├── buildScripts ├── compilerLinks.py ├── hostEquivUpdate.py ├── rshCheck.py └── yamaCheck.py ├── depinstall.py ├── doc └── pgdbman.tex ├── gdbinit ├── launchmon-mpich-mvapich.diff ├── launchmon-mpich.diff ├── mrnet-filters ├── arec_filter.cc ├── build.py └── filter_hook.py ├── pgdb ├── src ├── __init__.py ├── comm.py ├── conf │ ├── __init__.py │ └── gdbconf.py ├── gdb_load_file.c ├── gdb_shared.py ├── gdbbe.py ├── gdbfe.py ├── gdblocal.py ├── interval.py ├── mi │ ├── __init__.py │ ├── commands.py │ ├── gdbmi.py │ ├── gdbmi_parser.py │ ├── gdbmi_recordhandler.py │ ├── gdbmi_records.py │ ├── gdbmiarec.py │ ├── gdbmicmd.py │ ├── gdbmipprinter.py │ └── varobj.py ├── misc │ ├── clean_shmem.py │ └── shmem_test.py ├── sbd.py └── varprint.py └── stlpprinters ├── Makefile.am ├── Makefile.in ├── hook.in └── libstdcxx ├── __init__.py └── v6 ├── __init__.py └── printers.py /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | \#*\# 3 | *.py[cod] 4 | raw_dump_* 5 | topo_* 6 | *.so 7 | *.idx 8 | *.ilg 9 | *.ind 10 | *.out 11 | *.toc -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | PGDB 2 | ==== 3 | 4 | **Warning: PGDB currently has several known major bugs and is somewhat out of date.** Real work on it will resume when my current project is more stable. 5 | 6 | PGDB is a parallel/distributed debugger, based upon GDB, designed for debugging 7 | MPI jobs on a cluster. The tool has been tested on Linux clusters and presently 8 | scales to about 1K processes. 9 | 10 | This package includes the PGDB application and associated Python bindings for 11 | LaunchMON and MRNet, two tools that PGDB requires. 12 | 13 | For further instructions on installing and using PGDB, see pgdb/README for an 14 | overview or the documentation in pgdb/doc for complete instructions. For PGDB 15 | licensing information, see pgdb/LICENSE. 16 | -------------------------------------------------------------------------------- /lmon/.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | \#*\# 3 | *.py[cod] 4 | raw_dump_* 5 | topo_* -------------------------------------------------------------------------------- /lmon/lmon/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ndryden/PGDB/88df53eca358c4478fa5e8f734b5ba47970e6bb9/lmon/lmon/__init__.py -------------------------------------------------------------------------------- /lmon/lmon/lmon.py: -------------------------------------------------------------------------------- 1 | """Common LaunchMON Python interface definitions. 2 | 3 | This defines some useful things common to both the LaunchMON front- and back-end 4 | interfaces, as well as things useful to things using this interface, such as 5 | exceptions and CTypes structures. 6 | 7 | """ 8 | 9 | from __future__ import print_function 10 | from ctypes import (Structure, c_int, c_char_p, POINTER, cast, c_void_p, 11 | memmove, string_at) 12 | import cPickle 13 | 14 | # The version of the LaunchMON API. 
This must match the header file.
15 | LMON_VERSION = 900100
16 | 
17 | # Base path to the LaunchMON install directory.
18 | lmon_path = "/usr/local"
19 | # The library for the LaunchMON front-end.
20 | lmon_fe_lib = lmon_path + "/lib/libmonfeapi.so"
21 | # The library for the LaunchMON back-end.
22 | lmon_be_lib = lmon_path + "/lib/libmonbeapi.so"
23 | # LaunchMON environment variables.
24 | lmon_environ = {"LMON_REMOTE_LOGIN": "/usr/bin/ssh",
25 | "LMON_PREFIX": lmon_path,
26 | "LMON_LAUNCHMON_ENGINE_PATH": lmon_path + "/bin/launchmon"}
27 | 
28 | def set_lmon_paths(path, fe_lib=None, be_lib=None):
29 | """Set the LaunchMON paths."""
30 | global lmon_path, lmon_fe_lib, lmon_be_lib
31 | lmon_path = path
32 | if fe_lib:
33 | lmon_fe_lib = fe_lib
34 | else:
35 | lmon_fe_lib = path + "/lib/libmonfeapi.so"
36 | if be_lib:
37 | lmon_be_lib = be_lib
38 | else:
39 | lmon_be_lib = path + "/lib/libmonbeapi.so"
40 | lmon_environ["LMON_PREFIX"] = path
41 | lmon_environ["LMON_LAUNCHMON_ENGINE_PATH"] = path + "/bin/launchmon"
42 | 
43 | def set_lmon_env(var, val):
44 | """Set environment variable var to val."""
45 | lmon_environ[var] = val
46 | 
47 | # LaunchMON constants.
48 | (LMON_OK, LMON_EINVAL, LMON_EBDARG, LMON_ELNCHR,
49 | LMON_EINIT, LMON_ESYS, LMON_ESUBCOM, LMON_ESUBSYNC,
50 | LMON_ETOUT, LMON_ENOMEM, LMON_ENCLLB, LMON_ECLLB,
51 | LMON_ENEGCB, LMON_ENOPLD, LMON_EBDMSG, LMON_EDUNAV,
52 | LMON_ETRUNC, LMON_EBUG, LMON_NOTIMPL, LMON_YES,
53 | LMON_NO) = list(range(21))
54 | lmon_const_map = [
55 | "LMON_OK",
56 | "LMON_EINVAL",
57 | "LMON_EBDARG",
58 | "LMON_ELNCHR",
59 | "LMON_EINIT",
60 | "LMON_ESYS",
61 | "LMON_ESUBCOM",
62 | "LMON_ESUBSYNC",
63 | "LMON_ETOUT",
64 | "LMON_ENOMEM",
65 | "LMON_ENCLLB",
66 | "LMON_ECLLB",
67 | "LMON_ENEGCB",
68 | "LMON_ENOPLD",
69 | "LMON_EBDMSG",
70 | "LMON_EDUNAV",
71 | "LMON_ETRUNC",
72 | "LMON_EBUG",
73 | "LMON_NOTIMPL",
74 | "LMON_YES",
75 | "LMON_NO"
76 | ]
77 | 
78 | class LMONException(Exception):
79 | """An error from LaunchMON.
80 | 
81 | This is raised whenever a LaunchMON function returns an error code that is
82 | not one of: LMON_OK, LMON_YES, or LMON_NO.
83 | 
84 | """
85 | def __init__(self, value):
86 | self.value = int(value)
87 | 
88 | def __str__(self):
89 | if self.value < len(lmon_const_map):
90 | return lmon_const_map[self.value]
91 | else:
92 | return "Unknown ({0})".format(self.value)
93 | 
94 | def print_lmon_error(self):
95 | """Print a short error message."""
96 | print("Caught LaunchMON error, code = {0} ({1})".format(self,
97 | self.value))
98 | 
99 | class MPIR_PROCDESC(Structure):
100 | """A CTypes structure for the MPIR_PROCDESC structure."""
101 | _fields_ = [("host_name", c_char_p),
102 | ("executable_name", c_char_p),
103 | ("pid", c_int)]
104 | 
105 | class MPIR_PROCDESC_EXT(Structure):
106 | """A CTypes structure for the MPIR_PROCDESC_EXT structure."""
107 | _fields_ = [("pd", MPIR_PROCDESC),
108 | ("mpirank", c_int)]
109 | 
110 | class lmon_daemon_env_t(Structure):
111 | """A CTypes structure for the lmon_daemon_env_t structure."""
112 | pass
113 | lmon_daemon_env_t._fields_ = [("envName", c_char_p),
114 | ("envValue", c_char_p),
115 | ("next", POINTER(lmon_daemon_env_t))]
116 | 
117 | def call(func, *args):
118 | """Call a LaunchMON function and handle raising exceptions.
119 | 
120 | func is the function to call.
121 | args are expanded to the positional arguments to pass to func.
122 | 
123 | The return code is returned if it is not an error.
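Any other return code raises an LMONException.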
124 | 
125 | """
126 | ret_code = func(*args)
127 | if ret_code not in [LMON_OK, LMON_YES, LMON_NO]:
128 | raise LMONException(ret_code)
129 | return ret_code
130 | 
131 | def create_array(ctype, lis):
132 | """A helper function to lay out a CTypes array."""
133 | if len(lis):
134 | array_type = ctype * len(lis)
135 | _lis = array_type(*tuple(lis))
136 | return _lis
137 | else:
138 | return None
139 | 
140 | def udata_serialize(udata):
141 | """Serialize data into a ctypes void pointer."""
142 | # Must use protocol 0; the binary protocol appears to break ctypes somehow.
143 | return cast(c_char_p(cPickle.dumps(udata, 0)), c_void_p)
144 | 
145 | def udata_serialize_len(udata):
146 | """Serialize data, returning both the serialized data and its length."""
147 | serialized = cPickle.dumps(udata, 0)
148 | return cast(c_char_p(serialized), c_void_p), len(serialized)
149 | 
150 | def udata_unserialize(udata):
151 | """Unserialize data."""
152 | return cPickle.loads(udata)
153 | 
154 | def pack(udata, msgbuf, msgbufmax, msgbuflen):
155 | """The pack callback for LaunchMON; see the relevant manpages."""
156 | udata_size = len(string_at(udata))
157 | if udata_size > msgbufmax:
158 | raise ValueError("LMon pack got data larger than the message buffer.")
159 | memmove(msgbuf, udata, udata_size)
160 | msgbuflen[0] = udata_size
161 | return 0
162 | 
163 | def unpack(udatabuf, udatabuflen, udata):
164 | """The unpack callback for LaunchMON; see the relevant manpages."""
165 | memmove(udata, udatabuf, udatabuflen)
166 | return 0
167 | 
-------------------------------------------------------------------------------- /lmon/lmon/lmonbe.py: --------------------------------------------------------------------------------
1 | """A CTypes interface to the LaunchMON back-end library."""
2 | 
3 | from ctypes import *
4 | import lmon
5 | 
6 | class LMON_be(object):
7 | """An interface to the LaunchMON back-end library using CTypes.
8 | 
9 | This loads the library, provides for type-checking of arguments, and handles
10 | some convenience things. See the LaunchMON manpages for additional
11 | information.
12 | 
13 | """
14 | 
15 | def __init__(self):
16 | """Initialize the LaunchMON back-end library."""
17 | self.lib = cdll.LoadLibrary(lmon.lmon_be_lib)
18 | self.pack_type = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int,
19 | POINTER(c_int))
20 | self.unpack_type = CFUNCTYPE(c_int, c_void_p, c_int, c_void_p)
21 | self.pack_cb = None
22 | self.unpack_cb = None
23 | # We use c_void_p because ctypes isn't good at the whole
24 | # multiple-pointers thing.
25 | self.lib.LMON_be_init.argtypes = [c_int, POINTER(c_int), c_void_p]
26 | self.lib.LMON_be_getMyRank.argtypes = [POINTER(c_int)]
27 | self.lib.LMON_be_amIMaster.argtypes = []
28 | self.lib.LMON_be_getSize.argtypes = [POINTER(c_int)]
29 | self.lib.LMON_be_handshake.argtypes = [c_void_p]
30 | self.lib.LMON_be_ready.argtypes = [c_void_p]
31 | self.lib.LMON_be_getMyProctabSize.argtypes = [POINTER(c_int)]
32 | # See above for why we use c_void_p.
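# (Callers pass the MPIR_PROCDESC_EXT array via byref, which ctypes accepts for a c_void_p argument.)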
33 | self.lib.LMON_be_getMyProctab.argtypes = [c_void_p, POINTER(c_int), 34 | c_int] 35 | self.lib.LMON_be_finalize.argtypes = [] 36 | self.lib.LMON_be_regPackForBeToFe.argtypes = [self.pack_type] 37 | self.lib.LMON_be_regUnpackForFeToBe.argtypes = [self.unpack_type] 38 | self.lib.LMON_be_sendUsrData.argtypes = [c_void_p] 39 | self.lib.LMON_be_recvUsrData.argtypes = [c_void_p] 40 | self.lib.LMON_be_barrier.argtypes = [] 41 | self.lib.LMON_be_broadcast.argtypes = [c_void_p, c_int] 42 | self.lib.LMON_be_scatter.argtypes = [c_void_p, c_int, c_void_p] 43 | 44 | def init(self, argc, argv): 45 | """Invoke LMON_be_init. 46 | 47 | argc is the number of arguments in argv. 48 | argv is a list of arguments to pass as argv. 49 | 50 | """ 51 | _argc = c_int(argc) 52 | # This is horribly ugly code to properly reconstruct argv for LaunchMON. 53 | # We stuff the arguments in string buffers (since they can be modified). 54 | # We stuff those into an array. 55 | # We add an entry at the end with a bunch of null bytes, since the last 56 | # argv entry is supposed to be a null and otherwise LaunchMON will make 57 | # malloc *very* unhappy. 58 | # We create a pointer to this array. 59 | # We pass that pointer by reference (another pointer). 60 | tmp_argv = [cast(create_string_buffer(x), c_char_p) for x in argv] 61 | tmp_argv.append(cast(create_string_buffer(32), c_char_p)) 62 | _argv = lmon.create_array(c_char_p, tmp_argv) 63 | argv_ref = c_void_p(addressof(_argv)) 64 | lmon.call(self.lib.LMON_be_init, lmon.LMON_VERSION, byref(_argc), 65 | byref(argv_ref)) 66 | 67 | def getMyRank(self): 68 | """Return the rank of this process.""" 69 | rank = c_int() 70 | lmon.call(self.lib.LMON_be_getMyRank, byref(rank)) 71 | return rank.value 72 | 73 | def amIMaster(self): 74 | """Return whether this process is the master.""" 75 | rc = lmon.call(self.lib.LMON_be_amIMaster) 76 | if rc == lmon.LMON_YES: 77 | return True 78 | return False 79 | 80 | def getSize(self): 81 | """Return the number of LaunchMON processes.""" 82 | size = c_int() 83 | lmon.call(self.lib.LMON_be_getSize, byref(size)) 84 | return size.value 85 | 86 | def handshake(self, udata): 87 | """Do the LaunchMON handshake. 88 | 89 | If udata is not None, it should be the length of the buffer to 90 | unserialize front-end data to. 91 | 92 | """ 93 | if udata is None: 94 | lmon.call(self.lib.LMON_be_handshake, cast(None, c_void_p)) 95 | else: 96 | buf = create_string_buffer(udata) 97 | lmon.call(self.lib.LMON_be_handshake, cast(buf, c_void_p)) 98 | # Check if we actually received data. 
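# (create_string_buffer zero-fills the buffer, so a leading NUL byte means the front-end sent no payload.)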
99 | if buf.raw[0] != "\0": 100 | return lmon.udata_unserialize(buf.value) 101 | return None 102 | 103 | def ready(self, udata): 104 | """Inform the front-end that we are ready.""" 105 | if udata is None: 106 | lmon.call(self.lib.LMON_be_ready, cast(None, c_void_p)) 107 | else: 108 | lmon.call(self.lib.LMON_be_ready, lmon.udata_serialize(udata)) 109 | 110 | def getMyProctabSize(self): 111 | """Return the size of the process table.""" 112 | size = c_int() 113 | lmon.call(self.lib.LMON_be_getMyProctabSize, byref(size)) 114 | return size.value 115 | 116 | def getMyProctab(self, maxsize): 117 | """Return the process table and size for this process.""" 118 | proctab_type = lmon.MPIR_PROCDESC_EXT * maxsize 119 | proctab = proctab_type() 120 | size = c_int() 121 | lmon.call(self.lib.LMON_be_getMyProctab, byref(proctab), byref(size), 122 | maxsize) 123 | return proctab, size.value 124 | 125 | def finalize(self): 126 | """Finalize this session.""" 127 | lmon.call(self.lib.LMON_be_finalize) 128 | 129 | def regPackForBeToFe(self, callback): 130 | """Register a pack function.""" 131 | self.pack_cb = self.pack_type(callback) 132 | lmon.call(self.lib.LMON_be_regPackForBeToFe, self.pack_cb) 133 | 134 | def regUnpackForFeToBe(self, callback): 135 | """Register an unpack function.""" 136 | self.unpack_cb = self.unpack_type(callback) 137 | lmon.call(self.lib.LMON_be_regUnpackForFeToBe, self.unpack_cb) 138 | 139 | def sendUsrData(self, udata): 140 | """Send data to the front-end.""" 141 | lmon.call(self.lib.LMON_be_sendUsrData, lmon.udata_serialize(udata)) 142 | 143 | def recvUsrData(self, buf_size): 144 | """Receive data from the front-end.""" 145 | udata = create_string_buffer(buf_size) 146 | lmon.call(self.lib.LMON_be_recvUsrData, cast(udata, c_void_p)) 147 | if self.amIMaster(): 148 | return lmon.udata_unserialize(udata.raw) 149 | return None 150 | 151 | def barrier(self): 152 | """Make a barrier.""" 153 | lmon.call(self.lib.LMON_be_barrier) 154 | 155 | def broadcast(self, udata, size): 156 | """Broadcast data. 157 | 158 | The master provides data and the size of it, and this returns None. 159 | The slaves can provide None for the data, and size is the size of the 160 | buffer, which is returned after unserialization. 161 | 162 | """ 163 | if self.amIMaster(): 164 | # Master sends the data, returns None. 165 | lmon.call(self.lib.LMON_be_broadcast, lmon.udata_serialize(udata), 166 | size) 167 | return None 168 | else: 169 | # Slave returns the data. 170 | buf = create_string_buffer(size) 171 | lmon.call(self.lib.LMON_be_broadcast, cast(buf, c_void_p), size) 172 | # Depending on the application, there appears to be a spurious null 173 | # byte at the start of the data. I do not know why. This avoids it. 174 | if buf.raw[0] == "\0" and buf.raw[1] != "\0": 175 | buf = string_at(addressof(buf) + 1) 176 | else: 177 | buf = buf.value 178 | return lmon.udata_unserialize(buf) 179 | 180 | def scatter(self, udata_array, elem_size): 181 | """Scatter data. 182 | 183 | The master provides udata_array, which is an array of data to scatter. 184 | The slaves may provide None for the data. 185 | elem_size is the size of each element. Due to serialization of data, 186 | this should be the maximum size of the data, and the elements are padded 187 | to this length. 188 | 189 | All callers return their respective data (including the master). 190 | 191 | """ 192 | buf = create_string_buffer(elem_size) 193 | if self.amIMaster(): 194 | # Master pads data if needed and sends. 
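# Each element is serialized and then padded out to elem_size, so every receiver gets a fixed-size chunk.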
195 | total_size = len(udata_array) * elem_size 196 | send_buf = create_string_buffer(total_size) 197 | buf_addr = addressof(send_buf) 198 | idx = 0 199 | for elem in udata_array: 200 | tmp_buf = create_string_buffer( 201 | string_at(lmon.udata_serialize(elem)), elem_size) 202 | memmove(buf_addr + (idx * elem_size), tmp_buf, elem_size) 203 | idx += 1 204 | lmon.call(self.lib.LMON_be_scatter, cast(send_buf, c_void_p), 205 | elem_size, cast(buf, c_void_p)) 206 | else: 207 | lmon.call(self.lib.LMON_be_scatter, cast(None, c_void_p), elem_size, 208 | cast(buf, c_void_p)) 209 | # This is here for the same reason as in broadcast. 210 | # Note that it appears that the null byte is only present for children. 211 | if buf.raw[0] == "\0" and buf.raw[1] != "\0": 212 | buf = string_at(addressof(buf) + 1) 213 | else: 214 | buf = buf.value 215 | return lmon.udata_unserialize(buf) 216 | -------------------------------------------------------------------------------- /lmon/lmon/lmonfe.py: -------------------------------------------------------------------------------- 1 | """A CTypes interface to the LaunchMON front-end library.""" 2 | 3 | from ctypes import * 4 | import lmon 5 | 6 | class LMON_fe(object): 7 | """An interface to the LaunchMON front-end library using CTypes. 8 | 9 | This loads the library, provides for type-checking of arguments, and handles 10 | some convenience things. See the LaunchMON manpages for additional 11 | information. 12 | 13 | """ 14 | 15 | def __init__(self): 16 | """Initialize the LaunchMON front-end library.""" 17 | self.lib = cdll.LoadLibrary(lmon.lmon_fe_lib) 18 | # Used for keeping callbacks alive. 19 | self.pack_cbs = {} 20 | self.unpack_cbs = {} 21 | self.pack_type = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, 22 | POINTER(c_int)) 23 | self.unpack_type = CFUNCTYPE(c_int, c_void_p, c_int, c_void_p) 24 | # Set up argument types. 25 | self.lib.LMON_fe_init.argtypes = [c_int] 26 | self.lib.LMON_fe_createSession.argtypes = [POINTER(c_int)] 27 | self.lib.LMON_fe_attachAndSpawnDaemons.argtypes = [c_int, c_char_p, 28 | c_int, c_char_p, 29 | POINTER(c_char_p), 30 | c_void_p, c_void_p] 31 | self.lib.LMON_fe_launchAndSpawnDaemons.argtypes = [c_int, c_char_p, 32 | c_char_p, 33 | POINTER(c_char_p), 34 | c_char_p, 35 | POINTER(c_char_p), 36 | c_void_p, c_void_p] 37 | self.lib.LMON_fe_regPackForFeToBe.argtypes = [c_int, self.pack_type] 38 | self.lib.LMON_fe_regUnpackForBeToFe.argtypes = [c_int, self.unpack_type] 39 | self.lib.LMON_fe_sendUsrDataBe.argtypes = [c_int, c_void_p] 40 | self.lib.LMON_fe_recvUsrDataBe.argtypes = [c_int, c_void_p] 41 | self.lib.LMON_fe_putToBeDaemonEnv.argtypes = [c_int, 42 | POINTER(lmon.lmon_daemon_env_t), 43 | c_int] 44 | self.lib.LMON_fe_getProctableSize.argtypes = [c_int, POINTER(c_uint)] 45 | # We use c_void_p here because ctypes isn't good at resolving the 46 | # multiple-pointers. 
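# (getProctable below fills the MPIR_PROCDESC_EXT array passed via byref for this argument.)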
47 | self.lib.LMON_fe_getProctable.argtypes = [c_int, c_void_p, 48 | POINTER(c_uint), c_uint] 49 | self.lib.LMON_fe_detach.argtypes = [c_int] 50 | self.lib.LMON_fe_shutdownDaemons.argtypes = [c_int] 51 | self.lib.LMON_fe_kill.argtypes = [c_int] 52 | 53 | def init(self): 54 | """Invoke LMON_fe_init.""" 55 | lmon.call(self.lib.LMON_fe_init, lmon.LMON_VERSION) 56 | 57 | def createSession(self): 58 | """Create and return a session handle with LMON_fe_createSession.""" 59 | session = c_int() 60 | lmon.call(self.lib.LMON_fe_createSession, byref(session)) 61 | return session.value 62 | 63 | def attachAndSpawnDaemons(self, session, hostname, pid, daemon, d_argv, 64 | febe_data, befe_data): 65 | """Invoke LMON_fe_attachAndSpawnDaemons. 66 | 67 | See the manpages. d_argv is a list. 68 | befe_data is the size of the desired buffer or None. 69 | 70 | """ 71 | # Need a trailing null entry on the array. 72 | d_argv += [None] 73 | _d_argv = lmon.create_array(c_char_p, d_argv) 74 | if febe_data is not None: 75 | _febe_data = lmon.udata_serialize(febe_data) 76 | else: 77 | _febe_data = cast(None, c_void_p) 78 | buf = None 79 | if befe_data is not None: 80 | buf = create_string_buffer(befe_data) 81 | _befe_data = cast(buf, c_void_p) 82 | else: 83 | _befe_data = cast(None, c_void_p) 84 | lmon.call(self.lib.LMON_fe_attachAndSpawnDaemons, session, hostname, 85 | pid, daemon, _d_argv, _febe_data, _befe_data) 86 | if befe_data: 87 | return lmon.udata_unserialize(buf.value) 88 | else: 89 | return None 90 | 91 | def launchAndSpawnDaemons(self, session, hostname, launcher, l_argv, daemon, 92 | d_argv, febe_data, befe_data): 93 | """Invoke LMON_fe_launchAndSpawnDaemons.""" 94 | # Need trailing null entries on the arrays. 95 | l_argv += [None] 96 | d_argv += [None] 97 | _l_argv = lmon.create_array(c_char_p, l_argv) 98 | _d_argv = lmon.create_array(c_char_p, d_argv) 99 | if febe_data is not None: 100 | _febe_data = lmon.udata_serialize(febe_data) 101 | else: 102 | _febe_data = cast(None, c_void_p) 103 | buf = None 104 | if befe_data is not None: 105 | buf = create_string_buffer(befe_data) 106 | _befe_data = cast(buf, c_void_p) 107 | else: 108 | _befe_data = cast(None, c_void_p) 109 | lmon.call(self.lib.LMON_fe_launchAndSpawnDaemons, session, hostname, 110 | launcher, _l_argv, daemon, _d_argv, _febe_data, _befe_data) 111 | if befe_data: 112 | return lmon.udata_unserialize(buf.value) 113 | else: 114 | return None 115 | 116 | def regPackForFeToBe(self, session, callback): 117 | """Register a pack function with LMON_fe_regPackForFeToBe.""" 118 | cb = self.pack_type(callback) 119 | self.pack_cbs[session] = cb 120 | lmon.call(self.lib.LMON_fe_regPackForFeToBe, session, cb) 121 | 122 | def regUnpackForBeToFe(self, session, callback): 123 | """Register an unpack function with LMON_fe_regUnpackForBeToFe.""" 124 | cb = self.unpack_type(callback) 125 | self.unpack_cbs[session] = cb 126 | lmon.call(self.lib.LMON_fe_regUnpackForBeToFe, session, cb) 127 | 128 | def sendUsrDataBe(self, session, febe_data): 129 | """Send user data to the backend.""" 130 | lmon.call(self.lib.LMON_fe_sendUsrDataBe, session, 131 | lmon.udata_serialize(febe_data)) 132 | 133 | def recvUsrDataBe(self, session, buf_size): 134 | """Receive user data from the backend.""" 135 | befe_data = create_string_buffer(buf_size) 136 | lmon.call(self.lib.LMON_fe_recvUsrDataBe, session, 137 | cast(befe_data, c_void_p)) 138 | return lmon.udata_unserialize(befe_data.raw) 139 | 140 | def putToBeDaemonEnv(self, session, environ): 141 | """Set up the backend environment with 
LMON_fe_putToBeDaemonEnv. 142 | 143 | Environ is a list of tuples of keys and values. 144 | 145 | """ 146 | env_list_type = lmon.lmon_daemon_env_t * len(environ) 147 | env_list = env_list_type() 148 | for k, env_item in enumerate(environ): 149 | env_list[k].envName = env_item[0] 150 | env_list[k].envValue = env_item[1] 151 | if k < (len(environ) - 1): 152 | env_list[k].next = pointer(env_list[k + 1]) 153 | else: 154 | env_list[k].next = None 155 | lmon.call(self.lib.LMON_fe_putToBeDaemonEnv, session, env_list, 156 | len(environ)) 157 | 158 | def getProctableSize(self, session): 159 | """Return the size of the process table.""" 160 | size = c_uint() 161 | lmon.call(self.lib.LMON_fe_getProctableSize, session, byref(size)) 162 | return size.value 163 | 164 | def getProctable(self, session, maxsize): 165 | """Return the process table and its size.""" 166 | proctab_type = lmon.MPIR_PROCDESC_EXT * maxsize 167 | proctab = proctab_type() 168 | size = c_uint() 169 | lmon.call(self.lib.LMON_fe_getProctable, session, byref(proctab), 170 | byref(size), c_uint(maxsize)) 171 | return proctab, size.value 172 | 173 | def detach(self, session): 174 | """Detach from the resource manager but leave daemons running.""" 175 | lmon.call(self.lib.LMON_fe_detach, session) 176 | 177 | def shutdownDaemons(self, session): 178 | """Detach from the resource manager and shut down daemons.""" 179 | lmon.call(self.lib.LMON_fe_shutdownDaemons, session) 180 | 181 | def kill(self, session): 182 | """Destroy all resources associated with the session.""" 183 | lmon.call(self.lib.LMON_fe_kill, session) 184 | -------------------------------------------------------------------------------- /lmon/setup.py: -------------------------------------------------------------------------------- 1 | """Setup script for installing LaunchMON.""" 2 | 3 | from distutils.core import setup 4 | 5 | setup(name="lmon", 6 | version="0.01", 7 | description="Python interface to LaunchMON", 8 | packages=["lmon"]) 9 | -------------------------------------------------------------------------------- /mrnet/.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | \#*\# 3 | *.py[cod] 4 | raw_dump_* 5 | topo_* -------------------------------------------------------------------------------- /mrnet/mrnetbind.h: -------------------------------------------------------------------------------- 1 | typedef char* CharStar; 2 | typedef void* VoidStar; 3 | typedef const char* ConstCharStar; 4 | typedef const void* ConstVoidStar; 5 | 6 | /** 7 | * Layout a list of strings into an array of pointers to the C-style strings. 8 | */ 9 | char** layout_argv(PyObject *list) { 10 | if (!PyList_Check(list)) { 11 | PyErr_SetString(PyExc_TypeError, "You must provide a list."); 12 | return NULL; 13 | } 14 | Py_ssize_t size = PyList_Size(list); 15 | Py_ssize_t i; 16 | PyObject* tmp; 17 | char** argv = (char**) malloc(sizeof(char*) * (size + 1)); 18 | if (!argv) { 19 | PyErr_NoMemory(); 20 | return NULL; 21 | } 22 | for (i = 0; i < size; ++i) { 23 | tmp = PyList_GetItem(list, i); 24 | if (!tmp) { 25 | free(argv); 26 | return NULL; 27 | } 28 | argv[i] = PyString_AsString(tmp); 29 | if (!argv[i]) { 30 | free(argv); 31 | return NULL; 32 | } 33 | } 34 | argv[size] = 0; 35 | return argv; 36 | } 37 | 38 | /** 39 | * Clean up the above function. 40 | */ 41 | void layout_argv_cleanup(char** argv) { 42 | free(argv); 43 | } 44 | 45 | /** 46 | * Wrap a Python callback for MRNet's event callback system. 
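* The void* data argument is the Python callable to invoke; the GIL is acquired around the call when threading is active.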
47 | */
48 | void _wrap_EventCallback(MRN::Event* e, void* data) {
49 | // This lets us check if threads have been initialized, and if not, we don't
50 | // use the locking API.
51 | int threads_inited = PyEval_ThreadsInitialized();
52 | PyGILState_STATE gstate;
53 | if (threads_inited) {
54 | gstate = PyGILState_Ensure();
55 | }
56 | PyObject* callback = (PyObject*) data;
57 | PyObject* ret = PyObject_CallFunction(callback, NULL);
58 | if (ret == NULL) {
59 | // There was some sort of error. This is really ugly.
60 | exit(1);
61 | }
62 | // Discard the reference to the result.
63 | Py_DECREF(ret);
64 | if (threads_inited) {
65 | PyGILState_Release(gstate);
66 | }
67 | }
68 | 
-------------------------------------------------------------------------------- /mrnet/setup.py: --------------------------------------------------------------------------------
1 | """Build and install MRNet."""
2 | 
3 | from distutils.core import setup, Extension
4 | 
5 | mrnet_path = raw_input("Please type the base path to MRNet: ")
6 | if mrnet_path[-1:] == '/':
7 | mrnet_path = mrnet_path[:-1]
8 | mrnet = Extension("MRNet",
9 | sources=["mrnet_module.cpp"],
10 | include_dirs=[mrnet_path + "/include"],
11 | library_dirs=[mrnet_path + "/lib"],
12 | libraries=["mrnet", "xplat",
13 | "boost_timer-mt", "boost_system-mt"],
14 | extra_compile_args=[],
15 | extra_link_args=["-Wl,-rpath=" + mrnet_path + "/lib",
16 | "-Wl,-E"])
17 | setup(name="MRNet", version="0.01",
18 | description="Python interface to MRNet", ext_modules=[mrnet])
-------------------------------------------------------------------------------- /pgdb/.gitignore: --------------------------------------------------------------------------------
1 | *~
2 | \#*\#
3 | *.py[cod]
4 | raw_dump_*
5 | topo_*
-------------------------------------------------------------------------------- /pgdb/INSTALL: --------------------------------------------------------------------------------
1 | Requirements:
2 | - Python version 2.6 (should work with 2.7 as well, probably not with 3.x).
3 | - GDB version >= 7.0. The most recent version available is highly recommended; at least 7.2
4 | is strongly encouraged.
5 | - LaunchMON 0.7.2 or 1.0.
6 | - MRNet 4.0.0.
7 | - PyBindGen 0.16.0.
8 | - mpi4py 1.3.
9 | 
10 | A Python script, depinstall.py, is provided to download, build, and install the above
11 | versions of LaunchMON (0.7.2 is the latest packaged version), MRNet, PyBindGen, and mpi4py. It
12 | works as follows:
13 | 
14 | - To build everything: python depinstall.py [configure parameters]
15 | This downloads, unpacks, builds, and installs the above packages. Optional parameters can be
16 | provided that are passed verbatim to all configure scripts.
17 | - To clean everything: python depinstall.py clean
18 | This removes all downloaded and extracted files.
19 | 
20 | If additional control is needed, depinstall.py can be edited. Alternatively, these packages may
21 | be built manually.
22 | 
23 | Additionally, this script should automatically build and install the MRNet bindings.
24 | 
25 | Installation:
26 | 1. Install the requirements above. Ensure that PyBindGen, mpi4py, and the MRNet bindings are in
27 | your Python path.
28 | 2. Update the LaunchMON configuration in conf/lmonconf.py. The comments there are helpful.
29 | - Indicate whether you are using LaunchMON 1.0 or not.
30 | - For whichever version of LaunchMON you are using, update the paths for lmon_fe_lib and
31 | lmon_be_lib to point to the LaunchMON front- and back-end libraries.
32 | - Update the environment variables for LMON_PREFIX and LMON_LAUNCHMON_ENGINE_PATH for the 33 | LaunchMON prefix and the launchmon binary, respectively. 34 | 3. Update the GDB configuration. 35 | - You can set the path to load MRNet or other Python modules with: 36 | import sys 37 | sys.path.append("/path/to/installation/") 38 | - Update the backend_args to the full path to your gdbbe.py. 39 | - Update the MRNet environment variables MRNET_COMM_PATH and LD_LIBRARY_PATH to point to 40 | the MRNet mrnet_commnode binary and library directory, respectively. 41 | - Update the gdb_init_path to the location of the gdbinit file included here. 42 | - Update any other configuration options you want. -------------------------------------------------------------------------------- /pgdb/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012, Lawrence Livermore National Security, LLC. 2 | Produced at the Lawrence Livermore National Laboratory 3 | Written by Nikoli Dryden 4 | CODE-OCEC-12-059 5 | All rights reserved. 6 | 7 | This file is part of PGDB. For details, see 8 | 9 | https://github.com/ndryden/PGDB 10 | 11 | Please also read this link – Additional BSD Notice. 12 | 13 | Redistribution and use in source and binary forms, with or without modification, are 14 | permitted provided that the following conditions are met: 15 | 16 | • Redistributions of source code must retain the above copyright notice, 17 | this list of conditions and the disclaimer below. 18 | • Redistributions in binary form must reproduce the above copyright notice, 19 | this list of conditions and the disclaimer (as noted below) in the 20 | documentation and/or other materials provided with the distribution. 21 | • Neither the name of the LLNS/LLNL nor the names of its contributors may be 22 | used to endorse or promote products derived from this software without 23 | specific prior written permission. 24 | 25 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 26 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 | ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, 29 | THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 30 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 31 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 32 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 33 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 34 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 35 | THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 | 37 | Additional BSD Notice 38 | 39 | 1. This notice is required to be provided under our contract with the 40 | U.S. Department of Energy (DOE). This work was produced at Lawrence 41 | Livermore National Laboratory under Contract No. DE-AC52-07NA27344 with 42 | the DOE. 43 | 44 | 2. Neither the United States Government nor Lawrence Livermore National 45 | Security, LLC nor any of their employees, makes any warranty, express or 46 | implied, or assumes any liability or responsibility for the accuracy, 47 | completeness, or usefulness of any information, apparatus, product, or 48 | process disclosed, or represents that its use would not infringe 49 | privately-owned rights. 50 | 51 | 3. 
Also, reference herein to any specific commercial products, process, or
52 | services by trade name, trademark, manufacturer or otherwise does not
53 | necessarily constitute or imply its endorsement, recommendation, or
54 | favoring by the United States Government or Lawrence Livermore National
55 | Security, LLC. The views and opinions of authors expressed herein do not
56 | necessarily state or reflect those of the United States Government or
57 | Lawrence Livermore National Security, LLC, and shall not be used for
58 | advertising or product endorsement purposes.
-------------------------------------------------------------------------------- /pgdb/README.md: --------------------------------------------------------------------------------
1 | PGDB
2 | ====
3 | PGDB is a GDB-based parallel/distributed debugger, written primarily in Python,
4 | for debugging MPI applications on a cluster.
5 | 
6 | License
7 | -------
8 | PGDB is licensed under the BSD License with an additional notice. See LICENSE
9 | for more details.
10 | 
11 | Availability
12 | ------------
13 | PGDB is available at its GitHub project page, <https://github.com/ndryden/PGDB>.
14 | 
15 | Installation
16 | ------------
17 | This is a brief summary of the installation process. For full details, see the
18 | installation section in the manual available in docs/pgdbman.pdf. See the
19 | Further Documentation section if the manual is not present.
20 | 
21 | ### Requirements ###
22 | * Python 2.6 or greater (2.7 recommended, >= 3 not supported)
23 | 
24 | * GDB 7.0 or greater (7.4 or greater recommended)
25 | 
26 | * LaunchMON 0.7.2 or greater (1.0 recommended)
27 | 
28 | * MRNet 4.0.0
29 | 
30 | * PyBindGen 0.16.0 or greater
31 | 
32 | This is needed only if you need to re-generate the MRNet bindings.
33 | * Python bindings for MRNet (included)
34 | * Python bindings for LaunchMON (included)
35 | 
36 | ### Installing ###
37 | To install PGDB, simply place this pgdb folder anywhere on your system. There are
38 | no further steps, assuming all dependencies have been installed correctly.
39 | 
40 | You will need to update the conf/gdbconf.py configuration file with your local
41 | configuration options. Additionally, you may need to update the LaunchMON
42 | configuration file before you install it.
43 | 
44 | Usage
45 | -----
46 | PGDB can run in either attach or launch mode.
47 | 
48 | In attach mode, PGDB will debug an already-running MPI job. PGDB will need to be
49 | run on the same machine that you run your `mpirun` command (or equivalent) from.
50 | You will need the PID of this process, which you can obtain with e.g.
51 | `ps x | grep mpirun`. You then need to run PGDB with `pgdb -p PID` to begin
52 | debugging the job.
53 | 
54 | In launch mode, PGDB will launch a new MPI job directly under its control. The
55 | syntax for this is `pgdb --launcher launcher -a args` where launcher is the MPI
56 | launcher to use (defaults to `srun` if not specified) and args are the arguments
57 | you would typically pass to the launcher in order to launch the job.
58 | 
59 | Additional information on using PGDB is available in the manual (see Further
60 | Documentation), and brief command-line documentation is available by running
61 | `pgdb -h`.
62 | 
63 | Further Documentation
64 | ---------------------
65 | Complete documentation on PGDB is available in docs/pgdbman.pdf. If the manual
66 | is not present, it can be generated from the included LaTeX sources using
67 | pdflatex.
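For example, assuming the manual's source is pgdbman.tex in the documentation directory, running `pdflatex pgdbman.tex` twice (the second pass resolves cross-references and the table of contents) should produce the PDF.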
68 | 
69 | Credits
70 | -------
71 | PGDB is written and maintained by Nikoli Dryden, whom you can reach at either of
72 | these emails: and .
73 | 
74 | Development of PGDB has been supported by Lawrence Livermore National Laboratory
75 | (LLNL), the National Center for Supercomputing Applications (NCSA), and the
76 | Extreme Science and Engineering Discovery Environment (XSEDE).
77 | 
-------------------------------------------------------------------------------- /pgdb/TODO.org: --------------------------------------------------------------------------------
1 | * PGDB
2 | ** TODO MRNet bindings
3 | ** TODO Improve depinstall.py:
4 | *** TODO Remove temporary directory
5 | *** TODO PyBindGen soft req
6 | *** TODO Better configuration/options
7 | *** TODO Write python version to pgdb
8 | ** TODO Searching for LMon libs.
9 | ** TODO info locals
10 | ** TODO NetworkTopology::Node::get_Parent from root
11 | ** TODO Table display class
12 | ** TODO Specify gcc version/pretty printers on command line
13 | 
-------------------------------------------------------------------------------- /pgdb/build-load-file.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | gcc -O0 -g -std=c99 -Wall -Wno-int-to-pointer-cast -Wno-pointer-to-int-cast -shared -fPIC src/gdb_load_file.c -o load_file.so -ldl -pthread -lrt
4 | 
-------------------------------------------------------------------------------- /pgdb/buildScripts/compilerLinks.py: --------------------------------------------------------------------------------
1 | import os
2 | if "4.4" not in os.path.realpath('/usr/bin/gcc'):
3 | if os.path.isfile('/usr/bin/gcc-4.4'):
4 | print "PGDB requires gcc 4.4. Will update gcc to symlink to gcc-4.4"
5 | os.unlink("/usr/bin/gcc")
6 | os.symlink('/usr/bin/gcc-4.4', '/usr/bin/gcc')
7 | else:
8 | print "PGDB requires gcc 4.4 which is not installed"
9 | if "4.4" not in os.path.realpath('/usr/bin/g++'):
10 | if os.path.isfile('/usr/bin/g++-4.4'):
11 | print "PGDB requires g++ 4.4. Will update g++ to symlink to g++-4.4"
12 | os.unlink("/usr/bin/g++")
13 | os.symlink('/usr/bin/g++-4.4', '/usr/bin/g++')
14 | else:
15 | print "PGDB requires g++ 4.4 which is not installed"
16 | 
-------------------------------------------------------------------------------- /pgdb/buildScripts/hostEquivUpdate.py: --------------------------------------------------------------------------------
1 | import os
2 | os.system("echo \"\nlocalhost\" >> /etc/hosts.equiv")
-------------------------------------------------------------------------------- /pgdb/buildScripts/rshCheck.py: --------------------------------------------------------------------------------
1 | import os
2 | a = os.path.realpath("/usr/bin/rsh")
3 | if "ssh" in a:
4 | os.unlink("/usr/bin/rsh")
5 | os.symlink("/etc/alternatives/rsh", "/usr/bin/rsh")
6 | 
-------------------------------------------------------------------------------- /pgdb/buildScripts/yamaCheck.py: --------------------------------------------------------------------------------
1 | import os
2 | print "Yama kernel patches must be disabled for PGDB to work. This is a potential security risk."
3 | a = raw_input('Continue? y or n: ')
4 | if a == 'y':
5 | os.system("echo \"0\" | sudo tee /proc/sys/kernel/yama/ptrace_scope")
6 | else:
7 | print "exiting..."
8 | -------------------------------------------------------------------------------- /pgdb/depinstall.py: -------------------------------------------------------------------------------- 1 | """Download and install PGDB dependencies. 2 | 3 | This downloads LaunchMON and MRNet, builds, and installs them.""" 4 | 5 | import subprocess, sys, os, compileall, re 6 | 7 | # The curl command to invoke. 8 | curl = "curl -L {0}" 9 | 10 | # The extraction command for .tar.gz. 11 | extract_tgz = "tar -xzf {0}" 12 | # The extraction command for .tar.bz2. 13 | extract_tbz2 = "tar -xjf {0}" 14 | # The extraction command for .zip. 15 | extract_zip = "unzip {0}" 16 | 17 | # The configure command to use. 18 | configure = "./configure {0}" 19 | 20 | # The make command to build. 21 | make = "make" 22 | 23 | # The make install command to install. 24 | make_install = "make install" 25 | 26 | # Waf configure/build/install commands. 27 | waf_configure = "./waf configure {0}" 28 | waf_build = "./waf" 29 | waf_install = "./waf install" 30 | 31 | # Python distutils build/install. 32 | pydist_build = "python setup.py build" 33 | pydist_install = "python setup.py install {0}" 34 | 35 | # How to clean. 36 | clean_file = "rm -f {0}" 37 | clean_dir = "rm -rf {0}" 38 | 39 | # Default configuration options. 40 | config_opts = "" 41 | 42 | # The directory to download/build things in. 43 | working_dir = "third-party" 44 | 45 | # LaunchMON sources. 46 | launchmon_url = "http://sourceforge.net/projects/launchmon/files/launchmon/0.7%20stable%20releases/launchmon-0.7.2.tar.gz/download" 47 | launchmon_dist = "launchmon-0.7.2.tar.gz" 48 | launchmon_dir = launchmon_dist.replace(".tar.gz", "") 49 | launchmon_extract = extract_tgz 50 | launchmon_config = "--with-rm=slurm" 51 | 52 | # MRNet sources. 53 | mrnet_url = "ftp://ftp.cs.wisc.edu/paradyn/mrnet/mrnet_4.0.0.tar.gz" 54 | mrnet_dist = "mrnet_4.0.0.tar.gz" 55 | mrnet_dir = mrnet_dist.replace(".tar.gz", "") 56 | mrnet_extract = extract_tgz 57 | mrnet_config = "" 58 | 59 | # PyBindGen sources. 60 | pybindgen_url = "http://pybindgen.googlecode.com/files/pybindgen-0.16.0.tar.bz2" 61 | pybindgen_dist = "pybindgen-0.16.0.tar.bz2" 62 | pybindgen_dir = pybindgen_dist.replace(".tar.bz2", "") 63 | pybindgen_extract = extract_tbz2 64 | pybindgen_config = "" 65 | 66 | # MRNet bindings. 67 | mrnetbind_url = None 68 | mrnetbind_dist = None 69 | mrnetbind_dir = "../mrnet" 70 | mrnetbind_extract = None 71 | mrnetbind_config = "" 72 | 73 | def download_extract(url, dist, extract): 74 | """Download and extract something.""" 75 | ret = subprocess.call(curl.format(url) + " > " + dist, stdout = sys.stdout, shell = True) 76 | if ret != 0: 77 | print "Non-zero return code {0} on downloading {1}!".format(ret, url) 78 | sys.exit(1) 79 | ret = subprocess.call(extract.format(dist), stdout = sys.stdout, shell = True) 80 | if ret != 0: 81 | print "Non-zero return code {0} on extract: `{1}'".format(ret, extract.format(dist)) 82 | sys.exit(1) 83 | 84 | def make_build_install(directory, config_opts): 85 | """Use configure/make/make install to build something.""" 86 | # Change to the directory to make path handling easier. 87 | cwd = os.getcwd() 88 | os.chdir(directory) 89 | # Configure. 90 | ret = subprocess.call(configure.format(config_opts), stdout = sys.stdout, shell = True) 91 | if ret != 0: 92 | print "Non-zero return code {0} on configure `{1}' in {2}!".format( 93 | ret, configure.format(config_opts), directory) 94 | sys.exit(1) 95 | # Build. 
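# (As with every other step in this script, a non-zero return code aborts via sys.exit(1).)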
96 | ret = subprocess.call(make, stdout = sys.stdout, shell = True) 97 | if ret != 0: 98 | print "Non-zero return code {0} on make `{1}' in {2}!".format(ret, make, directory) 99 | sys.exit(1) 100 | # Install. 101 | ret = subprocess.call(make_install, stdout = sys.stdout, shell = True) 102 | if ret != 0: 103 | print "Non-zero return code {0} on install `{1}' in {2}!".format(ret, make_install, 104 | directory) 105 | sys.exit(1) 106 | os.chdir(cwd) 107 | 108 | def waf_build_install(directory, config_opts): 109 | """Use waf to build something (PyBindGen only for now).""" 110 | cwd = os.getcwd() 111 | os.chdir(directory) 112 | # Configure. 113 | ret = subprocess.call(waf_configure.format(config_opts), stdout = sys.stdout, shell = True) 114 | if ret != 0: 115 | print "Non-zero return code {0} on configure `{1}' in {2}!".format( 116 | ret, waf_configure.format(config_opts), directory) 117 | sys.exit(1) 118 | # Build. 119 | ret = subprocess.call(waf_build, stdout = sys.stdout, shell = True) 120 | if ret != 0: 121 | print "Non-zero return code {0} on build `{1}' in {2}!".format(ret, waf_build, directory) 122 | sys.exit(1) 123 | # Install. 124 | ret = subprocess.call(waf_install, stdout = sys.stdout, shell = True) 125 | if ret != 0: 126 | print "Non-zero return code {0} on install `{1}' in {2}!".format(ret, waf_install, directory) 127 | sys.exit(1) 128 | os.chdir(cwd) 129 | 130 | def pydist_build_install(directory, config_opts): 131 | """Use Python's distutils to build something.""" 132 | cwd = os.getcwd() 133 | os.chdir(directory) 134 | # Build. 135 | ret = subprocess.call(pydist_build, stdout = sys.stdout, shell = True) 136 | if ret != 0: 137 | print "Non-zero return code {0} on build `{1}' in {2}!".format(ret, pydist_build, directory) 138 | sys.exit(1) 139 | # Install. 140 | ret = subprocess.call(pydist_install.format(config_opts), stdout = sys.stdout, shell = True) 141 | if ret != 0: 142 | print "Non-zero return code {0} on install `{1}' in {2}!".format( 143 | ret, pydist_install.format(config_opts), directory) 144 | sys.exit(1) 145 | os.chdir(cwd) 146 | 147 | def download_build_install(url, dist, directory, extract, build_func, config_opts = ""): 148 | """Download, build, and install something.""" 149 | download_extract(url, dist, extract) 150 | build_func(directory, config_opts) 151 | 152 | def clean(dist, directory): 153 | """Clean something.""" 154 | ret = subprocess.call(clean_dir.format(directory), stdout = sys.stdout, shell = True) 155 | if ret != 0: 156 | print "Non-zero return code {0} on clean `{1}'!".format(ret, clean_dir.format(directory)) 157 | sys.exit(1) 158 | if not dist: 159 | return 160 | ret = subprocess.call(clean_file.format(dist), stdout = sys.stdout, shell = True) 161 | if ret != 0: 162 | print "Non-zero return code {0} on clean `{1}'!".format(ret, clean_file.format(dist)) 163 | sys.exit(1) 164 | 165 | def do_build(): 166 | """Build everything.""" 167 | # Change into the working directory. 168 | if not os.path.isdir(working_dir): 169 | if os.path.exists(working_dir): 170 | print "Working directory {0} exists and is not a directory!".format(working_dir) 171 | sys.exit(1) 172 | os.makedirs(working_dir) 173 | cwd = os.getcwd() 174 | os.chdir(working_dir) 175 | # Build LaunchMON. 176 | print "==========" 177 | print "Building LaunchMON." 178 | print "==========" 179 | print "LaunchMON 1.0 sources are not yet available!" 180 | print "You can check them out from version control at http://sourceforge.net/projects/launchmon/ and build them yourself (recommended)." 
181 | print "Note that if you use the 0.7.2 release, you probably will need to update conf/lmonconf.py."
182 | print "Using default config options '" + launchmon_config + "'"
183 | download_build_install(launchmon_url, launchmon_dist, launchmon_dir, launchmon_extract,
184 | make_build_install, config_opts + " " + launchmon_config)
185 | # Build MRNet.
186 | print "=========="
187 | print "Building MRNet."
188 | print "=========="
189 | download_build_install(mrnet_url, mrnet_dist, mrnet_dir, mrnet_extract,
190 | make_build_install, config_opts + " " + mrnet_config)
191 | # Build PyBindGen.
192 | print "=========="
193 | print "Building PyBindGen."
194 | print "=========="
195 | download_build_install(pybindgen_url, pybindgen_dist, pybindgen_dir, pybindgen_extract,
196 | waf_build_install, config_opts + " " + pybindgen_config)
197 | # Return to the regular directory for building the MRNet bindings.
198 | os.chdir(cwd)
199 | # Build the MRNet bindings.
200 | print "=========="
201 | print "Building MRNet Python bindings. (Not regenerating bindings.)"
202 | print "=========="
203 | pydist_build_install(mrnetbind_dir, config_opts + " " + mrnetbind_config)
204 | 
205 | # Compile Python files.
206 | print "=========="
207 | print "Compiling Python files."
208 | print "=========="
209 | rx = re.compile(working_dir)
210 | compileall.compile_dir(".", rx = rx)
211 | 
212 | print "=========="
213 | print "Everything built. Remember that you will probably need to edit the config files."
214 | 
215 | def do_clean():
216 | """Clean everything."""
217 | # Enter the working directory.
218 | if not os.path.isdir(working_dir):
219 | print "Cannot find working directory {0}!".format(working_dir)
220 | sys.exit(1)
221 | cwd = os.getcwd()
222 | os.chdir(working_dir)
223 | print "Cleaning all."
224 | # Clean LaunchMON.
225 | clean(launchmon_dist, launchmon_dir)
226 | # Clean MRNet.
227 | clean(mrnet_dist, mrnet_dir)
228 | # Clean PyBindGen.
229 | clean(pybindgen_dist, pybindgen_dir)
230 | # Return to the regular directory.
231 | os.chdir(cwd)
232 | # Clean the MRNet bindings.
233 | clean(None, os.path.join(mrnetbind_dir, "build"))
234 | 
235 | # Parse input arguments and do stuff.
236 | action = "build"
237 | if len(sys.argv) == 2 and sys.argv[1] == "clean":
238 | action = "clean"
239 | 
240 | if action == "build":
241 | if len(sys.argv) > 1:
242 | # All arguments are passed verbatim to configure (there is no "build" keyword).
243 | config_opts = " ".join(sys.argv[1:])
244 | do_build()
245 | elif action == "clean":
246 | do_clean()
247 | else:
248 | print "Unknown action!"
249 | sys.exit(1) 250 | -------------------------------------------------------------------------------- /pgdb/gdbinit: -------------------------------------------------------------------------------- 1 | python 2 | import sys 3 | sys.path.insert(0, "stlpprinters") 4 | from libstdcxx.v6.printers import register_libstdcxx_printers 5 | register_libstdcxx_printers(None) 6 | end -------------------------------------------------------------------------------- /pgdb/launchmon-mpich-mvapich.diff: -------------------------------------------------------------------------------- 1 | Index: test/src/fe_launch_smoketest.cxx 2 | =================================================================== 3 | --- test/src/fe_launch_smoketest.cxx (revision 497) 4 | +++ test/src/fe_launch_smoketest.cxx (working copy) 5 | @@ -270,6 +270,30 @@ 6 | "[LMON_FE] launching the job/daemons via %s\n", 7 | mylauncher); 8 | } 9 | + else if (rmenv_str == std::string("RC_mpich")) 10 | + { 11 | + launcher_argv = (char **) malloc(5*sizeof(char*)); 12 | + launcher_argv[0] = strdup(mylauncher); 13 | + launcher_argv[1] = strdup("-np"); 14 | + launcher_argv[2] = strdup(argv[2]); 15 | + launcher_argv[3] = strdup(argv[1]); 16 | + launcher_argv[4] = NULL; 17 | + fprintf (stdout, 18 | + "[LMON_FE] launching the job/daemons via %s\n", 19 | + mylauncher); 20 | + } 21 | + else if (rmenv_str == std::string("RC_mpirun_rsh")) 22 | + { 23 | + launcher_argv = (char **) malloc(5*sizeof(char*)); 24 | + launcher_argv[0] = strdup(mylauncher); 25 | + launcher_argv[1] = strdup("-np"); 26 | + launcher_argv[2] = strdup(argv[2]); 27 | + launcher_argv[3] = strdup(argv[1]); 28 | + launcher_argv[4] = NULL; 29 | + fprintf (stdout, 30 | + "[LMON_FE] launching the job/daemons via %s\n", 31 | + mylauncher); 32 | + } 33 | 34 | if ( ( rc = LMON_fe_init ( LMON_VERSION ) ) 35 | != LMON_OK ) 36 | Index: launchmon/src/lmon_api/lmon_api_std.h 37 | =================================================================== 38 | --- launchmon/src/lmon_api/lmon_api_std.h (revision 497) 39 | +++ launchmon/src/lmon_api/lmon_api_std.h (working copy) 40 | @@ -117,6 +117,8 @@ 41 | RC_alps, 42 | RC_orte, 43 | RC_gupc, 44 | + RC_mpich, 45 | + RC_mpirun_rsh, 46 | RC_none 47 | /* 48 | new RMs should be added here as LaunchMON is ported 49 | Index: launchmon/src/rm_mpirun_rsh.conf 50 | =================================================================== 51 | --- launchmon/src/rm_mpirun_rsh.conf (revision 0) 52 | +++ launchmon/src/rm_mpirun_rsh.conf (revision 0) 53 | @@ -0,0 +1,8 @@ 54 | +RM=mpirun_rsh 55 | +RM_MPIR=STD 56 | +RM_launcher=mpirun_rsh 57 | +RM_launcher_id=RM_launcher|sym|mpispawn_checkin 58 | +RM_launch_helper=mpirun_rsh 59 | +RM_signal_for_kill=SIGINT 60 | +RM_fail_detection=false 61 | +RM_launch_str=-np %n -hostfile %l %d %o --lmonsharedsec=%s --lmonsecchk=%c 62 | \ No newline at end of file 63 | Index: launchmon/src/sdbg_rm_map.cxx 64 | =================================================================== 65 | --- launchmon/src/sdbg_rm_map.cxx (revision 497) 66 | +++ launchmon/src/sdbg_rm_map.cxx (working copy) 67 | @@ -245,6 +245,14 @@ 68 | { 69 | rm = RC_gupc; 70 | } 71 | + else if (v == std::string("mpich")) 72 | + { 73 | + rm = RC_mpich; 74 | + } 75 | + else if (v == std::string("mpirun_rsh")) 76 | + { 77 | + rm = RC_mpirun_rsh; 78 | + } 79 | else 80 | { 81 | rm = RC_none; 82 | Index: launchmon/src/rm_info.conf 83 | =================================================================== 84 | --- launchmon/src/rm_info.conf (revision 497) 85 | +++ launchmon/src/rm_info.conf 
(working copy) 86 | @@ -39,6 +39,8 @@ 87 | rm_slurm.conf 88 | rm_openrte.conf 89 | rm_mchecker.conf 90 | +rm_mpich.conf 91 | +rm_mpirun_rsh.conf 92 | 93 | [linux-x86_64] 94 | rm_slurm.conf 95 | @@ -46,6 +48,8 @@ 96 | rm_alps.conf 97 | rm_mchecker.conf 98 | rm_gupc.conf 99 | +rm_mpich.conf 100 | +rm_mpirun_rsh.conf 101 | 102 | [linux-power] 103 | rm_bglrm.conf 104 | Index: launchmon/src/rm_mpich.conf 105 | =================================================================== 106 | --- launchmon/src/rm_mpich.conf (revision 0) 107 | +++ launchmon/src/rm_mpich.conf (revision 0) 108 | @@ -0,0 +1,8 @@ 109 | +RM=mpich 110 | +RM_MPIR=STD 111 | +RM_launcher=mpiexec.hydra 112 | +RM_launcher_id=RM_launcher|sym|HYD_ui_mpich_info 113 | +RM_launch_helper=mpiexec.hydra 114 | +RM_signal_for_kill=SIGINT 115 | +RM_fail_detection=false 116 | +RM_launch_str=-n %n -f %l %d %o --lmonsharedsec=%s --lmonsecchk=%c 117 | \ No newline at end of file 118 | Index: launchmon/src/linux/lmon_api/lmon_be_sync_mpi.cxx 119 | =================================================================== 120 | --- launchmon/src/linux/lmon_api/lmon_be_sync_mpi.cxx (revision 497) 121 | +++ launchmon/src/linux/lmon_api/lmon_be_sync_mpi.cxx (working copy) 122 | @@ -127,6 +127,8 @@ 123 | case RC_orte: 124 | case RC_alps: 125 | case RC_gupc: 126 | + case RC_mpich: 127 | + case RC_mpirun_rsh: 128 | // 129 | // Call generic Linux init 130 | // 131 | @@ -183,6 +185,8 @@ 132 | case RC_orte: 133 | case RC_alps: 134 | case RC_gupc: 135 | + case RC_mpich: 136 | + case RC_mpirun_rsh: 137 | // 138 | // Call generic Linux stop 139 | // 140 | @@ -251,6 +255,8 @@ 141 | case RC_orte: 142 | case RC_alps: 143 | case RC_gupc: 144 | + case RC_mpich: 145 | + case RC_mpirun_rsh: 146 | // 147 | // Call generic Linux run 148 | // 149 | @@ -316,6 +322,8 @@ 150 | case RC_orte: 151 | case RC_alps: 152 | case RC_gupc: 153 | + case RC_mpich: 154 | + case RC_mpirun_rsh: 155 | // 156 | // Call generic Linux initdone 157 | // 158 | @@ -372,6 +380,8 @@ 159 | case RC_orte: 160 | case RC_alps: 161 | case RC_gupc: 162 | + case RC_mpich: 163 | + case RC_mpirun_rsh: 164 | // 165 | // You need to do nothing for these resource managers 166 | // 167 | @@ -432,6 +442,8 @@ 168 | case RC_orte: 169 | case RC_alps: 170 | case RC_gupc: 171 | + case RC_mpich: 172 | + case RC_mpirun_rsh: 173 | // 174 | // You need to do nothing for these resource managers 175 | // 176 | -------------------------------------------------------------------------------- /pgdb/launchmon-mpich.diff: -------------------------------------------------------------------------------- 1 | Index: test/src/fe_launch_smoketest.cxx 2 | =================================================================== 3 | --- test/src/fe_launch_smoketest.cxx (revision 470) 4 | +++ test/src/fe_launch_smoketest.cxx (working copy) 5 | @@ -280,6 +280,18 @@ 6 | "[LMON_FE] launching the job/daemons via %s\n", 7 | mylauncher); 8 | } 9 | + else if (rmenv_str == std::string("RC_mpich")) 10 | + { 11 | + launcher_argv = (char **) malloc(5*sizeof(char*)); 12 | + launcher_argv[0] = strdup(mylauncher); 13 | + launcher_argv[1] = strdup("-np"); 14 | + launcher_argv[2] = strdup(argv[2]); 15 | + launcher_argv[3] = strdup(argv[1]); 16 | + launcher_argv[4] = NULL; 17 | + fprintf (stdout, 18 | + "[LMON_FE] launching the job/daemons via %s\n", 19 | + mylauncher); 20 | + } 21 | 22 | if ( ( rc = LMON_fe_init ( LMON_VERSION ) ) 23 | != LMON_OK ) 24 | Index: launchmon/src/lmon_api/lmon_api_std.h 25 | 
=================================================================== 26 | --- launchmon/src/lmon_api/lmon_api_std.h	(revision 470) 27 | +++ launchmon/src/lmon_api/lmon_api_std.h	(working copy) 28 | @@ -117,6 +117,7 @@ 29 | RC_alps, 30 | RC_orte, 31 | RC_gupc, 32 | + RC_mpich, 33 | RC_none 34 | /* 35 | new RMs should be added here as LaunchMON is ported 36 | Index: launchmon/src/sdbg_rm_map.cxx 37 | =================================================================== 38 | --- launchmon/src/sdbg_rm_map.cxx	(revision 470) 39 | +++ launchmon/src/sdbg_rm_map.cxx	(working copy) 40 | @@ -275,6 +275,10 @@ 41 | { 42 | rm = RC_gupc; 43 | } 44 | + else if (v == std::string("mpich")) 45 | + { 46 | + rm = RC_mpich; 47 | + } 48 | else 49 | { 50 | rm = RC_none; 51 | Index: launchmon/src/rm_info.conf 52 | =================================================================== 53 | --- launchmon/src/rm_info.conf	(revision 470) 54 | +++ launchmon/src/rm_info.conf	(working copy) 55 | @@ -39,6 +39,7 @@ 56 | rm_slurm.conf 57 | rm_openrte.conf 58 | rm_mchecker.conf 59 | +rm_mpich.conf 60 | 61 | [linux-x86_64] 62 | rm_slurm.conf 63 | @@ -46,6 +47,7 @@ 64 | rm_alps.conf 65 | rm_mchecker.conf 66 | rm_gupc.conf 67 | +rm_mpich.conf 68 | 69 | [linux-power] 70 | rm_bglrm.conf 71 | Index: launchmon/src/rm_mpich.conf 72 | =================================================================== 73 | --- launchmon/src/rm_mpich.conf	(revision 0) 74 | +++ launchmon/src/rm_mpich.conf	(revision 0) 75 | @@ -0,0 +1,9 @@ 76 | +RM=mpich 77 | +RM_MPIR=STD 78 | +RM_launcher=mpiexec.hydra 79 | +RM_launcher_id=RM_launcher|sym|HYD_ui_mpich_info 80 | +#RM_launcher_so=libopen-rte.so 81 | +RM_launch_helper=mpir 82 | +RM_signal_for_kill=SIGINT 83 | +RM_fail_detection=false 84 | +RM_launch_str=%d %o --lmonsharedsec=%s --lmonsecchk=%c 85 | \ No newline at end of file 86 | 87 | Property changes on: launchmon/src/rm_mpich.conf 88 | ___________________________________________________________________ 89 | Added: svn:executable 90 | + * 91 | 92 | Index: launchmon/src/linux/lmon_api/lmon_be_sync_mpi.cxx 93 | =================================================================== 94 | --- launchmon/src/linux/lmon_api/lmon_be_sync_mpi.cxx	(revision 470) 95 | +++ launchmon/src/linux/lmon_api/lmon_be_sync_mpi.cxx	(working copy) 96 | @@ -127,6 +127,7 @@ 97 | case RC_orte: 98 | case RC_alps: 99 | case RC_gupc: 100 | + case RC_mpich: 101 | // 102 | // Call generic Linux init 103 | // 104 | @@ -183,6 +184,7 @@ 105 | case RC_orte: 106 | case RC_alps: 107 | case RC_gupc: 108 | + case RC_mpich: 109 | // 110 | // Call generic Linux stop 111 | // 112 | @@ -251,6 +253,7 @@ 113 | case RC_orte: 114 | case RC_alps: 115 | case RC_gupc: 116 | + case RC_mpich: 117 | // 118 | // Call generic Linux run 119 | // 120 | @@ -316,6 +319,7 @@ 121 | case RC_orte: 122 | case RC_alps: 123 | case RC_gupc: 124 | + case RC_mpich: 125 | // 126 | // Call generic Linux initdone 127 | // 128 | @@ -372,6 +376,7 @@ 129 | case RC_orte: 130 | case RC_alps: 131 | case RC_gupc: 132 | + case RC_mpich: 133 | // 134 | // You need to do nothing for these resource managers 135 | // 136 | @@ -432,6 +437,7 @@ 137 | case RC_orte: 138 | case RC_alps: 139 | case RC_gupc: 140 | + case RC_mpich: 141 | // 142 | // You need to do nothing for these resource managers 143 | // 144 | -------------------------------------------------------------------------------- /pgdb/mrnet-filters/arec_filter.cc: -------------------------------------------------------------------------------- 1 | #include <Python.h> // NOTE: angle-bracket header names on lines 1-5 are inferred from usage. 2 | #include <cstdlib> 3 | #include <cstring> 4 |
#include <string> 5 | #include <vector> 6 | 7 | #include "mrnet/MRNet.h" 8 | #define PGDB_PATH "/home/ndryden/PGDB/pgdb/mrnet-filters" 9 | #define MSG_TAG 3141 10 | #define COMP_TAG 3142 11 | 12 | extern "C" { 13 | 14 | using namespace MRN; 15 | 16 | const char* arec_filter_format_string = "%s"; 17 | PyThreadState* py_state; 18 | 19 | void send_error_packet(unsigned int stream_id, int tag, std::vector<PacketPtr> &packets_out) { 20 | PacketPtr err_packet(new Packet(stream_id, tag, "%s", "ERROR")); 21 | packets_out.push_back(err_packet); 22 | } 23 | 24 | void arec_filter(std::vector<PacketPtr> &packets_in, std::vector<PacketPtr> &packets_out, 25 | std::vector<PacketPtr> &packets_out_reverse, void** state, PacketPtr& config_params, 26 | const TopologyLocalInfo& topo_info) { 27 | // Ensure Python is initialized; if it is, this does nothing. 28 | Py_Initialize(); 29 | // We must serialize access to the Python interpreter. 30 | // This deals with both the cases where threads are not initialized, 31 | // which occurs when running on remote comm nodes, and the case where 32 | // they are already initialized, which occurs on the front-end. 33 | PyGILState_STATE gstate; 34 | if (PyEval_ThreadsInitialized() == 0) { 35 | PyEval_InitThreads(); 36 | py_state = PyThreadState_Get(); 37 | } 38 | else { 39 | if (py_state != NULL) { 40 | PyEval_RestoreThread(py_state); 41 | } 42 | else { 43 | gstate = PyGILState_Ensure(); 44 | } 45 | } 46 | // Add the relevant search path to the Python module search path. 47 | PyRun_SimpleString("import sys\n\ 48 | sys.path.append('" PGDB_PATH "')\n"); 49 | // Load the relevant file. 50 | PyObject* module = PyImport_ImportModule("filter_hook"); 51 | if (module == NULL) { 52 | PyErr_Print(); 53 | send_error_packet(packets_in[0]->get_StreamId(), 54 | MSG_TAG, 55 | packets_out); 56 | return; 57 | } 58 | // Get the function to call. 59 | PyObject* filter_func = PyObject_GetAttrString(module, "filter_hook"); 60 | if ((filter_func == NULL) || !PyCallable_Check(filter_func)) { 61 | PyErr_Print(); 62 | send_error_packet(packets_in[0]->get_StreamId(), 63 | MSG_TAG, 64 | packets_out); 65 | Py_DECREF(module); 66 | return; 67 | } 68 | // Create the list to pass to the function. 69 | PyObject* packet_list = PyList_New(packets_in.size()); 70 | if (packet_list == NULL) { 71 | PyErr_Print(); 72 | send_error_packet(packets_in[0]->get_StreamId(), 73 | MSG_TAG, 74 | packets_out); 75 | Py_DECREF(module); 76 | Py_DECREF(filter_func); 77 | return; 78 | } 79 | for (size_t i = 0; i < packets_in.size(); ++i) { 80 | char* packet_buf; 81 | PacketPtr cur_packet = packets_in[i]; 82 | // Check the tag; we only deal with uncompressed data for now. 83 | if (cur_packet->get_Tag() == COMP_TAG) { 84 | packets_out.push_back(cur_packet); 85 | continue; 86 | } 87 | // Unpack the packet into a buffer. 88 | if (cur_packet->unpack("%s", &packet_buf) == -1) { 89 | send_error_packet(packets_in[0]->get_StreamId(), 90 | MSG_TAG, 91 | packets_out); 92 | Py_DECREF(module); 93 | Py_DECREF(filter_func); 94 | Py_DECREF(packet_list); 95 | return; 96 | } 97 | // Create a string from the packet data. 98 | PyObject* unpacked = PyString_FromString(packet_buf); 99 | if (unpacked == NULL) { 100 | PyErr_Print(); 101 | send_error_packet(packets_in[0]->get_StreamId(), 102 | MSG_TAG, 103 | packets_out); 104 | Py_DECREF(module); 105 | Py_DECREF(filter_func); 106 | Py_DECREF(packet_list); 107 | return; 108 | } 109 | // Unpack allocates a buffer that we need to free. 110 | free(packet_buf); 111 | // Add the packet to the list. 112 | // Note this steals the reference to unpacked.
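// ("Stealing" means PyList_SetItem takes ownership of unpacked's single
// reference -- even when the call fails -- so we must never decref
// unpacked ourselves after handing it to the list.)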
113 | if (PyList_SetItem(packet_list, i, unpacked) != 0) { 114 | PyErr_Print(); 115 | send_error_packet(packets_in[0]->get_StreamId(), 116 | MSG_TAG, 117 | packets_out); 118 | Py_DECREF(module); 119 | Py_DECREF(filter_func); 120 | Py_DECREF(packet_list); 121 | /* unpacked was already released by the failed PyList_SetItem. */ 122 | return; 123 | } 124 | } 125 | // Create the arguments tuple and add the list. 126 | PyObject* arguments = PyTuple_New(1); 127 | if (arguments == NULL) { 128 | PyErr_Print(); 129 | send_error_packet(packets_in[0]->get_StreamId(), 130 | MSG_TAG, 131 | packets_out); 132 | Py_DECREF(module); 133 | Py_DECREF(filter_func); 134 | Py_DECREF(packet_list); 135 | return; 136 | } 137 | if (PyTuple_SetItem(arguments, 0, packet_list) != 0) { 138 | PyErr_Print(); 139 | send_error_packet(packets_in[0]->get_StreamId(), 140 | MSG_TAG, 141 | packets_out); 142 | Py_DECREF(module); 143 | Py_DECREF(filter_func); 144 | /* packet_list was already released by the failed PyTuple_SetItem. */ 145 | Py_DECREF(arguments); 146 | return; 147 | } 148 | // Call the Python function. 149 | PyObject* ret_list = PyObject_CallObject(filter_func, arguments); 150 | if (ret_list == NULL) { 151 | PyErr_Print(); 152 | send_error_packet(packets_in[0]->get_StreamId(), 153 | MSG_TAG, 154 | packets_out); 155 | Py_DECREF(module); 156 | Py_DECREF(filter_func); 157 | /* packet_list is now owned by arguments; decrefing it again would double-release. */ 158 | Py_DECREF(arguments); 159 | return; 160 | } 161 | if (!PyList_Check(ret_list)) { 162 | send_error_packet(packets_in[0]->get_StreamId(), 163 | MSG_TAG, 164 | packets_out); 165 | Py_DECREF(module); 166 | Py_DECREF(filter_func); 167 | /* packet_list is owned by arguments. */ 168 | Py_DECREF(arguments); 169 | Py_DECREF(ret_list); 170 | return; 171 | } 172 | // Iterate over each element of the returned list. 173 | Py_ssize_t ret_length = PyList_Size(ret_list); 174 | for (ssize_t i = 0; i < ret_length; ++i) { 175 | // Convert the result to a usable string. 176 | // Note this string may not be modified. 177 | PyObject* ret_data = PyList_GetItem(ret_list, i); 178 | char* python_packet_data = PyString_AsString(ret_data); 179 | if (python_packet_data == NULL) { 180 | PyErr_Print(); 181 | send_error_packet(packets_in[0]->get_StreamId(), 182 | MSG_TAG, 183 | packets_out); 184 | Py_DECREF(module); 185 | Py_DECREF(filter_func); 186 | /* packet_list is owned by arguments. */ 187 | Py_DECREF(arguments); 188 | Py_DECREF(ret_list); 189 | return; 190 | } 191 | char* new_packet_data = (char*) malloc(sizeof(char) * (strlen(python_packet_data) + 1)); 192 | strcpy(new_packet_data, python_packet_data); 193 | // Construct the new packet and send it off. 194 | // Use MSG_TAG because we removed all COMP_TAG messages above. 195 | PacketPtr new_packet(new Packet(packets_in[0]->get_StreamId(), 196 | MSG_TAG, 197 | "%s", 198 | new_packet_data)); 199 | packets_out.push_back(new_packet); 200 | } 201 | // Release all the Python references. 202 | Py_DECREF(module); 203 | Py_DECREF(filter_func); 204 | /* packet_list is owned by arguments and is released with it. */ 205 | Py_DECREF(arguments); 206 | Py_DECREF(ret_list); 207 | // Release the Python interpreter.
208 | if (py_state != NULL) { 209 | py_state = PyEval_SaveThread(); 210 | } 211 | else { 212 | PyGILState_Release(gstate); 213 | } 214 | /*size_t i; 215 | for (i = 0; i < packets_in.size(); ++i) { 216 | packets_out.push_back(packets_in[i]); 217 | }*/ 218 | } 219 | 220 | } /* extern "C" */ 221 | -------------------------------------------------------------------------------- /pgdb/mrnet-filters/build.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Quick Python script to build the arec filter.""" 3 | 4 | import os 5 | mrnet_path = "/home/ndryden/mrnet" 6 | # Version of python. 7 | python_lib = "python2.7" 8 | # Boost libraries that need to be used when building. 9 | boost_libs = ["boost_timer-mt", "boost_system-mt"] 10 | build_command = "g++ -Wall -Wextra -O0 -g -I" + mrnet_path + "/include -L" + mrnet_path + "/lib -Wl,-rpath=" + mrnet_path + "/lib -fPIC -shared -rdynamic -o arec_filter.so arec_filter.cc -lmrnet -lxplat" 11 | build_command += " -l" + python_lib 12 | build_command += "".join([" -l" + lib for lib in boost_libs]) 13 | print build_command 14 | os.system(build_command); 15 | -------------------------------------------------------------------------------- /pgdb/mrnet-filters/filter_hook.py: -------------------------------------------------------------------------------- 1 | """Python filter hook for record aggregation.""" 2 | 3 | # Need to specify the directory for PGDB. 4 | import sys 5 | sys.path.append("/home/ndryden/PGDB/pgdb/src") 6 | from conf import gdbconf 7 | gdbconf.set_path() 8 | import cPickle 9 | from mi.gdbmiarec import * 10 | from gdb_shared import * 11 | 12 | def filter_hook(packet_list): 13 | """PGDB deduplication filter for MRNet. 14 | 15 | This is invoked via a C filter called from MRNet. 16 | Messages with type OUT_MSG are merged into combined aggregated records. 17 | packet_list is a list of serialized packets provided by MRNet. 18 | These packets cannot be compressed. 19 | Returns a serialized list of packets to MRNet. 20 | 21 | """ 22 | msg_list = map(cPickle.loads, packet_list) 23 | # Compute earliest sent time, if messages have them. 24 | # Performance must be enabled globally, so only check first message. 25 | new_time = None 26 | if hasattr(msg_list[0], '_send_time'): 27 | new_time = min(msg_list, key = lambda x: x._send_time) 28 | new_time = new_time._send_time 29 | packets = [] 30 | record_msgs = [] 31 | for msg in msg_list: 32 | # Only process messages of type OUT_MSG. 
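        # Anything else (e.g., a VARPRINT_RES_MSG reply) passes through unmerged.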
33 | if msg.msg_type == OUT_MSG: 34 | record_msgs.append(msg) 35 | else: 36 | packets.append(msg) 37 | if record_msgs: 38 | arec_list = map(lambda x: x.record, record_msgs) 39 | new_list = arec_list.pop(0) 40 | for l in arec_list: 41 | new_list = combine_aggregated_records(new_list + l) 42 | packets.append(GDBMessage(OUT_MSG, record = new_list)) 43 | for i, msg in enumerate(packets): 44 | if new_time: 45 | msg._send_time = new_time 46 | packets[i] = cPickle.dumps(msg, 0) 47 | return packets 48 | -------------------------------------------------------------------------------- /pgdb/pgdb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from gdbfe import GDBFE 4 | 5 | if __name__ == "__main__": 6 | gdbfe = GDBFE() 7 | gdbfe.run() 8 | -------------------------------------------------------------------------------- /pgdb/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ndryden/PGDB/88df53eca358c4478fa5e8f734b5ba47970e6bb9/pgdb/src/__init__.py -------------------------------------------------------------------------------- /pgdb/src/conf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ndryden/PGDB/88df53eca358c4478fa5e8f734b5ba47970e6bb9/pgdb/src/conf/__init__.py -------------------------------------------------------------------------------- /pgdb/src/conf/gdbconf.py: -------------------------------------------------------------------------------- 1 | """Base configuration file for PGDB.""" 2 | 3 | import sys, os 4 | def set_path(): 5 | """Set appropriate Python paths.""" 6 | sys.path.append("/g/g21/dryden1/lib/python2.6/site-packages/") 7 | set_path() 8 | 9 | from lmon import lmon 10 | 11 | mrnet_base = "/collab/usr/global/tools/mrnet/chaos_5_x86_64_ib/mrnet-4.0.0" 12 | pgdb_path = "/g/g21/dryden1/pgdb/pgdb" 13 | 14 | # Set up LaunchMON. 15 | lmon.set_lmon_paths("/g/g21/dryden1/launchmon") 16 | 17 | # The binary for the backend daemons. 18 | backend_bin = "python" 19 | # The list of arguments to give to the backend daemons. 20 | backend_args = [pgdb_path + "/src/gdbbe.py"] 21 | # Environment variables to set in the front-end and back-end. 22 | environ = dict(lmon.lmon_environ) 23 | environ["XPLAT_RSH"] = "rsh" 24 | environ["MRNET_COMM_PATH"] = mrnet_base + "/bin/mrnet_commnode" 25 | environ["LD_LIBRARY_PATH"] = mrnet_base + "/lib:" + os.environ.get("LD_LIBRARY_PATH", "") 26 | # The path to the GDB binary. 27 | gdb_path = "gdb" 28 | # The path to the topology file for MRNet. 29 | topology_path = "." 30 | # The path to the GDB init file to use. 31 | gdb_init_path = pgdb_path + "/gdbinit" 32 | # Whether to use pretty printing, raw printing, or both. 33 | # Possible values are "yes", "no", or "both". 34 | pretty_print = "yes" 35 | # Whether to dump raw printing output to a file. False to not, otherwise the file. 36 | import os 37 | print_dump_file = "raw_dump_{0}".format(os.getpid()) 38 | # Varprint configuration options. 39 | # The maximum depth to descend when printing an object. 40 | varprint_max_depth = 3 41 | # The maximum number of children of an object to consider unless explicitly printing the object. 42 | # (Note, this is just children, not descendants.) 43 | varprint_max_children = 60 44 | # The branching factor to use when constructing the MRNet topology. 
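# Illustrative: with a branching factor of 32, 1024 back-end daemons need only
# two tree levels below the front-end (32 communication nodes, each fanning out
# to 32 back-ends).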
45 | mrnet_branch_factor = 32 46 | # The size of each topology broadcast, from the front-end to the back-end master, and the master to 47 | # all the other backends. 48 | topology_transmit_size = 32768 49 | # Maximum length of a message before it is compressed. 50 | compress_threshold = 10240 51 | # The maximum length of a message before it is split into smaller messages for transmission over MRNet. 52 | multi_len = 5242880 53 | # A list of tuples of the form (path, function), where path is a path to an MRNet filter 54 | # and function is the name of the filter function. 55 | mrnet_filters = [(pgdb_path + "/mrnet-filters/arec_filter.so", "arec_filter")] 56 | # Whether to enable collection of MRNet performance data or not. 57 | mrnet_collect_perf_data = True 58 | # Whether to write a DOT file of the topology. The path of the file if yes, None otherwise. 59 | mrnet_topology_dot = "/home/ndryden/topo.dot" 60 | # The length of history to keep. 61 | history_length = 100 62 | # Whether to load files using the SBD system. 63 | use_sbd = True 64 | # Path to the SBD shared library. 65 | sbd_bin = pgdb_path + "/load_file.so" 66 | # Size of shared memory the SBD uses, in bytes. Currently 32 MiB. 67 | sbd_shmem_size = 33554432 68 | -------------------------------------------------------------------------------- /pgdb/src/gdb_shared.py: -------------------------------------------------------------------------------- 1 | """Miscellaneous shared things for both the PGDB FE and BE.""" 2 | 3 | import inspect 4 | 5 | class NodeInfo: 6 | """Stores MRNet node information for broadcast.""" 7 | def __init__(self, mrnrank, host, port, parent, be_rank): 8 | self.mrnrank = mrnrank 9 | self.host = host 10 | self.port = port 11 | self.parent = parent 12 | self.be_rank = be_rank 13 | 14 | def __str__(self): 15 | return "{0} {1} {2} {3} {4}".format(self.mrnrank, self.host, 16 | self.port, self.parent, 17 | self.be_rank) 18 | 19 | def __repr__(self): 20 | return "<" + self.__str__() + ">" 21 | 22 | MSG_TAG = 3141 23 | COMP_TAG = 3142 24 | DIE_MSG = 0 25 | QUIT_MSG = 1 26 | CMD_MSG = 2 27 | OUT_MSG = 3 28 | FILTER_MSG = 4 29 | UNFILTER_MSG = 5 30 | HELLO_MSG = 6 31 | VARPRINT_MSG = 7 32 | VARPRINT_RES_MSG = 8 33 | MULTI_MSG = 9 34 | MULTI_PAYLOAD_MSG = 10 35 | KILL_MSG = 11 36 | LOAD_FILE = 12 37 | FILE_DATA = 13 38 | 39 | class GDBMessage: 40 | """A simple class for transmitting messages and related information.""" 41 | 42 | def __init__(self, msg_type, **kwargs): 43 | """Set up the message. 44 | 45 | msg_type is the type of the message; see above constants. 46 | All additional keyword arguments are set as attributes for message- 47 | specific use. 48 | 49 | """ 50 | self.msg_type = msg_type 51 | for k, v in kwargs.items(): 52 | setattr(self, k, v) 53 | 54 | def __str__(self): 55 | """Produce a string representation of the message. 56 | 57 | Prints the message type and then the keys and values. 58 | 59 | """ 60 | string = "" 61 | members = inspect.getmembers(self, lambda x: not inspect.isroutine(x)) 62 | for k, v in members: 63 | if k[0:2] != "__": 64 | # Keep out things like __doc__ and __module__. 65 | string += "{0} = {1}, ".format(k, v) 66 | return "GDBMessage: " + string[:-2] 67 | -------------------------------------------------------------------------------- /pgdb/src/gdbbe.py: -------------------------------------------------------------------------------- 1 | """The back-end daemon invoked by LaunchMON.
2 | 3 | This handles initializing the back-end side of the LaunchMON and MRNet 4 | communication systems, deploying GDB, and sending commands and data back 5 | and forth. 6 | 7 | """ 8 | 9 | from __future__ import print_function 10 | from conf import gdbconf 11 | from comm import CommunicatorBE 12 | from gdb_shared import * 13 | from lmon.lmonbe import LMON_be 14 | import mi.gdbmi_parser as gdbparser 15 | import mi.gdbmi_records 16 | from mi.gdbmi import GDBMachineInterface 17 | from mi.varobj import VariableObject, VariableObjectManager 18 | from mi.commands import Command 19 | from mi.gdbmiarec import (GDBMIAggregatedRecord, combine_records, 20 | combine_aggregated_records) 21 | from mi.gdbmi_recordhandler import GDBMIRecordHandler 22 | from interval import Interval 23 | from varprint import VariablePrinter 24 | from sbd import SBDBE 25 | import signal 26 | import os 27 | import os.path 28 | import mmap 29 | import struct 30 | import time 31 | import sys 32 | import posix_ipc 33 | 34 | class GDBBE: 35 | """The back-end GDB daemon process.""" 36 | 37 | def init_gdb(self): 38 | """Initialize GDB-related things, and launch the GDB process.""" 39 | # Indexed by MPI rank. 40 | self.varobjs = {} 41 | # Maps tokens to MPI rank. 42 | self.token_rank_map = {} 43 | self.record_handler = GDBMIRecordHandler() 44 | self.record_handler.add_type_handler( 45 | self._watch_thread_created, 46 | set([mi.gdbmi_records.ASYNC_NOTIFY_THREAD_CREATED])) 47 | self.startup_stop_hid = self.record_handler.add_type_handler( 48 | self._watch_startup_stop, 49 | set([mi.gdbmi_records.ASYNC_EXEC_STOPPED])) 50 | gdb_env = {} 51 | if gdbconf.use_sbd: 52 | self.sbd = SBDBE(self.comm) 53 | gdb_env["LD_PRELOAD"] = gdbconf.sbd_bin 54 | else: 55 | self.sbd = None 56 | 57 | enable_pprint_cmd = Command("enable-pretty-printing") 58 | enable_target_async_cmd = Command("gdb-set", 59 | args=["target-async", "on"]) 60 | disable_pagination_cmd = Command("gdb-set", args=["pagination", "off"]) 61 | enable_non_stop_cmd = Command("gdb-set", args=["non-stop", "on"]) 62 | add_inferior_cmd = Command("add-inferior") 63 | self.gdb = GDBMachineInterface(gdb=gdbconf.gdb_path, 64 | gdb_args=["-x", gdbconf.gdb_init_path], 65 | env=gdb_env) 66 | procs = self.comm.get_proctab() 67 | # Set up GDB. 68 | if not self.run_gdb_command(enable_pprint_cmd): 69 | raise RuntimeError("Could not enable pretty printing!") 70 | if not self.run_gdb_command(enable_target_async_cmd): 71 | raise RuntimeError("Could not enable target-async!") 72 | if not self.run_gdb_command(disable_pagination_cmd): 73 | raise RuntimeError("Could not disable pagination!") 74 | if not self.run_gdb_command(enable_non_stop_cmd): 75 | raise RuntimeError("Could not enable non-stop!") 76 | 77 | # Create inferiors and set up MPI rank/inferior map. 78 | # First inferior is created by default. 79 | self.rank_inferior_map = {procs[0].mpirank: 'i1'} 80 | self.inferior_rank_map = {'i1': procs[0].mpirank} 81 | i = 2 82 | for proc in procs[1:]: 83 | # Hackish: Assume that the inferiors follow the iN naming scheme. 84 | self.rank_inferior_map[proc.mpirank] = 'i' + str(i) 85 | self.inferior_rank_map['i' + str(i)] = proc.mpirank 86 | i += 1 87 | if not self.run_gdb_command(add_inferior_cmd, no_thread=True): 88 | raise RuntimeError('Could not add inferior i{0}!'.format(i - 1)) 89 | 90 | # Maps MPI ranks to associated threads and vice-versa. 91 | self.rank_thread_map = {} 92 | self.thread_rank_map = {} 93 | 94 | if self.sbd: 95 | # Set up the list of executables for load file checking.
96 | self.sbd.set_executable_names( 97 | [os.path.basename(proc.pd.executable_name) for proc in procs]) 98 | 99 | # Attach processes. 100 | for proc in procs: 101 | if not self.run_gdb_command( 102 | Command("target-attach", 103 | opts={'--thread-group': 104 | self.rank_inferior_map[proc.mpirank]}, 105 | args=[proc.pd.pid]), 106 | proc.mpirank, no_thread=True): 107 | raise RuntimeError("Could not attach to rank {0}!".format( 108 | proc.mpirank)) 109 | self.varobjs[proc.mpirank] = VariableObjectManager() 110 | # Kludge to fix GDB not outputting records for the i1 attach. 111 | if self.rank_inferior_map[proc.mpirank] == 'i1': 112 | time.sleep(0.1) 113 | 114 | def _watch_thread_created(self, record, **kwargs): 115 | """Handle watching thread creation.""" 116 | inferior = record.thread_group_id 117 | thread_id = int(record.thread_id) 118 | rank = self.inferior_rank_map[inferior] 119 | if rank in self.rank_thread_map: 120 | self.rank_thread_map[rank].append(thread_id) 121 | else: 122 | self.rank_thread_map[rank] = [thread_id] 123 | # Always ensure smallest thread is first. 124 | self.rank_thread_map[rank].sort() 125 | self.thread_rank_map[thread_id] = rank 126 | 127 | def _watch_startup_stop(self, record, **kwargs): 128 | """Handle watching for initial inferior stops during startup.""" 129 | self.startup_done_count += 1 130 | if self.startup_done_count == self.comm.get_proctab_size(): 131 | self.doing_startup = False 132 | self.record_handler.remove_handler(self.startup_stop_hid) 133 | # Reset token counts to sync with front-end. 134 | self.token_rank_map = {} 135 | Command._cur_token = 0 136 | 137 | def kill_inferiors(self): 138 | """Terminate all targets being debugged. 139 | 140 | This sends SIGTERM. 141 | 142 | """ 143 | for proc in self.comm.get_proctab(): 144 | os.kill(proc.pd.pid, signal.SIGTERM) 145 | 146 | def run_gdb_command(self, command, ranks=None, no_thread=False): 147 | """Run a GDB command. 148 | 149 | command is a Command object representing the command. 150 | ranks is an Interval of the ranks to run the command on. 151 | If ranks is None, run on the current inferior. 152 | If no_thread is True, this does not specify a particular thread. 153 | 154 | Returns True on success, False on error. 155 | 156 | """ 157 | if isinstance(ranks, int): 158 | # Special case for a single int. 159 | # Toss it in a list; don't need a full Interval. 160 | ranks = Interval(ranks) 161 | if ranks is None: 162 | self.token_rank_map[command.token] = self.comm.get_mpiranks() 163 | return self.gdb.send(command) 164 | else: 165 | if command.get_opt('--thread') is not None: 166 | # If --thread provided, don't override it. 167 | no_thread = True 168 | for rank in ranks: 169 | if rank in self.rank_inferior_map: 170 | # Most recent option with same name takes precedence.
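                    # Illustrative example: if rank 3 maps to inferior 'i4'
                    # whose threads are [7, 9], the command is sent with
                    # --thread 7, the lowest thread belonging to that rank.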
171 | if (not no_thread and 172 | rank in self.rank_thread_map): 173 | command.add_opt('--thread', 174 | self.rank_thread_map[rank][0]) 175 | if not self.gdb.send(command): 176 | return False 177 | self.token_rank_map[command.token] = rank 178 | return True 179 | 180 | def init_handlers(self): 181 | """Initialize message handlers used on data we receive over MRNet.""" 182 | self.msg_handlers = { 183 | DIE_MSG: self.die_handler, 184 | CMD_MSG: self.cmd_handler, 185 | FILTER_MSG: self.filter_handler, 186 | UNFILTER_MSG: self.unfilter_handler, 187 | VARPRINT_MSG: self.varprint_handler, 188 | KILL_MSG: self.kill_handler, 189 | FILE_DATA: self.file_data_handler, 190 | } 191 | 192 | def init_filters(self): 193 | """Initialize default filters.""" 194 | self.filters = set() 195 | #an_lower = ASYNC_NOTIFY.lower() 196 | #self.filters = [ 197 | # (an_lower, "shlibs-updated"), 198 | # (an_lower, "shlibs-added"), 199 | # (an_lower, "shlibs-removed"), 200 | # (an_lower, "library-loaded"), 201 | # (an_lower, "thread-created"), 202 | # (an_lower, "thread-group-added"), 203 | # (an_lower, "thread-group-started"), 204 | # (RESULT.lower(), "exit") 205 | # ] 206 | 207 | def __init__(self): 208 | """Initialize LaunchMON, MRNet, GDB, and other things.""" 209 | self.is_shutdown = False 210 | self.quit = False 211 | self.doing_startup = True 212 | self.startup_done_count = 0 213 | self.startup_arecs = [] 214 | self.token_handlers = {} 215 | self.comm = CommunicatorBE() 216 | if not self.comm.init_lmon(sys.argv): 217 | sys.exit(1) 218 | if not self.comm.init_mrnet(): 219 | # TODO: This should cleanly terminate LaunchMON, but does not. 220 | sys.exit(1) 221 | self.init_gdb() 222 | self.init_handlers() 223 | self.init_filters() 224 | self.variable_printer = VariablePrinter(self) 225 | 226 | def shutdown(self): 227 | """Cleanly shut things down if we have not already done so.""" 228 | if not self.comm.is_shutdown(): 229 | self.comm.shutdown() 230 | if self.sbd: 231 | self.sbd.cleanup() 232 | 233 | def __del__(self): 234 | """Invoke shutdown().""" 235 | # Exception guard if we have an error before comm init. 236 | try: 237 | self.shutdown() 238 | except AttributeError: 239 | pass 240 | 241 | def die_handler(self, msg): 242 | """Handle a die message by exiting.""" 243 | sys.exit("Told to die.") 244 | 245 | def cmd_handler(self, msg): 246 | """Handle a CMD message by running the command. 247 | 248 | The message contains the following fields: 249 | command - A Command object to run. 250 | ranks - An optional interval of ranks on which to run. 251 | 252 | """ 253 | if self.doing_startup: 254 | print("Ignoring command during startup.") 255 | return 256 | if msg.command.command == "gdb-exit": 257 | # Special case for quit. 258 | self.quit = True 259 | ranks = self.comm.get_mpiranks() 260 | if hasattr(msg, "ranks"): 261 | ranks = msg.ranks 262 | if not self.run_gdb_command(msg.command, ranks): 263 | # TODO: Send die message. 
264 | print("Managed to get a bad command '{0}'.".format(msg.command)) 265 | 266 | def kill_handler(self, msg): 267 | """Handle a kill message, killing all processes.""" 268 | self.kill_inferiors() 269 | 270 | def filter_handler(self, msg): 271 | """Handle a filter message by adding the filter.""" 272 | self.filters.update(msg.filter_types) 273 | 274 | def unfilter_handler(self, msg): 275 | """Handle an unfilter message by removing the filter.""" 276 | self.filters.difference_update(msg.filter_types) 277 | 278 | def varprint_handler(self, msg): 279 | """Handle the varprint message and begin sequence.""" 280 | self.variable_printer.varprint_handler(msg) 281 | 282 | def is_filterable(self, record): 283 | """Check whether a given record can be filtered.""" 284 | record_set = record.record_subtypes.union([record.record_type]) 285 | if record_set.intersection(self.filters): 286 | return True 287 | return False 288 | 289 | def file_data_handler(self, msg): 290 | """Handle a response with file data.""" 291 | if self.sbd: 292 | self.sbd.file_data_handler(msg) 293 | else: 294 | print("Got SBD file data when SBD is not enabled") 295 | 296 | def main(self): 297 | """Main send/receive loop. 298 | 299 | This receives data on MRNet (non-blocking), processes the messages, 300 | and then sends any data that was read from GDB. This then sleeps for a 301 | short while to avoid heavy CPU use. 302 | 303 | """ 304 | while True: 305 | if self.quit: 306 | break 307 | 308 | if self.sbd: 309 | # Check for data from the GDB process for LOAD_FILE. 310 | self.sbd.sbd_check() 311 | 312 | msg = self.comm.recv(blocking=False) 313 | if msg is not None: 314 | # Received data. 315 | if msg.msg_type in self.msg_handlers: 316 | self.msg_handlers[msg.msg_type](msg) 317 | else: 318 | print("Got a message {0} with no handler.".format( 319 | msg.msg_type)) 320 | 321 | records = [] 322 | ranks = [] 323 | for record in self.gdb.read(): 324 | self.record_handler.handle(record) 325 | if not self.is_filterable(record): 326 | records.append(record) 327 | if (record.token is not None and 328 | record.token in self.token_rank_map): 329 | ranks.append(self.token_rank_map[record.token]) 330 | elif (hasattr(record, "thread_id") and 331 | record.thread_id in self.thread_rank_map): 332 | ranks.append(self.thread_rank_map[record.thread_id]) 333 | else: 334 | ranks.append(self.comm.get_mpiranks()) 335 | if records: 336 | arecs = combine_records(records, ranks) 337 | if self.doing_startup: 338 | self.startup_arecs = combine_aggregated_records( 339 | self.startup_arecs + arecs) 340 | else: 341 | if not self.doing_startup and self.startup_arecs: 342 | arecs = combine_aggregated_records( 343 | self.startup_arecs + arecs) 344 | self.comm.send(GDBMessage(OUT_MSG, record=arecs), 345 | self.comm.frontend) 346 | self.startup_arecs = None 347 | else: 348 | self.comm.send(GDBMessage(OUT_MSG, record=arecs), 349 | self.comm.frontend) 350 | 351 | # Sleep a bit to reduce banging on the CPU. 352 | time.sleep(0.01) 353 | # Wait for GDB to exit. 354 | exited = False 355 | while not exited: 356 | exited = not self.gdb.is_running() 357 | # Shut everything else down. 358 | self.shutdown() 359 | 360 | if __name__ == "__main__": 361 | # This is run by LaunchMON. 362 | gdbbe = GDBBE() 363 | gdbbe.main() 364 | -------------------------------------------------------------------------------- /pgdb/src/gdbfe.py: -------------------------------------------------------------------------------- 1 | """The front-end interface to PGDB. 
2 | 3 | This handles user input, deploying the network and remote debuggers, and everything else related to 4 | this. 5 | 6 | """ 7 | 8 | import os, os.path, threading, signal 9 | from collections import deque 10 | from conf import gdbconf 11 | from gdb_shared import * 12 | from comm import * 13 | from mi.gdbmicmd import GDBMICmd 14 | from mi.gdbmi_recordhandler import GDBMIRecordHandler 15 | from mi.varobj import VariableObject, VariableObjectManager 16 | from mi.commands import Command 17 | from mi.gdbmiarec import GDBMIAggregatedRecord, combine_aggregated_records 18 | from mi.gdbmipprinter import GDBMIPrettyPrinter 19 | from interval import Interval 20 | from sbd import SBDFE 21 | 22 | class GDBFE (GDBMICmd): 23 | """The front-end to PGDB.""" 24 | 25 | def init_handlers(self): 26 | """Initialize the message handlers and the record handler.""" 27 | # Set up message handlers. 28 | self.msg_handlers = { 29 | DIE_MSG: self.die_handler, 30 | QUIT_MSG: self.quit_handler, 31 | OUT_MSG: self.out_handler, 32 | VARPRINT_RES_MSG: self.varprint_res_handler, 33 | LOAD_FILE: self.load_file_handler 34 | } 35 | # Now record handlers. 36 | self.record_handler = GDBMIRecordHandler() 37 | 38 | def remote_init(self): 39 | """Initialize things related to the remote communication and back-end daemons.""" 40 | self.comm = CommunicatorFE(True) # Initialize with locking. 41 | # One of {pid} and {launcher, launcher_args} will not be None, based 42 | # upon the command line input parsing. 43 | ret = self.comm.init_lmon(self.lmon_attach, pid = self.lmon_pid, 44 | launcher = self.lmon_launcher, 45 | launcher_args = self.lmon_launcher_argv, 46 | host = self.lmon_host) 47 | if not ret: 48 | # Terminate. Note at this point main is still waiting on the remote_up event, 49 | # so we have to set it. 50 | self.remote_up.set() 51 | self.interrupt_main() 52 | return False 53 | ret = self.comm.init_mrnet(local = self.local_launch) 54 | if not ret: 55 | # Terminate. See prior comment about remote_up. 56 | self.remote_up.set() 57 | self.interrupt_main() 58 | return False 59 | self.varobjs = {} 60 | for rank in self.comm.get_mpiranks(): 61 | self.varobjs[rank] = VariableObjectManager() 62 | self.init_handlers() 63 | self.pprinter = GDBMIPrettyPrinter() 64 | self.sleep_time = 0.1 65 | self.blocks = [] 66 | try: 67 | self.blocks += gdbconf.default_blocks 68 | except AttributeError: pass 69 | # Initialize the SBD system if needed. 70 | if gdbconf.use_sbd: 71 | self.sbd = SBDFE(self.comm) 72 | else: 73 | self.sbd = None 74 | return True 75 | 76 | def __init__(self): 77 | """Initialize some local things; the remote initialization must be done separately.""" 78 | GDBMICmd.__init__(self) 79 | self.quit = False 80 | self.is_shutdown = False 81 | # Need to disable readline. 82 | self.completekey = None 83 | # Event triggered when remote_init completes in the remote thread. 84 | self.remote_up = threading.Event() 85 | # Temporary list for building up aggregated records from OUT messages. 86 | self.arec_list = [] 87 | # Output history for expanding commands. 88 | self.output_history = [] 89 | # Get our PID for signals. 90 | self.my_pid = os.getpid() 91 | 92 | def interrupt_main(self): 93 | """Interrupt the main thread. 94 | 95 | This works because in Python, the main thread is the one that processes signals. 96 | Under Python 3 this could be replaced with signal.pthread_kill, though the 97 | os.kill approach works there as well.
98 | """ 99 | os.kill(self.my_pid, signal.SIGINT) 100 | 101 | def parse_args(self): 102 | """Parse the command-line arguments and set appropriate variables.""" 103 | # Optparse unfortunately doesn't work here. 104 | self.lmon_attach = None 105 | self.lmon_pid = None 106 | self.lmon_launcher = None 107 | self.lmon_launcher_argv = None 108 | self.lmon_host = None 109 | self.local_launch = False 110 | for i in range(1, len(sys.argv)): 111 | if sys.argv[i] == "-p" or sys.argv[i] == "--pid": 112 | self.lmon_attach = True 113 | if len(sys.argv) == i + 1: 114 | print "Must provide a PID with {0}.".format(sys.argv[i]) 115 | sys.exit(0) 116 | try: 117 | self.lmon_pid = int(sys.argv[i + 1]) 118 | except ValueError: 119 | print "Must provide a valid PID." 120 | sys.exit(0) 121 | i += 1 122 | elif sys.argv[i] == "--launcher": 123 | if len(sys.argv) == i + 1: 124 | print "Must provide a launcher with --launcher." 125 | sys.exit(0) 126 | self.lmon_launcher = sys.argv[i + 1] 127 | i += 1 128 | elif sys.argv[i] == "--local": 129 | self.local_launch = True 130 | elif sys.argv[i] == "-h" or sys.argv[i] == "--host": 131 | if len(sys.argv) == i + 1: 132 | print "Must provide a host with --host." 133 | sys.exit(0) 134 | self.lmon_host = sys.argv[i + 1] 135 | i += 1 136 | elif sys.argv[i] == "-a": 137 | if not hasattr(self, "lmon_launcher"): 138 | self.lmon_launcher = "srun" 139 | self.lmon_attach = False 140 | self.lmon_launcher_argv = sys.argv[i + 1:] 141 | break 142 | elif sys.argv[i] == "--sbd": 143 | # Override the configuration option. 144 | gdbconf.use_sbd = True 145 | if self.lmon_attach is None: 146 | print "Arguments: (one of -p/--pid and -a is required)" 147 | print "-p, --pid : attach to mpirun process " 148 | print "-a : pass verbatim to the resource manager for launching." 149 | print "--launcher : use binary to launch." 150 | print "--local: deploy for debugging just on the local node" 151 | print "-h/--host: the host the mpirun process is running on" 152 | print "--sbd: use the Scalable Binary Deployment system" 153 | sys.exit(0) 154 | 155 | def shutdown(self): 156 | """Shut down the network if not already shut down.""" 157 | if not self.comm.is_shutdown(): 158 | self.comm.shutdown() 159 | 160 | def __del__(self): 161 | """Invoke shutdown().""" 162 | # Need to catch a potential exception when comm does not exist. 163 | # This occurs if there is an error before comm init. 164 | try: 165 | self.shutdown() 166 | except AttributeError: pass 167 | 168 | def die_handler(self, msg): 169 | """Handle a die message. Presently does nothing.""" 170 | pass 171 | 172 | def quit_handler(self, msg): 173 | """Handle a quit message. Presently does nothing.""" 174 | pass 175 | 176 | def out_handler(self, msg): 177 | """Handle an out message by adding the arec to the temporary list.""" 178 | if self.arec_list: 179 | self.arec_list = combine_aggregated_records(self.arec_list + msg.record) 180 | else: 181 | self.arec_list = msg.record 182 | 183 | def process_out_messages(self): 184 | """Go through the temporary arec_list and pretty-print records.""" 185 | for arec in self.arec_list: 186 | # Add the record to the history. 187 | self.output_history = [arec] + self.output_history 188 | if len(self.output_history) > gdbconf.history_length: 189 | # Remove the last (oldest) element. 190 | self.output_history.pop() 191 | record_classes = arec.get_record_classes() 192 | class_key = max(record_classes, 193 | key = lambda x: len(record_classes[x])) 194 | # Only print the lowest-rank entry in the class. 
195 | ranks = record_classes[class_key] 196 | record = arec.get_record(ranks.get_smallest()) 197 | # Note that this may not work if things don't support lists of ranks. 198 | if all(self.record_handler.handle(record, rank = ranks)): 199 | self.pprinter.pretty_print(record, ranks) 200 | if len(record_classes) > 1: 201 | print "Some results from {0} omitted; use expand to view.".format(arec.get_ranks()) 202 | self.arec_list = [] 203 | 204 | def varprint_res_handler(self, msg): 205 | """Handle a varprint result message by pretty-printing the variable objects.""" 206 | if msg.err: 207 | print "[{0}] {1}".format(msg.rank, msg.msg) 208 | elif msg.varobj: 209 | self.varobjs[msg.rank].add_var_obj(msg.varobj) 210 | print self.pprinter.varobj_pretty_print(msg.varobj, tag = msg.rank)[:-1] 211 | else: 212 | print "[{0}] Received a bad varobj!".format(msg.rank) 213 | 214 | def load_file_handler(self, msg): 215 | """Handle a load file message by loading the file and broadcasting it.""" 216 | if self.sbd: 217 | self.sbd.load_file(msg.filename) 218 | else: 219 | print "Received SBD LOAD_FILE request when SBD is not enabled." 220 | 221 | def parse_filter_spec(self, spec): 222 | """Parse a filter specification into a list of record type.""" 223 | split = spec.lower().split() 224 | if len(split) == 0: 225 | print "Bad filter specification." 226 | return None 227 | return split 228 | 229 | def do_filter(self, cmd, targets = None): 230 | """Tell the back-end daemons to filter something. 231 | 232 | The input is a list of record types and subtypes. A record containing 233 | any of these will be filtered. 234 | 235 | """ 236 | record_types = set(self.parse_filter_spec(cmd)) 237 | if not record_types: 238 | return 239 | self.comm.send(GDBMessage(FILTER_MSG, filter_types = record_types), 240 | self.comm.broadcast) 241 | 242 | def do_unfilter(self, cmd, targets = None): 243 | """Tell the back-end daemons to unfilter something.""" 244 | record_types = set(self.parse_filter_spec(cmd)) 245 | if not record_types: 246 | return 247 | self.comm.send(GDBMessage(UNFILTER_MSG, filter_types = record_types), 248 | self.comm.broadcast) 249 | 250 | def parse_proc_spec(self, proc_spec): 251 | """Parse a processor specification.""" 252 | targets = [] 253 | # Handle some special cases for sending to all processors. 254 | if proc_spec.lower() == "all" or proc_spec == "-1": 255 | return -1 256 | for group in proc_spec.split(","): 257 | tup = group.split("-") 258 | try: 259 | if len(tup) == 1: 260 | targets.append((int(tup[0]), int(tup[0]))) 261 | else: 262 | targets.append((int(tup[0]), int(tup[1]))) 263 | except ValueError: 264 | print "Bad processor specification." 265 | return 266 | return Interval(targets) 267 | 268 | def do_proc(self, cmd, targets = None): 269 | """Handle the "proc" command to send commands to a subset of remote nodes based on MPI rank.""" 270 | if targets: 271 | print "Recursive proc is not recursive." 272 | return 273 | proc_spec = None 274 | for i, char in enumerate(cmd): 275 | if char == " ": 276 | proc_spec = cmd[0:i] 277 | line = cmd[i + 1:].strip() 278 | break 279 | if not proc_spec: 280 | print "Bad processor specification." 281 | return 282 | 283 | targets = self.parse_proc_spec(proc_spec) 284 | if not (targets - self.comm.get_mpiranks()).empty(): 285 | print "Out-of-range processor specification." 
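            # e.g., with ranks 0-7 in the job, "proc 5-9 step" is rejected
            # because ranks 8-9 do not exist.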
286 | return 287 | cmd = self.resolve_gdbmi_command(line, err = False) 288 | if cmd: 289 | self.comm.send(GDBMessage(CMD_MSG, command = cmd, ranks = targets), targets) 290 | else: 291 | split = line.split() 292 | cmd = split[0] 293 | rest = " ".join(split[1:]) 294 | if hasattr(self, "do_" + cmd): 295 | func = getattr(self, "do_" + cmd) 296 | func(rest, targets = targets) 297 | 298 | def do_block(self, cmd, targets = None): 299 | """Block all output from a subset of nodes.""" 300 | to_block = self.parse_proc_spec(cmd) 301 | if not to_block: 302 | return 303 | # This is quite inefficient and will not scale. 304 | for target in to_block.members(): 305 | if target not in self.blocks and target in self.comm.get_mpiranks(): 306 | self.blocks.append(target) 307 | 308 | def do_unblock(self, cmd, targets = None): 309 | """Unblock output from a subset of nodes.""" 310 | to_unblock = self.parse_proc_spec(cmd) 311 | if not to_unblock: 312 | return 313 | keys = [] 314 | for k, v in enumerate(self.blocks): 315 | if v in to_unblock: 316 | keys.append(k) 317 | for k in keys: 318 | del self.blocks[k] 319 | 320 | def do_varprint(self, cmd, targets = None): 321 | """Run the varprint command.""" 322 | if not targets: 323 | targets = self.comm.get_mpiranks() 324 | cmd_split = cmd.split(" ") 325 | var = cmd 326 | # Strip quotes, if present. 327 | if var[0] == '"' and var[-1] == '"': 328 | var = var[1:-1] 329 | self.comm.send(GDBMessage(VARPRINT_MSG, name = var, ranks = targets), targets) 330 | 331 | def do_varassign(self, cmd, targets = None): 332 | """Run the varassign command.""" 333 | if not targets: 334 | targets = self.comm.get_mpiranks() 335 | split = cmd.split("=") 336 | if len(split) != 2: 337 | print "varassign format is: var = val" 338 | return 339 | var = split[0].strip() 340 | if var[0] == '"' and var[-1] == '"': 341 | var = var[1:-1] 342 | val = split[1].strip() 343 | for rank in targets.members(): 344 | full_name = self.varobjs[rank].get_full_name(var) 345 | if not full_name: 346 | print "Variable not found on rank {0}.".format(rank) 347 | continue 348 | self.comm.send(GDBMessage(CMD_MSG, 349 | command = Command("var-assign", 350 | args = ('"' + full_name + '"', '"' + val + '"')), 351 | ranks = rank), 352 | rank) 353 | 354 | def do_help(self, cmd, targets = None): 355 | """Run the help command.""" 356 | if not targets: 357 | # Because this makes the most sense, unless told otherwise, we run this on one processor. 358 | targets = 0 359 | self.comm.send(GDBMessage(CMD_MSG, command = Command("interpreter-exec", 360 | args = ("console", '"help ' + cmd + '"')), 361 | ranks = targets), 362 | targets) 363 | 364 | def do_kill(self, cmd, targets = None): 365 | """Kill all targets being debugged.""" 366 | # This always sends to all targets, for now. 367 | print "Sending SIGTERM to all inferiors. (May need to step them for them to die.)" 368 | self.comm.send(GDBMessage(KILL_MSG), self.comm.broadcast) 369 | 370 | def do_quit(self, cmd, targets = None): 371 | """Gracefully quit PGDB.""" 372 | self.quit = True 373 | self.comm.send(GDBMessage(CMD_MSG, command = Command("gdb-exit")), self.comm.broadcast) 374 | 375 | def do_expand(self, cmd, targets = None): 376 | """Expand output. 377 | 378 | Use: [proc <spec>] expand [history-item] 379 | Expand history-item for the given processors. 380 | 381 | """ 382 | if not targets: 383 | targets = self.comm.get_mpiranks() 384 | split = cmd.split(" ") 385 | history_item = 0 386 | if len(split) > 1: 387 | if not split[1].isdigit(): 388 | print "Incorrect history specification."
389 | return 390 | history_item = int(split[1]) 391 | if history_item >= len(self.output_history): 392 | print "No such history item {0}".format(history_item) 393 | return 394 | arec = self.output_history[history_item] 395 | # We only care about the IDs that are present in both. 396 | ids = targets.intersect(arec.get_ranks()) 397 | for vid in ids: 398 | self.pprinter.pretty_print(arec.get_record(vid), Interval(vid)) 399 | 400 | def dispatch_gdbmi_command(self, command): 401 | """Send a GDB command to every rank (use proc to send to subsets).""" 402 | if self.comm.is_shutdown(): 403 | return False 404 | return self.comm.send(GDBMessage(CMD_MSG, command = command), 405 | self.comm.broadcast) 406 | 407 | def handle_msg(self, msg): 408 | """Handle a received message.""" 409 | if msg.msg_type in self.msg_handlers: 410 | self.msg_handlers[msg.msg_type](msg) 411 | else: 412 | print "Got a message {0} with no handler.".format(msg.msg_type) 413 | 414 | def remote_body(self): 415 | """The main remote body thread. 416 | 417 | This initializes the remote infrastructure, and receives and processes data. 418 | 419 | """ 420 | # Must do the init inside of this thread, or else LaunchMON steals stdin. 421 | if not self.remote_init(): 422 | return False 423 | # Signal main thread we can use stdin. 424 | self.remote_up.set() 425 | print "PGDB deployed to {0} hosts and {1} processors.".format( 426 | self.comm.get_mrnet_network_size(), 427 | self.comm.get_proctab_size()) 428 | recvd = False 429 | while not self.quit and not self.comm.all_nodes_exited(): 430 | # Receive data, if any. 431 | msg = self.comm.recv(blocking = False) 432 | if msg is not None: 433 | # Received data. 434 | self.handle_msg(msg) 435 | recvd = True 436 | else: 437 | recvd = False 438 | 439 | # Keep from beating up the CPU too much. 440 | if not recvd: 441 | self.process_out_messages() 442 | time.sleep(self.sleep_time) 443 | self.shutdown() 444 | print "Remote shut down." 445 | self.interrupt_main() 446 | 447 | def local_body(self): 448 | """The local command input loop.""" 449 | # Wait until we can use stdin. 450 | try: 451 | self.remote_up.wait() 452 | os.dup2(self.stdin_copy, 0) 453 | os.close(self.stdin_copy) 454 | self.cmdloop() 455 | except KeyboardInterrupt: 456 | print "Terminating." 457 | sys.exit(0) 458 | 459 | def run(self): 460 | """Start the remote thread and run the local command input loop.""" 461 | self.parse_args() 462 | # This is part of a hack to keep LaunchMON from stealing stdin. 463 | self.stdin_copy = os.dup(0) 464 | os.close(0) 465 | self.remote_thread = threading.Thread(target = self.remote_body) 466 | self.remote_thread.daemon = True 467 | self.remote_thread.start() 468 | self.local_body() 469 | -------------------------------------------------------------------------------- /pgdb/src/gdblocal.py: -------------------------------------------------------------------------------- 1 | """A simple wrapper for running one GDB instance locally for testing. 2 | 3 | This simply starts a single GDB process, passes input through with relatively little command 4 | parsing, and data out using the standard pretty-printer. A lot of the more fancy special commands 5 | in the full interface are not available. This is primarily for testing things. 
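An illustrative session (assuming it is run from pgdb/src so its imports
resolve): run "python gdblocal.py", then enter MI commands such as
"file-exec-and-symbols ./a.out" followed by "exec-run".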
6 | 7 | """ 8 | 9 | import threading 10 | from conf import gdbconf 11 | from mi.gdbmi import GDBMachineInterface 12 | from mi.gdbmicmd import GDBMICmd 13 | from mi.gdbmipprinter import GDBMIPrettyPrinter 14 | 15 | class GDBMILocal (GDBMICmd): 16 | """Simple class for running one instance of GDB locally via the MI interface.""" 17 | 18 | # Override the prompt from GDBMICmd and Cmd. 19 | prompt = "" 20 | 21 | def __init__(self): 22 | """Initialize GDBMICmd and load the machine interface, spawning GDB.""" 23 | GDBMICmd.__init__(self) 24 | self.pprinter = GDBMIPrettyPrinter() 25 | self.gdb = GDBMachineInterface(gdb_args = ["-x", gdbconf.gdb_init_path]) 26 | self.dispatch_gdbmi_command_string("enable-pretty-printing") 27 | 28 | def dispatch_gdbmi_command(self, command): 29 | """Over-ridden dispatch command to run GDBMI commands.""" 30 | if self.gdb.is_running(): 31 | self.gdb.send(command) 32 | 33 | def read_thread(self): 34 | """Primary thread for reading from GDB. 35 | 36 | This repeatedly invokes the read command with a short pause and pretty-prints the output. 37 | 38 | """ 39 | while True: 40 | for record in self.gdb.read(1): 41 | self.pprinter.pretty_print(record) 42 | 43 | def run(self): 44 | """Over-ridden run. Sets up the read thread and starts it.""" 45 | read_thread = threading.Thread(target = self.read_thread) 46 | read_thread.daemon = True 47 | read_thread.start() 48 | self.cmdloop() 49 | 50 | gdb = GDBMILocal() 51 | gdb.run() 52 | -------------------------------------------------------------------------------- /pgdb/src/interval.py: -------------------------------------------------------------------------------- 1 | """A simple integer interval representation.""" 2 | 3 | class Interval(object): 4 | """Efficiently store and support queries for disjoint integer intervals. 5 | 6 | This uses O(n) memory, less if there are many contiguous intervals, and 7 | O(logn) to test for membership. Construction takes O(nlogn) for unsorted 8 | data and O(n) for sorted data. 9 | 10 | This compresses contiguous intervals when possible and uses binary search 11 | for membership testing. 12 | 13 | """ 14 | 15 | def __init__(self, src, is_sorted=False): 16 | """Initialize the intervals. 17 | 18 | src - the data to construct the interval from. One of the following: 19 | - a single integer 20 | - an Interval 21 | - a list of disjoint intervals as tuples 22 | - a list of integers 23 | These should all be non-negative. 24 | is_sorted - whether the aforementioned lists are already sorted or not. 25 | 26 | """ 27 | if isinstance(src, int): 28 | src = [src] 29 | is_sorted = True 30 | elif isinstance(src, Interval): 31 | self.intervals = src.intervals 32 | return 33 | elif not isinstance(src, list): 34 | raise ValueError("Input is not a list or integer.") 35 | self.intervals = [] 36 | if not len(src): 37 | # Empty, nothing more to do. 38 | return 39 | lis = False # Identify the type of input. 40 | if isinstance(src[0], int): 41 | lis = True 42 | elif not isinstance(src[0], tuple): 43 | raise ValueError("Input is not a list of ints or tuples.") 44 | if not is_sorted: 45 | if lis: 46 | src.sort() 47 | else: 48 | src.sort(key=lambda x: x[0]) 49 | if lis: 50 | # Construct compressed intervals from a list of integers. 51 | cur_min = src[0] 52 | cur_max = src[0] 53 | for i in src[1:]: 54 | if i == cur_max + 1: 55 | # We have another contiguous integer. 56 | # Add it to the current interval. 57 | cur_max += 1 58 | else: 59 | # Not contiguous, store the present interval and start anew. 
60 | self.intervals.append((cur_min, cur_max)) 61 | cur_min = i 62 | cur_max = i 63 | # Append the last interval. 64 | self.intervals.append((cur_min, cur_max)) 65 | else: 66 | # Interval compression for existing intervals. 67 | cur = src[0] 68 | for interval in src[1:]: 69 | if interval[0] == cur[1] + 1: 70 | # The intervals are contiguous. 71 | cur = (cur[0], interval[1]) 72 | else: 73 | # Not contiguous, store cur. 74 | self.intervals.append(cur) 75 | cur = interval 76 | # Append the last interval. 77 | self.intervals.append(cur) 78 | 79 | def _binary_search_intervals(self, i): 80 | """Return the index of the interval that contains i, if any. 81 | 82 | This uses a binary search over the intervals. 83 | 84 | """ 85 | low = 0 86 | high = len(self.intervals) 87 | while low < high: 88 | mid = (low + high) // 2 89 | v = self.intervals[mid] 90 | if i < v[0]: 91 | high = mid 92 | elif i > v[1]: 93 | low = mid + 1 94 | else: 95 | return mid 96 | return None 97 | 98 | @staticmethod 99 | def _interval_intersect(intv1, intv2): 100 | """Return the intersection of intervals intv1 and intv2. 101 | 102 | intv1 and intv2 should be tuples of the form (low, high). 103 | 104 | """ 105 | if intv1[0] <= intv2[1] and intv2[0] <= intv1[1]: 106 | # The intervals have a non-empty intersection. 107 | return (max(intv1[0], intv2[0]), min(intv1[1], intv2[1])) 108 | else: 109 | return None 110 | 111 | @staticmethod 112 | def _interval_difference(intv1, intv2): 113 | """Return the difference of intervals intv1 and intv2. 114 | 115 | intv1 and intv2 should be tuples of the form (low, high). 116 | 117 | """ 118 | if intv1[0] <= intv2[1] and intv2[0] <= intv1[1]: 119 | # We have a non-empty intersection. 120 | if intv1[0] < intv2[0]: 121 | if intv1[1] <= intv2[1]: 122 | return [(intv1[0], intv2[0] - 1)] 123 | else: 124 | return [(intv1[0], intv2[0] - 1), (intv2[1] + 1, intv1[1])] 125 | elif intv2[0] < intv1[0]: 126 | if intv1[1] <= intv2[1]: 127 | return None 128 | else: 129 | return [(intv2[1] + 1, intv1[1])] 130 | elif intv2[1] < intv1[1]: 131 | return [(intv2[1] + 1, intv1[1])] 132 | else: 133 | return None 134 | else: 135 | return [intv1] 136 | 137 | @staticmethod 138 | def _union_intersecting_intervals(intv1, intv2): 139 | """Return the union of two intersecting intervals.""" 140 | return (min(intv1[0], intv2[0]), max(intv1[1], intv2[1])) 141 | 142 | def in_interval(self, i): 143 | """Check if an integer i is in one of the intervals here. 144 | 145 | This does a binary search of the intervals. 146 | 147 | """ 148 | if self._binary_search_intervals(i) is not None: 149 | return True 150 | return False 151 | 152 | def get_smallest(self): 153 | """Return the smallest value in the interval.""" 154 | return self.intervals[0][0] 155 | 156 | def get_largest(self): 157 | """Return the largest value in the interval.""" 158 | return self.intervals[-1][1] 159 | 160 | def members(self): 161 | """A generator of every integer in the intervals.""" 162 | if not self.intervals: 163 | return 164 | cur_intv = 0 165 | cur_i = self.intervals[cur_intv][0] 166 | while True: 167 | yield cur_i 168 | cur_i += 1 169 | if cur_i > self.intervals[cur_intv][1]: 170 | cur_intv += 1 171 | if cur_intv >= len(self.intervals): 172 | break 173 | cur_i = self.intervals[cur_intv][0] 174 | 175 | def intersect(self, other): 176 | """Return the intersection of this interval with the given interval. 177 | 178 | This takes O(n) time. 
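        For example, intersecting {[0,5],[8,9]} with {[3,8]} yields
        {[3,5],[8,8]}.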
179 | 180 | """ 181 | if not len(other): 182 | return Interval([], is_sorted=True) 183 | k = 0 184 | intersection = [] 185 | for interval in self.intervals: 186 | while k < len(other): 187 | intersect = self._interval_intersect(interval, 188 | other.intervals[k]) 189 | if intersect: 190 | intersection.append(intersect) 191 | if other.intervals[k][1] <= interval[1]: 192 | k += 1 193 | else: 194 | break 195 | else: 196 | if other.intervals[k][1] < interval[0]: 197 | k += 1 198 | else: 199 | break 200 | return Interval(intersection, is_sorted=True) 201 | 202 | def intersect_list(self, lis): 203 | """Return a list of items that are in both the list and this interval. 204 | 205 | Takes O(klogn) time where k = len(lis). 206 | 207 | """ 208 | intersection = [] 209 | for i in lis: 210 | if self.in_interval(i): 211 | intersection.append(i) 212 | return intersection 213 | 214 | def union(self, other): 215 | """Return the union of this interval with the given interval. 216 | 217 | This takes O(n) time. 218 | 219 | """ 220 | if not len(other): 221 | return Interval(self.intervals, is_sorted=True) 222 | if not len(self): 223 | return Interval(other.intervals, is_sorted=True) 224 | i = 1 225 | k = 0 226 | new = [] 227 | cur = self.intervals[0] 228 | while i < len(self) or k < len(other): 229 | pasti = False 230 | pastk = False 231 | if i < len(self): 232 | interval = self.intervals[i] 233 | if self._interval_intersect(interval, cur): 234 | # Extend the current interval and advance. 235 | cur = self._union_intersecting_intervals(interval, cur) 236 | i += 1 237 | else: 238 | if interval[1] < cur[0]: 239 | # We're before the current interval. 240 | new.append(interval) 241 | i += 1 242 | else: 243 | # We're past the current interval. 244 | pasti = True 245 | else: 246 | pasti = True 247 | if k < len(other): 248 | interval = other.intervals[k] 249 | if self._interval_intersect(interval, cur): 250 | # Extend and advance. 251 | cur = self._union_intersecting_intervals(interval, cur) 252 | k += 1 253 | else: 254 | if interval[1] < cur[0]: 255 | # Before current interval. 256 | new.append(interval) 257 | k += 1 258 | else: 259 | # Past the current interval. 260 | pastk = True 261 | else: 262 | pastk = True 263 | if pasti and pastk: 264 | new.append(cur) 265 | if i < len(self): 266 | cur = self.intervals[i] 267 | elif k < len(other): 268 | cur = other.intervals[k] 269 | elif i == len(self) and k == len(other): 270 | new.append(cur) 271 | return Interval(new, is_sorted=True) 272 | 273 | def difference(self, other): 274 | """Return the set-theoretic difference of this interval with other. 275 | 276 | This is every element of this interval that is not also in other. 277 | 278 | This takes O(n) time. 279 | 280 | """ 281 | if not len(other): 282 | # If other is empty, it doesn't remove anything. 283 | return self 284 | k = 0 285 | new = [] 286 | for interval in self.intervals: 287 | while k < len(other): 288 | # Iterate over intervals in other until the intervals cannot 289 | # modify the current interval, then add whatever is left of 290 | # the current interval to new. 291 | if self._interval_intersect(interval, other.intervals[k]): 292 | # We need to remove something from the current interval. 293 | diff = self._interval_difference(interval, 294 | other.intervals[k]) 295 | if diff: 296 | # There is some portion remaining, but we can only add 297 | # the portion that is to the left of the start of the 298 | # other interval, as it may be removed later.
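                        # e.g., (0, 9) minus (3, 5) leaves [(0, 2), (6, 9)],
                        # while (0, 9) minus (7, 12) leaves only [(0, 6)].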
299 | if len(diff) == 1: 300 | if interval[0] < other.intervals[k][0]: 301 | # interval begins before other, so we can just 302 | # add the difference, as other extends past the 303 | # right end of interval. 304 | new += diff 305 | # Since other goes past the right end, advance 306 | # interval. 307 | break 308 | else: 309 | # We cut off the left portion of interval, so we 310 | # need to advance other. 311 | k += 1 312 | interval = diff[0] 313 | else: 314 | # len(diff) == 2, and the middle was removed. 315 | # Keep the right portion and advance other. 316 | new.append(diff[0]) 317 | interval = diff[1] 318 | k += 1 319 | else: 320 | # Completely removed interval, advance to next. 321 | break 322 | else: 323 | # No intersection, advance to next in other and possibly 324 | # add what's left of the current interval and advance. 325 | if interval[1] < other.intervals[k][0]: 326 | # Other is to the right of interval, add what's left 327 | # and advance interval. 328 | new.append(interval) 329 | break 330 | else: 331 | # Other is not to the right and did not intersect, so it 332 | # must be to the left. Advance other. 333 | k += 1 334 | if k >= len(other) and interval: 335 | # We've exhausted other, but still have some of interval, add it. 336 | new.append(interval) 337 | return Interval(new, is_sorted=True) 338 | 339 | def symmetric_difference(self, other): 340 | """Return the symmetric difference of this interval with other. 341 | 342 | This takes O(n) time. 343 | 344 | """ 345 | # This is equivalent to symmetric difference. 346 | return self.union(other).difference(self.intersect(other)) 347 | 348 | def range(self): 349 | """Return the interval from the left to the right side of this interval. 350 | 351 | More specifically, if this consists of {[x1,x'1],...,[xn,x'n]}, we 352 | return {[x1,x'n]}.
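        For example, the range of {[1,3],[7,9]} is {[1,9]}.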
353 | 
354 |         """
355 |         return Interval([(self.intervals[0][0],
356 |                           self.intervals[-1][1])],
357 |                         is_sorted=True)
358 | 
359 |     def empty(self):
360 |         """Return whether this interval is empty or not."""
361 |         return len(self.intervals) == 0
362 | 
363 |     def count(self):
364 |         """Return the number of elements that this interval represents."""
365 |         num = 0
366 |         for tup in self.intervals:
367 |             num += tup[1] - tup[0] + 1
368 |         return num
369 | 
370 |     def __contains__(self, i):
371 |         """Invoke in_interval."""
372 |         return self.in_interval(i)
373 | 
374 |     def __len__(self):
375 |         """Return the number of intervals stored in this interval."""
376 |         return len(self.intervals)
377 | 
378 |     def __eq__(self, other):
379 |         """Return true if two intervals are precisely the same."""
380 |         if not isinstance(other, Interval):
381 |             return NotImplemented
382 |         return len(self.intervals) == len(other.intervals) and all([x == y for x, y in zip(self.intervals, other.intervals)])
383 | 
384 |     def __ne__(self, other):
385 |         """Return the negation of what __eq__ returns."""
386 |         return not self.__eq__(other)
387 | 
388 |     def __hash__(self):
389 |         """Return a hash for this set."""
390 |         if not self.intervals:
391 |             return hash(None)
392 |         cur_hash = hash(self.intervals[0])
393 |         for interval in self.intervals[1:]:
394 |             cur_hash ^= hash(interval)
395 |         return cur_hash
396 | 
397 |     def __str__(self):
398 |         """Get a string representation of the set."""
399 |         string = ""
400 |         for i in self.intervals:
401 |             if i[0] == i[1]:
402 |                 string += "{0},".format(i[0])
403 |             else:
404 |                 string += "{0}-{1},".format(i[0], i[1])
405 |         return string[:-1]
406 | 
407 |     def __repr__(self):
408 |         """Get a raw representation of the set."""
409 |         return repr(self.intervals)
410 | 
411 |     def __iter__(self):
412 |         """Get an iterator for this set."""
413 |         return self.members()
414 | 
415 |     def __add__(self, other):
416 |         """Return the union of this set and other."""
417 |         if not isinstance(other, Interval):
418 |             return NotImplemented
419 |         return self.union(other)
420 | 
421 |     def __sub__(self, other):
422 |         """Return the set-theoretic difference of this set and other."""
423 |         if not isinstance(other, Interval):
424 |             return NotImplemented
425 |         return self.difference(other)
426 | 
427 |     def __and__(self, other):
428 |         """Return the intersection of this set and other."""
429 |         if isinstance(other, Interval):
430 |             return self.intersect(other)
431 |         elif isinstance(other, list):
432 |             return self.intersect_list(other)
433 |         return NotImplemented
434 | 
435 |     def __xor__(self, other):
436 |         """Return the symmetric difference of this set and other."""
437 |         if not isinstance(other, Interval):
438 |             return NotImplemented
439 |         return self.symmetric_difference(other)
440 | 
--------------------------------------------------------------------------------
/pgdb/src/mi/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ndryden/PGDB/88df53eca358c4478fa5e8f734b5ba47970e6bb9/pgdb/src/mi/__init__.py
--------------------------------------------------------------------------------
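A quick sketch of the Interval set algebra above (a hypothetical session; it assumes, as the methods themselves suggest, a constructor that accepts a sorted list of (low, high) tuples):

    from interval import Interval

    a = Interval([(0, 4), (8, 10)], is_sorted=True)
    b = Interval([(3, 9)], is_sorted=True)
    print a & b   # intersection: 3-4,8-9
    print a + b   # union: 0-10
    print a - b   # difference: 0-2,10
    print a ^ b   # symmetric difference: 0-2,5-7,10
    print 9 in a  # membership test: True

--------------------------------------------------------------------------------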
/pgdb/src/mi/gdbmi.py:
--------------------------------------------------------------------------------
1 | """An interface to GDB using its Machine Interface."""
2 | 
3 | import subprocess
4 | import fcntl
5 | import os
6 | import select
7 | from gdbmi_parser import GDBMIParser
8 | 
9 | class GDBMachineInterface:
10 |     """Manages the GDB Machine Interface."""
11 | 
12 |     def __init__(self, gdb="gdb", gdb_args=None, env=None):
13 |         """Initialize a new machine interface session with GDB."""
14 |         gdb_args = gdb_args or []
15 |         env = env or {}
16 |         env.update(os.environ)
17 |         args = [gdb, '--quiet', '--nx', '--nw', '--interpreter=mi2'] + gdb_args
18 |         self.process = subprocess.Popen(
19 |             args=args,
20 |             stdin=subprocess.PIPE,
21 |             stdout=subprocess.PIPE,
22 |             close_fds=True,
23 |             env=env
24 |         )
25 |         flags = fcntl.fcntl(self.process.stdout, fcntl.F_GETFL)
26 |         fcntl.fcntl(self.process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
27 | 
28 |         self.buffer = "" # Buffer for output from GDB.
29 |         self.parser = GDBMIParser() # Parser for output from GDB.
30 | 
31 |     def _read(self, timeout=0):
32 |         """A generator to read data from GDB's stdout."""
33 |         while True:
34 |             ready = select.select([self.process.stdout], [], [], timeout)
35 |             if not ready[0]:
36 |                 # No data to read.
37 |                 break
38 |             try:
39 |                 yield self.process.stdout.read()
40 |                 # Don't block on subsequent reads while we still have data.
41 |                 timeout = 0
42 |             except IOError:
43 |                 break
44 | 
45 |     def _write(self, data):
46 |         """Write data to GDB."""
47 |         try:
48 |             self.process.stdin.write(data + "\n")
49 |             self.process.stdin.flush()
50 |         except IOError:
51 |             return False
52 |         return True
53 | 
54 |     def send(self, command):
55 |         """Send a command to GDB.
56 | 
57 |         command is a Command object with the data to send.
58 | 
59 |         """
60 |         return self._write(command.generate_mi_command())
61 | 
62 |     def read(self, timeout=0):
63 |         """Generator to read, parse, and return data from GDB."""
64 |         for data in self._read(timeout):
65 |             self.buffer += data
66 |             while True:
67 |                 (before, newline, self.buffer) = self.buffer.rpartition("\n")
68 |                 if newline:
69 |                     records = self.parser.parse_output(before)
70 |                     for record in records:
71 |                         yield record
72 |                 else:
73 |                     return
74 | 
75 |     def is_running(self):
76 |         """Check if the GDB process is running."""
77 |         return self.process.poll() is None
78 | 
79 |     def get_pid(self):
80 |         """Return the PID of the GDB process."""
81 |         return self.process.pid
82 | 
--------------------------------------------------------------------------------
/pgdb/src/mi/gdbmi_parser.py:
--------------------------------------------------------------------------------
1 | """Parses GDB Machine Interface output into Python structures."""
2 | 
3 | import re
4 | from gdbmi_records import (RESULT_CLASS_DONE, RESULT_CLASS_RUNNING,
5 |                            RESULT_CLASS_CONNECTED, RESULT_CLASS_ERROR,
6 |                            RESULT_CLASS_EXIT, ASYNC_EXEC, ASYNC_STATUS,
7 |                            ASYNC_NOTIFY, STREAM_CONSOLE, STREAM_TARGET,
8 |                            STREAM_LOG, GDBMIAsyncRecord, GDBMIStreamRecord,
9 |                            GDBMIResultRecord, GDBMIUnknownRecord)
10 | 
11 | class GDBMIParser:
12 |     """Parse output from GDB into an AST."""
13 | 
14 |     _term = "(gdb)" # The terminator symbol
15 |     _result_record_symbol = "^"
16 |     _async_record_symbols = ["*", "+", "="]
17 |     _stream_record_symbols = ["~", "@", "&"]
18 |     _all_record_symbols = ([_result_record_symbol] + _async_record_symbols +
19 |                            _stream_record_symbols)
20 |     _result_class = {"done": RESULT_CLASS_DONE,
21 |                      "running": RESULT_CLASS_RUNNING,
22 |                      "connected": RESULT_CLASS_CONNECTED,
23 |                      "error": RESULT_CLASS_ERROR,
24 |                      "exit": RESULT_CLASS_EXIT}
25 |     _oob_mapper = {"*": ASYNC_EXEC,
26 |                    "+": ASYNC_STATUS,
27 |                    "=": ASYNC_NOTIFY,
28 |                    "~": STREAM_CONSOLE,
29 |                    "@": STREAM_TARGET,
30 |                    "&": STREAM_LOG}
31 | 
32 |     def __init__(self):
33 |         """Set up the parser."""
34 |         self.output_re = re.compile(r"([0-9]*)(" + "|".join(
35 |             ["\\" + item for item in self._all_record_symbols]) + ")(.*)")
36 |         self.result_re = re.compile(r"(" + "|".join(
self._result_class.keys()) + ")(.*)") 38 | self.async_re = re.compile(r"([a-zA-Z0-9_\-]*)(\,.*)?") 39 | self._value_parsers = {'{': self.parse_tuple, 40 | '[': self.parse_list, 41 | '"': self.parse_const} 42 | 43 | def parse_output(self, src): 44 | """Take a set of output from GDB and parse it into an AST. 45 | 46 | Returns a list of records. 47 | 48 | """ 49 | lines = src.split("\n") 50 | records = [] 51 | for line in lines: 52 | line = line.strip() 53 | # Check for the terminator. 54 | if line == self._term: 55 | continue 56 | else: 57 | parts = self.output_re.match(line) 58 | if not parts: 59 | records.append(GDBMIUnknownRecord.create_record(line)) 60 | continue 61 | token, symbol, rest = parts.groups() 62 | if not token: 63 | token = None 64 | else: 65 | token = int(token) 66 | if symbol == self._result_record_symbol: 67 | records.append(self.parse_result_record(token, rest)) 68 | else: 69 | records.append(self.parse_oob_record(token, symbol, rest)) 70 | return records 71 | 72 | def parse_result_record(self, token, src): 73 | """Parse a result record into a GDBMIResultRecord().""" 74 | parts = self.result_re.match(src) 75 | if not parts: 76 | raise ValueError(src) 77 | result_class, results = parts.groups() 78 | if not result_class: 79 | raise ValueError(src) 80 | return GDBMIResultRecord.create_record( 81 | token, 82 | self._result_class[result_class], 83 | self.parse_result_list(results[1:])) 84 | 85 | def parse_oob_record(self, token, symbol, src): 86 | """Parse an out-of-band record, either an async or a stream record.""" 87 | if symbol in self._async_record_symbols: 88 | return self.parse_async_record(token, symbol, src) 89 | else: 90 | # Stream records do not have tokens. 91 | return self.parse_stream_record(symbol, src) 92 | 93 | def parse_async_record(self, token, symbol, src): 94 | """Parse an exec, status, or notify async record.""" 95 | output_class, output = self.parse_async_output(src) 96 | return GDBMIAsyncRecord.create_record(self._oob_mapper[symbol], 97 | token, 98 | output_class, 99 | output) 100 | 101 | def parse_stream_record(self, symbol, src): 102 | """Parse a console, target, or log stream record.""" 103 | return GDBMIStreamRecord.create_record(self._oob_mapper[symbol], src) 104 | 105 | def parse_async_output(self, src): 106 | """Parse the output of an async record. 107 | 108 | Returns a tuple of the async class and a dict of results. 109 | 110 | """ 111 | match = self.async_re.match(src) 112 | if not match: 113 | raise ValueError(src) 114 | async_class, rest = match.groups() 115 | if rest: 116 | # Remove first comma. 117 | rest = rest[1:] 118 | if rest == "end": 119 | # Hack to catch the =traceframe-changed,end record. 120 | return async_class, {} 121 | return async_class, self.parse_result_list(rest) 122 | else: 123 | return async_class, {} 124 | 125 | def parse_result(self, src): 126 | """Parse a result into a (variable, value) tuple.""" 127 | variable, equal, value = src.partition("=") 128 | return variable, self.parse_value(value) 129 | 130 | def parse_value(self, src): 131 | """Parse a value, either a tuple, a list, or a constant.""" 132 | if src[0] in self._value_parsers: 133 | return self._value_parsers[src[0]](src) 134 | else: 135 | # There is a legacy format, key=value. Not supported. 136 | raise ValueError(src) 137 | 138 | def parse_tuple(self, src): 139 | """Parse a tuple into a dict of results.""" 140 | if src == "{}": 141 | # Empty tuple. 
142 | return {} 143 | return self.parse_result_list(src[1:-1]) 144 | 145 | def parse_list(self, src): 146 | """Parse a list into either a list of values, or a dict of results.""" 147 | if src == "[]": 148 | return [] 149 | src = src[1:-1] 150 | brackets = 0 151 | in_quote = False 152 | end = 0 153 | start = 0 154 | prev_char = "" 155 | results = [] 156 | # The structure of this is similar to parse_result_list. 157 | # But we may have a list of values instead, so we need to identify that. 158 | for char in src: 159 | if (char == "{" or char == "[") and not in_quote: 160 | brackets += 1 161 | elif (char == "}" or char == "]") and not in_quote: 162 | brackets -= 1 163 | elif char == '"' and prev_char != "\\": 164 | in_quote = not in_quote 165 | elif char == "=" and brackets == 0 and not in_quote: 166 | # We have a list of results, so use that logic instead. 167 | return self.parse_result_list(src) 168 | elif char == "," and brackets == 0 and not in_quote: 169 | # Found end of entry. 170 | results.append(self.parse_value(src[start:end])) 171 | start = end + 1 172 | end += 1 173 | prev_char = char 174 | # Parse the last value, if needed. 175 | if src[start:end]: 176 | results.append(self.parse_value(src[start:end])) 177 | return results 178 | 179 | def parse_const(self, src): 180 | """Parse a constant and return its value.""" 181 | # Just remove the quotes. 182 | return src[1:-1] 183 | 184 | def parse_result_list(self, src): 185 | """Parse a result list into a dict of results.""" 186 | length = 0 187 | brackets = 0 188 | in_quote = False 189 | results = {} 190 | variable_counts = {} 191 | variable = None 192 | right = "" 193 | prev_char = "" 194 | while True: 195 | (variable, sep, right) = src.partition("=") 196 | if not sep: 197 | break 198 | # Seek forward until we find the end of the value. 199 | # Account for nested lists and tuples. 200 | for char in right: 201 | if (char == "{" or char == "[") and not in_quote: 202 | brackets += 1 203 | elif (char == "}" or char == "]") and not in_quote: 204 | brackets -= 1 205 | elif char == '"' and prev_char != "\\": 206 | # Ignore the \" escape sequence. 207 | in_quote = not in_quote 208 | elif char == "," and brackets == 0 and not in_quote: 209 | # Found the end of the value. 210 | value = self.parse_value(right[:length]) 211 | # Add it to the results dict. 212 | if variable in variable_counts: 213 | if variable_counts[variable] == 1: 214 | # Convert entry to list. 215 | results[variable] = [results[variable], value] 216 | else: 217 | results[variable].append(value) 218 | variable_counts[variable] += 1 219 | else: 220 | results[variable] = value 221 | variable_counts[variable] = 1 222 | src = right[length + 1:] 223 | length = 0 224 | break 225 | length += 1 226 | prev_char = char 227 | if length >= len(right): 228 | break 229 | # Parse last entry. 230 | if variable and right: 231 | value = self.parse_value(right) 232 | if variable in variable_counts: 233 | if variable_counts[variable] == 1: 234 | results[variable] = [results[variable], value] 235 | else: 236 | results[variable].append(value) 237 | else: 238 | results[variable] = value 239 | return results 240 | -------------------------------------------------------------------------------- /pgdb/src/mi/gdbmi_recordhandler.py: -------------------------------------------------------------------------------- 1 | """A simple interface for invoking callbacks based on records.""" 2 | 3 | class GDBMIRecordHandler: 4 | """Invoke callbacks based on record identifications. 
5 | 
6 |     This supports callbacks based upon either the token or the record type and
7 |     subtypes.
8 | 
9 |     """
10 | 
11 |     def __init__(self):
12 |         """Initialize the record handler."""
13 |         self.token_handlers = {}
14 |         self.type_handlers = {}
15 |         self.handler_id = 0
16 | 
17 |     def add_token_handler(self, func, token, data = None):
18 |         """Add a token handler.
19 | 
20 |         func is the function to invoke.
21 |         token is the token to invoke this handler on.
22 |         data is passed to func in the keyword argument data.
23 | 
24 |         Returns a handler ID.
25 | 
26 |         """
27 |         hid = self.handler_id
28 |         self.handler_id += 1
29 |         if token in self.token_handlers:
30 |             self.token_handlers[token].append((hid, func, data))
31 |         else:
32 |             self.token_handlers[token] = [(hid, func, data)]
33 |         return hid
34 | 
35 |     def add_type_handler(self, func, types, data = None):
36 |         """Add a type handler.
37 | 
38 |         func is the function to invoke.
39 |         types is a set and will invoke the handler on a record if types is a
40 |         subset of the record's types.
41 |         data is passed to func in the keyword argument data.
42 | 
43 |         Returns a handler ID.
44 | 
45 |         """
46 |         frozen_types = frozenset(types)
47 |         hid = self.handler_id
48 |         self.handler_id += 1
49 |         if frozen_types in self.type_handlers:
50 |             self.type_handlers[frozen_types].append((hid, func, data))
51 |         else:
52 |             self.type_handlers[frozen_types] = [(hid, func, data)]
53 |         return hid
54 | 
55 |     def remove_handler(self, hid):
56 |         """Remove a handler.
57 | 
58 |         hid is the handler ID returned by add_{token,type}_handler.
59 | 
60 |         """
61 |         for handlers in (self.token_handlers, self.type_handlers):
62 |             for key in list(handlers):
63 |                 entries = [entry for entry in handlers[key]
64 |                            if entry[0] != hid]
65 |                 if len(entries) == len(handlers[key]):
66 |                     continue
67 |                 # Found the handler; drop it, and drop the key entirely
68 |                 # if no handlers remain for it.
69 |                 if entries:
70 |                     handlers[key] = entries
71 |                 else:
72 |                     del handlers[key]
73 |                 return
74 | 
75 |     def handle(self, record, **kwargs):
76 |         """Handle a record, passing any keyword arguments to the handlers."""
77 |         ret = []
78 |         if record.token in self.token_handlers:
79 |             for handler in self.token_handlers[record.token]:
80 |                 kwargs["data"] = handler[2]
81 |                 ret.append(handler[1](record, **kwargs))
82 |         types = record.record_subtypes.union([record.record_type])
83 |         for k in self.type_handlers:
84 |             if k.issubset(types):
85 |                 for handler in self.type_handlers[k]:
86 |                     kwargs["data"] = handler[2]
87 |                     ret.append(handler[1](record, **kwargs))
88 |         return ret
89 | 
--------------------------------------------------------------------------------
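A minimal sketch of how the handler tables above are meant to be driven. The parser and the RESULT constant come from the neighboring mi modules; the raw MI string here is a hypothetical stand-in for output read from GDB:

    from mi.gdbmi_parser import GDBMIParser
    from mi.gdbmi_records import RESULT
    from mi.gdbmi_recordhandler import GDBMIRecordHandler

    def on_result(record, data=None):
        # data receives whatever was registered alongside the handler.
        print "result record with token", record.token

    handler = GDBMIRecordHandler()
    hid = handler.add_type_handler(on_result, set([RESULT]), data="example")
    parser = GDBMIParser()
    for record in parser.parse_output("42^done\n(gdb)"):
        handler.handle(record)
    handler.remove_handler(hid)

--------------------------------------------------------------------------------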
/pgdb/src/mi/gdbmiarec.py:
--------------------------------------------------------------------------------
1 | """Handles aggregated records."""
2 | 
3 | import copy
4 | from collections import defaultdict
5 | from mi.gdbmi_parser import *
6 | from mi.gdbmi_records import *
7 | from interval import Interval
8 | 
9 | def _is_dict(v):
10 |     """Check whether an object is a dictionary."""
11 |     return isinstance(v, dict)
12 | 
13 | def _is_list(v):
14 |     """Check whether an object is a list."""
15 |     return isinstance(v, list)
16 | 
17 | def _is_tuple(v):
18 |     """Check whether an object is a tuple."""
19 |     return isinstance(v, tuple)
20 | 
21 | def _is_str(v):
22 |     """Check whether an object is a string."""
23 |     return isinstance(v, str)
24 | 
25 | def _is_int(v):
26 |     """Check whether an object is an integer."""
27 |     return isinstance(v, int)
28 | 
29 | def _is_primitive(v):
30 |     """Check whether an object is a primitive.
31 | 
32 |     An object is primitive if it is a string, integer, None, or a list of
33 |     primitives.
34 | 
35 |     """
36 |     return (_is_str(v) or _is_int(v) or (v is None) or
37 |             (_is_list(v) and all([_is_primitive(x) for x in v])))
38 | 
39 | def combine_records(records, ranks):
40 |     """Combine a list of records into the smallest set of aggregated records.
41 | 
42 |     records is a list of records.
43 |     ranks is a list of associated ranks, in the same order.
44 |     Returns a list of aggregated records.
45 | 
46 |     """
47 |     type_dict = defaultdict(list)
48 |     arecs = []
49 |     for record, rank in zip(records, ranks):
50 |         # This is immutable and should identify records of the same type.
51 |         t = tuple([record.record_type] + list(record.record_subtypes))
52 |         type_dict[t].append((rank, record))
53 |     for t in type_dict:
54 |         first_rec = type_dict[t].pop(0)
55 |         arec = GDBMIAggregatedRecord(first_rec[0], first_rec[1])
56 |         for rank, record in type_dict[t]:
57 |             other_arec = GDBMIAggregatedRecord(rank, record)
58 |             arec.merge(other_arec)
59 |         arecs.append(arec)
60 |     return arecs
61 | 
62 | def combine_aggregated_records(arecs):
63 |     """Combine a list of aggregated records into the smallest such set."""
64 |     type_dict = defaultdict(list)
65 |     new_arecs = []
66 |     for arec in arecs:
67 |         t = tuple([arec.record_type] + list(arec.record_subtypes))
68 |         type_dict[t].append(arec)
69 |     for t in type_dict:
70 |         first_arec = type_dict[t].pop(0)
71 |         for arec in type_dict[t]:
72 |             first_arec.merge(arec)
73 |         new_arecs.append(first_arec)
74 |     return new_arecs
75 | 
76 | class _Substitution:
77 |     """Substitution for aggregated records.
78 | 
79 |     This stores substitutions using the following system:
80 |     - A default value, which is the value taken by the majority of the records.
81 |     - The remaining values, stored in a dictionary indexed by rank.
82 | 
83 |     The record that stores this will have an Interval of all ranks. This is
84 |     passed in as needed.
85 | 
86 |     """
87 | 
88 |     def __init__(self, data):
89 |         """Initialize the substitution, with initial data."""
90 |         self.default = data
91 |         self.other = {}
92 | 
93 |     def add(self, data, rank, ranks):
94 |         """Add an entry to the substitution.
95 | 
96 |         Cases:
97 |         - data is the same as current default: do nothing
98 |         - data differs:
99 |           - check whether a different entry should become default
100 |           - if yes, replace default
101 |           - if no, add to other
102 | 
103 |         """
104 |         if self.default == data:
105 |             # Same as current default: do nothing.
106 |             return
107 |         default_count = len(ranks) - len(self.other)
108 |         _data = data
109 |         if _is_list(_data):
110 |             _data = tuple(_data)
111 |         counter_dict = {_data: 1}
112 |         for v in self.other.values():
113 |             if _is_list(v):
114 |                 v = tuple(v)
115 |             if v in counter_dict:
116 |                 counter_dict[v] += 1
117 |             else:
118 |                 counter_dict[v] = 1
119 |         # We know that data is the only thing that could cause a change.
120 |         max_count = counter_dict[_data]
121 |         if default_count >= max_count:
122 |             # No change to default.
123 |             self.other[rank] = data
124 |         else:
125 |             # Replace default with data.
126 |             new_other = {}
127 |             for r in ranks:
128 |                 if r not in self.other:
129 |                     new_other[r] = self.default
130 |                 else:
131 |                     if self.other[r] != data:
132 |                         new_other[r] = self.other[r]
133 |             self.default = data
134 |             self.other = new_other
135 | 
136 |     def merge(self, other, my_ranks, other_ranks):
137 |         """Merge other substitution into this one.
138 | 
139 |         To keep things simple, this presently just does a full rebuild.
140 | 
141 |         """
142 |         my_default_count = len(my_ranks) - len(self.other)
143 |         other_default_count = len(other_ranks) - len(other.other)
144 |         my_default = self.default
145 |         other_default = other.default
146 |         if _is_list(my_default):
147 |             my_default = tuple(my_default)
148 |         if _is_list(other_default):
149 |             other_default = tuple(other_default)
150 |         counter_dict = {my_default: my_default_count}
151 |         counter_dict[other_default] = (counter_dict.get(other_default, 0) + other_default_count)
152 |         for v in self.other.values():
153 |             if _is_list(v):
154 |                 # Convert for immutability.
155 |                 v = tuple(v)
156 |             if v in counter_dict:
157 |                 counter_dict[v] += 1
158 |             else:
159 |                 counter_dict[v] = 1
160 |         for v in other.other.values():
161 |             if _is_list(v):
162 |                 # Convert for immutability.
163 |                 v = tuple(v)
164 |             if v in counter_dict:
165 |                 counter_dict[v] += 1
166 |             else:
167 |                 counter_dict[v] = 1
168 |         max_value = max(counter_dict.keys(), key=lambda x: counter_dict[x])
169 |         new_other = {}
170 |         for r in my_ranks:
171 |             if r in self.other:
172 |                 if self.other[r] != max_value:
173 |                     new_other[r] = self.other[r]
174 |             elif my_default != max_value:
175 |                 new_other[r] = self.default
176 |         for r in other_ranks:
177 |             if r in other.other:
178 |                 if other.other[r] != max_value:
179 |                     new_other[r] = other.other[r]
180 |             elif other_default != max_value:
181 |                 new_other[r] = other.default
182 |         self.default = max_value
183 |         self.other = new_other
184 | 
185 |     def get_substitution(self, rank):
186 |         """Return the substitution for the rank."""
187 |         if rank in self.other:
188 |             return self.other[rank]
189 |         else:
190 |             return self.default
191 | 
192 |     def __str__(self):
193 |         return "_Substitution: default = {0}\nothers = {1}".format(self.default,
194 |                                                                    self.other)
195 | 
196 | class GDBMIAggregatedRecord:
197 |     """Aggregated GDBMIRecord making use of substitutions.
198 | 
199 |     The record has an Interval of all ranks involved in it.
200 | 
201 |     """
202 | 
203 |     def __init__(self, rank, record):
204 |         self.init_record(rank, record)
205 | 
206 |     def create_structure(self, data):
207 |         """Return a structure based upon data.
208 | 
209 |         If data is a primitive type, use it.
210 |         If data is a non-primitive list, return a list with the entries filled
211 |         in according to their individual types.
212 |         If the data is a dictionary, return a dictionary with the same keys and
213 |         values filled in according to their individual types.
214 |         If the data is an object (GDBMIFrame/Breakpoint/Thread), return an
215 |         instance.
216 | 
217 |         This sets up the initial substitution structure as well.
218 | 
219 |         """
220 |         if _is_primitive(data):
221 |             return _Substitution(data)
222 |         if _is_list(data):
223 |             struct = []
224 |             for d in data:
225 |                 if _is_primitive(d):
226 |                     struct.append(_Substitution(d))
227 |                 else:
228 |                     struct.append(self.create_structure(d))
229 |             return struct
230 |         if _is_dict(data):
231 |             struct = {}
232 |             for k, v in data.items():
233 |                 if _is_primitive(v):
234 |                     struct[k] = _Substitution(v)
235 |                 else:
236 |                     struct[k] = self.create_structure(v)
237 |             return struct
238 |         if isinstance(data, GDBMIFrame):
239 |             return _Substitution(copy.copy(data))
240 |         if isinstance(data, GDBMIBreakpoint):
241 |             return _Substitution(copy.copy(data))
242 |         if isinstance(data, GDBMIThread):
243 |             return _Substitution(copy.copy(data))
244 |         raise ValueError(data)
245 | 
246 |     def copy_structure(self, rank, field, data):
247 |         if isinstance(field, _Substitution):
248 |             field.add(data, rank, self.ranks)
249 |         elif _is_list(field):
250 |             for d1, d2 in zip(field, data):
251 |                 self.copy_structure(rank, d1, d2)
252 |         elif _is_dict(field):
253 |             for k in field:
254 |                 self.copy_structure(rank, field[k], data[k])
255 | 
256 |     def init_record(self, rank, record):
257 |         self.record_type = record.record_type
258 |         self.record_subtypes = record.record_subtypes
259 |         self.fields = record.fields
260 |         self.ranks = Interval(rank)
261 |         for field in self.fields:
262 |             other_attr = getattr(record, field)
263 |             setattr(self, field, self.create_structure(other_attr))
264 | 
265 |     def add_record(self, rank, record):
266 |         # The ranks are extended once, after the fields are folded in below.
267 |         if ((record.record_type != self.record_type) or
268 |             (record.record_subtypes != self.record_subtypes)):
269 |             raise ValueError(record)
270 |         for field in self.fields:
271 |             self.copy_structure(rank, getattr(self, field),
272 |                                 getattr(record, field))
273 |         self.ranks += Interval(rank)
274 | 
275 |     def merge_recursive(self, field, other_field, other_ranks):
276 |         if isinstance(field, _Substitution):
277 |             field.merge(other_field, self.ranks, other_ranks)
278 |         elif _is_list(field):
279 |             if len(field) != len(other_field):
280 |                 print "Warning: List lengths differ; not supported!"
281 |             for d1, d2 in zip(field, other_field):
282 |                 self.merge_recursive(d1, d2, other_ranks)
283 |         elif _is_dict(field):
284 |             if len(field) != len(other_field):
285 |                 print "Warning: Dict lengths differ; not supported!"
286 |             for k in field:
287 |                 self.merge_recursive(field[k], other_field[k], other_ranks)
288 | 
289 |     def merge(self, other):
290 |         if ((self.record_type != other.record_type) or
291 |             (self.record_subtypes != other.record_subtypes)):
292 |             raise ValueError(other)
293 |         for field in self.fields:
294 |             self.merge_recursive(getattr(self, field),
295 |                                  getattr(other, field),
296 |                                  other.ranks)
297 |         self.ranks += other.ranks
298 | 
299 |     def reconstruct_recursive(self, rank, data):
300 |         if isinstance(data, _Substitution):
301 |             return data.get_substitution(rank)
302 |         elif _is_list(data):
303 |             new_data = []
304 |             for d in data:
305 |                 new_data.append(self.reconstruct_recursive(rank, d))
306 |             return new_data
307 |         elif _is_dict(data):
308 |             new_data = {}
309 |             for k in data:
310 |                 new_data[k] = self.reconstruct_recursive(rank, data[k])
311 |             return new_data
312 | 
313 |     def get_record(self, rank):
314 |         """Return the reconstructed record for the given rank."""
315 |         if self.record_type in [ASYNC_EXEC, ASYNC_STATUS, ASYNC_NOTIFY]:
316 |             record = GDBMIAsyncRecord()
317 |         elif self.record_type in [STREAM_CONSOLE, STREAM_TARGET, STREAM_LOG]:
318 |             record = GDBMIStreamRecord()
319 |         elif self.record_type == RESULT:
320 |             record = GDBMIResultRecord()
321 |         else:
322 |             record = GDBMIUnknownRecord()
323 |         record.record_type = self.record_type
324 |         record.record_subtypes = self.record_subtypes
325 |         record.fields = self.fields
326 |         for field in self.fields:
327 |             setattr(record, field,
328 |                     self.reconstruct_recursive(rank, getattr(self, field)))
329 |         return record
330 | 
331 |     def get_record_classes(self):
332 |         """Get the classes of records in this aggregated record.
333 | 
334 |         A record class is a set of records that use the same substitutions for
335 |         every field.
336 | 
337 |         This returns a dictionary indexed by records, containing ranks.
338 | 
339 |         """
340 |         class_dict = {}
341 |         for rank in self.ranks:
342 |             record = self.get_record(rank)
343 |             if record in class_dict:
344 |                 class_dict[record] += Interval(rank)
345 |             else:
346 |                 class_dict[record] = Interval(rank)
347 |         return class_dict
348 | 
349 |     def get_ranks(self):
350 |         return self.ranks
351 | 
352 |     def __str__(self):
353 |         return "AggregatedRecord({0}, {1})".format(self.record_type,
354 |                                                    self.record_subtypes)
355 | 
356 |     def __repr__(self):
357 |         return ("<AggregatedRecord({0}, {1}): ranks {2}>").format(self.record_type,
358 |                                                                   self.record_subtypes,
359 |                                                                   self.ranks)
360 | 
--------------------------------------------------------------------------------
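A sketch of the intended aggregation round trip; records and ranks are hypothetical stand-ins for the per-rank parsed output a front-end would hold:

    from mi.gdbmiarec import combine_records

    arecs = combine_records(records, ranks)
    for arec in arecs:
        # Each class maps one reconstructed record to the Interval of
        # ranks that share exactly the same substitutions.
        for record, rank_interval in arec.get_record_classes().items():
            print "[{0}] {1}".format(rank_interval, record)

--------------------------------------------------------------------------------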
/pgdb/src/mi/gdbmicmd.py:
--------------------------------------------------------------------------------
1 | """A generic implementation of Cmd for use with GDB."""
2 | 
3 | from __future__ import print_function
4 | import cmd
5 | from commands import Commands
6 | 
7 | class GDBMICmd(cmd.Cmd):
8 |     """Simple extension of Cmd for controlling GDB."""
9 |     prompt = ""
10 |     intro = ""
11 | 
12 |     def __init__(self):
13 |         """Initialize Cmd and load the commands."""
14 |         cmd.Cmd.__init__(self)
15 |         self.use_rawinput = 1
16 |         self.completekey = "tab"
17 |         self.commands = Commands()
18 | 
19 |     def do_EOF(self, line):
20 |         """Terminate."""
21 |         return True
22 | 
23 |     def dispatch_gdbmi_command_string(self, string):
24 |         """Dispatch a GDBMI command from a string."""
25 |         command = self.resolve_gdbmi_command(string)
26 |         if command:
27 |             self.dispatch_gdbmi_command(command)
28 | 
29 |     def dispatch_gdbmi_command(self, command):
30 |         """Execute a GDBMI command. Should be overridden by children."""
31 |         print("Would invoke {0} with arguments {1} and options {2}".format(
32 |             command.command,
33 |             command.args,
34 |             command.opts))
35 | 
36 |     def check_gdbmi_command(self, string):
37 |         """Check whether a string is a valid command."""
38 |         if self.commands.complete(string):
39 |             return True
40 |         return False
41 | 
42 |     def run(self):
43 |         """Main run loop. Should be overridden by children if needed."""
44 |         self.cmdloop()
45 | 
46 |     def resolve_gdbmi_command(self, line, err=True):
47 |         """Parse a line into a GDBMI command."""
48 |         command = self.commands.generate_command(line)
49 |         if not command and err:
50 |             print("Bad command: " + line)
51 |         return command
52 | 
53 |     def default(self, line):
54 |         """Catch and handle all GDBMI commands."""
55 |         command = self.resolve_gdbmi_command(line)
56 |         if command:
57 |             self.dispatch_gdbmi_command(command)
58 | 
--------------------------------------------------------------------------------
/pgdb/src/mi/gdbmipprinter.py:
--------------------------------------------------------------------------------
1 | """Pretty-printer for nicely-formatted versions of parsed MI output."""
2 | 
3 | from conf import gdbconf
4 | import sys
5 | 
6 | class GDBMIPrettyPrinter:
7 |     """This handles all pretty-printing.
8 | 
9 |     If the GDB configuration specifies a dump_file, whenever pretty_print is
10 |     called, the output from default_pretty_print is written to that file.
11 | 
12 |     To be pretty-printed, an object should implement the pretty_print function.
13 |     This function should return either a list of strings, which will be printed
14 |     using the convention that each string in the list is one line; or it should
15 |     return None, in which case the object will be converted to a string directly
16 |     and the result printed.
17 | 
18 |     If an object does not implement pretty_print, it is converted to a string.
19 | 
20 |     """
21 | 
22 |     def __init__(self):
23 |         """Initialize the pretty printer."""
24 |         if gdbconf.print_dump_file:
25 |             self.dump_file = open(gdbconf.print_dump_file, "wt")
26 |         else:
27 |             self.dump_file = None
28 | 
29 |     @staticmethod
30 |     def indent(level, string):
31 |         """Prepend level indents to string."""
32 |         return (" " * level) + string
33 | 
34 |     @staticmethod
35 |     def default_pretty_print(record):
36 |         """Do a simple pretty-print displaying the raw data within a record."""
37 |         return str(record)
38 | 
39 |     def pretty_print(self, record, tag=None, output=None):
40 |         """Pretty-print a record.
41 | 
42 |         record is the record to pretty print.
43 |         tag, if present, is prepended to each line of output.
44 |         output is the stream to output to; defaults to stdout.
45 | 46 | """ 47 | raw = "" 48 | pretty = "" 49 | if self.dump_file: 50 | self.dump_file.write(self.default_pretty_print(record)) 51 | self.dump_file.flush() 52 | if gdbconf.pretty_print == "no" or gdbconf.pretty_print == "both": 53 | raw = "[{0}] {1}".format(tag, self.default_pretty_print(record)) 54 | if raw[-1] != "\n": 55 | raw += "\n" 56 | if gdbconf.pretty_print == "yes" or gdbconf.pretty_print == "both": 57 | try: 58 | line = record.pretty_print() 59 | if line is None: 60 | pretty = "[{0}] {1}".format(tag, 61 | self.default_pretty_print( 62 | record)) 63 | if pretty[-1] != "\n": 64 | pretty += "\n" 65 | else: 66 | pretty = "\n".join(["[{0}] {1}".format(tag, x) 67 | for x in line]) + "\n" 68 | except AttributeError: 69 | pretty = self.default_pretty_print(record) 70 | string = raw + pretty 71 | output = output or sys.stdout 72 | output.write(string) 73 | -------------------------------------------------------------------------------- /pgdb/src/mi/varobj.py: -------------------------------------------------------------------------------- 1 | """Management classes for GDB variable objects. 2 | 3 | These maintain a hierarchy of the variable objects and associated information, but must be maintained 4 | externally. 5 | 6 | """ 7 | 8 | import re 9 | 10 | DISPLAY_HINT_STRING = "string" 11 | DISPLAY_HINT_ARRAY = "array" 12 | DISPLAY_HINT_MAP = "map" 13 | 14 | class VariableObject: 15 | """A variable object. 16 | 17 | This stores the associated data for the variable object, plus any children. 18 | 19 | """ 20 | 21 | def __init__(self, name, vartype, value = None, thread_id = None, 22 | display_hint = None, is_dynamic = False, has_more = False, num_child = 0): 23 | """Initialize the variable object.""" 24 | self.name = name 25 | self.vartype = vartype 26 | self.value = value 27 | self.children = {} 28 | self.thread_id = thread_id 29 | self.display_hint = display_hint 30 | self.is_dynamic = is_dynamic 31 | self.has_more = has_more 32 | self.num_child = num_child 33 | self.listed = False 34 | self.more_children = False 35 | 36 | def get_parent(self): 37 | """Return the name of the parent of this variable object.""" 38 | name_parts = self.name.split(".") 39 | if len(name_parts) > 1: 40 | return ".".join(name_parts[0:-1]) 41 | # No parent. 42 | return None 43 | 44 | def get_name(self): 45 | """Return the shortened name of this variable object.""" 46 | return self.name.split(".")[-1] 47 | 48 | def get_sorted_children(self): 49 | """Return the children of this variable object, sorted intelligently. 50 | 51 | This looks to determine whether the childrens' names are keys, and sorts based on the integer 52 | value of the keys; otherwise it uses a standard comparison based on names. 53 | 54 | """ 55 | if not self.children: 56 | return [] 57 | name_sample = self.children.values()[0].get_name() 58 | if re.match(r"\[[0-9]+\]", name_sample): 59 | # We can sort these, assume they're all the same. 60 | children = sorted(self.children.items(), key = lambda x: int(x[1].get_name()[1:-1])) 61 | return children 62 | else: 63 | # Cannot easily sort, so just use the standard comparison. 
64 | return sorted(self.children.items(), key = lambda x: x[1].get_name()) 65 | 66 | def __str__(self): 67 | """Return the name of the variable object.""" 68 | return self.get_name() 69 | 70 | def __repr__(self): 71 | """Return the name of the variable object.""" 72 | return self.get_name() 73 | 74 | class VariableObjectManager: 75 | """A top-level manager for variable objects.""" 76 | pseudochildren = ["public", "protected", "private"] 77 | 78 | def __init__(self): 79 | """Initialization.""" 80 | self.varobjs = {} 81 | 82 | def get_child(self, varobj, name): 83 | """A helper function to get the child of a variable object based on a name. 84 | 85 | This examines the children of the given variable object and searches for one with the given 86 | name (this is the short name). This also examines the children of any pseudochildren the 87 | varobj has. 88 | 89 | """ 90 | if name in varobj.children: 91 | return varobj.children[name] 92 | else: 93 | # If it's not present, check our pseduo-children, if any. 94 | if "public" in varobj.children and name in varobj.children["public"].children: 95 | return varobj.children["public"].children[name] 96 | elif "protected" in varobj.children and name in varobj.children["protected"].children: 97 | return varobj.children["protected"].children[name] 98 | elif "private" in varobj.children and name in varobj.children["private"].children: 99 | return varobj.children["private"].children[name] 100 | else: 101 | return None # Not present at all. 102 | 103 | def get_var_obj(self, name): 104 | """Get a variable object based on a name.""" 105 | name_parts = name.split(".") 106 | if len(name_parts) == 1: 107 | if name in self.varobjs: 108 | return self.varobjs[name] 109 | return None 110 | if name_parts[0] not in self.varobjs: 111 | return None 112 | varobj = self.varobjs[name_parts[0]] 113 | for part in name_parts[1:]: 114 | varobj = self.get_child(varobj, part) 115 | if varobj is None: 116 | return None 117 | return varobj 118 | 119 | def add_var_obj(self, newvarobj): 120 | """Add a variable object to the manager.""" 121 | name_parts = newvarobj.name.split(".") 122 | if len(name_parts) == 1: 123 | self.varobjs[newvarobj.name] = newvarobj 124 | return True 125 | parent = self.get_var_obj(".".join(name_parts[:-1])) 126 | if parent: 127 | parent.children[name_parts[-1]] = newvarobj 128 | return True 129 | return False 130 | 131 | def del_var_obj(self, varobj): 132 | """Remove a variable object from the manager.""" 133 | name_parts = varobj.name.split(".") 134 | if len(name_parts) == 1: 135 | del self.varobjs[varobj.name] 136 | return True 137 | parent = self.get_var_obj(".".join(name_parts[:-1])) 138 | if parent: 139 | del parent.children[name_parts[-1]] 140 | return True 141 | return False 142 | 143 | def get_lowest_ancestor(self, name): 144 | """Get the lowest ancestor of a name that the manager has a variable object for.""" 145 | name_parts = name.split(".") 146 | if name_parts[0] not in self.varobjs: 147 | return None 148 | varobj = self.varobjs[name_parts[0]] 149 | for part in name_parts[1:]: 150 | child = self.get_child(varobj, part) 151 | if child is None: 152 | return varobj 153 | varobj = child 154 | return varobj 155 | 156 | def get_full_name(self, name): 157 | """Get the full name from a provided name, including the pseudochildren in the name.""" 158 | full_name = "" 159 | name_parts = name.split(".") 160 | if name_parts[0] not in self.varobjs: 161 | return None 162 | full_name += name_parts[0] 163 | varobj = self.varobjs[name_parts[0]] 164 | for part in 
name_parts[1:]: 165 | if part in varobj.children: 166 | full_name += "." + part 167 | varobj = varobj.children[part] 168 | else: 169 | # Check the pseduo-children. 170 | if "public" in varobj.children and part in varobj.children["public"].children: 171 | full_name += ".public." + part 172 | varobj = varobj.children["public"].children[part] 173 | elif "protected" in varobj.children and part in varobj.children["protected"].children: 174 | full_name += ".protected." + part 175 | varobj = varobj.children["protected"].children[part] 176 | elif "private" in varobj.children and part in varobj.children["private"].children: 177 | full_name += ".private." + part 178 | varobj = varobj.children["private"].children[part] 179 | else: 180 | return None # Not present at all. 181 | return full_name 182 | 183 | @staticmethod 184 | def get_name_depth(name): 185 | """Get the depth of a name.""" 186 | return len(name.split(".")) 187 | 188 | @staticmethod 189 | def get_base_name(name): 190 | """Get the root of a variable object's name.""" 191 | return name.split(".")[0] 192 | 193 | @staticmethod 194 | def same_branch(name1, name2): 195 | """Return whether two names are on the same branch of the variable object tree.""" 196 | def _name_filter(part): 197 | return part not in VariableObjectManager.pseudochildren 198 | name1_split = filter(_name_filter, name1.split(".")) 199 | name2_split = filter(_name_filter, name2.split(".")) 200 | branch = zip(name1_split, name2_split) 201 | return all(map(lambda x: x[0] == x[1], branch)) 202 | 203 | @staticmethod 204 | def is_pseudochild(varobj): 205 | """Return whether a variable object is a pseudochild.""" 206 | return varobj.get_name() in VariableObjectManager.pseudochildren 207 | 208 | @staticmethod 209 | def create_var_obj(var): 210 | """Create a new variable object based on entries from a record.""" 211 | if "name" not in var: 212 | return False 213 | if "type" in var: 214 | vartype = var["type"] 215 | if "value" in var: 216 | value = var["value"] 217 | else: 218 | value = None 219 | else: 220 | vartype = None 221 | value = None 222 | if "displayhint" in var: 223 | displayhint = var["displayhint"] 224 | else: 225 | displayhint = None 226 | if "dynamic" in var: 227 | dynamic = var["dynamic"] 228 | else: 229 | dynamic = False 230 | if "thread-id" in var: 231 | thread_id = var["thread-id"] 232 | else: 233 | thread_id = None 234 | return VariableObject(var["name"], vartype, value = value, 235 | thread_id = thread_id, 236 | display_hint = displayhint, 237 | is_dynamic = dynamic, num_child = var["numchild"]) 238 | 239 | def print_hierarchy(self, children = None, indent = 0): 240 | """Debug function to print the hierarchy of variable objects.""" 241 | if children is None: 242 | children = self.varobjs 243 | print (" " * indent) + str(children) 244 | for child in children.values(): 245 | self.print_hierarchy(child.children, indent = indent + 1) 246 | -------------------------------------------------------------------------------- /pgdb/src/misc/clean_shmem.py: -------------------------------------------------------------------------------- 1 | import sys, socket 2 | sys.path.append("/home/dryden2/lib64/python2.6/site-packages") 3 | import posix_ipc 4 | 5 | hostname = socket.gethostname() 6 | try: 7 | gdb_semaphore = posix_ipc.Semaphore("/PGDBSemaphore" + hostname) 8 | gdb_semaphore.unlink() 9 | gdb_semaphore.close() 10 | print "Closed semaphore." 
11 | except posix_ipc.ExistentialError: 12 | pass 13 | try: 14 | gdb_shmem = posix_ipc.SharedMemory("/PGDBMem" + hostname) 15 | gdb_shmem.unlink() 16 | print "Closed shared memory." 17 | except posix_ipc.ExistentialError: 18 | pass 19 | -------------------------------------------------------------------------------- /pgdb/src/misc/shmem_test.py: -------------------------------------------------------------------------------- 1 | import sys, socket, mmap, struct, re, os.path 2 | sys.path.append("/home/dryden2/lib64/python2.6/site-packages") 3 | import posix_ipc 4 | 5 | hostname = socket.gethostname() 6 | gdb_semaphore = posix_ipc.Semaphore("/PGDBSemaphore" + hostname, 7 | posix_ipc.O_CREX) 8 | gdb_shmem = posix_ipc.SharedMemory("/PGDBMem" + hostname, 9 | posix_ipc.O_CREX, 10 | size = 33554432) 11 | gdb_mem = mmap.mmap(gdb_shmem.fd, gdb_shmem.size) 12 | gdb_shmem.close_fd() 13 | gdb_semaphore.release() 14 | 15 | load_file_re = re.compile(r".*\.so.*") 16 | load_file_bins = set(["mpitest", "test"]) 17 | manual = False 18 | 19 | def clean_up(): 20 | gdb_semaphore.unlink() 21 | gdb_semaphore.close() 22 | gdb_mem.close() 23 | gdb_shmem.unlink() 24 | 25 | def load_file(filename): 26 | f = open(filename, "r") 27 | d = f.read() 28 | f.close() 29 | return d 30 | 31 | def check_gdb_memory_flag(): 32 | flag = struct.unpack_from("=B", gdb_mem, 1)[0] 33 | return flag == 1 34 | 35 | def read_memory(): 36 | struct.pack_into("=B", gdb_mem, 1, 0) 37 | size = struct.unpack_from("=I", gdb_mem, 2)[0] 38 | if size <= 0: 39 | print "Invalid size {0}".format(size) 40 | return False 41 | return struct.unpack_from("={0}s".format(size), gdb_mem, 6)[0] 42 | 43 | def write_memory(data): 44 | struct.pack_into("=B", gdb_mem, 0, 1) 45 | size = len(data) 46 | return struct.pack_into("=I{0}s".format(size + 1), gdb_mem, 2, size, data) 47 | 48 | def load_file_check(filename): 49 | filename = os.path.abspath(filename) 50 | base = os.path.basename(filename) 51 | if base in load_file_bins: 52 | return True 53 | if base[-4:] == ".gdb" or base[-3:] == ".py": 54 | return False 55 | if load_file_re.match(base) is not None: 56 | return True 57 | return False 58 | 59 | def respond(prompt = False): 60 | gdb_semaphore.acquire() 61 | if check_gdb_memory_flag(): 62 | filename = read_memory() 63 | check = load_file_check(filename) 64 | print "Check for {0}: {1}".format(filename, check) 65 | load = check 66 | if manual: 67 | load = True 68 | if prompt: 69 | y = raw_input("Load file {0}? ".format(filename)) 70 | if y.lower() == "n": 71 | load = False 72 | if load: 73 | try: 74 | data = load_file(filename) 75 | write_memory(data) 76 | print "Loaded {0} ({1}b)".format(filename, len(data)) 77 | except IOError: 78 | write_memory("error") 79 | print "Could not find file, sent error" 80 | else: 81 | write_memory("error") 82 | print "Sent error" 83 | gdb_semaphore.release() 84 | -------------------------------------------------------------------------------- /pgdb/src/sbd.py: -------------------------------------------------------------------------------- 1 | """PGDB scalable binary distribution (SBD) system. 2 | 3 | This handles deploying files via MRNet instead of the parallel filesystem. 4 | 5 | """ 6 | 7 | from __future__ import print_function 8 | import os.path, mmap, struct, re, socket 9 | import posix_ipc 10 | from gdb_shared import GDBMessage, FILE_DATA, LOAD_FILE 11 | from conf import gdbconf 12 | 13 | class SBDFE: 14 | """Front-end SBD system.""" 15 | 16 | def __init__(self, comm): 17 | """Initialization. 
18 | 19 | comm is the FE comm object. 20 | 21 | """ 22 | self.comm = comm 23 | self.loaded_files = set() 24 | 25 | def load_file(self, filename): 26 | """Load a file and broadcast it. 27 | 28 | This will attempt to load a file, and broadcasts either the file or an 29 | error notice. If the file has already been loaded, this does nothing. 30 | 31 | """ 32 | if filename in self.loaded_files: 33 | # File has been broadcast to everyone. 34 | # TODO: Time this out somehow so that further requests can be made. 35 | return 36 | if not os.path.isfile(filename): 37 | print("Invalid SBD load file request for '{0}'".format(filename)) 38 | self.comm.send(GDBMessage(FILE_DATA, filename=filename, 39 | data=None, error=True), 40 | self.comm.broadcast) 41 | return 42 | try: 43 | sbd_file = open(filename, "rb") 44 | except IOError as e: 45 | print("Cannot open {0} for SBD load file: {1}.".format(filename, 46 | e.strerror)) 47 | self.comm.send(GDBMessage(FILE_DATA, filename=filename, 48 | data=None, error=True), 49 | self.comm.broadcast) 50 | return 51 | try: 52 | data = sbd_file.read() 53 | except IOError as e: 54 | print("Cannot read {0} for SBD load file: {1}.".format(filename, 55 | e.strerror)) 56 | self.comm.send(GDBMessage(FILE_DATA, filename=filename, 57 | data=None, error=True), 58 | self.comm.broadcast) 59 | return 60 | sbd_file.close() 61 | self.loaded_files.add(filename) 62 | self.comm.send(GDBMessage(FILE_DATA, filename=filename, 63 | data=data, error=False), 64 | self.comm.broadcast) 65 | 66 | class SBDBE: 67 | """Back-end SBD system.""" 68 | 69 | def __init__(self, comm): 70 | """Initialize the SBD system. 71 | 72 | comm is the BE comm object. 73 | 74 | """ 75 | self.comm = comm 76 | # Regex for checking whether to load a file. 77 | self.load_file_re = re.compile(r".*\.so.*") 78 | # Stores data for LOAD_FILE/FILE_DATA. Indexed by filename. 79 | # Entries are None when there is no data, False when error was received, 80 | # and data otherwise. 81 | self.load_files = {} 82 | # Set of executable names for all processes. 83 | self.load_file_bins = set() 84 | self.current_load_file = None 85 | # We may have to clean stuff up from the last run. 86 | self.clean_leftovers() 87 | # Shared memory and semaphore for communicating with GDB to load files. 88 | hostname = socket.gethostname() 89 | self.gdb_semaphore = posix_ipc.Semaphore("/PGDBSemaphore" + hostname, 90 | posix_ipc.O_CREX) 91 | try: 92 | self.gdb_shmem = posix_ipc.SharedMemory("/PGDBMem" + hostname, 93 | posix_ipc.O_CREX, 94 | size=gdbconf.sbd_shmem_size) 95 | except posix_ipc.ExistentialError as e: 96 | self.gdb_semaphore.unlink() 97 | self.gdb_semaphore.close() 98 | raise e 99 | try: 100 | self.gdb_mem = mmap.mmap(self.gdb_shmem.fd, self.gdb_shmem.size) 101 | except mmap.error as e: 102 | self.gdb_semaphore.unlink() 103 | self.gdb_semaphore.close() 104 | self.gdb_shmem.close_fd() 105 | self.gdb_shmem.unlink() 106 | raise e 107 | # Created acquired, so release. 108 | self.gdb_semaphore.release() 109 | 110 | @staticmethod 111 | def clean_leftovers(): 112 | """Clean up leftover semaphores and shared memory. 113 | 114 | This is used because sometimes it isn't successfully cleaned. 
115 | 116 | """ 117 | hostname = socket.gethostname() 118 | try: 119 | sem = posix_ipc.Semaphore("/PGDBSemaphore" + hostname) 120 | sem.unlink() 121 | sem.close() 122 | except posix_ipc.ExistentialError: 123 | pass 124 | try: 125 | shmem = posix_ipc.SharedMemory("/PGDBMem" + hostname) 126 | shmem.unlink() 127 | except posix_ipc.ExistentialError: 128 | pass 129 | 130 | def set_executable_names(self, names): 131 | """Set the names of all the binaries of the processes under control. 132 | 133 | names is a list of all the executable names. 134 | This should be called only once, as it overwrites the prior set. 135 | 136 | """ 137 | self.load_file_bins = set(names) 138 | 139 | def load_file(self, filename): 140 | """Send a request for a file to be loaded.""" 141 | filename = os.path.abspath(filename) 142 | if filename in self.load_files: 143 | self.file_data_respond(filename) 144 | self.gdb_semaphore.release() 145 | return 146 | self.load_files[filename] = None 147 | self.current_load_file = filename 148 | self.comm.send(GDBMessage(LOAD_FILE, filename=filename, 149 | rank=self.comm.get_mpiranks()), 150 | self.comm.frontend) 151 | 152 | def load_file_check(self, filename): 153 | """Check whether we should load the filename.""" 154 | filename = os.path.abspath(filename) 155 | base = os.path.basename(filename) 156 | if base in self.load_file_bins: 157 | return True 158 | if base[-4:] == ".gdb" or base[-3:] == ".py": 159 | return False 160 | if filename[0:6] == "/lib64": 161 | # This often causes front-end/back-end mismatches. 162 | # TODO: Generalize this to a config option. 163 | return False 164 | if self.load_file_re.match(base) is not None: 165 | return True 166 | return False 167 | 168 | def check_gdb_memory_flag(self): 169 | """Check whether the GDB process has indicated it wrote something.""" 170 | flag = struct.unpack_from("=B", self.gdb_mem, 1)[0] 171 | return flag == 1 172 | 173 | def read_memory(self): 174 | """Read memory from the GDB process.""" 175 | # Clear GDB-DW flag. 176 | struct.pack_into("=B", self.gdb_mem, 1, 0) 177 | size = struct.unpack_from("=I", self.gdb_mem, 2)[0] 178 | if size <= 0: 179 | print("Invalid read-memory size {0}!".format(size)) 180 | return False 181 | return struct.unpack_from("={0}s".format(size), self.gdb_mem, 6)[0] 182 | 183 | def write_memory(self, data): 184 | """Write memory to the GDB process.""" 185 | # Set PGDB-DW flag. 186 | struct.pack_into("=B", self.gdb_mem, 0, 1) 187 | size = len(data) 188 | # size + 1 to account for packing adding a null byte. 189 | return struct.pack_into("=I{0}s".format(size + 1), self.gdb_mem, 2, 190 | size, data) 191 | 192 | def file_data_respond(self, filename): 193 | """Write the loaded file to shared memory. 194 | 195 | Assumes the file data is in self.load_files[filename] and the semaphore 196 | has been acquired. This does not release it. 197 | 198 | """ 199 | if filename not in self.load_files or self.load_files[filename] is None: 200 | return False 201 | self.write_memory(self.load_files[filename]) 202 | 203 | def file_data_handler(self, msg): 204 | """Handle a response with file data.""" 205 | filename = msg.filename 206 | if msg.error: 207 | self.load_files[filename] = "error" 208 | else: 209 | self.load_files[filename] = msg.data 210 | if self.current_load_file != filename: 211 | # Got response, but not for the currently-requested file. 
212 | return 213 | self.file_data_respond(filename) 214 | self.current_load_file = None 215 | self.gdb_semaphore.release() 216 | 217 | def sbd_check(self): 218 | """Check for and process a load file request from GDB.""" 219 | try: 220 | self.gdb_semaphore.acquire(0) 221 | if self.check_gdb_memory_flag(): 222 | # Read filename. 223 | filename = self.read_memory() 224 | if filename and self.load_file_check(filename): 225 | self.load_file(filename) 226 | # The file_data_handler releases the semaphore after the 227 | # file data is received. 228 | else: 229 | self.write_memory("error") 230 | self.gdb_semaphore.release() 231 | else: 232 | self.gdb_semaphore.release() 233 | except posix_ipc.BusyError: 234 | pass 235 | 236 | def cleanup(self): 237 | """Clean up the SBD.""" 238 | self.gdb_semaphore.unlink() 239 | self.gdb_semaphore.close() 240 | self.gdb_mem.close() 241 | self.gdb_shmem.unlink() 242 | self.gdb_shmem.close_fd() 243 | -------------------------------------------------------------------------------- /pgdb/src/varprint.py: -------------------------------------------------------------------------------- 1 | """Class for managing variable printing. 2 | 3 | This is intended for the back-end GDB processes, to manage variable printing 4 | and the like. 5 | 6 | """ 7 | 8 | from conf import gdbconf 9 | from mi.commands import Command 10 | from mi.varobj import VariableObjectManager 11 | from gdb_shared import * 12 | 13 | class VariablePrinter: 14 | """Manage variable printing on the back-end.""" 15 | 16 | def __init__(self, be): 17 | """Initialization. 18 | 19 | be is the GDBBE associated with this. 20 | 21 | """ 22 | self.be = be 23 | self.comm = be.comm 24 | self.varobjs = be.varobjs 25 | self.run_gdb_command = be.run_gdb_command 26 | self.varprint_id = 0 27 | self.varprint_stacks = {} 28 | 29 | def varprint_handler(self, msg): 30 | """Handle the varprint message and begin the varprint sequence. 31 | 32 | The message has the fields 33 | ranks - the ranks to query. 34 | name - the name of the variable. 35 | 36 | To varprint, we first run varprint_update, to update our cached varobjs. 37 | Then we run another handler that checks if we have the variable object already. 38 | If not, we do a depth-first search and eventually return the result. 39 | 40 | """ 41 | for rank in msg.ranks.intersect(self.comm.get_mpiranks()): 42 | self.varprint_update(msg.name, rank) 43 | 44 | def varprint_handler2(self, name, rank): 45 | """Follow-up varprint handler after udating, starts the DFS.""" 46 | # Check if we already have the variable object. 47 | varobj = self.varobjs[rank].get_var_obj(name) 48 | if varobj: 49 | if not varobj.listed or varobj.more_children: 50 | # If we explicitly list this variable, print all of its children. 51 | self.varprint_start_no_create(rank, varobj, name, 52 | max_children = sys.maxsize, reset_maxes = True) 53 | else: 54 | self.comm.send(GDBMessage(VARPRINT_RES_MSG, varobj = varobj, rank = rank, err = False), self.comm.frontend) 55 | else: 56 | # We do not. Start from the closest ancestor we have. 57 | ancestor = self.varobjs[rank].get_lowest_ancestor(name) 58 | if ancestor: 59 | # Start by listing the children of the ancestor. 60 | self.varprint_start_no_create(rank, ancestor, name) 61 | else: 62 | self.varprint_start(rank, name) 63 | 64 | def varprint_update(self, name, rank): 65 | """Check for updates on any of our variable objects.""" 66 | def _update_handler(record): 67 | if "changelist" not in record.results: 68 | print "Got a bad update record." 
69 |                 return True
70 |             for change in record.results["changelist"]:
71 |                 varobj = self.varobjs[rank].get_var_obj(change["name"])
72 |                 if varobj:
73 |                     # Potentially, a variable object could be manually created that we're not tracking.
74 |                     if "in_scope" in change:
75 |                         if change["in_scope"] in ["false", "invalid"]:
76 |                             self.varobjs[rank].del_var_obj(varobj)
77 |                             del varobj # This probably isn't necessary.
78 |                             return False
79 |                     if "type_changed" in change and change["type_changed"] == "true":
80 |                         self.varobjs[rank].del_var_obj(varobj)
81 |                         del varobj
82 |                         return False
83 |                     if "value" in change:
84 |                         varobj.value = change["value"]
85 |                     if "dynamic" in change:
86 |                         varobj.is_dynamic = change["dynamic"]
87 |                     if "displayhint" in change:
88 |                         varobj.display_hint = change["displayhint"]
89 |                     if "new_num_children" in change:
90 |                         new_num = int(change["new_num_children"])
91 |                         if new_num < len(varobj.children):
92 |                             # There has been a removal, so we no longer have child information.
93 |                             varobj.children = {}
94 |                             varobj.listed = False
95 |                             varobj.has_more = False
96 |                         else:
97 |                             if "new_children" in change:
98 |                                 for child in change["new_children"]:
99 |                                     child_varobj = VariableObjectManager.create_var_obj(child)
100 |                                     if not child_varobj:
101 |                                         print "Could not create child varobj!"
102 |                                         return True
103 |                                     if not self.varobjs[rank].add_var_obj(child_varobj):
104 |                                         print "Could not add child varobj!"
105 |                                         return True
106 |             self.varprint_handler2(name, rank)
107 |         tokens = self.run_gdb_command(Command("var-update", args = ("1", "*")), rank)
108 |         self.be.add_token_handler(tokens[rank], _update_handler)
109 | 
110 |     def varprint_start(self, rank, name, max_depth = gdbconf.varprint_max_depth,
111 |                        max_children = gdbconf.varprint_max_children, reset_maxes = False):
112 |         """Start a varprint command sequence by creating the varobj in GDB."""
113 |         v_id = self.varprint_id
114 |         self.varprint_id += 1
115 |         base_name = VariableObjectManager.get_base_name(name)
116 |         branch_depth = max_depth + VariableObjectManager.get_name_depth(name)
117 |         def _list_handler(record):
118 |             return self.varprint_dfs(record, rank, v_id, name, max_depth = max_depth,
119 |                                      max_children = max_children, reset_maxes = False,
120 |                                      branch_depth = branch_depth, branch_name = name)
121 |         def _create_handler(record):
122 |             varobj = VariableObjectManager.create_var_obj(record.results)
123 |             if not varobj:
124 |                 # Bad variable name.
125 |                 return True
126 |             if not self.varobjs[rank].add_var_obj(varobj):
127 |                 print "Could not add varobj."
128 |             if int(varobj.num_child) > 0 or varobj.is_dynamic:
129 |                 # Set up our stack.
130 | self.varprint_stacks[v_id] = [(varobj, 0)] 131 | tokens = self.run_gdb_command(Command("var-list-children", args = ("1", record.results["name"])), 132 | rank) 133 | self.be.add_token_handler(tokens[rank], _list_handler) 134 | else: 135 | self.comm.send(GDBMessage(VARPRINT_RES_MSG, varobj = varobj, rank = rank, err = False), self.comm.frontend) 136 | tokens = self.run_gdb_command(Command("var-create", args = (base_name, "*", base_name)), rank) 137 | self.be.add_token_handler(tokens[rank], _create_handler) 138 | 139 | def varprint_start_no_create(self, rank, varobj, name, max_depth = gdbconf.varprint_max_depth, 140 | max_children = gdbconf.varprint_max_children, reset_maxes = False): 141 | """Start a varprint sequence where we have already created the variable object.""" 142 | v_id = self.varprint_id 143 | self.varprint_id += 1 144 | self.varprint_stacks[v_id] = [(varobj, 0)] 145 | branch_depth = max_depth + VariableObjectManager.get_name_depth(name) 146 | def _list_handler(record): 147 | return self.varprint_dfs(record, rank, v_id, name, max_depth = max_depth, 148 | max_children = max_children, reset_maxes = reset_maxes, 149 | branch_depth = branch_depth, branch_name = name) 150 | tokens = self.run_gdb_command(Command("var-list-children", args = ("1", '"' + varobj.name + '"')), rank) 151 | self.be.add_token_handler(tokens[rank], _list_handler) 152 | 153 | def varprint_dfs(self, record, rank, v_id, name, max_depth = gdbconf.varprint_max_depth, 154 | max_children = gdbconf.varprint_max_children, 155 | reset_maxes = False, branch_depth = None, branch_name = None): 156 | """Do the depth-first search for expanding a variable object's children.""" 157 | cur_varobj, parent_depth = self.varprint_stacks[v_id].pop() 158 | cur_varobj.listed = True 159 | if "has_more" not in record.results: 160 | self.comm.send(GDBMessage(VARPRINT_RES_MSG, rank = rank, err = True, msg = "Got bad variable data."), self.comm.frontend) 161 | elif "children" in record.results: 162 | if len(record.results["children"]) > max_children: 163 | cur_varobj.more_children = True 164 | for child_tup in record.results["children"][:max_children]: 165 | child = child_tup[1] 166 | varobj = VariableObjectManager.create_var_obj(child) 167 | if not varobj: 168 | print("Could not create child varobj!") 169 | return True 170 | if not self.varobjs[rank].add_var_obj(varobj): 171 | print("Could not add child varobj!") 172 | return True 173 | if int(varobj.num_child) > 0 or varobj.is_dynamic: 174 | # Only potentially push if the varobj can have children. 175 | do_listing = True 176 | if parent_depth > max_depth: 177 | # If the depth of the parent of this node is greater than the maximum depth, 178 | # we want to terminate the search of this branch, unless this 179 | # node is a pseudo-child, or we want to go deeper on one branch. 180 | if branch_name and VariableObjectManager.same_branch(varobj.name, branch_name): 181 | if parent_depth > branch_depth and not VariableObjectManager.is_pseudochild(varobj): 182 | do_listing = False 183 | elif not VariableObjectManager.is_pseudochild(varobj): 184 | do_listing = False 185 | # Don't list null pointers. 186 | if varobj.vartype and varobj.value and varobj.vartype[-1] == "*": 187 | try: 188 | if int(varobj.value, 0) == 0: 189 | do_listing = False 190 | except ValueError: pass 191 | # Do not evaluate children further when there's an excessive number. 192 | if len(record.results["children"]) > 128: 193 | do_listing = False 194 | # Add to the stack to list if we meet the requirements.
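 do_listing was cleared above for over-deep branches, null pointers, and overly wide nodes.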
195 | if do_listing: 196 | self.varprint_stacks[v_id].append((varobj, parent_depth + 1)) 197 | if not self.varprint_stacks[v_id]: 198 | to_send = self.varobjs[rank].get_var_obj(name) 199 | if to_send: 200 | self.comm.send(GDBMessage(VARPRINT_RES_MSG, varobj = to_send, rank = rank, err = False), self.comm.frontend) 201 | else: 202 | self.comm.send(GDBMessage(VARPRINT_RES_MSG, rank = rank, err = True, msg = "Variable does not exist."), self.comm.frontend) 203 | else: 204 | to_list, depth = self.varprint_stacks[v_id][-1] 205 | if reset_maxes: 206 | def _list_handler(record): 207 | return self.varprint_dfs(record, rank, v_id, name, branch_depth = branch_depth, 208 | branch_name = branch_name) 209 | else: 210 | def _list_handler(record): 211 | return self.varprint_dfs(record, rank, v_id, name, max_depth = max_depth, 212 | max_children = max_children, reset_maxes = reset_maxes, 213 | branch_depth = branch_depth, branch_name = branch_name) 214 | tokens = self.run_gdb_command(Command("var-list-children", args = ("1", '"' + to_list.name + '"')), rank) 215 | self.be.add_token_handler(tokens[rank], _list_handler) 216 | -------------------------------------------------------------------------------- /pgdb/stlpprinters/Makefile.am: -------------------------------------------------------------------------------- 1 | ## Makefile for the python subdirectory of the GNU C++ Standard library. 2 | ## 3 | ## Copyright (C) 2009 Free Software Foundation, Inc. 4 | ## 5 | ## This file is part of the libstdc++ version 3 distribution. 6 | ## Process this file with automake to produce Makefile.in. 7 | 8 | ## This file is part of the GNU ISO C++ Library. This library is free 9 | ## software; you can redistribute it and/or modify it under the 10 | ## terms of the GNU General Public License as published by the 11 | ## Free Software Foundation; either version 3, or (at your option) 12 | ## any later version. 13 | ## 14 | ## This library is distributed in the hope that it will be useful, 15 | ## but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | ## GNU General Public License for more details. 18 | ## 19 | ## You should have received a copy of the GNU General Public License along 20 | ## with this library; see the file COPYING3. If not see 21 | ## . 22 | 23 | include $(top_srcdir)/fragment.am 24 | 25 | ## Where to install the module code. 26 | pythondir = $(datadir)/gcc-$(gcc_version)/python 27 | 28 | all-local: gdb.py 29 | 30 | nobase_python_DATA = \ 31 | libstdcxx/v6/printers.py \ 32 | libstdcxx/v6/__init__.py \ 33 | libstdcxx/__init__.py 34 | 35 | gdb.py: hook.in Makefile 36 | sed -e 's,@pythondir@,$(pythondir),' \ 37 | -e 's,@toolexeclibdir@,$(toolexeclibdir),' < $(srcdir)/hook.in > $@ 38 | 39 | install-data-local: gdb.py 40 | @$(mkdir_p) $(DESTDIR)$(toolexeclibdir) 41 | ## We want to install gdb.py as SOMETHING-gdb.py. SOMETHING is the 42 | ## full name of the final library. We want to ignore symlinks, the 43 | ## .la file, and any previous -gdb.py file. This is inherently 44 | ## fragile, but there does not seem to be a better option, because 45 | ## libtool hides the real names from us. 
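 The loop below keeps the last non-symlink library file name it sees, skipping .la files and any previous -gdb.py file.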
46 | @here=`pwd`; cd $(DESTDIR)$(toolexeclibdir); \ 47 | for file in libstdc++*; do \ 48 | case $$file in \ 49 | *-gdb.py) ;; \ 50 | *.la) ;; \ 51 | *) if test -h $$file; then \ 52 | continue; \ 53 | fi; \ 54 | libname=$$file;; \ 55 | esac; \ 56 | done; \ 57 | cd $$here; \ 58 | echo " $(INSTALL_DATA) gdb.py $(DESTDIR)$(toolexeclibdir)/$$libname-gdb.py"; \ 59 | $(INSTALL_DATA) gdb.py $(DESTDIR)$(toolexeclibdir)/$$libname-gdb.py 60 | -------------------------------------------------------------------------------- /pgdb/stlpprinters/Makefile.in: -------------------------------------------------------------------------------- 1 | # Makefile.in generated by automake 1.11.1 from Makefile.am. 2 | # @configure_input@ 3 | 4 | # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 5 | # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, 6 | # Inc. 7 | # This Makefile.in is free software; the Free Software Foundation 8 | # gives unlimited permission to copy and/or distribute it, 9 | # with or without modifications, as long as this notice is preserved. 10 | 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY, to the extent permitted by law; without 13 | # even the implied warranty of MERCHANTABILITY or FITNESS FOR A 14 | # PARTICULAR PURPOSE. 15 | 16 | @SET_MAKE@ 17 | 18 | VPATH = @srcdir@ 19 | pkgdatadir = $(datadir)/@PACKAGE@ 20 | pkgincludedir = $(includedir)/@PACKAGE@ 21 | pkglibdir = $(libdir)/@PACKAGE@ 22 | pkglibexecdir = $(libexecdir)/@PACKAGE@ 23 | am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd 24 | install_sh_DATA = $(install_sh) -c -m 644 25 | install_sh_PROGRAM = $(install_sh) -c 26 | install_sh_SCRIPT = $(install_sh) -c 27 | INSTALL_HEADER = $(INSTALL_DATA) 28 | transform = $(program_transform_name) 29 | NORMAL_INSTALL = : 30 | PRE_INSTALL = : 31 | POST_INSTALL = : 32 | NORMAL_UNINSTALL = : 33 | PRE_UNINSTALL = : 34 | POST_UNINSTALL = : 35 | build_triplet = @build@ 36 | host_triplet = @host@ 37 | target_triplet = @target@ 38 | DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/Makefile.in \ 39 | $(srcdir)/Makefile.am 40 | subdir = python 41 | ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 42 | am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \ 43 | $(top_srcdir)/../config/enable.m4 \ 44 | $(top_srcdir)/../config/futex.m4 \ 45 | $(top_srcdir)/../config/iconv.m4 \ 46 | $(top_srcdir)/../config/lead-dot.m4 \ 47 | $(top_srcdir)/../config/lib-ld.m4 \ 48 | $(top_srcdir)/../config/lib-link.m4 \ 49 | $(top_srcdir)/../config/lib-prefix.m4 \ 50 | $(top_srcdir)/../config/lthostflags.m4 \ 51 | $(top_srcdir)/../config/multi.m4 \ 52 | $(top_srcdir)/../config/no-executables.m4 \ 53 | $(top_srcdir)/../config/override.m4 \ 54 | $(top_srcdir)/../config/stdint.m4 \ 55 | $(top_srcdir)/../config/unwind_ipinfo.m4 \ 56 | $(top_srcdir)/../libtool.m4 $(top_srcdir)/../ltoptions.m4 \ 57 | $(top_srcdir)/../ltsugar.m4 $(top_srcdir)/../ltversion.m4 \ 58 | $(top_srcdir)/../lt~obsolete.m4 $(top_srcdir)/crossconfig.m4 \ 59 | $(top_srcdir)/linkage.m4 $(top_srcdir)/acinclude.m4 \ 60 | $(top_srcdir)/../config/gc++filt.m4 \ 61 | $(top_srcdir)/../config/tls.m4 $(top_srcdir)/configure.ac 62 | am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ 63 | $(ACLOCAL_M4) 64 | CONFIG_HEADER = $(top_builddir)/config.h 65 | CONFIG_CLEAN_FILES = 66 | CONFIG_CLEAN_VPATH_FILES = 67 | depcomp = 68 | am__depfiles_maybe = 69 | SOURCES = 70 | am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; 71 | am__vpath_adj = case $$p 
in \ 72 | $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ 73 | *) f=$$p;; \ 74 | esac; 75 | am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; 76 | am__install_max = 40 77 | am__nobase_strip_setup = \ 78 | srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` 79 | am__nobase_strip = \ 80 | for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" 81 | am__nobase_list = $(am__nobase_strip_setup); \ 82 | for p in $$list; do echo "$$p $$p"; done | \ 83 | sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ 84 | $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ 85 | if (++n[$$2] == $(am__install_max)) \ 86 | { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ 87 | END { for (dir in files) print dir, files[dir] }' 88 | am__base_list = \ 89 | sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ 90 | sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 91 | am__installdirs = "$(DESTDIR)$(pythondir)" 92 | DATA = $(nobase_python_DATA) 93 | ABI_TWEAKS_SRCDIR = @ABI_TWEAKS_SRCDIR@ 94 | ACLOCAL = @ACLOCAL@ 95 | ALLOCATOR_H = @ALLOCATOR_H@ 96 | ALLOCATOR_NAME = @ALLOCATOR_NAME@ 97 | AMTAR = @AMTAR@ 98 | AR = @AR@ 99 | AS = @AS@ 100 | ATOMICITY_SRCDIR = @ATOMICITY_SRCDIR@ 101 | ATOMIC_FLAGS = @ATOMIC_FLAGS@ 102 | ATOMIC_WORD_SRCDIR = @ATOMIC_WORD_SRCDIR@ 103 | AUTOCONF = @AUTOCONF@ 104 | AUTOHEADER = @AUTOHEADER@ 105 | AUTOMAKE = @AUTOMAKE@ 106 | AWK = @AWK@ 107 | BASIC_FILE_CC = @BASIC_FILE_CC@ 108 | BASIC_FILE_H = @BASIC_FILE_H@ 109 | CC = @CC@ 110 | CCODECVT_CC = @CCODECVT_CC@ 111 | CCOLLATE_CC = @CCOLLATE_CC@ 112 | CCTYPE_CC = @CCTYPE_CC@ 113 | CFLAGS = @CFLAGS@ 114 | CLOCALE_CC = @CLOCALE_CC@ 115 | CLOCALE_H = @CLOCALE_H@ 116 | CLOCALE_INTERNAL_H = @CLOCALE_INTERNAL_H@ 117 | CMESSAGES_CC = @CMESSAGES_CC@ 118 | CMESSAGES_H = @CMESSAGES_H@ 119 | CMONEY_CC = @CMONEY_CC@ 120 | CNUMERIC_CC = @CNUMERIC_CC@ 121 | CPP = @CPP@ 122 | CPPFLAGS = @CPPFLAGS@ 123 | CPU_DEFINES_SRCDIR = @CPU_DEFINES_SRCDIR@ 124 | CSTDIO_H = @CSTDIO_H@ 125 | CTIME_CC = @CTIME_CC@ 126 | CTIME_H = @CTIME_H@ 127 | CXX = @CXX@ 128 | CXXCPP = @CXXCPP@ 129 | CXXFILT = @CXXFILT@ 130 | CXXFLAGS = @CXXFLAGS@ 131 | CYGPATH_W = @CYGPATH_W@ 132 | C_INCLUDE_DIR = @C_INCLUDE_DIR@ 133 | DBLATEX = @DBLATEX@ 134 | DBTOEPUB = @DBTOEPUB@ 135 | DEBUG_FLAGS = @DEBUG_FLAGS@ 136 | DEFS = @DEFS@ 137 | DOT = @DOT@ 138 | DOXYGEN = @DOXYGEN@ 139 | DSYMUTIL = @DSYMUTIL@ 140 | DUMPBIN = @DUMPBIN@ 141 | ECHO_C = @ECHO_C@ 142 | ECHO_N = @ECHO_N@ 143 | ECHO_T = @ECHO_T@ 144 | EGREP = @EGREP@ 145 | ERROR_CONSTANTS_SRCDIR = @ERROR_CONSTANTS_SRCDIR@ 146 | EXEEXT = @EXEEXT@ 147 | EXTRA_CXX_FLAGS = @EXTRA_CXX_FLAGS@ 148 | FGREP = @FGREP@ 149 | GLIBCXX_INCLUDES = @GLIBCXX_INCLUDES@ 150 | GLIBCXX_LIBS = @GLIBCXX_LIBS@ 151 | GREP = @GREP@ 152 | INSTALL = @INSTALL@ 153 | INSTALL_DATA = @INSTALL_DATA@ 154 | INSTALL_PROGRAM = @INSTALL_PROGRAM@ 155 | INSTALL_SCRIPT = @INSTALL_SCRIPT@ 156 | INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ 157 | LD = @LD@ 158 | LDFLAGS = @LDFLAGS@ 159 | LIBICONV = @LIBICONV@ 160 | LIBOBJS = @LIBOBJS@ 161 | LIBS = @LIBS@ 162 | LIBSUPCXX_PICFLAGS = @LIBSUPCXX_PICFLAGS@ 163 | LIBTOOL = @LIBTOOL@ 164 | LIPO = @LIPO@ 165 | LN_S = @LN_S@ 166 | LTLIBICONV = @LTLIBICONV@ 167 | LTLIBOBJS = @LTLIBOBJS@ 168 | MAINT = @MAINT@ 169 | MAKEINFO = @MAKEINFO@ 170 | MKDIR_P = @MKDIR_P@ 171 | NM = @NM@ 172 | NMEDIT = @NMEDIT@ 173 | OBJDUMP = @OBJDUMP@ 174 | OBJEXT = @OBJEXT@ 175 | OPTIMIZE_CXXFLAGS = @OPTIMIZE_CXXFLAGS@ 176 | OPT_LDFLAGS = @OPT_LDFLAGS@ 177 | OS_INC_SRCDIR = @OS_INC_SRCDIR@ 178 | OTOOL 
= @OTOOL@ 179 | OTOOL64 = @OTOOL64@ 180 | PACKAGE = @PACKAGE@ 181 | PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ 182 | PACKAGE_NAME = @PACKAGE_NAME@ 183 | PACKAGE_STRING = @PACKAGE_STRING@ 184 | PACKAGE_TARNAME = @PACKAGE_TARNAME@ 185 | PACKAGE_URL = @PACKAGE_URL@ 186 | PACKAGE_VERSION = @PACKAGE_VERSION@ 187 | PATH_SEPARATOR = @PATH_SEPARATOR@ 188 | PDFLATEX = @PDFLATEX@ 189 | RANLIB = @RANLIB@ 190 | RUBY = @RUBY@ 191 | SECTION_FLAGS = @SECTION_FLAGS@ 192 | SECTION_LDFLAGS = @SECTION_LDFLAGS@ 193 | SED = @SED@ 194 | SET_MAKE = @SET_MAKE@ 195 | SHELL = @SHELL@ 196 | STRIP = @STRIP@ 197 | SYMVER_FILE = @SYMVER_FILE@ 198 | TOPLEVEL_INCLUDES = @TOPLEVEL_INCLUDES@ 199 | USE_NLS = @USE_NLS@ 200 | VERSION = @VERSION@ 201 | WARN_FLAGS = @WARN_FLAGS@ 202 | WERROR = @WERROR@ 203 | XMLLINT = @XMLLINT@ 204 | XSLTPROC = @XSLTPROC@ 205 | XSL_STYLE_DIR = @XSL_STYLE_DIR@ 206 | abs_builddir = @abs_builddir@ 207 | abs_srcdir = @abs_srcdir@ 208 | abs_top_builddir = @abs_top_builddir@ 209 | abs_top_srcdir = @abs_top_srcdir@ 210 | ac_ct_CC = @ac_ct_CC@ 211 | ac_ct_CXX = @ac_ct_CXX@ 212 | ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ 213 | am__leading_dot = @am__leading_dot@ 214 | am__tar = @am__tar@ 215 | am__untar = @am__untar@ 216 | baseline_dir = @baseline_dir@ 217 | bindir = @bindir@ 218 | build = @build@ 219 | build_alias = @build_alias@ 220 | build_cpu = @build_cpu@ 221 | build_os = @build_os@ 222 | build_vendor = @build_vendor@ 223 | builddir = @builddir@ 224 | check_msgfmt = @check_msgfmt@ 225 | datadir = @datadir@ 226 | datarootdir = @datarootdir@ 227 | docdir = @docdir@ 228 | dvidir = @dvidir@ 229 | enable_shared = @enable_shared@ 230 | enable_static = @enable_static@ 231 | exec_prefix = @exec_prefix@ 232 | glibcxx_MOFILES = @glibcxx_MOFILES@ 233 | glibcxx_PCHFLAGS = @glibcxx_PCHFLAGS@ 234 | glibcxx_POFILES = @glibcxx_POFILES@ 235 | glibcxx_builddir = @glibcxx_builddir@ 236 | glibcxx_localedir = @glibcxx_localedir@ 237 | glibcxx_prefixdir = @glibcxx_prefixdir@ 238 | glibcxx_srcdir = @glibcxx_srcdir@ 239 | glibcxx_thread_h = @glibcxx_thread_h@ 240 | glibcxx_toolexecdir = @glibcxx_toolexecdir@ 241 | glibcxx_toolexeclibdir = @glibcxx_toolexeclibdir@ 242 | gxx_include_dir = @gxx_include_dir@ 243 | host = @host@ 244 | host_alias = @host_alias@ 245 | host_cpu = @host_cpu@ 246 | host_os = @host_os@ 247 | host_vendor = @host_vendor@ 248 | htmldir = @htmldir@ 249 | includedir = @includedir@ 250 | infodir = @infodir@ 251 | install_sh = @install_sh@ 252 | libdir = @libdir@ 253 | libexecdir = @libexecdir@ 254 | libtool_VERSION = @libtool_VERSION@ 255 | localedir = @localedir@ 256 | localstatedir = @localstatedir@ 257 | lt_host_flags = @lt_host_flags@ 258 | mandir = @mandir@ 259 | mkdir_p = @mkdir_p@ 260 | multi_basedir = @multi_basedir@ 261 | oldincludedir = @oldincludedir@ 262 | pdfdir = @pdfdir@ 263 | port_specific_symbol_files = @port_specific_symbol_files@ 264 | prefix = @prefix@ 265 | program_transform_name = @program_transform_name@ 266 | psdir = @psdir@ 267 | sbindir = @sbindir@ 268 | sharedstatedir = @sharedstatedir@ 269 | srcdir = @srcdir@ 270 | sysconfdir = @sysconfdir@ 271 | target = @target@ 272 | target_alias = @target_alias@ 273 | target_cpu = @target_cpu@ 274 | target_os = @target_os@ 275 | target_vendor = @target_vendor@ 276 | top_build_prefix = @top_build_prefix@ 277 | top_builddir = @top_builddir@ 278 | top_srcdir = @top_srcdir@ 279 | toplevel_srcdir = @toplevel_srcdir@ 280 | 281 | # May be used by various substitution variables. 
282 | gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) 283 | MAINT_CHARSET = latin1 284 | mkinstalldirs = $(SHELL) $(toplevel_srcdir)/mkinstalldirs 285 | PWD_COMMAND = $${PWDCMD-pwd} 286 | STAMP = echo timestamp > 287 | toolexecdir = $(glibcxx_toolexecdir) 288 | toolexeclibdir = $(glibcxx_toolexeclibdir) 289 | 290 | # These bits are all figured out from configure. Look in acinclude.m4 291 | # or configure.ac to see how they are set. See GLIBCXX_EXPORT_FLAGS. 292 | CONFIG_CXXFLAGS = \ 293 | $(SECTION_FLAGS) $(EXTRA_CXX_FLAGS) 294 | 295 | WARN_CXXFLAGS = \ 296 | $(WARN_FLAGS) $(WERROR) -fdiagnostics-show-location=once 297 | 298 | 299 | # -I/-D flags to pass when compiling. 300 | AM_CPPFLAGS = $(GLIBCXX_INCLUDES) 301 | pythondir = $(datadir)/gcc-$(gcc_version)/python 302 | nobase_python_DATA = \ 303 | libstdcxx/v6/printers.py \ 304 | libstdcxx/v6/__init__.py \ 305 | libstdcxx/__init__.py 306 | 307 | all: all-am 308 | 309 | .SUFFIXES: 310 | $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/fragment.am $(am__configure_deps) 311 | @for dep in $?; do \ 312 | case '$(am__configure_deps)' in \ 313 | *$$dep*) \ 314 | ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ 315 | && { if test -f $@; then exit 0; else break; fi; }; \ 316 | exit 1;; \ 317 | esac; \ 318 | done; \ 319 | echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign --ignore-deps python/Makefile'; \ 320 | $(am__cd) $(top_srcdir) && \ 321 | $(AUTOMAKE) --foreign --ignore-deps python/Makefile 322 | .PRECIOUS: Makefile 323 | Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status 324 | @case '$?' in \ 325 | *config.status*) \ 326 | cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ 327 | *) \ 328 | echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ 329 | cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ 330 | esac; 331 | 332 | $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) 333 | cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh 334 | 335 | $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) 336 | cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh 337 | $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) 338 | cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh 339 | $(am__aclocal_m4_deps): 340 | 341 | mostlyclean-libtool: 342 | -rm -f *.lo 343 | 344 | clean-libtool: 345 | -rm -rf .libs _libs 346 | install-nobase_pythonDATA: $(nobase_python_DATA) 347 | @$(NORMAL_INSTALL) 348 | test -z "$(pythondir)" || $(MKDIR_P) "$(DESTDIR)$(pythondir)" 349 | @list='$(nobase_python_DATA)'; test -n "$(pythondir)" || list=; \ 350 | $(am__nobase_list) | while read dir files; do \ 351 | xfiles=; for file in $$files; do \ 352 | if test -f "$$file"; then xfiles="$$xfiles $$file"; \ 353 | else xfiles="$$xfiles $(srcdir)/$$file"; fi; done; \ 354 | test -z "$$xfiles" || { \ 355 | test "x$$dir" = x. 
|| { \ 356 | echo "$(MKDIR_P) '$(DESTDIR)$(pythondir)/$$dir'"; \ 357 | $(MKDIR_P) "$(DESTDIR)$(pythondir)/$$dir"; }; \ 358 | echo " $(INSTALL_DATA) $$xfiles '$(DESTDIR)$(pythondir)/$$dir'"; \ 359 | $(INSTALL_DATA) $$xfiles "$(DESTDIR)$(pythondir)/$$dir" || exit $$?; }; \ 360 | done 361 | 362 | uninstall-nobase_pythonDATA: 363 | @$(NORMAL_UNINSTALL) 364 | @list='$(nobase_python_DATA)'; test -n "$(pythondir)" || list=; \ 365 | $(am__nobase_strip_setup); files=`$(am__nobase_strip)`; \ 366 | test -n "$$files" || exit 0; \ 367 | echo " ( cd '$(DESTDIR)$(pythondir)' && rm -f" $$files ")"; \ 368 | cd "$(DESTDIR)$(pythondir)" && rm -f $$files 369 | tags: TAGS 370 | TAGS: 371 | 372 | ctags: CTAGS 373 | CTAGS: 374 | 375 | check-am: all-am 376 | check: check-am 377 | all-am: Makefile $(DATA) all-local 378 | installdirs: 379 | for dir in "$(DESTDIR)$(pythondir)"; do \ 380 | test -z "$$dir" || $(MKDIR_P) "$$dir"; \ 381 | done 382 | install: install-am 383 | install-exec: install-exec-am 384 | install-data: install-data-am 385 | uninstall: uninstall-am 386 | 387 | install-am: all-am 388 | @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am 389 | 390 | installcheck: installcheck-am 391 | install-strip: 392 | $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ 393 | install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ 394 | `test -z '$(STRIP)' || \ 395 | echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install 396 | mostlyclean-generic: 397 | 398 | clean-generic: 399 | 400 | distclean-generic: 401 | -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) 402 | -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) 403 | 404 | maintainer-clean-generic: 405 | @echo "This command is intended for maintainers to use" 406 | @echo "it deletes files that may require special tools to rebuild." 
407 | clean: clean-am 408 | 409 | clean-am: clean-generic clean-libtool mostlyclean-am 410 | 411 | distclean: distclean-am 412 | -rm -f Makefile 413 | distclean-am: clean-am distclean-generic 414 | 415 | dvi: dvi-am 416 | 417 | dvi-am: 418 | 419 | html: html-am 420 | 421 | html-am: 422 | 423 | info: info-am 424 | 425 | info-am: 426 | 427 | install-data-am: install-data-local install-nobase_pythonDATA 428 | 429 | install-dvi: install-dvi-am 430 | 431 | install-dvi-am: 432 | 433 | install-exec-am: 434 | 435 | install-html: install-html-am 436 | 437 | install-html-am: 438 | 439 | install-info: install-info-am 440 | 441 | install-info-am: 442 | 443 | install-man: 444 | 445 | install-pdf: install-pdf-am 446 | 447 | install-pdf-am: 448 | 449 | install-ps: install-ps-am 450 | 451 | install-ps-am: 452 | 453 | installcheck-am: 454 | 455 | maintainer-clean: maintainer-clean-am 456 | -rm -f Makefile 457 | maintainer-clean-am: distclean-am maintainer-clean-generic 458 | 459 | mostlyclean: mostlyclean-am 460 | 461 | mostlyclean-am: mostlyclean-generic mostlyclean-libtool 462 | 463 | pdf: pdf-am 464 | 465 | pdf-am: 466 | 467 | ps: ps-am 468 | 469 | ps-am: 470 | 471 | uninstall-am: uninstall-nobase_pythonDATA 472 | 473 | .MAKE: install-am install-strip 474 | 475 | .PHONY: all all-am all-local check check-am clean clean-generic \ 476 | clean-libtool distclean distclean-generic distclean-libtool \ 477 | dvi dvi-am html html-am info info-am install install-am \ 478 | install-data install-data-am install-data-local install-dvi \ 479 | install-dvi-am install-exec install-exec-am install-html \ 480 | install-html-am install-info install-info-am install-man \ 481 | install-nobase_pythonDATA install-pdf install-pdf-am \ 482 | install-ps install-ps-am install-strip installcheck \ 483 | installcheck-am installdirs maintainer-clean \ 484 | maintainer-clean-generic mostlyclean mostlyclean-generic \ 485 | mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ 486 | uninstall-nobase_pythonDATA 487 | 488 | 489 | all-local: gdb.py 490 | 491 | gdb.py: hook.in Makefile 492 | sed -e 's,@pythondir@,$(pythondir),' \ 493 | -e 's,@toolexeclibdir@,$(toolexeclibdir),' < $(srcdir)/hook.in > $@ 494 | 495 | install-data-local: gdb.py 496 | @$(mkdir_p) $(DESTDIR)$(toolexeclibdir) 497 | @here=`pwd`; cd $(DESTDIR)$(toolexeclibdir); \ 498 | for file in libstdc++*; do \ 499 | case $$file in \ 500 | *-gdb.py) ;; \ 501 | *.la) ;; \ 502 | *) if test -h $$file; then \ 503 | continue; \ 504 | fi; \ 505 | libname=$$file;; \ 506 | esac; \ 507 | done; \ 508 | cd $$here; \ 509 | echo " $(INSTALL_DATA) gdb.py $(DESTDIR)$(toolexeclibdir)/$$libname-gdb.py"; \ 510 | $(INSTALL_DATA) gdb.py $(DESTDIR)$(toolexeclibdir)/$$libname-gdb.py 511 | 512 | # Tell versions [3.59,3.63) of GNU make to not export all variables. 513 | # Otherwise a system limit (for SysV at least) may be exceeded. 514 | .NOEXPORT: 515 | -------------------------------------------------------------------------------- /pgdb/stlpprinters/hook.in: -------------------------------------------------------------------------------- 1 | # -*- python -*- 2 | # Copyright (C) 2009, 2010 Free Software Foundation, Inc. 3 | 4 | # This program is free software; you can redistribute it and/or modify 5 | # it under the terms of the GNU General Public License as published by 6 | # the Free Software Foundation; either version 3 of the License, or 7 | # (at your option) any later version. 
8 | # 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU General Public License 15 | # along with this program. If not, see . 16 | 17 | import sys 18 | import gdb 19 | import os 20 | import os.path 21 | 22 | pythondir = '@pythondir@' 23 | libdir = '@toolexeclibdir@' 24 | 25 | # This file might be loaded when there is no current objfile. This 26 | # can happen if the user loads it manually. In this case we don't 27 | # update sys.path; instead we just hope the user managed to do that 28 | # beforehand. 29 | if gdb.current_objfile () is not None: 30 | # Update module path. We want to find the relative path from libdir 31 | # to pythondir, and then we want to apply that relative path to the 32 | # directory holding the objfile with which this file is associated. 33 | # This preserves relocatability of the gcc tree. 34 | 35 | # Do a simple normalization that removes duplicate separators. 36 | pythondir = os.path.normpath (pythondir) 37 | libdir = os.path.normpath (libdir) 38 | 39 | prefix = os.path.commonprefix ([libdir, pythondir]) 40 | # In some bizarre configuration we might have found a match in the 41 | # middle of a directory name. 42 | if prefix[-1] != '/': 43 | prefix = os.path.dirname (prefix) + '/' 44 | 45 | # Strip off the prefix. 46 | pythondir = pythondir[len (prefix):] 47 | libdir = libdir[len (prefix):] 48 | 49 | # Compute the ".."s needed to get from libdir to the prefix. 50 | dotdots = ('..' + os.sep) * len (libdir.split (os.sep)) 51 | 52 | objfile = gdb.current_objfile ().filename 53 | dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir) 54 | 55 | if not dir_ in sys.path: 56 | sys.path.insert(0, dir_) 57 | 58 | # Load the pretty-printers. 59 | from libstdcxx.v6.printers import register_libstdcxx_printers 60 | register_libstdcxx_printers (gdb.current_objfile ()) 61 | -------------------------------------------------------------------------------- /pgdb/stlpprinters/libstdcxx/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /pgdb/stlpprinters/libstdcxx/v6/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | --------------------------------------------------------------------------------