├── .gitignore ├── GPUtil ├── GPUtil.py ├── __init__.py └── demo_GPUtil.py ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── setup.cfg └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | -------------------------------------------------------------------------------- /GPUtil/GPUtil.py: -------------------------------------------------------------------------------- 1 | # GPUtil - GPU utilization 2 | # 3 | # A Python module for programmically getting the GPU utilization from 
NVIDA GPUs using nvidia-smi 4 | # 5 | # Author: Anders Krogh Mortensen (anderskm) 6 | # Date: 16 January 2017 7 | # Web: https://github.com/anderskm/gputil 8 | # 9 | # LICENSE 10 | # 11 | # MIT License 12 | # 13 | # Copyright (c) 2017 anderskm 14 | # 15 | # Permission is hereby granted, free of charge, to any person obtaining a copy 16 | # of this software and associated documentation files (the "Software"), to deal 17 | # in the Software without restriction, including without limitation the rights 18 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 19 | # copies of the Software, and to permit persons to whom the Software is 20 | # furnished to do so, subject to the following conditions: 21 | # 22 | # The above copyright notice and this permission notice shall be included in all 23 | # copies or substantial portions of the Software. 24 | # 25 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 28 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 29 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 30 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 | # SOFTWARE. 
32 | 33 | from subprocess import Popen, PIPE 34 | from distutils import spawn 35 | import os 36 | import math 37 | import random 38 | import time 39 | import sys 40 | import platform 41 | 42 | 43 | __version__ = '1.4.0' 44 | 45 | class GPU: 46 | def __init__(self, ID, uuid, load, memoryTotal, memoryUsed, memoryFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu): 47 | self.id = ID 48 | self.uuid = uuid 49 | self.load = load 50 | self.memoryUtil = float(memoryUsed)/float(memoryTotal) 51 | self.memoryTotal = memoryTotal 52 | self.memoryUsed = memoryUsed 53 | self.memoryFree = memoryFree 54 | self.driver = driver 55 | self.name = gpu_name 56 | self.serial = serial 57 | self.display_mode = display_mode 58 | self.display_active = display_active 59 | self.temperature = temp_gpu 60 | 61 | def safeFloatCast(strNumber): 62 | try: 63 | number = float(strNumber) 64 | except ValueError: 65 | number = float('nan') 66 | return number 67 | 68 | def getGPUs(): 69 | if platform.system() == "Windows": 70 | # If the platform is Windows and nvidia-smi 71 | # could not be found from the environment path, 72 | # try to find it from system drive with default installation path 73 | nvidia_smi = spawn.find_executable('nvidia-smi') 74 | if nvidia_smi is None: 75 | nvidia_smi = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ['systemdrive'] 76 | else: 77 | nvidia_smi = "nvidia-smi" 78 | 79 | # Get ID, processing and memory utilization for all GPUs 80 | try: 81 | p = Popen([nvidia_smi,"--query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu", "--format=csv,noheader,nounits"], stdout=PIPE) 82 | stdout, stderror = p.communicate() 83 | except: 84 | return [] 85 | output = stdout.decode('UTF-8') 86 | # output = output[2:-1] # Remove b' and ' from string added by python 87 | #print(output) 88 | ## Parse output 89 | # Split on line break 90 | lines = 
output.split(os.linesep) 91 | #print(lines) 92 | numDevices = len(lines)-1 93 | GPUs = [] 94 | for g in range(numDevices): 95 | line = lines[g] 96 | #print(line) 97 | vals = line.split(', ') 98 | #print(vals) 99 | for i in range(12): 100 | # print(vals[i]) 101 | if (i == 0): 102 | deviceIds = int(vals[i]) 103 | elif (i == 1): 104 | uuid = vals[i] 105 | elif (i == 2): 106 | gpuUtil = safeFloatCast(vals[i])/100 107 | elif (i == 3): 108 | memTotal = safeFloatCast(vals[i]) 109 | elif (i == 4): 110 | memUsed = safeFloatCast(vals[i]) 111 | elif (i == 5): 112 | memFree = safeFloatCast(vals[i]) 113 | elif (i == 6): 114 | driver = vals[i] 115 | elif (i == 7): 116 | gpu_name = vals[i] 117 | elif (i == 8): 118 | serial = vals[i] 119 | elif (i == 9): 120 | display_active = vals[i] 121 | elif (i == 10): 122 | display_mode = vals[i] 123 | elif (i == 11): 124 | temp_gpu = safeFloatCast(vals[i]); 125 | GPUs.append(GPU(deviceIds, uuid, gpuUtil, memTotal, memUsed, memFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu)) 126 | return GPUs # (deviceIds, gpuUtil, memUtil) 127 | 128 | 129 | def getAvailable(order = 'first', limit=1, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]): 130 | # order = first | last | random | load | memory 131 | # first --> select the GPU with the lowest ID (DEFAULT) 132 | # last --> select the GPU with the highest ID 133 | # random --> select a random available GPU 134 | # load --> select the GPU with the lowest load 135 | # memory --> select the GPU with the most memory available 136 | # limit = 1 (DEFAULT), 2, ..., Inf 137 | # Limit sets the upper limit for the number of GPUs to return. E.g. if limit = 2, but only one is available, only one is returned. 
138 | 139 | # Get device IDs, load and memory usage 140 | GPUs = getGPUs() 141 | 142 | # Determine, which GPUs are available 143 | GPUavailability = getAvailability(GPUs, maxLoad=maxLoad, maxMemory=maxMemory, memoryFree=memoryFree, includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID) 144 | availAbleGPUindex = [idx for idx in range(0,len(GPUavailability)) if (GPUavailability[idx] == 1)] 145 | # Discard unavailable GPUs 146 | GPUs = [GPUs[g] for g in availAbleGPUindex] 147 | 148 | # Sort available GPUs according to the order argument 149 | if (order == 'first'): 150 | GPUs.sort(key=lambda x: float('inf') if math.isnan(x.id) else x.id, reverse=False) 151 | elif (order == 'last'): 152 | GPUs.sort(key=lambda x: float('-inf') if math.isnan(x.id) else x.id, reverse=True) 153 | elif (order == 'random'): 154 | GPUs = [GPUs[g] for g in random.sample(range(0,len(GPUs)),len(GPUs))] 155 | elif (order == 'load'): 156 | GPUs.sort(key=lambda x: float('inf') if math.isnan(x.load) else x.load, reverse=False) 157 | elif (order == 'memory'): 158 | GPUs.sort(key=lambda x: float('inf') if math.isnan(x.memoryUtil) else x.memoryUtil, reverse=False) 159 | 160 | # Extract the number of desired GPUs, but limited to the total number of available GPUs 161 | GPUs = GPUs[0:min(limit, len(GPUs))] 162 | 163 | # Extract the device IDs from the GPUs and return them 164 | deviceIds = [gpu.id for gpu in GPUs] 165 | 166 | return deviceIds 167 | 168 | #def getAvailability(GPUs, maxLoad = 0.5, maxMemory = 0.5, includeNan = False): 169 | # # Determine, which GPUs are available 170 | # GPUavailability = np.zeros(len(GPUs)) 171 | # for i in range(len(GPUs)): 172 | # if (GPUs[i].load < maxLoad or (includeNan and np.isnan(GPUs[i].load))) and (GPUs[i].memoryUtil < maxMemory or (includeNan and np.isnan(GPUs[i].memoryUtil))): 173 | # GPUavailability[i] = 1 174 | 175 | def getAvailability(GPUs, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]): 176 | # 
Determine, which GPUs are available 177 | GPUavailability = [1 if (gpu.memoryFree>=memoryFree) and (gpu.load < maxLoad or (includeNan and math.isnan(gpu.load))) and (gpu.memoryUtil < maxMemory or (includeNan and math.isnan(gpu.memoryUtil))) and ((gpu.id not in excludeID) and (gpu.uuid not in excludeUUID)) else 0 for gpu in GPUs] 178 | return GPUavailability 179 | 180 | def getFirstAvailable(order = 'first', maxLoad=0.5, maxMemory=0.5, attempts=1, interval=900, verbose=False, includeNan=False, excludeID=[], excludeUUID=[]): 181 | #GPUs = getGPUs() 182 | #firstAvailableGPU = np.NaN 183 | #for i in range(len(GPUs)): 184 | # if (GPUs[i].load < maxLoad) & (GPUs[i].memory < maxMemory): 185 | # firstAvailableGPU = GPUs[i].id 186 | # break 187 | #return firstAvailableGPU 188 | for i in range(attempts): 189 | if (verbose): 190 | print('Attempting (' + str(i+1) + '/' + str(attempts) + ') to locate available GPU.') 191 | # Get first available GPU 192 | available = getAvailable(order=order, limit=1, maxLoad=maxLoad, maxMemory=maxMemory, includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID) 193 | # If an available GPU was found, break for loop. 194 | if (available): 195 | if (verbose): 196 | print('GPU ' + str(available) + ' located!') 197 | break 198 | # If this is not the last attempt, sleep for 'interval' seconds 199 | if (i != attempts-1): 200 | time.sleep(interval) 201 | # Check if an GPU was found, or if the attempts simply ran out. Throw error, if no GPU was found 202 | if (not(available)): 203 | raise RuntimeError('Could not find an available GPU after ' + str(attempts) + ' attempts with ' + str(interval) + ' seconds interval.') 204 | 205 | # Return found GPU 206 | return available 207 | 208 | 209 | def showUtilization(all=False, attrList=None, useOldCode=False): 210 | GPUs = getGPUs() 211 | if (all): 212 | if (useOldCode): 213 | print(' ID | Name | Serial | UUID || GPU util. | Memory util. 
|| Memory total | Memory used | Memory free || Display mode | Display active |') 214 | print('------------------------------------------------------------------------------------------------------------------------------') 215 | for gpu in GPUs: 216 | print(' {0:2d} | {1:s} | {2:s} | {3:s} || {4:3.0f}% | {5:3.0f}% || {6:.0f}MB | {7:.0f}MB | {8:.0f}MB || {9:s} | {10:s}'.format(gpu.id,gpu.name,gpu.serial,gpu.uuid,gpu.load*100,gpu.memoryUtil*100,gpu.memoryTotal,gpu.memoryUsed,gpu.memoryFree,gpu.display_mode,gpu.display_active)) 217 | else: 218 | attrList = [[{'attr':'id','name':'ID'}, 219 | {'attr':'name','name':'Name'}, 220 | {'attr':'serial','name':'Serial'}, 221 | {'attr':'uuid','name':'UUID'}], 222 | [{'attr':'temperature','name':'GPU temp.','suffix':'C','transform': lambda x: x,'precision':0}, 223 | {'attr':'load','name':'GPU util.','suffix':'%','transform': lambda x: x*100,'precision':0}, 224 | {'attr':'memoryUtil','name':'Memory util.','suffix':'%','transform': lambda x: x*100,'precision':0}], 225 | [{'attr':'memoryTotal','name':'Memory total','suffix':'MB','precision':0}, 226 | {'attr':'memoryUsed','name':'Memory used','suffix':'MB','precision':0}, 227 | {'attr':'memoryFree','name':'Memory free','suffix':'MB','precision':0}], 228 | [{'attr':'display_mode','name':'Display mode'}, 229 | {'attr':'display_active','name':'Display active'}]] 230 | 231 | else: 232 | if (useOldCode): 233 | print(' ID GPU MEM') 234 | print('--------------') 235 | for gpu in GPUs: 236 | print(' {0:2d} {1:3.0f}% {2:3.0f}%'.format(gpu.id, gpu.load*100, gpu.memoryUtil*100)) 237 | elif attrList is None: 238 | # if `attrList` was not specified, use the default one 239 | attrList = [[{'attr':'id','name':'ID'}, 240 | {'attr':'load','name':'GPU','suffix':'%','transform': lambda x: x*100,'precision':0}, 241 | {'attr':'memoryUtil','name':'MEM','suffix':'%','transform': lambda x: x*100,'precision':0}], 242 | ] 243 | 244 | if (not useOldCode): 245 | if (attrList is not None): 246 | headerString = 
'' 247 | GPUstrings = ['']*len(GPUs) 248 | for attrGroup in attrList: 249 | #print(attrGroup) 250 | for attrDict in attrGroup: 251 | headerString = headerString + '| ' + attrDict['name'] + ' ' 252 | headerWidth = len(attrDict['name']) 253 | minWidth = len(attrDict['name']) 254 | 255 | attrPrecision = '.' + str(attrDict['precision']) if ('precision' in attrDict.keys()) else '' 256 | attrSuffix = str(attrDict['suffix']) if ('suffix' in attrDict.keys()) else '' 257 | attrTransform = attrDict['transform'] if ('transform' in attrDict.keys()) else lambda x : x 258 | for gpu in GPUs: 259 | attr = getattr(gpu,attrDict['attr']) 260 | 261 | attr = attrTransform(attr) 262 | 263 | if (isinstance(attr,float)): 264 | attrStr = ('{0:' + attrPrecision + 'f}').format(attr) 265 | elif (isinstance(attr,int)): 266 | attrStr = ('{0:d}').format(attr) 267 | elif (isinstance(attr,str)): 268 | attrStr = attr; 269 | elif (sys.version_info[0] == 2): 270 | if (isinstance(attr,unicode)): 271 | attrStr = attr.encode('ascii','ignore') 272 | else: 273 | raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'') 274 | 275 | attrStr += attrSuffix 276 | 277 | minWidth = max(minWidth,len(attrStr)) 278 | 279 | headerString += ' '*max(0,minWidth-headerWidth) 280 | 281 | minWidthStr = str(minWidth - len(attrSuffix)) 282 | 283 | for gpuIdx,gpu in enumerate(GPUs): 284 | attr = getattr(gpu,attrDict['attr']) 285 | 286 | attr = attrTransform(attr) 287 | 288 | if (isinstance(attr,float)): 289 | attrStr = ('{0:'+ minWidthStr + attrPrecision + 'f}').format(attr) 290 | elif (isinstance(attr,int)): 291 | attrStr = ('{0:' + minWidthStr + 'd}').format(attr) 292 | elif (isinstance(attr,str)): 293 | attrStr = ('{0:' + minWidthStr + 's}').format(attr); 294 | elif (sys.version_info[0] == 2): 295 | if (isinstance(attr,unicode)): 296 | attrStr = ('{0:' + minWidthStr + 's}').format(attr.encode('ascii','ignore')) 297 | else: 298 | raise TypeError('Unhandled object type (' 
+ str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'') 299 | 300 | attrStr += attrSuffix 301 | 302 | GPUstrings[gpuIdx] += '| ' + attrStr + ' ' 303 | 304 | headerString = headerString + '|' 305 | for gpuIdx,gpu in enumerate(GPUs): 306 | GPUstrings[gpuIdx] += '|' 307 | 308 | headerSpacingString = '-' * len(headerString) 309 | print(headerString) 310 | print(headerSpacingString) 311 | for GPUstring in GPUstrings: 312 | print(GPUstring) 313 | -------------------------------------------------------------------------------- /GPUtil/__init__.py: -------------------------------------------------------------------------------- 1 | # GPUtil - GPU utilization 2 | # 3 | # A Python module for programmically getting the GPU utilization from NVIDA GPUs using nvidia-smi 4 | # 5 | # Author: Anders Krogh Mortensen (anderskm) 6 | # Date: 16 January 2017 7 | # Web: https://github.com/anderskm/gputil 8 | # 9 | # LICENSE 10 | # 11 | # MIT License 12 | # 13 | # Copyright (c) 2017 anderskm 14 | # 15 | # Permission is hereby granted, free of charge, to any person obtaining a copy 16 | # of this software and associated documentation files (the "Software"), to deal 17 | # in the Software without restriction, including without limitation the rights 18 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 19 | # copies of the Software, and to permit persons to whom the Software is 20 | # furnished to do so, subject to the following conditions: 21 | # 22 | # The above copyright notice and this permission notice shall be included in all 23 | # copies or substantial portions of the Software. 24 | # 25 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 28 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 29 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 30 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 | # SOFTWARE. 32 | 33 | from .GPUtil import GPU, getGPUs, getAvailable, getAvailability, getFirstAvailable, showUtilization, __version__ 34 | -------------------------------------------------------------------------------- /GPUtil/demo_GPUtil.py: -------------------------------------------------------------------------------- 1 | import GPUtil as GPU 2 | import sys 3 | # Get all device ids and their processing and memory utiliazion 4 | # (deviceIds, gpuUtil, memUtil) = GPU.getGPUs() 5 | 6 | # Print os and python version information 7 | print('OS: ' + sys.platform) 8 | print(sys.version) 9 | 10 | # Print package name and version number 11 | print(GPU.__name__ + ' ' + GPU.__version__) 12 | 13 | # Show the utilization of all GPUs in a nice table 14 | GPU.showUtilization() 15 | 16 | # Show all stats of all GPUs in a nice table 17 | GPU.showUtilization(all=True) 18 | 19 | # Get all available GPU(s), ordered by ID in ascending order 20 | print('All available ordered by id: '), 21 | print(GPU.getAvailable(order='first', limit=999)) 22 | 23 | # Get 1 available GPU, ordered by ID in descending order 24 | print('Last available: '), 25 | print(GPU.getAvailable(order='last', limit=1)) 26 | 27 | # Get 1 random available GPU 28 | print('Random available: '), 29 | print(GPU.getAvailable(order='random')) 30 | 31 | # Get 1 available GPU, ordered by GPU load ascending 32 | print('First available weighted by GPU load ascending: '), 33 | print(GPU.getAvailable(order='load', limit=1)) 34 | 35 | # Get all available GPU with max load of 10%, ordered by memory ascending 36 | print('All available weighted by memory load ascending: '), 37 | print(GPU.getAvailable(order='memory', limit=999, maxLoad=0.1)) 38 | 39 | # Get the 
first available GPU 40 | firstGPU = GPU.getFirstAvailable() 41 | print('First available GPU id:'), 42 | print(firstGPU) 43 | 44 | # Get the first available GPU, where memory usage is less than 90% and processing is less than 80% 45 | firstGPU = GPU.getFirstAvailable(maxMemory=0.9, maxLoad=0.8) 46 | print('First available GPU id (memory < 90%, load < 80%):'), 47 | print(firstGPU) 48 | 49 | # Get the first available GPU, where processing is less than 1% 50 | firstGPU = GPU.getFirstAvailable(attempts=5, interval=5, maxLoad=0.01, verbose=True) 51 | print('First available GPU id (load < 1%):'), 52 | print(firstGPU) 53 | # NOTE: If all your GPUs currently have a load larger than 1%, this step will 54 | # fail. It's not a bug! It is intended to do so, if it does not find an available GPU. 55 | 56 | # Get the first available GPU, where memory usage is less than 1% 57 | firstGPU = GPU.getFirstAvailable(attempts=5, interval=5, maxMemory=0.01, verbose=True) 58 | print('First available GPU id (memory < 1%):'), 59 | print(firstGPU) 60 | # NOTE: If all your GPUs currently have a memory consumption larger than 1%, 61 | # this step will fail. It's not a bug! It is intended to do so, if it does not 62 | # find an available GPU. 
63 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 anderskm 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE.txt 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GPUtil 2 | `GPUtil` is a Python module for getting the GPU status from NVIDIA GPUs using `nvidia-smi`. 3 | `GPUtil` locates all GPUs on the computer, determines their availability and returns an ordered list of available GPUs. 
4 | Availability is based upon the current memory consumption and load of each GPU. 5 | The module is written with GPU selection for Deep Learning in mind, but it is not task/library specific and it can be applied to any task, where it may be useful to identify available GPUs. 6 | 7 | **Table of Contents** 8 | 9 | 1. [Requirements](#requirements) 10 | 1. [Installation](#installation) 11 | 1. [Usage](#usage) 12 | 1. [Main functions](#main-functions) 13 | 1. [Helper functions](#helper-functions) 14 | 1. [Examples](#examples) 15 | 1. [Select first available GPU in Caffe](#select-first-available-gpu-in-caffe) 16 | 1. [Occupy only 1 GPU in TensorFlow](#occupy-only-1-gpu-in-tensorflow) 17 | 1. [Monitor GPU in a separate thread](#monitor-gpu-in-a-separate-thread) 18 | 1. [License](#license) 19 | 20 | ## Requirements 21 | NVIDIA GPU with latest NVIDIA driver installed. 22 | GPUtil uses the program `nvidia-smi` to get the GPU status of all available NVIDIA GPUs. `nvidia-smi` should be installed automatically, when you install your NVIDIA driver. 23 | 24 | Supports both Python 2.X and 3.X. 25 | 26 | Python libraries: 27 | * subprocess ([The Python Standard Library](https://docs.python.org/3/library/subprocess.html)) 28 | * distutils ([The Python Standard Library](https://docs.python.org/3/library/distutils.html)) 29 | * math ([The Python Standard Library](https://docs.python.org/3/library/math.html)) 30 | * random ([The Python Standard Library](https://docs.python.org/3/library/random.html)) 31 | * time ([The Python Standard Library](https://docs.python.org/3/library/time.html)) 32 | * os ([The Python Standard Library](https://docs.python.org/3/library/os.html)) 33 | * sys ([The Python Standard Library](https://docs.python.org/3/library/sys.html)) 34 | * platform ([The Python Standard Library](https://docs.python.org/3/library/platform.html)) 35 | 36 | Tested on CUDA driver version 390.77 with Python 2.7 and 3.5. 37 | 38 | ## Installation 39 | 40 | 1. 
Open a terminal (Ctrl+Shift+T) 41 | 2. Type `pip install gputil` 42 | 3. Test the installation 43 | 1. Open a terminal in a folder other than the GPUtil folder 44 | 2. Start a python console by typing `python` in the terminal 45 | 3. In the newly opened python console, type: 46 | ```python 47 | import GPUtil 48 | GPUtil.showUtilization() 49 | ``` 50 | 4. Your output should look something like the following, depending on your number of GPUs and their current usage: 51 | ``` 52 | ID GPU MEM 53 | -------------- 54 | 0 0% 0% 55 | ``` 56 | 57 | ### Old way of installation 58 | 59 | 1. Download or clone repository to your computer 60 | 2. Add GPUtil folder to ~/.bashrc 61 | 1. Open a new terminal (Press Ctrl+Alt+T) 62 | 2. Open bashrc: 63 | ``` 64 | gedit ~/.bashrc 65 | ``` 66 | 3. Add your GPUtil folder to the environment variable `PYTHONPATH` (replace `` with your folder path): 67 | ``` 68 | export PYTHONPATH="$PYTHONPATH:" 69 | 70 | Example: 71 | export PYTHONPATH="$PYTHONPATH:/home/anderskm/github/gputil" 72 | ``` 73 | 4. Save ~/.bashrc and close gedit 74 | 5. Restart your terminal 75 | 1. Test the installation 76 | 1. Open a terminal in a folder other than the GPUtil folder 77 | 2. Start a python console by typing `python` in the terminal 78 | 3. In the newly opened python console, type: 79 | ```python 80 | import GPUtil 81 | GPUtil.showUtilization() 82 | ``` 83 | 4. Your output should look something like the following, depending on your number of GPUs and their current usage: 84 | ``` 85 | ID GPU MEM 86 | -------------- 87 | 0 0% 0% 88 | ``` 89 | 90 | ## Usage 91 | 92 | To include `GPUtil` in your Python code, all you have to do is include it at the beginning of your script: 93 | 94 | ```python 95 | import GPUtil 96 | ``` 97 | 98 | Once included all functions are available. The functions along with a short description of inputs, outputs and their functionality can be found in the following two sections. 
99 | 100 | ### Main functions 101 | 102 | ```python 103 | deviceIDs = GPUtil.getAvailable(order = 'first', limit = 1, maxLoad = 0.5, maxMemory = 0.5, includeNan=False, excludeID=[], excludeUUID=[]) 104 | ``` 105 | Returns a list of ids of available GPUs. Availability is determined based on current memory usage and load. The order, maximum number of devices, their maximum load and maximum memory consumption are determined by the input arguments. 106 | 107 | * Inputs 108 | * `order` - Determines the order in which the available GPU device ids are returned. `order` should be specified as one of the following strings: 109 | * `'first'` - orders available GPU device ids by ascending id (**default**) 110 | * `'last'` - orders available GPU device ids by descending id 111 | * `'random'` - orders the available GPU device ids randomly 112 | * `'load'` - orders the available GPU device ids by ascending load 113 | * `'memory'` - orders the available GPU device ids by ascending memory usage 114 | * `limit` - limits the number of GPU device ids returned to the specified number. Must be a positive integer. (**default = 1**) 115 | * `maxLoad` - Maximum current relative load for a GPU to be considered available. GPUs with a load larger than `maxLoad` are not returned. (**default = 0.5**) 116 | * `maxMemory` - Maximum current relative memory usage for a GPU to be considered available. GPUs with a current memory usage larger than `maxMemory` are not returned. (**default = 0.5**) 117 | * `includeNan` - True/false flag indicating whether to include GPUs where either load or memory usage is NaN (indicating usage could not be retrieved). (**default = False**) 118 | * `excludeID` - List of IDs, which should be excluded from the list of available GPUs. See `GPU` class description. (**default = []**) 119 | * `excludeUUID` - Same as `excludeID` except it uses the UUID. (**default = []**) 120 | * Outputs 121 | * deviceIDs - list of all available GPU device ids. 
A GPU is considered available, if the current load and memory usage is less than `maxLoad` and `maxMemory`, respectively. The list is ordered according to `order`. The maximum number of returned device ids is limited by `limit`. 122 | 123 | ```python 124 | deviceID = GPUtil.getFirstAvailable(order = 'first', maxLoad=0.5, maxMemory=0.5, attempts=1, interval=900, verbose=False) 125 | ``` 126 | Returns the first available GPU. Availability is determined based on current memory usage and load, and the ordering is determined by the specified order. 127 | If no available GPU is found, an error is thrown. 128 | When using the default values, it is the same as `getAvailable(order = 'first', limit = 1, maxLoad = 0.5, maxMemory = 0.5)` 129 | 130 | * Inputs 131 | * `order` - See the description for `GPUtil.getAvailable(...)` 132 | * `maxLoad` - Maximum current relative load for a GPU to be considered available. GPUs with a load larger than `maxLoad` are not returned. (**default = 0.5**) 133 | * `maxMemory` - Maximum current relative memory usage for a GPU to be considered available. GPUs with a current memory usage larger than `maxMemory` are not returned. (**default = 0.5**) 134 | * `attempts` - Number of attempts the function should make before giving up finding an available GPU. (**default = 1**) 135 | * `interval` - Interval in seconds between each attempt to find an available GPU. (**default = 900** --> 15 mins) 136 | * `verbose` - If `True`, prints the attempt number before each attempt and the GPU id if an available GPU is found. 137 | * `includeNan` - See the description for `GPUtil.getAvailable(...)`. (**default = False**) 138 | * `excludeID` - See the description for `GPUtil.getAvailable(...)`. (**default = []**) 139 | * `excludeUUID` - See the description for `GPUtil.getAvailable(...)`. (**default = []**) 140 | * Outputs 141 | * deviceID - list with 1 element containing the first available GPU device id. 
A GPU is considered available, if the current load and memory usage is less than `maxLoad` and `maxMemory`, respectively. The order and limit are fixed to `'first'` and `1`, respectively. 142 | 143 | 144 | ```python 145 | GPUtil.showUtilization(all=False, attrList=None, useOldCode=False) 146 | ``` 147 | Prints the current status (id, memory usage, uuid load) of all GPUs 148 | * Inputs 149 | * `all` - True/false flag indicating if all info on the GPUs should be shown. Overwrites `attrList`. 150 | * `attrList` - List of lists of `GPU` attributes to display. See code for more information/example. 151 | * `useOldCode` - True/false flag indicating if the old code to display GPU utilization should be used. 152 | * Outputs 153 | * _None_ 154 | 155 | ### Helper functions 156 | ```python 157 | class GPU 158 | ``` 159 | Helper class handle the attributes of each GPU. Quoted descriptions are copied from corresponding descriptions by `nvidia-smi`. 160 | * Attributes for each `GPU` 161 | * `id` - "Zero based index of the GPU. Can change at each boot." 162 | * `uuid` - "This value is the globally unique immutable alphanumeric identifier of the GPU. It does not correspond to any physical label on the board. Does not change across reboots." 163 | * `load` - Relative GPU load. 0 to 1 (100%, full load). "Percent of time over the past sample period during which one or more kernels was executing on the GPU. The sample period may be between 1 second and 1/6 second depending on the product." 164 | * `memoryUtil` - Relative memory usage from 0 to 1 (100%, full usage). "Percent of time over the past sample period during which global (device) memory was being read or written. The sample period may be between 1 second and 1/6 second depending on the product." 165 | * `memoryTotal` - "Total installed GPU memory." 166 | * `memoryUsed` - "Total GPU memory allocated by active contexts." 167 | * `memoryFree` - "Total free GPU memory." 
168 | * `driver` - "The version of the installed NVIDIA display driver." 169 | * `name` - "The official product name of the GPU." 170 | * `serial` - This number matches the serial number physically printed on each board. It is a globally unique immutable alphanumeric value. 171 | * `display_mode` - "A flag that indicates whether a physical display (e.g. monitor) is currently connected to any of the GPU's connectors. "Enabled" indicates an attached display. "Disabled" indicates otherwise." 172 | * `display_active` - "A flag that indicates whether a display is initialized on the GPU's (e.g. memory is allocated on the device for display). Display can be active even when no monitor is physically attached. "Enabled" indicates an active display. "Disabled" indicates otherwise." 173 | 174 | ```python 175 | GPUs = GPUtil.getGPUs() 176 | ``` 177 | * Inputs 178 | * _None_ 179 | * Outputs 180 | * `GPUs` - list of all GPUs. Each `GPU` corresponds to one GPU in the computer and contains a device id, relative load and relative memory usage. 181 | 182 | ```python 183 | GPUavailability = GPUtil.getAvailability(GPUs, maxLoad = 0.5, maxMemory = 0.5, includeNan=False, excludeID=[], excludeUUID=[]) 184 | ``` 185 | Given a list of `GPUs` (see `GPUtil.getGPUs()`), return a equally sized list of ones and zeroes indicating which corresponding GPUs are available. 186 | 187 | * Inputs 188 | * `GPUs` - List of `GPUs`. See `GPUtil.getGPUs()` 189 | * `maxLoad` - Maximum current relative load for a GPU to be considered available. GPUs with a load larger than `maxLoad` is not returned. (**default = 0.5**) 190 | * `maxMemory` - Maximum current relative memory usage for a GPU to be considered available. GPUs with a current memory usage larger than `maxMemory` is not returned. (**default = 0.5**) 191 | * `includeNan` - See the description for `GPUtil.getAvailable(...)`. (**default = False**) 192 | * `excludeID` - See the description for `GPUtil.getAvailable(...)`. 
(**default = []**) 193 | * `excludeUUID` - See the description for `GPUtil.getAvailable(...)`. (**default = []**) 194 | * Outputs 195 | * GPUavailability - binary list indicating if `GPUs` are available or not. A GPU is considered available if the current load and memory usage are less than `maxLoad` and `maxMemory`, respectively. 196 | 197 | 198 | See [demo_GPUtil.py](https://github.com/anderskm/gputil/blob/master/demo_GPUtil.py) for examples and more details. 199 | 200 | ## Examples 201 | 202 | 203 | ### Select first available GPU in Caffe 204 | In the Deep Learning library [Caffe](http://caffe.berkeleyvision.org/), the user can switch between using the CPU or GPU through their Python interface. 205 | This is done by calling the methods `caffe.set_mode_cpu()` and `caffe.set_mode_gpu()`, respectively. 206 | Below is a minimum working example for selecting the first available GPU with GPUtil to run a Caffe network. 207 | 208 | ```python 209 | # Import os, caffe and GPUtil 210 | import os 211 | import caffe 212 | import GPUtil 213 | # Set CUDA_DEVICE_ORDER so the IDs assigned by CUDA match those from nvidia-smi 214 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 215 | 216 | # Get the first available GPU 217 | DEVICE_ID_LIST = GPUtil.getFirstAvailable() 218 | DEVICE_ID = DEVICE_ID_LIST[0] # grab first element from list 219 | 220 | # Select GPU mode 221 | caffe.set_mode_gpu() 222 | # Select GPU id 223 | caffe.set_device(DEVICE_ID) 224 | 225 | # Initialize your network here 226 | 227 | ``` 228 | 229 | **Note:** At the time of writing this example, the Caffe Python wrapper only supports 1 GPU, although the underlying code supports multiple GPUs. 230 | Calling Caffe directly from the terminal allows for using multiple GPUs. 231 | 232 | ### Occupy only 1 GPU in TensorFlow 233 | By default, [TensorFlow](https://www.tensorflow.org/) will occupy all available GPUs when using a gpu as a device (e.g. `tf.device('/gpu:0')`). 
234 | By setting the environment variable `CUDA_VISIBLE_DEVICES`, the user can mask which GPUs should be visible to TensorFlow via CUDA (See [CUDA_VISIBLE_DEVICES - Masking GPUs](http://acceleware.com/blog/cudavisibledevices-masking-gpus)). Using GPUtil.py, the CUDA_VISIBLE_DEVICES can be set programmatically based on the available GPUs. 235 | Below is a minimum working example of how to occupy only 1 GPU in TensorFlow using GPUtil. 236 | To run the code, copy it into a new python file (e.g. `demo_tensorflow_gputil.py`) and run it (e.g. enter `python demo_tensorflow_gputil.py` in a terminal). 237 | 238 | **Note:** Even if you set the device you run your code on to a CPU, TensorFlow will occupy all available GPUs. To avoid this, all GPUs can be hidden from TensorFlow with `os.environ["CUDA_VISIBLE_DEVICES"] = ''`. 239 | 240 | ```python 241 | # Import os to set the environment variable CUDA_VISIBLE_DEVICES 242 | import os 243 | import tensorflow as tf 244 | import GPUtil 245 | 246 | # Set CUDA_DEVICE_ORDER so the IDs assigned by CUDA match those from nvidia-smi 247 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 248 | 249 | # Get the first available GPU 250 | DEVICE_ID_LIST = GPUtil.getFirstAvailable() 251 | DEVICE_ID = DEVICE_ID_LIST[0] # grab first element from list 252 | 253 | # Set CUDA_VISIBLE_DEVICES to mask out all other GPUs than the first available device id 254 | os.environ["CUDA_VISIBLE_DEVICES"] = str(DEVICE_ID) 255 | 256 | # Since all other GPUs are masked out, the first available GPU will now be identified as GPU:0 257 | device = '/gpu:0' 258 | print('Device ID (unmasked): ' + str(DEVICE_ID)) 259 | print('Device ID (masked): ' + str(0)) 260 | 261 | # Run a minimum working example on the selected GPU 262 | # Start a session 263 | with tf.Session() as sess: 264 | # Select the device 265 | with tf.device(device): 266 | # Declare two numbers and add them together in TensorFlow 267 | a = tf.constant(12) 268 | b = tf.constant(30) 269 | result = sess.run(a+b) 
270 | print('a+b=' + str(result)) 271 | 272 | ``` 273 | 274 | Your output should look something like the code block below. Notice how only one of the GPUs are found and created as a tensorflow device. 275 | 276 | ``` 277 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcublas.so locally 278 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcudnn.so locally 279 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcufft.so locally 280 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcuda.so.1 locally 281 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcurand.so locally 282 | Device: /gpu:0 283 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:885] Found device 0 with properties: 284 | name: TITAN X (Pascal) 285 | major: 6 minor: 1 memoryClockRate (GHz) 1.531 286 | pciBusID 0000:02:00.0 287 | Total memory: 11.90GiB 288 | Free memory: 11.76GiB 289 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:906] DMA: 0 290 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:916] 0: Y 291 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:975] Creating TensorFlow device (/gpu:0) -> (device: 0, name: TITAN X (Pascal), pci bus id: 0000:02:00.0) 292 | a+b=42 293 | 294 | ``` 295 | Comment the `os.environ["CUDA_VISIBLE_DEVICES"] = str(DEVICE_ID)` line and compare the two outputs. 296 | Depending on your number of GPUs, your output should look something like code block below. 297 | Notice, how all 4 GPUs are being found and created as a tensorflow device, whereas when `CUDA_VISIBLE_DEVICES` was set, only 1 GPU was found and created. 
298 | 299 | ``` 300 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcublas.so locally 301 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcudnn.so locally 302 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcufft.so locally 303 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcuda.so.1 locally 304 | I tensorflow/stream_executor/dso_loader.cc:128] successfully opened CUDA library libcurand.so locally 305 | Device: /gpu:0 306 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:885] Found device 0 with properties: 307 | name: TITAN X (Pascal) 308 | major: 6 minor: 1 memoryClockRate (GHz) 1.531 309 | pciBusID 0000:02:00.0 310 | Total memory: 11.90GiB 311 | Free memory: 11.76GiB 312 | W tensorflow/stream_executor/cuda/cuda_driver.cc:590] creating context when one is currently active; existing: 0x2c8e400 313 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:885] Found device 1 with properties: 314 | name: TITAN X (Pascal) 315 | major: 6 minor: 1 memoryClockRate (GHz) 1.531 316 | pciBusID 0000:03:00.0 317 | Total memory: 11.90GiB 318 | Free memory: 11.76GiB 319 | W tensorflow/stream_executor/cuda/cuda_driver.cc:590] creating context when one is currently active; existing: 0x2c92040 320 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:885] Found device 2 with properties: 321 | name: TITAN X (Pascal) 322 | major: 6 minor: 1 memoryClockRate (GHz) 1.531 323 | pciBusID 0000:83:00.0 324 | Total memory: 11.90GiB 325 | Free memory: 11.76GiB 326 | W tensorflow/stream_executor/cuda/cuda_driver.cc:590] creating context when one is currently active; existing: 0x2c95d90 327 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:885] Found device 3 with properties: 328 | name: TITAN X (Pascal) 329 | major: 6 minor: 1 memoryClockRate (GHz) 1.531 330 | pciBusID 0000:84:00.0 331 | Total memory: 11.90GiB 332 | Free memory: 11.76GiB 333 
| I tensorflow/core/common_runtime/gpu/gpu_device.cc:777] Peer access not supported between device ordinals 0 and 2 334 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:777] Peer access not supported between device ordinals 0 and 3 335 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:777] Peer access not supported between device ordinals 1 and 2 336 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:777] Peer access not supported between device ordinals 1 and 3 337 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:777] Peer access not supported between device ordinals 2 and 0 338 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:777] Peer access not supported between device ordinals 2 and 1 339 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:777] Peer access not supported between device ordinals 3 and 0 340 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:777] Peer access not supported between device ordinals 3 and 1 341 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:906] DMA: 0 1 2 3 342 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:916] 0: Y Y N N 343 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:916] 1: Y Y N N 344 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:916] 2: N N Y Y 345 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:916] 3: N N Y Y 346 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:975] Creating TensorFlow device (/gpu:0) -> (device: 0, name: TITAN X (Pascal), pci bus id: 0000:02:00.0) 347 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:975] Creating TensorFlow device (/gpu:1) -> (device: 1, name: TITAN X (Pascal), pci bus id: 0000:03:00.0) 348 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:975] Creating TensorFlow device (/gpu:2) -> (device: 2, name: TITAN X (Pascal), pci bus id: 0000:83:00.0) 349 | I tensorflow/core/common_runtime/gpu/gpu_device.cc:975] Creating TensorFlow device (/gpu:3) -> (device: 3, name: TITAN X (Pascal), pci bus id: 0000:84:00.0) 350 | a+b=42 351 | ``` 
352 | 353 | ### Monitor GPU in a separate thread 354 | If using GPUtil to monitor GPUs during training, it may show 0% utilization. A way around this is to use a separate monitoring thread. 355 | ```python 356 | import GPUtil 357 | from threading import Thread 358 | import time 359 | 360 | class Monitor(Thread): 361 | def __init__(self, delay): 362 | super(Monitor, self).__init__() 363 | self.stopped = False 364 | self.delay = delay # Time between calls to GPUtil 365 | self.start() 366 | 367 | def run(self): 368 | while not self.stopped: 369 | GPUtil.showUtilization() 370 | time.sleep(self.delay) 371 | 372 | def stop(self): 373 | self.stopped = True 374 | 375 | # Instantiate monitor with a 10-second delay between updates 376 | monitor = Monitor(10) 377 | 378 | # Train, etc. 379 | 380 | # Close monitor 381 | monitor.stop() 382 | ``` 383 | 384 | ## License 385 | See [LICENSE](https://github.com/anderskm/gputil/blob/master/LICENSE.txt) 386 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md 3 | license_file = LICENSE.txt 4 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | 3 | setup( 4 | name = 'GPUtil', 5 | packages = ['GPUtil'], 6 | version = '1.4.0', 7 | description = 'GPUtil is a Python module for getting the GPU status from NVIDA GPUs using nvidia-smi.', 8 | author = 'Anders Krogh Mortensen', 9 | author_email = 'anderskroghm@gmail.com', 10 | url = 'https://github.com/anderskm/gputil', 11 | download_url = 'https://github.com/anderskm/gputil/tarball/v1.4.0', 12 | keywords = ['gpu','utilization','load','memory','available','usage','free','select','nvidia'], 13 | classifiers = [], 14 | license = 'MIT', 15 | ) 16 | 
--------------------------------------------------------------------------------