├── MANIFEST.in ├── .gitignore ├── setup.py ├── LICENSE.txt ├── README.rst ├── npmat.py └── gnumpy.py /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | include *.rst 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | dist/ 3 | gnumpy.egg-info/ 4 | include/ 5 | lib/ 6 | local/ 7 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | try: 2 | from setuptools import setup 3 | except ImportError: 4 | from distutils.core import setup 5 | import os 6 | 7 | here = os.path.abspath(os.path.dirname(__file__)) 8 | README = open(os.path.join(here, 'README.rst')).read() 9 | 10 | setup(name='gnumpy', 11 | version='0.3dev', 12 | description="Gnumpy is a simple Python module that interfaces in a way " 13 | "almost identical to numpy, but does its computations on your " 14 | "computer's GPU, using Cudamat.", 15 | long_description=README, 16 | author='Tijmen Tieleman', 17 | license='BSD-derived (see LICENSE.txt)', 18 | url='http://www.cs.toronto.edu/~tijmen/gnumpy.html', 19 | py_modules=['gnumpy', 'npmat'], 20 | ) 21 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The gnumpy.py module is supplied under this license: 2 | 3 | Copyright (c) 2010-2011 Tijmen Tieleman 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit 
persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | 23 | If you use Gnumpy for scientific work that gets published, you should include 24 | in that publication a citation of the technical report that describes Gnumpy. 25 | That report can be found at http://www.cs.toronto.edu/~tijmen/gnumpyTr.pdf 26 | 27 | The npmat.py module is Copyright (c) Ilya Sutskever 28 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | [This document is a copy of the original. `The latest version is 2 | available on Tijmen Tieleman's 3 | homepage. `_] 4 | 5 | Gnumpy is free software, but if you use it in scientific work that 6 | gets published, you should cite `this tech report 7 | `_ in your 8 | publication. 9 | 10 | Documentation: `here 11 | `_. 12 | 13 | Do you want to have both the compute power of GPU's and the 14 | programming convenience of Python numpy? Gnumpy + Cudamat will bring 15 | you that. 16 | 17 | Gnumpy is a simple Python module that interfaces in a way almost 18 | identical to numpy, but does its computations on your computer's 19 | GPU. See `this example 20 | `_, training an 21 | RBM using Gnumpy. 
22 | 23 | Gnumpy runs on top of, and therefore requires, the excellent `cudamat 24 | `_ library, written by `Vlad Mnih 25 | `_. 26 | 27 | Gnumpy can run in simulation mode: everything happens on the CPU, but 28 | the interface is the same. This can be helpful if you like to write 29 | your programs on your GPU-less laptop before running them on a 30 | GPU-equipped machine. It also allows you to easily test what 31 | performance gain you get from using a GPU. The simulation mode 32 | requires `npmat `_, written 33 | by `Ilya Sutskever `_. [npmat is 34 | included in this distribution.] 35 | 36 | Gnumpy is licensed with a BSD-style license (i.e. it's completely free 37 | to use for everyone, also as a component in commercial software), with 38 | one added note: if you use it for scientific work that gets published, 39 | you must include reference to the Gnumpy tech report in your 40 | publication. For details of the license, see the top of gnumpy.py. 41 | 42 | Recent changes: 43 | 44 | - 2012-07-25: Bugfix. gnumpy.dot(x, x), when x is a 1-dimensional array, didn't work but now works. 45 | - 2011-06-06: gnumpy.dot() now takes arrays of ndim>2. 46 | - 2011-04-19: Bugfix: several bugs involving zero size arrays were fixed. 47 | - 2011-04-15: Bugfix. "x=gnumpy.zeros(10); x[array([])] = garray([])" didn't work as it should. Now it does. 48 | - 2011-03-24: Added gnumpy.outer(). 49 | - 2011-03-15: The ability to check for infs and nans automatically has been added to Gnumpy. 50 | - 2010-07-19: Cudamat now enables fast indexing with arrays of indices. Download the newest Cudamat to have fast indexing with arrays in Gnumpy. 51 | - 2010-07-08: Renamed the project to Gnumpy. It used to be called Gpunnumpy. 
52 | -------------------------------------------------------------------------------- /npmat.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import os, pdb, time, warnings 4 | import numpy as np 5 | 6 | __DTYPE__ = np.float64 7 | 8 | 9 | def dummy(): 10 | return CUDAMatrix(np.zeros((1, 1))) 11 | 12 | def deprecated(func): 13 | """This is a decorator which can be used to mark functions 14 | as deprecated. It will result in a warning being emmitted 15 | when the function is used.""" 16 | 17 | def newFunc(*args, **kwargs): 18 | warnings.warn("Call to deprecated function %s." % func.__name__, 19 | category=DeprecationWarning) 20 | return func(*args, **kwargs) 21 | newFunc.__name__ = func.__name__ 22 | newFunc.__doc__ = func.__doc__ 23 | newFunc.__dict__.update(func.__dict__) 24 | return newFunc 25 | 26 | #from cudamat import CUDAMatException 27 | class CUDAMatException(Exception): 28 | pass 29 | 30 | 31 | 32 | IncompatibleDimensionsException = CUDAMatException("Incompatible matrix dimensions.") 33 | 34 | InvalidConfig = CUDAMatException("Invalid Configuration Error (i.e., a dim of the array must be smaller than 2**16.") 35 | ## TODO: Figure out which functions produce an invalid config error. These are those who allocate a thread per col/row/elem. 36 | ## Those who allocate a bunch of rows per thread, like mult, add, sub, etc, should be immune to the invalid 37 | ## configuration error. PS: this error occurs on the real cudamat, which is why it happens. 38 | ## Sum/Max/Cumsum 39 | MAX_DIM = 2**16 40 | 41 | 42 | class CUDAMatrix(object): 43 | """ 44 | A CUDAMatrix object represents a matrix of single precision floating point 45 | numbers on a GPU. 
46 | """ 47 | 48 | def __init__(self, array, ref=True): 49 | if ref: 50 | self.numpy_array = reformat(array) 51 | else: 52 | self.numpy_array = array 53 | assert self.numpy_array.ndim == 2 54 | self.trans = False 55 | 56 | def __del__(self): 57 | pass 58 | 59 | @staticmethod 60 | def init_random(seed): 61 | import numpy.random as random 62 | random.seed(seed) 63 | 64 | 65 | 66 | @property 67 | def num_elems(self): 68 | return self.numpy_array.size 69 | 70 | @property 71 | def shape(self): 72 | return self.numpy_array.shape 73 | 74 | def cheap_transpose(self): 75 | return CUDAMatrix(self.reshape((self.shape[1], self.shape[0]))) 76 | 77 | def reshape(self, shape): 78 | assert shape[0]*shape[1] == self.shape[0]*self.shape[1] 79 | #self.numpy_array.resize(shape) 80 | #self.numpy_array = self.numpy_array.reshape(shape, order='F') 81 | self.numpy_array.resize(*shape) 82 | return self 83 | 84 | 85 | def copy(self): 86 | return empty().assign(self) 87 | 88 | 89 | def set_np_array(self, X): 90 | assert X.shape == self.shape 91 | self.numpy_array[:] = X 92 | self.copy_to_device() 93 | return self 94 | 95 | 96 | 97 | def zero_copy(self): 98 | return self.copy().assign(0) 99 | 100 | 101 | def resize(self, shape): 102 | 103 | if self.shape != shape: 104 | 105 | print 'CUDAMatrix: resize (%s -> %s)' % (self.shape, shape) 106 | #self.numpy_array = np.resize(self.numpy_array, shape).astype(__DTYPE__) 107 | self.numpy_array.resize(shape) 108 | self.numpy_array[:] = 0 109 | 110 | 111 | return self 112 | 113 | @property 114 | def T(self): 115 | return CUDAMatrix(self.numpy_array.T) 116 | 117 | @property 118 | def mat(self): 119 | return self.numpy_array 120 | 121 | 122 | @deprecated 123 | def set_shape(self, shape): 124 | return self.resize(shape) 125 | 126 | 127 | def asarray(self): 128 | """ 129 | Copies the matrix to an ndarray on the CPU and returns it. 
130 | """ 131 | 132 | #return reformat(self.numpy_array.copy()) 133 | return self.numpy_array 134 | 135 | def copy_to_device(self): 136 | """ 137 | Copy the matrix to the GPU. 138 | """ 139 | 140 | pass 141 | 142 | 143 | 144 | def select_columns(self, indices, target): 145 | """ 146 | copies some columns of self into target. 147 | must be a row vector. Its elements are float32's representing integers, e.g. "34.0" means the integer "34". 148 | after this call, for all r,c, target[r,c]=self[r,indices[c]]. 149 | This returns target. 150 | Negative indices are interpreted in the usual Python way: all elements of had better be in the range [-self.shape[1], self.shape[1]-1]. 151 | This does bounds checking, but out of bounds indices do not raise an exception (because the programmer was lazy). Instead, they result in NaN values in . 152 | """ 153 | 154 | assert target.shape[0]==self.shape[0] 155 | assert indices.shape[0]==1 156 | assert indices.shape[1] == target.shape[1] 157 | 158 | for c in range(target.shape[1]): 159 | try: 160 | target.numpy_array[:,c] = self.numpy_array[:, int(indices.numpy_array.ravel()[c])] 161 | except IndexError: 162 | target.numpy_array[:,c] = np.nan 163 | return target 164 | 165 | def set_selected_columns(self, indices, source): 166 | """ 167 | copies all columns of source into some columns of self. 168 | must be a row vector. Its elements are float32's representing 169 | integers, e.g. "34.0" means the integer "34". after this call, for all 170 | r,c, self[r,indices[c]]=source[r,c]. This returns self. 171 | Negative indices are interpreted in the usual Python way: all elements 172 | of had better be in the range [-self.shape[1], self.shape[1]-1]. 173 | This does bounds checking, but out of bounds indices do not raise an 174 | exception (because the programmer was lazy). Instead, they result in NaN 175 | values in . 
176 | """ 177 | 178 | assert self.shape[0]==source.shape[0] 179 | assert indices.shape[0]==1 180 | assert indices.shape[1]==source.shape[1] 181 | 182 | for c in range(source.shape[1]): 183 | try: 184 | self.numpy_array[:,int(indices.numpy_array.ravel()[c])] = source.numpy_array[:,c] 185 | except IndexError: 186 | self.numpy_array[:,int(indices.numpy_array.ravel()[c])] = np.nan 187 | return self 188 | 189 | 190 | def copy_to_host(self): 191 | """ 192 | Copy the matrix to the CPU. 193 | """ 194 | return self.asarray() 195 | 196 | 197 | def np(self): 198 | return self.copy_to_host() 199 | 200 | 201 | 202 | 203 | def assign(self, val): 204 | """Assign val to self, where val can be a scalar or a CUDAMatrix 205 | with the same dimensions as self. """ 206 | 207 | 208 | if isinstance(val, CUDAMatrix): 209 | self.resize(val.shape) 210 | self.numpy_array[:] = val.numpy_array 211 | 212 | 213 | elif isinstance(val, (int, float, __DTYPE__)): 214 | self.numpy_array[:] = val 215 | 216 | return self 217 | 218 | def free_device_memory(self): 219 | """ 220 | Free memory used up by the matrix on the GPU. 221 | """ 222 | pass 223 | 224 | 225 | def set_trans(self, is_trans): 226 | """ 227 | Set the transposedness flag to is_trans. 228 | """ 229 | if is_trans is True: 230 | self.numpy_array = self.numpy_array.T 231 | 232 | 233 | 234 | def slice(self, first_col, last_col): 235 | return CUDAMatrix(self.numpy_array[:, first_col:last_col], ref=False) 236 | 237 | def get_row_slice(self, start, end, target = None): 238 | """ 239 | Get the rows with indices start through end. If target is not provided 240 | memory for a new matrix will be allocated. 
241 | """ 242 | 243 | 244 | 245 | ans = CUDAMatrix(self.numpy_array[start:end, :].copy()) 246 | 247 | if target is not None: 248 | target.assign(ans) 249 | else: 250 | target = ans 251 | 252 | return target 253 | 254 | 255 | def set_row_slice(self, start, end, mat): 256 | try: 257 | self.numpy_array[start:end] = mat.numpy_array 258 | except ValueError: 259 | raise IncompatibleDimensionsException 260 | return self 261 | 262 | 263 | def get_col_slice(self, start, end, target = None): 264 | ## NOTE: no .copy() 265 | ans = self.slice(start, end) 266 | 267 | if target is not None: 268 | target.assign(ans) 269 | else: 270 | target = ans 271 | 272 | return target 273 | 274 | def set_col_slice(self, start, end, mat): 275 | return self.slice(start, end).assign(mat) 276 | 277 | 278 | 279 | 280 | 281 | # def select_columns(self, indices, target): 282 | # """ 283 | # Copies selected columns into a target matrix. 284 | # , , and are all cudamat matrices. 285 | # is an M by K matrix. 286 | # is of shape 1 by N. All elements x are expected to be 287 | # 0<=x is an M by N matrix that will be filled with the result. 290 | # After the operation, for all i,j, target[i, j] = self[i, int(indices[j])] 291 | # This returns . 292 | # ? idea: No bounds checking is done. 
293 | # """ 294 | # M, K = self.shape 295 | 296 | # one, N = indices.shape 297 | # assert one == 1 298 | # M_, N_ = target.shape 299 | # assert M_ == M and N == N_ 300 | 301 | # np_ints = indices.numpy_array.astype(int) 302 | 303 | # if not (np_ints.max() < K and np_ints.min() >= 0): 304 | # raise ValueError("Index out of bounds.") 305 | 306 | 307 | # target.numpy_array[:] = self.numpy_array[:, np_ints.flatten()] 308 | 309 | 310 | 311 | # return target 312 | 313 | 314 | 315 | 316 | def transpose(self, target = None): 317 | 318 | if target is None: 319 | return CUDAMatrix(self.numpy_array.T.copy()) 320 | else: 321 | target.numpy_array.resize((self.shape[1], self.shape[0])) 322 | target.numpy_array[:] = self.numpy_array.T 323 | 324 | return target 325 | 326 | 327 | def assign_transpose(self, t): 328 | return t.transpose(target = self) 329 | 330 | 331 | 332 | def fill_with_rand(self): 333 | """ 334 | Fill matrix on the GPU with random numbers drawn from the uniform 335 | distribution over the (0,1) interval. 336 | """ 337 | self.numpy_array[:] = np.random.rand(*self.shape) 338 | 339 | return self 340 | 341 | 342 | 343 | 344 | 345 | def fill_with_randn(self): 346 | """ 347 | Fill matrix on the GPU with random numbers drawn from the standard normal 348 | distribution. 349 | """ 350 | 351 | self.numpy_array[:] = np.random.randn(*self.shape) 352 | 353 | return self 354 | 355 | 356 | 357 | def add_col_vec(self, vec, target = None): 358 | """ 359 | Add vector vec to every column of the matrix. If a target is provided, 360 | it is used to store the result instead of self. 
361 | """ 362 | 363 | a, b = self.shape 364 | a_, b_ = vec.shape 365 | 366 | if not (b_ == 1 and a_ == a): 367 | raise IncompatibleDimensionsException 368 | 369 | 370 | if target is None: 371 | target = self 372 | 373 | target.resize(self.shape) 374 | 375 | target.numpy_array[:] = self.numpy_array + vec.numpy_array 376 | 377 | return target 378 | 379 | def assign_add_col_vec(self, a, b): 380 | return a.add_col_vec(b, target = self) 381 | 382 | 383 | 384 | def add_col_mult(self, vec, mult, target = None): 385 | """ 386 | Add a multiple of vector vec to every column of the matrix. If a target 387 | is provided, it is used to store the result instead of self. 388 | """ 389 | 390 | a, b = self.shape 391 | a_, b_ = vec.shape 392 | 393 | if not (b_ == 1 and a_ == a): 394 | raise IncompatibleDimensionsException 395 | 396 | 397 | if target is None: 398 | target = self 399 | 400 | target.resize(self.shape) 401 | 402 | target.numpy_array[:] = self.numpy_array + vec.numpy_array * mult 403 | 404 | return target 405 | 406 | 407 | 408 | 409 | 410 | def assign_add_col_mult(self, a, m, b): 411 | return a.add_col_vec(b, m, target = self) 412 | 413 | 414 | 415 | def add_row_vec(self, vec, target = None): 416 | """ 417 | Add vector vec to every row of the matrix. If a target is provided, 418 | it is used to store the result instead of self. 419 | """ 420 | 421 | a, b = self.shape 422 | a_, b_ = vec.shape 423 | 424 | if not (a_ == 1 and b_ == b): 425 | raise IncompatibleDimensionsException 426 | 427 | 428 | if target is None: 429 | target = self 430 | 431 | target.resize(self.shape) 432 | 433 | target.numpy_array[:] = vec.numpy_array + self.numpy_array 434 | 435 | return target 436 | 437 | 438 | 439 | def assign_add_row_vec(self, a, b): 440 | return a.add_row_vec(b, target = self) 441 | 442 | 443 | 444 | def mult_by_col(self, vec, target = None): 445 | """ 446 | Multiply vector vec into every column of the matrix. 
If a target is 447 | provided, it is used to store the result instead of self. 448 | """ 449 | 450 | 451 | a, b = self.shape 452 | a_, b_ = vec.shape 453 | 454 | if not (b_ == 1 and a_ == a): 455 | raise IncompatibleDimensionsException 456 | 457 | if target is None: 458 | target = self 459 | 460 | target.resize(self.shape) 461 | 462 | 463 | target.numpy_array[:] = vec.numpy_array * self.numpy_array 464 | 465 | 466 | return target 467 | 468 | 469 | 470 | def mult_by_row(self, vec, target = None): 471 | """ 472 | Multiply vector vec into every row of the matrix. If a target is 473 | provided, it is used to store the result instead of self. 474 | """ 475 | 476 | a, b = self.shape 477 | a_, b_ = vec.shape 478 | 479 | if not (b_ == b and a_ == 1): 480 | raise IncompatibleDimensionsException 481 | 482 | if target is None: 483 | target = self 484 | 485 | target.resize(self.shape) 486 | 487 | 488 | target.numpy_array[:] = vec.numpy_array * self.numpy_array 489 | 490 | return target 491 | 492 | 493 | 494 | 495 | 496 | def sum(self, axis, target = None): 497 | """ 498 | Sum the matrix along the given dimension, where 0 represents the leading 499 | dimension and 1 represents the non-leading dimension. If a target is 500 | not prvided, a new vector is created for storing the result. 
501 | """ 502 | 503 | 504 | 505 | if axis == 0: 506 | ans = self.numpy_array.sum(0)[np.newaxis, :] 507 | elif axis == 1: 508 | ans = self.numpy_array.sum(1)[:, np.newaxis] 509 | else: 510 | raise ValueError("axis must be only 0 or 1; instead, got %s\n", axis) 511 | 512 | ans = CUDAMatrix(ans) 513 | 514 | if target is not None: 515 | target.assign(ans) 516 | else: 517 | target = ans 518 | return target 519 | 520 | 521 | def mean(self, axis, target = None): 522 | 523 | 524 | 525 | 526 | if axis == 0: 527 | ans = self.numpy_array.mean(0)[np.newaxis, :] 528 | elif axis == 1: 529 | ans = self.numpy_array.mean(1)[:, np.newaxis] 530 | else: 531 | raise ValueError("axis must be only 0 or 1; instead, got %s\n", axis) 532 | 533 | ans = CUDAMatrix(ans) 534 | 535 | if target is not None: 536 | target.assign(ans) 537 | else: 538 | target = ans 539 | return target 540 | 541 | 542 | 543 | 544 | 545 | def assign_sum(self, mat, axis): 546 | return mat.sum(axis, target = self) 547 | 548 | def assign_mean(self, mat, axis): 549 | return mat.mean(axis, target = self) 550 | 551 | 552 | 553 | def add_sums(self, mat, axis, mult = 1.): 554 | """ 555 | Add a multiple of the sums of the matrix mat along the given dimension 556 | to self. 557 | """ 558 | 559 | 560 | 561 | if self.numpy_array.shape != self.mat.shape: 562 | raise IncompatibleDimensionsException 563 | 564 | sum = mat.sum(axis) 565 | 566 | sum.numpy_array *= mult 567 | 568 | if axis == 0: 569 | self.add_row_vec(sum) 570 | elif axis == 1: 571 | self.add_col_vec(sum) 572 | 573 | return self 574 | 575 | 576 | def less_than(self, val, target = None): 577 | """ 578 | Perform the operation target = 1. * (self < val), where val can be a matrix or a scalar. 
579 | """ 580 | 581 | 582 | if target is None: 583 | target = self 584 | 585 | target.resize(self.shape) 586 | 587 | if isinstance(val, (int, float, __DTYPE__)): 588 | target.numpy_array[:] = self.numpy_array < val 589 | 590 | else: 591 | if val.shape != self.shape: 592 | raise IncompatibleDimensionsException 593 | 594 | 595 | target.numpy_array[:] = (self.numpy_array < val.numpy_array).astype(__DTYPE__) 596 | 597 | return target 598 | 599 | def assign_less_than(self, mat, val): 600 | return mat.less_than(val, self) 601 | 602 | 603 | 604 | 605 | def greater_than(self, val, target = None): 606 | """ 607 | Perform the operation target = 1. * (self > val), where val can be a matrix or a scalar. 608 | """ 609 | 610 | 611 | if target is None: 612 | target = self 613 | 614 | target.resize(self.shape) 615 | 616 | if isinstance(val, (int, float, __DTYPE__)): 617 | target.numpy_array[:] = (self.numpy_array > val).astype(__DTYPE__) 618 | else: 619 | if val.shape != self.shape: 620 | raise IncompatibleDimensionsException 621 | 622 | 623 | target.numpy_array[:] = (self.numpy_array > val.numpy_array).astype(__DTYPE__) 624 | 625 | return target 626 | 627 | 628 | def assign_greater_than(self, mat, val): 629 | return mat.greater_than(val, self) 630 | 631 | 632 | 633 | 634 | def max(self, axis, target = None, transpose_aux=None): 635 | """ 636 | Find the maximum value along the given dimension, where 0 represents the 637 | leading dimension and 1 represents the non-leading dimension. If a target 638 | is not prvided, a new vector is created for storing the result. 
639 | """ 640 | 641 | 642 | 643 | m, n = self.shape 644 | 645 | if axis == 0: 646 | if target is None: 647 | target = empty((1, n)) 648 | 649 | target.resize((1, n)) 650 | 651 | 652 | target.numpy_array[:] = self.numpy_array.max(0) 653 | 654 | 655 | 656 | elif axis == 1: 657 | # IN theory: we are supposed to do this: 658 | 659 | # if not target: 660 | # #target = CUDAMatrix(np.empty((m, 1), dtype=np.float32, order = 'F')) 661 | # target = empty((m, 1)) 662 | # else: 663 | # target.resize((m, 1)) 664 | 665 | 666 | 667 | # err_code = _cudamat.max_by_axis(self.p_mat, target.p_mat, ct.c_int(axis)) 668 | # if err_code: 669 | # raise generate_exception(err_code) 670 | 671 | assert transpose_aux != None 672 | 673 | self.transpose(target = transpose_aux) 674 | 675 | target.reshape(target.shape[::-1]) 676 | 677 | transpose_aux.max(axis = 0, target = target) 678 | 679 | target.reshape(target.shape[::-1]) 680 | 681 | 682 | 683 | 684 | return target 685 | 686 | def assign_max(self, mat, axis, transpose_aux=None): 687 | return mat.max(axis, target = self, transpose_aux = transpose_aux) 688 | 689 | def total_max(self): 690 | row_maxes = empty((1, 1)).assign_max(self, axis = 0) 691 | return row_maxes.reshape((row_maxes.shape[1], row_maxes.shape[0])).max(axis = 0).asarray()[0,0] 692 | 693 | def total_sum(self): 694 | return self.numpy_array.sum() 695 | 696 | 697 | def sign(self, target = None): 698 | 699 | if target is None: 700 | target = empty(self.shape) 701 | 702 | target.resize(self.shape) 703 | 704 | target.numpy_array[:] = np.sign(self.numpy_array) 705 | 706 | return target 707 | 708 | 709 | def assign_sign(self, a): 710 | return a.sign(target = self) 711 | 712 | 713 | def apply_sigmoid(self, target = None): 714 | """ 715 | Apply the logistic sigmoid to each element of the matrix. 716 | """ 717 | 718 | return sigmoid(self, target) 719 | 720 | def sigmoid(self, target = None): 721 | """ 722 | Apply the logistic sigmoid to each element of the matrix. 
723 | """ 724 | 725 | return sigmoid(self, target) 726 | 727 | 728 | def assign_sigmoid(self, t): 729 | return sigmoid(t, self) 730 | 731 | 732 | def log(self, target = None): 733 | return log(self, target) 734 | 735 | def assign_log(self, t): 736 | return log(t, self) 737 | 738 | def exp(self, target = None): 739 | return exp(self, target) 740 | 741 | def assign_exp(self, t): 742 | return exp(t, self) 743 | 744 | def pow(self, p, target = None): 745 | return pow(self, p, target) 746 | 747 | def assign_pow(self, mat, p): 748 | return pow(mat, p, self) 749 | 750 | 751 | def sqrt(self, target = None): 752 | return sqrt(self, target) 753 | 754 | 755 | def assign_sqrt(self, mat): 756 | return sqrt(mat, self) 757 | 758 | 759 | def reciprocal(self, target = None): 760 | """ 761 | Find the reciprocal of each element of the matrix. 762 | """ 763 | 764 | if not target: 765 | target = self 766 | 767 | target.resize(self.shape) 768 | 769 | 770 | target.numpy_array[:] = 1./self.numpy_array[:] 771 | 772 | return target 773 | 774 | def assign_reciprocal(self, mat): 775 | return mat.reciprocal(target = self) 776 | 777 | 778 | 779 | def dot(self, mat2, target = None): 780 | """ 781 | Multiply the matrix by mat2 from the right. 782 | """ 783 | 784 | return dot(self, mat2, target) 785 | 786 | 787 | def assign_dot(self, m1, m2): 788 | m1.dot(m2, target = self) 789 | return self 790 | 791 | 792 | def add_dot(self, m1, m2): 793 | """ 794 | Add the dot product of m1 and m2 to the matrix. 795 | """ 796 | 797 | 798 | m3 = dot(m1, m2) 799 | 800 | if m3.shape != self.shape: 801 | raise IncompatibleDimensionsException 802 | 803 | self.numpy_array += m3.numpy_array 804 | 805 | 806 | return self 807 | 808 | def subtract_dot(self, m1, m2): 809 | """ 810 | Subtract the dot product of m1 and m2 from the matrix. 
811 | """ 812 | 813 | 814 | 815 | m3 = dot(m1, m2) 816 | 817 | if m3.shape != self.shape: 818 | raise IncompatibleDimensionsException 819 | 820 | self.numpy_array -= m3.numpy_array 821 | 822 | 823 | return self 824 | 825 | 826 | def add_mult(self, mat2, alpha = 1.): 827 | """ 828 | Add multiple of mat2 to the matrix. 829 | """ 830 | 831 | if mat2.shape != self.shape: 832 | raise IncompatibleDimensionsException 833 | 834 | self.numpy_array += mat2.numpy_array * alpha 835 | 836 | return self 837 | 838 | def assign_mult(self, mat2, alpha): 839 | self.resize(mat2.shape) 840 | self.assign(0) 841 | self.add_mult(mat2, alpha) 842 | return self 843 | 844 | 845 | def subtract_mult(self, mat2, alpha = 1.): 846 | """ 847 | Subtract a multiple of mat2 from the matrix. 848 | """ 849 | 850 | if mat2.shape != self.shape: 851 | raise IncompatibleDimensionsException 852 | 853 | self.numpy_array -= mat2.numpy_array * alpha 854 | 855 | return self 856 | 857 | 858 | def add(self, val, target = None): 859 | """Add val to self, where val can be a scalar or a CUDAMatrix with the 860 | same dimensions as self. """ 861 | 862 | if not target: 863 | target = self 864 | 865 | target.resize(self.shape) 866 | 867 | 868 | 869 | 870 | if isinstance(val, CUDAMatrix): 871 | if target.shape != val.shape: 872 | raise IncompatibleDimensionsException 873 | target.numpy_array[:] = self.numpy_array + val.numpy_array 874 | 875 | elif isinstance(val, (int, float, __DTYPE__)): 876 | target.numpy_array[:] = self.numpy_array + val 877 | else: 878 | raise ValueError, "Value must be of type CUDAMatrix, int, or float." 879 | 880 | 881 | 882 | return target 883 | 884 | def assign_add(self, a, b): 885 | a.add(b, target = self) 886 | return self 887 | 888 | 889 | 890 | def subtract(self, val, target = None): 891 | """Subtract val from self, where val can be a scalar or a CUDAMatrix with 892 | the same dimensions as self. 
""" 893 | 894 | if not target: 895 | target = self 896 | 897 | target.resize(self.shape) 898 | 899 | 900 | 901 | if isinstance(val, CUDAMatrix): 902 | if target.shape != val.shape: 903 | raise IncompatibleDimensionsException 904 | target.numpy_array[:] = self.numpy_array - val.numpy_array 905 | 906 | elif isinstance(val, (int, float, __DTYPE__)): 907 | target.numpy_array[:] = self.numpy_array - val 908 | else: 909 | raise ValueError, "Value must be of type CUDAMatrix, int, or float." 910 | 911 | 912 | 913 | return target 914 | 915 | 916 | 917 | def assign_subtract(self, a, b): 918 | a.subtract(b, target = self) 919 | return self 920 | 921 | 922 | 923 | 924 | def divide(self, val, target = None): 925 | """Divide self by val, where val can be a scalar or a CUDAMatrix with the 926 | same dimensions as self. """ 927 | 928 | if not target: 929 | target = self 930 | 931 | target.resize(self.shape) 932 | 933 | 934 | if isinstance(val, CUDAMatrix): 935 | if target.shape != val.shape: 936 | raise IncompatibleDimensionsException 937 | target.numpy_array[:] = self.numpy_array / val.numpy_array 938 | 939 | elif isinstance(val, (int, float, __DTYPE__)): 940 | target.numpy_array[:] = self.numpy_array / val 941 | else: 942 | raise ValueError, "Value must be of type CUDAMatrix, int, or float." 943 | 944 | 945 | 946 | return target 947 | 948 | 949 | 950 | def assign_divide(self, a, b): 951 | a.divide(b, target = self) 952 | return self 953 | 954 | 955 | 956 | def mult(self, val, target = None): 957 | """Multiply self by val, where val can be a scalar or a CUDAMatrix with 958 | the same dimensions as self. 
""" 959 | 960 | if not target: 961 | target = self 962 | 963 | target.resize(self.shape) 964 | 965 | 966 | if isinstance(val, CUDAMatrix): 967 | if target.shape != val.shape: 968 | raise IncompatibleDimensionsException 969 | target.numpy_array[:] = self.numpy_array * val.numpy_array 970 | 971 | elif isinstance(val, (int, float, __DTYPE__)): 972 | target.numpy_array[:] = self.numpy_array * val 973 | else: 974 | raise ValueError, "Value must be of type CUDAMatrix, int, or float." 975 | 976 | 977 | 978 | return target 979 | 980 | 981 | 982 | 983 | 984 | def assign_mult(self, a, b): 985 | a.mult(b, target = self) 986 | return self 987 | 988 | 989 | 990 | 991 | @deprecated 992 | def assign_scalar(self, alpha): 993 | """ 994 | Assign scalar alpha to every element of the matrix. 995 | """ 996 | self.assign(alpha) 997 | return self 998 | 999 | @deprecated 1000 | def mult_by_scalar(self, alpha, target = None): 1001 | """ 1002 | Multiply the matrix by a scalar. 1003 | """ 1004 | return self.mult(alpha, target) 1005 | 1006 | 1007 | 1008 | 1009 | @deprecated 1010 | def div_by_scalar(self, alpha, target = None): 1011 | """ 1012 | Divide the matrix by a scalar. 1013 | """ 1014 | 1015 | return self.divide(alpha, target) 1016 | 1017 | 1018 | 1019 | @deprecated 1020 | def add_scalar(self, alpha, target = None): 1021 | """ 1022 | Increment the matrix by a scalar. 1023 | """ 1024 | return self.add(alpha, target) 1025 | 1026 | 1027 | def euclid_norm(self): 1028 | return np.sqrt((self.numpy_array**2).sum()) 1029 | 1030 | 1031 | def empty(shape=None): 1032 | """ 1033 | Creates and returns a new CUDAMatrix with the given shape. 
1034 | """ 1035 | 1036 | if shape is None: 1037 | shape = (1, 1) 1038 | 1039 | return CUDAMatrix(np.empty(shape)) 1040 | 1041 | 1042 | def zeros(shape): 1043 | return empty(shape).assign(0) 1044 | 1045 | def randn(a, b): 1046 | ans = empty((a, b)).fill_with_randn() 1047 | return ans 1048 | 1049 | 1050 | 1051 | def sum(mat, axis, target = None): 1052 | """ 1053 | Sum the matrix along the given dimension, where 0 represents the leading 1054 | dimension and 1 represents the non-leading dimension. If a target is 1055 | not prvided, a new vector is created for storing the result. 1056 | """ 1057 | return mat.sum(axis, target) 1058 | 1059 | 1060 | def dot(m1, m2, target = None): 1061 | """ 1062 | Find the dot product between m1 and m2. 1063 | """ 1064 | 1065 | m = m1.shape[0] 1066 | n = m2.shape[1] 1067 | 1068 | target_shape = (m, n) 1069 | if not target: 1070 | target = empty(target_shape) 1071 | 1072 | target.resize(target_shape) 1073 | 1074 | try: 1075 | target.numpy_array[:] = np.dot(m1.numpy_array, m2.numpy_array) 1076 | except ValueError: 1077 | raise IncompatibleDimensionsException 1078 | 1079 | return target 1080 | 1081 | def vdot(m1, m2): 1082 | assert m1.shape == m2.shape 1083 | return (m1.asarray() * m2.asarray()).sum() 1084 | 1085 | 1086 | 1087 | def sigmoid(mat, target = None): 1088 | """ 1089 | Apply the logistic sigmoid to each element of the matrix mat. 1090 | """ 1091 | 1092 | 1093 | if not target: 1094 | target = mat 1095 | 1096 | target.resize(mat.shape) 1097 | 1098 | target.numpy_array[:] = 1. / (1 + np.exp(-mat.numpy_array)) 1099 | 1100 | return target 1101 | 1102 | 1103 | def tanh(mat, target = None): 1104 | """ 1105 | Apply the logistic sigmoid to each element of the matrix mat. 
1106 | """ 1107 | 1108 | 1109 | if not target: 1110 | target = mat 1111 | 1112 | target.resize(mat.shape) 1113 | 1114 | target.numpy_array[:] = np.tanh(mat.numpy_array) 1115 | 1116 | return target 1117 | 1118 | 1119 | def gammaln(mat, target = None): 1120 | 1121 | 1122 | 1123 | if not target: 1124 | target = mat 1125 | 1126 | target.resize(mat.shape) 1127 | 1128 | import scipy.special 1129 | target.numpy_array[:] = scipy.special.gammaln(mat.numpy_array) 1130 | 1131 | return target 1132 | 1133 | 1134 | 1135 | 1136 | 1137 | def abs(mat, target = None): 1138 | """ 1139 | Apply the logistic sigmoid to each element of the matrix mat. 1140 | """ 1141 | 1142 | 1143 | if not target: 1144 | target = mat 1145 | 1146 | target.resize(mat.shape) 1147 | 1148 | target.numpy_array[:] = abs(mat.numpy_array) 1149 | 1150 | return target 1151 | 1152 | 1153 | 1154 | 1155 | def log_1_plus_exp(mat, target = None): 1156 | """ 1157 | Apply log(1+exp(x)) to each element of the matrix mat. 1158 | """ 1159 | if not target: 1160 | target = mat 1161 | mask = mat.numpy_array > 0 1162 | target.numpy_array[mask] = mat.numpy_array[mask] + np.log(1+np.exp(-mat.numpy_array[mask])) 1163 | mask = np.logical_not(mask) 1164 | target.numpy_array[mask] = np.log(1+np.exp(mat.numpy_array[mask])) 1165 | return target 1166 | log_1_sum_exp = log_1_plus_exp 1167 | 1168 | def log(mat, target = None): 1169 | """ 1170 | Find the natural logarithm of each element of the matrix mat. 1171 | """ 1172 | 1173 | if not target: 1174 | target = mat 1175 | 1176 | target.resize(mat.shape) 1177 | 1178 | target.numpy_array[:] = np.log(mat.numpy_array) 1179 | 1180 | return target 1181 | 1182 | def exp(mat, target = None): 1183 | """ 1184 | Apply the exponential function to each element of the matrix mat. 
1185 | """ 1186 | 1187 | if not target: 1188 | target = mat 1189 | 1190 | target.resize(mat.shape) 1191 | 1192 | target.numpy_array[:] = np.exp(mat.numpy_array) 1193 | 1194 | return target 1195 | 1196 | 1197 | if not target: 1198 | target = mat 1199 | 1200 | target.resize(mat.shape) 1201 | 1202 | return target 1203 | 1204 | 1205 | def sqrt(mat, target = None): 1206 | """ 1207 | Compute the square root of each element of the matrix mat. 1208 | """ 1209 | 1210 | if not target: 1211 | target = mat 1212 | 1213 | target.resize(mat.shape) 1214 | 1215 | target.numpy_array[:] = np.sqrt(mat.numpy_array) 1216 | 1217 | return target 1218 | 1219 | 1220 | if not target: 1221 | target = mat 1222 | 1223 | target.resize(mat.shape) 1224 | 1225 | return target 1226 | 1227 | def pow(mat, p, target = None): 1228 | """ 1229 | Compute the 'p'th power of each element of the matrix mat. 1230 | """ 1231 | 1232 | if not target: 1233 | target = mat 1234 | 1235 | target.resize(mat.shape) 1236 | 1237 | target.numpy_array[:] = mat.numpy_array[:] ** p 1238 | 1239 | return target 1240 | 1241 | def cuda_sync_threads(): 1242 | pass 1243 | 1244 | def reformat(array): 1245 | """ 1246 | Returns array as a float32 array in FORTRAN order. 1247 | """ 1248 | return np.array(array, dtype=__DTYPE__, order='F') 1249 | 1250 | 1251 | def cuda_set_some_device(): 1252 | return 0 1253 | 1254 | def cuda_set_device(dev_id): 1255 | """ 1256 | Selects the CUDA device with the given ID. 1257 | """ 1258 | 1259 | 1260 | return 0 1261 | 1262 | def cuda_get_free_device(): 1263 | """ 1264 | Returns the ID of the first free CUDA device. 1265 | """ 1266 | return 0 1267 | 1268 | 1269 | 1270 | def cublas_init(): 1271 | """ 1272 | Initialize Cublas. 1273 | """ 1274 | 1275 | return 0 1276 | 1277 | def cublas_shutdown(): 1278 | """ 1279 | Shut down Cublas. 1280 | """ 1281 | return 0 1282 | 1283 | 1284 | # The following functions are for implementing things like coarse filters and 1285 | # models with replicated local filters. 
# At the moment they are quite slow.

def sum_superpixels(source, target, w, temp = None):
    # Fixed: `raise NotImplemented()` raised the comparison sentinel, not an
    # exception; NotImplementedError is the correct type.
    raise NotImplementedError()


def kronecker(mat1, mat2, target = None):
    # Fixed: `raise NotIMplemented` was a typo'd name that would have raised
    # NameError instead of the intended exception.
    raise NotImplementedError()


def flat_to_tiled(source, target, stride):
    raise NotImplementedError()


def tiled_to_flat(source, target, stride, temp = None):
    raise NotImplementedError()


def flat_to_tiled3(source, target, stride):
    raise NotImplementedError()


def get_item_from_each_row(source, target, inds, num_rows, num_cols):
    """
    For each row i, copy source[i, inds[i]] into target[i].  Source may be
    stored transposed as (num_cols, num_rows), or as anything reshapeable to
    (num_rows, num_cols).
    """
    if source.numpy_array.shape == (num_cols, num_rows):
        src = source.numpy_array.T
    else:
        src = source.numpy_array.reshape(num_rows, num_cols)
    ix = inds.numpy_array.reshape(num_rows).astype(int)
    t = target.numpy_array.reshape(num_rows)

    for i in range(num_rows):
        t[i] = src[i, ix[i]]
    return target


def set_item_to_each_row(source, target, inds, num_rows, num_cols):
    """
    For each row i, set source[i, inds[i]] = target[i] -- the scatter that
    inverts get_item_from_each_row.
    """
    if source.numpy_array.shape == (num_cols, num_rows):
        src = source.numpy_array.T
    else:
        src = source.numpy_array.reshape(num_rows, num_cols)

    ix = inds.numpy_array.reshape(num_rows).astype(int)
    t = target.numpy_array.reshape(num_rows)

    for i in range(num_rows):
        src[i, ix[i]] = t[i]
    return source


def abs(X, aux):
    # NOTE(review): this redefinition shadows both the builtin abs and the
    # one-matrix abs() defined earlier in this module.  It is kept because
    # callers depend on this two-argument signature.  Computes |X| into aux.
    return aux.assign_mult(X, X).sqrt()


def total_sum(X):
    return X.total_sum()


def mean(mat, axis, target = None):
    """Mean of mat along axis: the axis sum scaled by 1/len(axis)."""
    target = sum(mat, axis, target)
    target.mult_by_scalar(1. / mat.shape[axis])
    return target


def total_mean(mat):
    s = total_sum(mat)
    return s / mat.num_elems


def cumsum(mat, target):
    """Cumulative sum along the non-leading dimension, written into target."""
    target.resize(mat.shape)
    target.numpy_array[:] = mat.numpy_array.cumsum(1)
    return target


def multi_transpose(IN, OUT, w, h, batch_size):
    """
    Transpose each w-by-h image of a batch stored as (w*h, batch_size).
    The order of w, h seems wrong, but it is consistent with the one in
    cudamat.py.
    """
    i = IN.numpy_array
    o = OUT.numpy_array

    # ravel may return a copy for non-contiguous storage, so the result is
    # copied back into OUT explicitly at the end.
    o = o.ravel()
    o[:] = i.reshape(h, w, batch_size).transpose([1, 0, 2]).ravel()
    OUT.numpy_array[:] = o.reshape(*OUT.numpy_array.shape)

    return OUT


def ind_incr(target, inds, axis):
    """
    For each index i in the vector inds, increment target's column (axis=1)
    or row (axis=0) number i by 1.
    """
    assert target.shape[1] == inds.shape[0] * inds.shape[1]
    assert inds.shape[1] == 1 or inds.shape[0] == 1

    if axis == 1:
        try:
            for i in inds:
                target.numpy_array[:, i] += 1
        except IndexError:
            raise IncompatibleDimensionsException
        return target

    elif axis == 0:
        try:
            for i in inds:
                target.numpy_array[i, :] += 1
        except IndexError:
            raise IncompatibleDimensionsException
        return target

    else:
        raise Exception ("bad axis.")


## The code below has been lifted from cudamat. It needs to work with numpy.


MAX_ELEMS = 2 ** 16 - 10


class softmax:
    """Softmax along a fixed axis, computed in MAX_ELEMS-wide chunks."""

    def __init__(self, axis):
        self.axis = axis

        # scratch buffers for full-width chunks ...
        self.transpose_aux = empty()
        self.neg_max = empty()
        self.mat = empty()
        self.exp = empty()
        self.Z = empty()
        self.probs = empty()

        # ... and for the final, narrower chunk
        self.transpose_aux_small = empty()
        self.neg_max_small = empty()
        self.mat_small = empty()
        self.exp_small = empty()
        self.Z_small = empty()
        self.probs_small = empty()

    def __call__(self, mat, target):
        if mat.shape != target.shape:
            target.resize(mat.shape)

        if self.axis == 1:
            return self.__call_helper_small__(mat, target)

        pos = 0
        step = MAX_ELEMS

        ## width is how many elems we have to work with.
        width = mat.shape[1 - self.axis]

        while pos < width:
            next = min(width, pos + step)

            step_size = next - pos

            if step_size == step:
                self.__call_helper__(mat.slice(pos, next),
                                     target.slice(pos, next))
            else:
                self.__call_helper_small__(mat.slice(pos, next),
                                           target.slice(pos, next))

            pos += step_size

        return target

    def __call_helper__(self, mat, target):
        # Standard stable softmax: subtract the per-line max, exponentiate,
        # then normalize by the sum of exps.
        self.neg_max.\
            assign_max(mat,
                       axis = self.axis,
                       transpose_aux = self.transpose_aux).\
            mult(-1)

        if self.axis == 0:
            self.mat.assign_add_row_vec(mat, self.neg_max)
        else:
            self.mat.assign_add_col_vec(mat, self.neg_max)

        self.exp.assign_exp(self.mat)

        self.Z.assign_sum(self.exp, self.axis).reciprocal()

        self.probs.assign(self.exp)
        if self.axis == 0:
            self.probs.mult_by_row(self.Z)
        else:
            self.probs.mult_by_col(self.Z)

        target.assign(self.probs)

    def __call_helper_small__(self, mat, target):
        # Same computation as __call_helper__, using the "small" scratch
        # buffers so the final narrow chunk doesn't disturb the wide ones.
        self.neg_max_small.\
            assign_max(mat,
                       axis = self.axis,
                       transpose_aux = self.transpose_aux_small).\
            mult(-1)

        if self.axis == 0:
            self.mat_small.assign_add_row_vec(mat, self.neg_max_small)
        else:
            self.mat_small.assign_add_col_vec(mat, self.neg_max_small)

        self.exp_small.assign_exp(self.mat_small)

        self.Z_small.assign_sum(self.exp_small, self.axis).reciprocal()

        self.probs_small.assign(self.exp_small)
        if self.axis == 0:
            self.probs_small.mult_by_row(self.Z_small)
        else:
            self.probs_small.mult_by_col(self.Z_small)

        target.assign(self.probs_small)
1584 | 1585 | 1586 | def log_Zs(self, mat, target): 1587 | 1588 | self.neg_max.\ 1589 | assign_max(mat, 1590 | axis = self.axis, 1591 | transpose_aux = self.transpose_aux).\ 1592 | mult(-1) 1593 | 1594 | if self.axis == 0: 1595 | self.mat.assign_add_row_vec(mat, self.neg_max) 1596 | else: 1597 | self.mat.assign_add_col_vec(mat, self.neg_max) 1598 | 1599 | ## the exps without the max 1600 | self.exp.assign_exp(self.mat) 1601 | 1602 | ## take the sums of the exps, take the log, and add subtruct the maxes. 1603 | target.assign_sum(self.exp, self.axis).log().add(self.neg_max.mult(-1)) 1604 | 1605 | return target 1606 | 1607 | 1608 | 1609 | 1610 | 1611 | 1612 | 1613 | 1614 | class sample_multinomial: 1615 | def __init__(self, probs, axis): 1616 | raise NotImplementedError("use robust_multinomial instead.") 1617 | 1618 | self.axis = axis 1619 | self.cumsums = empty() 1620 | self.cumsums_t = empty() 1621 | self.probs_t = empty() 1622 | 1623 | 1624 | 1625 | self.cumsums_small = empty() 1626 | self.cumsums_t_small = empty() 1627 | self.probs_t_small = empty() 1628 | 1629 | 1630 | 1631 | 1632 | 1633 | self.set_probs(probs) 1634 | 1635 | 1636 | self.samples = empty() 1637 | self.samples_small = empty() 1638 | 1639 | 1640 | if axis == 0: 1641 | 1642 | width = probs.shape[1] 1643 | std_width = min(width, MAX_ELEMS) 1644 | 1645 | 1646 | 1647 | self.rand_vals = empty((1, std_width)) 1648 | self.ones = empty((probs.shape[0], 1)).assign(1.) 1649 | 1650 | 1651 | 1652 | small_width = max(0, width % MAX_ELEMS) 1653 | 1654 | 1655 | 1656 | self.rand_vals_small = empty((1, small_width)) 1657 | self.ones_small = empty((probs.shape[1], 1)).assign(1.) 1658 | 1659 | 1660 | 1661 | elif axis == 1: 1662 | 1663 | 1664 | width = probs.shape[0] 1665 | std_width = min(width, MAX_ELEMS) 1666 | 1667 | 1668 | 1669 | self.rand_vals = empty((std_width, 1)) 1670 | self.ones = empty((1, probs.shape[1])).assign(1.) 
1671 | 1672 | 1673 | 1674 | small_width = max(0, width % MAX_ELEMS) 1675 | 1676 | 1677 | self.rand_vals_small = empty((small_width, 1)) 1678 | self.ones_small = empty((1, probs.shape[1])).assign(1.) 1679 | 1680 | 1681 | 1682 | 1683 | 1684 | 1685 | 1686 | self.rand_mat = empty() 1687 | self.threshs = empty() 1688 | 1689 | 1690 | self.rand_mat_small = empty() 1691 | self.threshs_small = empty() 1692 | 1693 | 1694 | 1695 | 1696 | def set_probs(self, probs): 1697 | if self.axis == 1: 1698 | cumsum(probs, self.cumsums) 1699 | 1700 | else: 1701 | probs.transpose(target = self.probs_t) 1702 | cumsum(self.probs_t, self.cumsums_t) 1703 | self.cumsums_t.transpose(target = self.cumsums) 1704 | 1705 | 1706 | 1707 | 1708 | 1709 | 1710 | 1711 | 1712 | 1713 | def multi_sample(self, target, k): 1714 | target.resize(self.cumsums.shape) 1715 | 1716 | 1717 | for i in range(k): 1718 | 1719 | self.rand_vals.fill_with_rand() 1720 | 1721 | if self.axis == 1: 1722 | self.rand_mat.assign_dot(self.rand_vals, self.ones) 1723 | else: 1724 | self.rand_mat.assign_dot(self.ones, self.rand_vals) 1725 | 1726 | 1727 | self.threshs.\ 1728 | assign_less_than(self.cumsums, self.rand_mat).\ 1729 | sum(self.axis, target = self.samples) 1730 | 1731 | 1732 | 1733 | 1734 | ind_incr(target, self.samples, self.axis) 1735 | 1736 | return target 1737 | 1738 | 1739 | 1740 | 1741 | 1742 | 1743 | 1744 | 1745 | 1746 | def set_probs_helper_small(self, probs): 1747 | self.probs = probs 1748 | if self.axis == 1: 1749 | cumsum(probs, self.cumsums_small) 1750 | 1751 | else: 1752 | probs.transpose(target = self.probs_t_small) 1753 | cumsum(self.probs_t_small, self.cumsums_t_small) 1754 | self.cumsums_t_small.transpose(target = self.cumsums_small) 1755 | 1756 | 1757 | 1758 | def multi_sample_helper_small(self, target, k): 1759 | target.resize(self.cumsums_small.shape) 1760 | 1761 | 1762 | for i in range(k): 1763 | 1764 | self.rand_vals_small.fill_with_rand() 1765 | 1766 | if self.axis == 1: 1767 | 
self.rand_mat_small.assign_dot(self.rand_vals_small, self.ones_small) 1768 | else: 1769 | self.rand_mat_small.assign_dot(self.ones_small, self.rand_vals_small) 1770 | 1771 | 1772 | self.threshs_small.\ 1773 | assign_less_than(self.cumsums_small, self.rand_mat_small).\ 1774 | sum(self.axis, target = self.samples_small) 1775 | 1776 | 1777 | 1778 | 1779 | ind_incr(target, self.samples_small, self.axis) 1780 | 1781 | return target 1782 | 1783 | 1784 | 1785 | 1786 | 1787 | 1788 | def sample_from_probs(self, probs, target): 1789 | 1790 | if probs.shape != target.shape: 1791 | target.resize(probs.shape) 1792 | 1793 | 1794 | ## yes: we make a loop. 1795 | 1796 | pos = 0 1797 | step = MAX_ELEMS 1798 | width = probs.shape[1] 1799 | while pos < width: 1800 | next = min(width, pos + step) 1801 | 1802 | step_size = next - pos 1803 | 1804 | if step_size == step: 1805 | p = probs.slice(pos, next) 1806 | t = target.slice(pos, next) 1807 | 1808 | self.set_probs(p) 1809 | self.multi_sample(t, 1) 1810 | 1811 | else: 1812 | p = probs.slice(pos, next) 1813 | t = target.slice(pos, next) 1814 | 1815 | 1816 | self.set_probs_helper_small(probs) 1817 | self.multi_sample_helper_small(t, 1) 1818 | 1819 | pos += step_size 1820 | 1821 | 1822 | 1823 | return target 1824 | 1825 | 1826 | 1827 | 1828 | 1829 | 1830 | class robust_multinomial: 1831 | def __init__(self, shape, axis): 1832 | self.axis = axis 1833 | self.cumsums = empty() 1834 | self.cumsums_t = empty() 1835 | self.probs_t = empty() 1836 | 1837 | 1838 | 1839 | self.cumsums_small = empty() 1840 | self.cumsums_t_small = empty() 1841 | self.probs_t_small = empty() 1842 | 1843 | 1844 | 1845 | 1846 | 1847 | 1848 | self.samples = empty() 1849 | self.samples_small = empty() 1850 | 1851 | 1852 | if axis == 0: 1853 | 1854 | width = shape[1] 1855 | std_width = min(width, MAX_ELEMS) 1856 | 1857 | 1858 | 1859 | self.rand_vals = empty((1, std_width)) 1860 | self.ones = empty((shape[0], 1)).assign(1.) 
1861 | 1862 | 1863 | 1864 | small_width = max(0, width % MAX_ELEMS) 1865 | 1866 | 1867 | 1868 | self.rand_vals_small = empty((1, small_width)) 1869 | self.ones_small = empty((shape[0], 1)).assign(1.) 1870 | 1871 | 1872 | 1873 | elif axis == 1: 1874 | 1875 | 1876 | width = shape[0] 1877 | std_width = min(width, MAX_ELEMS) 1878 | 1879 | 1880 | 1881 | self.rand_vals = empty((std_width, 1)) 1882 | self.ones = empty((1, shape[1])).assign(1.) 1883 | 1884 | 1885 | 1886 | small_width = max(0, width % MAX_ELEMS) 1887 | 1888 | 1889 | self.rand_vals_small = empty((small_width, 1)) 1890 | self.ones_small = empty((1, shape[1])).assign(1.) 1891 | 1892 | 1893 | 1894 | 1895 | 1896 | 1897 | 1898 | self.rand_mat = empty() 1899 | self.threshs = empty() 1900 | 1901 | 1902 | self.rand_mat_small = empty() 1903 | self.threshs_small = empty() 1904 | 1905 | 1906 | 1907 | 1908 | def set_probs(self, probs): 1909 | self.probs = probs 1910 | if self.axis == 1: 1911 | cumsum(probs, self.cumsums) 1912 | 1913 | else: 1914 | probs.transpose(target = self.probs_t) 1915 | cumsum(self.probs_t, self.cumsums_t) 1916 | self.cumsums_t.transpose(target = self.cumsums) 1917 | 1918 | 1919 | 1920 | 1921 | 1922 | 1923 | 1924 | 1925 | 1926 | def multi_sample(self, target, k): 1927 | target.resize(self.cumsums.shape) 1928 | 1929 | 1930 | for i in range(k): 1931 | 1932 | self.rand_vals.fill_with_rand() 1933 | 1934 | if self.axis == 1: 1935 | self.rand_mat.assign_dot(self.rand_vals, self.ones) 1936 | else: 1937 | self.rand_mat.assign_dot(self.ones, self.rand_vals) 1938 | 1939 | 1940 | self.threshs.\ 1941 | assign_less_than(self.cumsums, self.rand_mat).\ 1942 | sum(self.axis, target = self.samples) 1943 | 1944 | 1945 | 1946 | 1947 | ind_incr(target, self.samples, self.axis) 1948 | 1949 | return target 1950 | 1951 | 1952 | 1953 | 1954 | 1955 | 1956 | 1957 | 1958 | 1959 | def set_probs_helper_small(self, probs): 1960 | if self.axis == 1: 1961 | cumsum(probs, self.cumsums_small) 1962 | 1963 | else: 1964 | 
probs.transpose(target = self.probs_t_small) 1965 | cumsum(self.probs_t_small, self.cumsums_t_small) 1966 | self.cumsums_t_small.transpose(target = self.cumsums_small) 1967 | 1968 | 1969 | 1970 | 1971 | def multi_sample_helper_small(self, target, k): 1972 | target.resize(self.cumsums_small.shape) 1973 | 1974 | for i in range(k): 1975 | 1976 | self.rand_vals_small.fill_with_rand() 1977 | 1978 | if self.axis == 1: 1979 | self.rand_mat_small.assign_dot(self.rand_vals_small, self.ones_small) 1980 | else: 1981 | self.rand_mat_small.assign_dot(self.ones_small, self.rand_vals_small) 1982 | 1983 | 1984 | self.threshs_small.\ 1985 | assign_less_than(self.cumsums_small, self.rand_mat_small).\ 1986 | sum(self.axis, target = self.samples_small) 1987 | 1988 | 1989 | 1990 | 1991 | ind_incr(target, self.samples_small, self.axis) 1992 | 1993 | return target 1994 | 1995 | 1996 | 1997 | 1998 | 1999 | 2000 | def sample_from_probs(self, probs, target): 2001 | 2002 | if probs.shape != target.shape: 2003 | target.resize(probs.shape) 2004 | 2005 | 2006 | ## yes: we make a loop. 
2007 | 2008 | pos = 0 2009 | step = MAX_ELEMS 2010 | 2011 | width = probs.shape[1 - self.axis] 2012 | 2013 | while pos < width: 2014 | next = min(width, pos + step) 2015 | 2016 | step_size = next - pos 2017 | 2018 | p = probs.slice(pos, next) 2019 | t = target.slice(pos, next) 2020 | 2021 | 2022 | if step_size == step: 2023 | 2024 | self.set_probs(p) 2025 | self.multi_sample(t, 1) 2026 | 2027 | else: 2028 | 2029 | self.set_probs_helper_small(p) 2030 | self.multi_sample_helper_small(t, 1) 2031 | 2032 | pos += step 2033 | 2034 | 2035 | 2036 | return target 2037 | -------------------------------------------------------------------------------- /gnumpy.py: -------------------------------------------------------------------------------- 1 | """Documentation can be found at http://www.cs.toronto.edu/~tijmen/gnumpy.htmln""" 2 | 3 | """ 4 | 5 | Copyright (c) 2010-2011 Tijmen Tieleman 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy 8 | of this software and associated documentation files (the "Software"), to deal 9 | in the Software without restriction, including without limitation the rights 10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | copies of the Software, and to permit persons to whom the Software is 12 | furnished to do so, subject to the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be included in 15 | all copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 | THE SOFTWARE. 
24 | 25 | If you use Gnumpy for scientific work that gets published, you should include 26 | in that publication a citation of the technical report that describes Gnumpy. 27 | That report can be found at http://www.cs.toronto.edu/~tijmen/gnumpyTr.pdf 28 | 29 | """ 30 | 31 | 32 | 33 | 34 | 35 | """ 36 | This file is not intended to be read by anyone other than gnumpy developers. It's long, it's weakly documented (much of the internal documentation is elsewhere), and many lines are unnaturally long & illegible because I did a lot of inlining. 37 | 38 | If you really want to know how gnumpy works internally, or if you want to extend it, you can ask me for the original, which doesn't have the inlining, and the internal documentation. 39 | """ 40 | 41 | 42 | 43 | 44 | # ------------------------------------------------------------------------------- module init & shutdown 45 | 46 | import numpy, operator, sys as _sys, types as types, time as _time, os as _os, __builtin__, collections as _collections, pdb as _pdb, gc as _gc, ctypes as _ctypes 47 | 48 | _useGpu = _os.environ.get('GNUMPY_USE_GPU', 'auto') 49 | assert _useGpu in ('auto', 'yes', 'no'), "environment variable GNUMPY_USE_GPU, if present, should be one of 'auto', 'yes', 'no'." 50 | if _useGpu == 'auto': 51 | try: import cudamat as _cudamat; _useGpu = 'yes' 52 | except: print 'gnumpy: failed to import cudamat. Using npmat instead. No GPU will be used.'; _useGpu = 'no' 53 | if _useGpu == 'yes': 54 | import cudamat as _cudamat 55 | elif _useGpu == 'no': 56 | import npmat as _cudamat 57 | _precision = _os.environ.get('GNUMPY_CPU_PRECISION', '32') 58 | assert _precision in ('32', '64', '128'), 'environment variable GNUMPY_CPU_PRECISION, if present, should have value 32, 64, or 128.' 
_cudamat.__DTYPE__ = eval('numpy.float'+_precision)  # the numpy float type used in simulation mode, e.g. numpy.float32

_cmType = _cudamat.CUDAMatrix  # the underlying matrix type (from cudamat or npmat)
_isTijmen = False
if hasattr(_cudamat, 'ct'): _ctInt = _cudamat.ct.c_int  # only the real cudamat module exposes ctypes

def board_id_to_use():
    """ Returns the id of the gpu board to claim: the one handed out by the
    gpu_lock module if it is importable, else board #0. """
    try:
        import gpu_lock
        return gpu_lock.obtain_lock_id()
    except:
        print 'gnumpy: failed to use gpu_lock. Using board #0 without knowing whether it is in use or not.'
        return 0

_boardId = None
def _init_gpu():
    """ picks a board and claims it (if using cudamat aot npmat). exception if there is no board. """
    if '__gpu_inited' in globals(): return  # idempotent: only ever initialize once
    global _boardId
    if _useGpu=='yes':
        _boardId = ( board_id_to_use() if callable(board_id_to_use) else board_id_to_use)
        if _boardId==-1: raise Exception('No gpu board is available. gnumpy will not function. Consider telling it to run on the CPU by setting environment variable GNUMPY_USE_GPU to "no".')
        _cudamat.cuda_set_device(_boardId)
        _cudamat.cublas_init()
    # NOTE(review): indentation was lost in this copy of the file; the random
    # seed is presumed to be initialized for both the cudamat and npmat
    # backends (i.e. outside the if above) -- confirm against upstream.
    _cudamat.CUDAMatrix.init_random(0)
    globals()['__gpu_inited'] = None

# Public knobs controlling the (optional) number-type checking below.
expensive_check_probability = 1
acceptable_number_types = 'anything goes' # alternatives: 'no nans'; 'no nans or infs'; or a number indicating the max allowed abs
dont__check_number_types_in_non_garrays = True
class GnumpyNumberTypeException(Exception): pass

_checking_number_type_now = False
def _check_number_types(x):
    """ does some checks, and then returns x. """
    if acceptable_number_types == 'anything goes': return x # this is the typical case, and in this case I just want to leave this checking function asap.

    global _checking_number_type_now
    if dont__check_number_types_in_non_garrays and not isinstance(x, garray): return x
    if _checking_number_type_now: return x # to prevent checks upon checks upon checks (infinite recursion)
    try:
        _checking_number_type_now = True
        if acceptable_number_types == 'no nans': raise NotImplementedError
        elif acceptable_number_types == 'no nans or infs':
            if not garray(x, copy=False).all_real(): raise GnumpyNumberTypeException('Found values that violate the rule set by gnumpy.acceptable_number_types: "%s"' % acceptable_number_types)
        elif type(acceptable_number_types) in _numberTypes:
            if (abs(garray(x, copy=False)) > acceptable_number_types).any2(): raise GnumpyNumberTypeException('Found values that violate the rule set by gnumpy.acceptable_number_types: "%s"' % acceptable_number_types)
        else: assert False, 'gnumpy: the value of variable "acceptable_number_types" must be one of "anything goes", "no nans", "no nans or infs".'
    finally:
        _checking_number_type_now = False
    return x



# ------------------------------------------------------------------------------- helpers copied from other files

# Small tuple/sequence utilities; a trailing "T" in a name means "on tuples".
def _isFullSlice(x): return type(x) == types.SliceType and x == slice(None) # the first check is necessary to avoid returning a broadcast array of False's if x is an array
def _isSequence(x): return type(x) == list or type(x) == tuple or type(x)==xrange
def _insertT(tup, index, tupleToInsert): return tuple(tup[:index]) + tuple(tupleToInsert) + tuple(tup[index:])
def _modifyT(tup, index, newValue): return tuple(tup[:index]) + (newValue,) + tuple(tup[index+1:])
def _deleteT(tup, start, end): return tup[:start] + tup[end:]
def _prodT(x): return reduce(operator.mul, x, 1)  # product of the elements; 1 for an empty sequence
def _findIndex3(tupOrGenerator): return ( i for i, x in enumerate(tuple(tupOrGenerator)) if x).next()  # index of the first truthy element
def _isNumber(x): return type(x) in _numberTypes
def _nonSeqAsS(x): return ( x if _isSequence(x) else (x,))  # wrap a non-sequence in a 1-tuple
_t0=()
def reduceAdd(x): return reduce(operator.add, x)

def _deleteT2(tup, index):
    # Remove the element at (possibly negative) index from a tuple.
    index %= len(tup)
    return tup[:index] + tup[index+1:]

_intTypes = set((types.IntType, types.LongType, numpy.int16, numpy.int32, numpy.int8, numpy.int64))
_floatTypes = set((types.FloatType, numpy.float64, numpy.float32, getattr(numpy, 'float128', numpy.float64), getattr(numpy, 'float96', numpy.float64))) # considering numpy.float64 a number is debatable. it really is a numpy object, and behaves that way, too: it has a __mul__ which prevents garray.__rmul__ from getting the task. However, for most purposes it's a number.
_numberTypes = _intTypes | _floatTypes

def _allTheSame(tup):
    # True iff every element of tup equals the first (vacuously true for len<=1).
    tup = tuple(tup)
    if len(tup)<=1: return True
    for elt in tup[1:]:
        if elt != tup[0]: return False
    return True





# ------------------------------------------------------------------------------- gnumpy specific helpers

def _all2_(t, pred): return reduce(operator.and_, map(pred, t), True)  # all(), with an explicit predicate
def _any2_(t, pred): return reduce(operator.or_, map(pred, t), False)  # any(), with an explicit predicate

def _doExpensiveCheck(): return numpy.random.rand() < expensive_check_probability

def as_garray(x): return ( x if isinstance(x, garray) else garray(x))
def as_garray_or_scalar(x): return ( x if type(x) in _numberTypes or isinstance(x, garray) else garray(x))
def as_numpy_array(x): return ( x.as_numpy_array() if isinstance(x, garray) else numpy.array(x))

# The _cm_* helpers below wrap cudamat calls; note that cudamat stores arrays
# in FORTRAN order, hence the reversed shapes and the row/col swaps.
def _cm_reshape(cm, newShape):
    if _prodT(newShape)==0: return cm
    else: return cm.reshape(tuple(reversed(newShape)))

def _cm_col_slice_write(cm, start, end, sourceCm):
    cm.set_row_slice(start, end, sourceCm)

def _cm_col_slice_read(cm, start, end, target):
    cm.get_row_slice(start, end, target)
    return target

def _cm_row_slice_read(cm, start, end):
    if start==end: return _new_cm((0, cm.shape[0])) # cudamat special case workaround
    if cm.shape[1]==1 and start==0 and end==1: return cm # cudamat special case workaround
    ret = cm.get_col_slice(start, end)
    return ret

def _read_single_index(index, axisLen):
    # Validate an integer index against an axis length and normalize negatives.
    index = int(index)
    if index>=axisLen or index<-axisLen: raise IndexError('index out of bounds. index %d requested on an axis of length %d' % (index, axisLen))
    return index % axisLen

def _short_slice(i): return slice(i, i+1)

def _read_simple_slice(sl, axisLen):
    # Normalize a step-1 slice into (start, stop, length) within an axis.
    assert sl.step in (None, 1), 'simple slice not understood'
    sFrom, sTo = slice(( None if sl.start==None else int(sl.start)), ( None if sl.stop==None else int(sl.stop))).indices(axisLen)[:2]
    if sFrom>sTo: sTo = sFrom
    return (sFrom, sTo, sTo-sFrom)

def _extend_shape(shape, nAxes): return (1,) * (nAxes-len(shape)) + shape  # left-pad shape with 1's up to nAxes




# ------------------------------------------------------------------------------- memory management

max_memory_usage = numpy.inf # public

_cmsForReuse = _collections.defaultdict(list) # dict from size to list of reusable (abandoned) cms
__memoryInUse = 0  # bytes of gpu memory currently allocated by gnumpy
_memoryUsers = _collections.defaultdict(lambda: (0, 0))  # maps calling line -> (object count, total bytes); only filled when track_memory_usage is True
track_memory_usage = False

def _new_cm(sizeOrShape):
    """
    Internal.
    Returns a new CUDAMatrix object of the given size.
    This is the only proc that allocs gpu mem.
    """
    global __memoryInUse
    if type(sizeOrShape) == tuple:
        if _prodT(sizeOrShape)==0: return _new_cm(1) # cudamat workaround: cudamat can't handle size 0 arrays
        else: return _new_cm(sizeOrShape[0]*sizeOrShape[1]).reshape((sizeOrShape[1], sizeOrShape[0]))
    size = sizeOrShape
    if size==0: return _cudamat.empty((1, 1)) # cudamat workaround
    if len(_cmsForReuse[size])!=0:
        return _cm_reshape(_cmsForReuse[size].pop(), (1, size)) # re-use an abandoned cm
    _init_gpu()
    if __memoryInUse+size*4*5 > max_memory_usage: free_reuse_cache(False) # if we're somewhat close to the limit, then free what's easy to free, and hope that there are contiguous blocks available.
    if __memoryInUse+size*4 > max_memory_usage: # if we're (still) OVER the limit, then do whatever can be done to make more mem available
        free_reuse_cache(True) # gc.collect can take quite some time
        if __memoryInUse+size*4 > max_memory_usage:
            raise MemoryError('Gnumpy ran out of memory. Currently in use are %s; the maximum allowed is %s; so the present request for %s is refused. Free some memory and try again.' % (_n_bytes_str(__memoryInUse), _n_bytes_str(max_memory_usage), _n_bytes_str(size*4)))
    try:
        ret = _cudamat.empty((size, 1))
        __memoryInUse += size*4 # do this only if the malloc succeeded
        return ret
    except _cudamat.CUDAMatException, e: # this means that malloc failed
        raise MemoryError('The GPU failed to allocate the requested %d bytes of memory. This doesn\'t mean that your program is using too much memory. It does, however, mean that you should reduce the value of gnumpy.max_memory_usage (currently %s), to always have some memory unused (which is necessary to find contiguous large blocks of memory to allocate). Failing to allocate enough memory makes the GPU feel very unwell, so you are advised to restart Python now, or expect to see incoherent error messages and risk causing more serious damage.' % (size*4, str(max_memory_usage)))

def free_reuse_cache(completely=True):
    """
    This frees all GPU memory that is not in use but is kept allocated for re-use.
    If `completely` is set to False, this works quicker but less thoroughly.
    """
    if completely: _gc.collect() # this has to happen before the loop, because this may add more entries in _cmsForReuse which then have to be freed by the loop
    global __memoryInUse
    for size in _cmsForReuse:
        while _cmsForReuse[size]:
            _cmsForReuse[size].pop()
            __memoryInUse -= size*4
    del _gc.garbage[:] # this shouldn't be necessary at all, but for some reason perfectly referenced AND perfectly deletable cms get put there

def _n_bytes_str(n):
    # Format an integer with thousands separators, followed by ' bytes'.
    def _base(s): return ( _base(s[:-3]) + ',' + s[-3:] if len(s)>=4 else s)
    return _base(str(n)) + ' bytes'

def memory_in_use(in_megabytes=False):
    """ returns the number of bytes (or megabytes if you asked for that) of GPU memory that are in use. """
    return __memoryInUse // ( 2**20 if in_megabytes else 1)

def memory_available(free_reuse_cache_first):
    if free_reuse_cache_first: free_reuse_cache()
    return max_memory_usage - memory_in_use()

def _calling_line():
    """ Internal. Inspects the current python call stack and returns a nice string description of the line of code that called gnumpy. """
    stack = _pdb.traceback.extract_stack()[::-1] # newest first
    stack = stack[( i for i, x in enumerate(stack) if x[0] != stack[0][0]).next():] # skip any gnumpy procs on the stack
    def stackFrameToString(frame): return 'File "%s", line %d, in function %s: %s' % (frame[0], frame[1], frame[2], ( '' if frame[3]==None else frame[3]))
    ret = stackFrameToString(stack[0])
    for frame in stack[1:]:
        # NOTE(review): the two tests below appear in this copy of the file as
        # the same literal 'File "",'; the originals presumably named two
        # different special pseudo-files (whose angle-bracketed names were lost
        # in transcription) -- confirm against the upstream source.
        if 'File "",' in ret: break
        if 'File "",' in ret: break
        ret += '\n Called by: ' + stackFrameToString(frame)
    return ret

def memory_allocators(minimum_n_bytes=1):
    """ Prints a list of lines in your code that caused allocated GPU memory that's still in use.
    """
    if not track_memory_usage:
        print 'The variable gnumpy.track_memory_usage must be set to True, to enable memory data collection (which can slow down your program a lot).'
        return
    # report allocation sites sorted by total bytes, largest first
    for line, (n,amt) in sorted(_memoryUsers.items(), key=lambda x:x[1][1]) [::-1] :
        if amt >= minimum_n_bytes:
            print '%d objects, totalling %s, that are still in use, were allocated by: %s' % (n, _n_bytes_str(amt), line)
            print



# ------------------------------------------------------------------------------- external procs

def status():
    # reports whether gnumpy runs on the gpu or the cpu, and the current gpu memory usage
    if _useGpu=='no': print 'gnumpy is running on the CPU, i.e. in simulation mode. The data type is float%s.' % _precision
    if _useGpu=='yes':
        if _boardId==None: print 'gnumpy is planning to run on a GPU, but hasn\'t yet chosen & initialized a board.'
        else: print 'gnumpy is running on GPU board #%d.' % _boardId
    print '%s of gpu memory are in use, of which at least %s can be freed immediately by gnumpy.free_reuse_cache().' % (_n_bytes_str(__memoryInUse), _n_bytes_str(__builtin__.sum( size*len(cms)*4 for size, cms in _cmsForReuse.items())))



def _rand__base(shapeInfo, distribution, zero_d_means_scalar):
    """ Internal. Shared implementation of rand() and randn(); <distribution> is 'uniform' or 'normal'. """
    if len(shapeInfo)==1 and _isSequence(shapeInfo[0]): zero_d_means_scalar = False; shapeInfo = shapeInfo[0]
    ret = empty(shapeInfo)
    # fill in place on the gpu with the requested distribution
    {'uniform': _cmType.fill_with_rand, 'normal': _cmType.fill_with_randn}[distribution](ret._base)
    if ret.size!=0 and _doExpensiveCheck(): assert ret.sum() < 100 + 2*ret.size, 'numerical gpu error: rand() gave a result>100'
    if len(shapeInfo) == 0 and zero_d_means_scalar: return ret.item()
    else: return ret

def tile(a, reps):
    """ Like numpy.tile: repeats array <a> according to the repetition counts in <reps>. """
    if type(reps) in _numberTypes: reps = (reps,)
    reps = tuple(reps) # for generator expressions
    a = as_garray(a)
    if len(reps) > a.ndim: a = a._add_axes(len(reps))
    if len(reps) < a.ndim: reps = _extend_shape(reps, a.ndim) # now len(reps)==a.ndim
    retShape = tuple([ a.shape[i] * reps[i] for i in tuple(xrange(len(reps)))])
    if _prodT(retShape)==0: return zeros(retShape)
    if _prodT(reps)==1: return a
    for i in range(a.ndim-1): # merge replication requests on adjacent axes, for efficiency.
        if reps[i]!=1 and reps[i+1]!=1 and a.shape[i]==1: return a.reshape(_deleteT2(a.shape, i)).tile(reps[:i]+(_prodT(reps[i:i+2]),)+reps[i+2:]).reshape(map(operator.mul, a.shape, reps))
    def dataIDone(nextA, i): return nextA.reshape(_modifyT(a.shape, i, a.shape[i]*reps[i])).tile(_modifyT(reps, i, 1))
    if reps[0]!=1: # replicating rows is easy and efficient: just repeat the data a number of times.
        temp = empty((reps[0], a.size)) # shape doesn't matter because dataIDone changes it
        tempCm = temp._base_shaped(1)
        if reps[0]>=1:
            _cm_row_slice_read(tempCm, 0, 1).assign(a._base_as_row())
            nCopiesDone = 1
            while nCopiesDone < reps[0]:
                # double the copied region on each pass (copy as much as already done, capped at what remains)
                nNow = __builtin__.min(nCopiesDone, reps[0]-nCopiesDone)
                _cm_row_slice_read(tempCm, nCopiesDone, nCopiesDone + nNow).assign(_cm_row_slice_read(tempCm, 0, nNow))
                nCopiesDone += nNow
        return dataIDone(temp, 0)
    # the general case is repeating a subset (aot the whole array) n times, before moving on to the next subset
    # using a transpose with the right shape, the subsets can become columns. those can be lengthened because that is replicating rows; a second transpose makes them now-lengthened subsets again
    axis = __builtin__.min( i for i in range(a.ndim) if reps[i]!=1)
    return dataIDone(a.reshape_2d(axis).T.tile((reps[axis], 1)).T, axis)

def is_garray(x): return isinstance(x, garray)
def is_array(x): return isinstance(x, garray) or type(x) == numpy.ndarray

def rand(*shapeInfo):
    """ the desired array shape can be entered either as integers or as a tuple of integers. If you enter a tuple you always get an array; if you enter nothing you get a scalar. """
    return _rand__base(shapeInfo, 'uniform', True)

def randn(*shapeInfo):
    """ the desired array shape can be entered either as integers or as a tuple of integers. If you enter a tuple you always get an array; if you enter nothing you get a scalar.
    """
    return _rand__base(shapeInfo, 'normal', True)

def empty(shape):
    """ Returns an uninitialized garray of the given shape (a number, a sequence of numbers, or a generator). """
    if _isSequence(shape) or type(shape) == types.GeneratorType: shape = tuple(shape)
    else: shape = (shape,)
    return garray(_new_cm(_prodT(shape)), shape, None)

def zeros (shape):
    """ Returns a garray of the given shape, filled with zeros. """
    if _isSequence(shape) or type(shape) == types.GeneratorType: shape = tuple(shape)
    else: shape = (shape,)
    ret = empty(shape)
    ret._base.assign(0)
    return ret

def ones (shape):
    """ Returns a garray of the given shape, filled with ones. """
    if _isSequence(shape) or type(shape) == types.GeneratorType: shape = tuple(shape)
    else: shape = (shape,)
    ret = empty(shape)
    ret._base.assign(1)
    return ret

def seed_rand(seed=None):
    """ Seeds the gpu random number generator; uses the current time when <seed> is None. """
    _init_gpu()
    if seed==None: seed = int(_time.time())
    _cudamat.CUDAMatrix.init_random(seed)

def dot(a1, a2):
    # internally: for matrix-matrix multiplies only; vectors are treated like special cases.
    a1 = as_garray(a1); a2 = as_garray(a2)
    if a1.ndim==0 or a2.ndim==0: return a1*a2
    if a1.ndim==a2.ndim==1:
        if a1 is a2: return sum(a1**2)
        else: return dot(a1.reshape(1, a1.size), a2.reshape(a2.size, 1)).item()
    if a1.ndim==2 and a2.ndim==1: return dot(a1, a2.reshape(a2.size, 1)).ravel() # treat a2 like a column vector
    if a1.ndim==1 and a2.ndim==2: return dot(a1._add_axes(2), a2)[0] # treat a1 like a row vector
    if a1.shape[-1] != a2.shape[-2]: raise ValueError('arrays not aligned for dot product. a dot product was requested of arrays with shapes %s and %s' % (a1.shape, a2.shape))
    if a1.ndim==a2.ndim==2:
        retShape = (a1.shape[0], a2.shape[1])
        if a1.shape[1]==0: return zeros(retShape) # cudamat bug workaround
        ret = empty(retShape)
        # note the swapped operand order: _base_as_2d() presents the data transposed relative to the logical shape
        if ret.size!=0: _cudamat.dot(a2._base_as_2d(), a1._base_as_2d(), ret._base_as_2d())
        return ret
    if a1.ndim >= 2 and a2.ndim >= 2:
        # this is not necessarily fast, because if a2.ndim>=3 then it involves a transpose
        a12 = ( a1.reshape_2d(-1) if a1.ndim!=2 else a1)
        a22 = ( a2.transpose((a2.ndim-2,) + tuple(xrange(a2.ndim-2)) + (a2.ndim-1,)).reshape_2d(1)
                if a2.ndim!=2 else
                a2)
        retShape = _deleteT2(a1.shape, -1) + _deleteT2(a2.shape, -2)
        return dot(a12, a22).reshape(retShape)
    raise NotImplementedError('dot with arguments of shapes %s and %s' % (a1.shape, a2.shape))

def outer(vec1, vec2): return dot(vec1.ravel()[:, newaxis], vec2.ravel()[newaxis, :])  # outer product of the raveled inputs

def concatenate(arrays, axis=0):
    """ Like numpy.concatenate: joins the given arrays along <axis>. """
    arrays = tuple(map(as_garray, arrays))
    if axis<0: axis += arrays[0].ndim
    if not _isSequence(arrays) or not type(axis) in _numberTypes: raise ValueError('wrong argument types to gnumpy.concatenate: expected to be a sequence and to be a number, but got types %s and %s.' % (type(arrays), type(axis)))
    if axis not in range(arrays[0].ndim): raise ValueError('bad axis number (%d) specified (the first array has %d axes)' % (axis, arrays[0].ndim))
    if not _allTheSame( _deleteT2(a.shape, axis) for a in arrays): raise ValueError('array dimensions must agree except possibly for axis #%d. The given array shapes are: %s' % (axis, tuple( a.shape for a in arrays)))
    finalShape = _modifyT(arrays[0].shape, axis, __builtin__.sum( a.shape[axis] for a in arrays))
    if axis==0:
        # concatenation along axis 0 is just copying each array's rows into place
        ret = empty(finalShape)
        nextI = 0
        for a in arrays:
            _cm_row_slice_read(ret._base_shaped(ret.ndim), nextI, nextI+a.size).assign(a._base_shaped(a.ndim))
            nextI += a.size
        return ret
    else:
        # reduce to the axis==0 case by flattening to 2d and transposing
        return concatenate(tuple([ a.reshape_2d(axis).T for a in arrays]), 0).T.reshape(finalShape)

def where(a, *vararg):
    """
    Note: if only one argument is provided, the returned value will be a tuple of *numpy* arrays of integer indices (gpu arrays can only contain floats).
    """
    if vararg==_t0: return numpy.where(as_numpy_array(a))
    assert len(vararg)==2, 'wrong number of arguments to gnumpy.where()'
    return garray(numpy.where(as_numpy_array(a), as_numpy_array(vararg[0]), as_numpy_array(vararg[1])))

def nonzero(a):
    """ See notes for where().
    """
    return where(a)

newaxis = None  # numpy-compatible alias, for use in indexing

def eye(n): return diagflat(ones(n))  # n x n identity matrix

def diagflat(a, k=0):
    """ Like numpy.diagflat: a 2d array with the raveled input on diagonal <k>. """
    if isinstance(a, garray): return a.diagflat(k)
    else: return numpy.diagflat(a,k)

def tensordot(a, b, axes=2):
    """ Like numpy.tensordot: contracts <a> and <b> over the given axes (an integer or a pair of axis lists). """
    if type(axes) in _numberTypes: return dot(a.reshape_2d(a.ndim-axes), b.reshape_2d(axes)).reshape(a.shape[:a.ndim-axes] + b.shape[axes:])
    assert len(axes)==2 and len(axes[0])==len(axes[1]), 'the axes parameter to gnumpy.tensordot looks bad'
    aRemove, bRemove = (tuple(axes[0]), tuple(axes[1]))
    # move the contracted axes to the end of <a> and the start of <b>, then recurse into the integer-axes case
    return tensordot(a.transpose(filter(lambda x: x not in aRemove, tuple(xrange(a.ndim))) + aRemove),
                     b.transpose(bRemove + filter(lambda x: x not in bRemove, tuple(xrange(b.ndim)))),
                     len(aRemove))



# ------------------------------------------------------------------------------- reductors

def _reductor__base(x, axis, gpuOp, npOp):
    """ Internal. Shared implementation of the reductor procs; uses <gpuOp> for garrays when available, else <npOp> on a cpu copy. """
    if _isTijmen: numTimeIncurred(x.size, '%s onDim0=%s' % (npOp.__name__, axis in (0, None)))
    if type(x) == numpy.ndarray: return npOp(x, axis)
    if not isinstance(x, garray): x = garray(x)
    if gpuOp==None: return garray(npOp(x.as_numpy_array(), axis))
    else: return gpuOp(x, axis)

def all(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.all, numpy.all)

def any(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.any, numpy.any)

def sum(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.sum, numpy.sum)

def mean(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.mean, numpy.mean)

def max(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.max, numpy.max)

def min(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.min, numpy.min)

def prod(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, None, numpy.prod)

def std(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, None, numpy.std)



# ------------------------------------------------------------------------------- elementwise operations

def _elementwise__base(x, opGpu, opNp):
    """ Internal. Shared implementation of the elementwise procs; preserves the input's type (number, numpy array, or garray). """
    if type(x) in _numberTypes: return _check_number_types(float(opNp(x)))
    if opGpu==None or type(x) == numpy.ndarray: # else, time admin happens in the method
        if _isTijmen: numTimeIncurred(x.size, opNp.__name__)
    if isinstance(x, garray):
        if opGpu==None: return _check_number_types(garray(opNp(x.as_numpy_array())))
        else: return _check_number_types(opGpu(x))
    if type(x) == numpy.ndarray:
        if x.ndim==0: return _check_number_types(numpy.array(opNp(x)))
        else: return _check_number_types(opNp(x))
    raise TypeError('value %s of unexpected type %s provided to %s()' % (x, type(x), str(opNp).split("'")[1]))

def abs(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.abs, numpy.abs)

def exp(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.exp, numpy.exp)

def isinf(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.isinf, numpy.isinf)

def isnan(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.isnan, numpy.isnan)

def log(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.log, numpy.log)

def log_1_plus_exp(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.log_1_plus_exp, lambda x: 1.+exp(x))

def logistic(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.logistic, lambda x: 1./(1. + exp(-x)))

def negative(x):
    """
    Like -x, except that a zero dimensional numpy array input results in a numpy array return value.
    This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats).
    """
    return _elementwise__base(x, operator.neg, operator.neg)

def sign(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.sign, numpy.sign)

def sqrt(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.sqrt, numpy.sqrt)

def tanh(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, garray.tanh, numpy.tanh)

def log10(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, None, numpy.log10)

def log2(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, None, numpy.log2)

def cos(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats).
    """
    return _elementwise__base(x, None, numpy.cos)

def sin(x):
    """ This works on garrays, numpy arrays, and numbers, preserving type (though all numbers become floats). """
    return _elementwise__base(x, None, numpy.sin)





class garray(object):
    """
    A class designed to interface like numpy arrays, and internally do its work on a GPU.
    Documentation can be found at http://www.cs.toronto.edu/~tijmen/gnumpy.html
    """

    # ------------------------------------------------------------------------------- internal aux

    def _set_shape_info(self, shape): # setting these as attributes rather than properties saves exec time
        self.shape = shape
        self.size = _prodT(shape)
        self.ndim = len(shape)

    @property
    def nbytes(self): return self.size * 4  # 4 bytes per element: storage is float32
    @property
    def nMBytes(self): return self.nbytes / 2**20

    def _base_shaped(self, nDimsAsRows): return _cm_reshape(self._base, (_prodT(self.shape[:nDimsAsRows]), _prodT(self.shape[nDimsAsRows:])))
    def _base_as_row(self): return _cm_reshape(self._base, (1, self.size))
    def _base_as_2d(self): return self._base.reshape((self.shape[1], self.shape[0])) # optimized from self._base_shaped(1) by inlining

    def _new_cm(self, nDimsAsRows=0): return _new_cm((_prodT(self.shape[:nDimsAsRows]), _prodT(self.shape[nDimsAsRows:]))) # same size as self, with given shape

    def _new(self, cm): return garray(cm, self.shape, None) # short notation for the result of elementwise ops

    def _tile_to_broadcast(self, otherShape, indicesToBroadcast='all'):
        """ self.shape and otherShape must already be of the same length. otherShape is relevant only where self.shape is 1. """
        if otherShape == self.shape: return self
        assert self.ndim == len(otherShape), 'dimensionality mismatch in _tile_to_broadcast'
        if indicesToBroadcast=='all': indicesToBroadcast = tuple( i for i in range(self.ndim) if self.shape[i]==1 and otherShape[i]!=1)
        return self.tile( ( 1 if i not in indicesToBroadcast else otherShape[i] ) for i in range(self.ndim))

    def _broadcastable_op(self, other, operatorName):
        """
        accepted ops: "add", "multiply", "less than", "greater than", "pow".
        other must be either scalar or garray.
        """
        basicHandler = {'add': _cmType.add, 'multiply': _cmType.mult, 'less than': _cmType.less_than, 'greater than': _cmType.greater_than, 'pow': _cudamat.pow}[operatorName]
        if (type(other) in _numberTypes or (other.size==1 and other.ndim <= self.ndim)): # having other be a scalar is faster than doing a broadcast
            if _isTijmen: numTimeIncurred(self.size, 'AS eltwise')
            return self._new(basicHandler(self._base_as_row(), ( other.item() if isinstance(other, garray) else other), self._new_cm()))
        if operatorName=='pow': raise NotImplementedError('a**b where b is anything other than a scalar')
        other = as_garray(other)
        if self.ndim > other.ndim: other = other._add_axes(self.ndim)
        if self.ndim < other.ndim: return self._add_axes(other.ndim)._broadcastable_op(other, operatorName)
        if operatorName in ('less than', 'greater than'):
            # comparisons have no broadcasting cudamat kernels: tile both operands to a common shape
            self2 = self._tile_to_broadcast(other.shape)
            if _isTijmen: numTimeIncurred(self.size, 'eltwise binary, no bc')
            return self2._new(basicHandler(self2._base_as_row(), other._tile_to_broadcast(self2.shape)._base_as_row(), self2._new_cm()))
        if self.ndim < other.ndim: return other._broadcastable_op(self, operatorName) # now self.ndim == other.ndim
        selfToBroadcast = tuple( self.shape[i]==1 and other.shape[i]!=1 for i in range(self.ndim))
        otherToBroadcast = tuple( other.shape[i]==1 and self.shape[i]!=1 for i in range(self.ndim))
        bc = otherToBroadcast; bci = tuple( i for i in tuple(xrange(len(bc))) if bc[i])
        if reduce(operator.or_, selfToBroadcast, False) and reduce(operator.or_, otherToBroadcast, False): return self._broadcastable_op(other._tile_to_broadcast(self.shape, bci), operatorName)
        if reduce(operator.or_, selfToBroadcast, False): return other._broadcastable_op(self, operatorName) # now only other may have dims that need to be broadcast
        if reduce(operator.or_, ( other.shape[i] not in (1, self.shape[i]) for i in range(self.ndim)), False): raise ValueError('shape mismatch: objects cannot be broadcast to a single shape')
        if not reduce(operator.or_, otherToBroadcast, False): # handle case: nothing to bc
            if _isTijmen: numTimeIncurred(self.size, 'eltwise binary, no bc')
            return self._new(( _cmType.add if operatorName=='add' else _cmType.mult)(self._base_as_row(), other._base_as_row(), self._new_cm()))
        if self.size==0: return self
        if bci == tuple(xrange(len(bci))): # handle case: only the first dims need broadcasting
            if operatorName in ('multiply', 'add') and _isTijmen: # using optimized cuda code
                ret = empty(self.shape)
                axis0len = _prodT(self.shape[:len(bci)])
                axis1len = _prodT(self.shape[len(bci):])
                nThreadsPerBlock = 512
                nBlocks = axis1len//nThreadsPerBlock+1
                cudaFn = getattr(_cudamat._cudamat, '%sBcAxis0' % operatorName)
                cudaFn.restype = _ctypes.c_int
                assert 0==cudaFn(_ctInt(nBlocks), _ctInt(nThreadsPerBlock), self._base.p_mat, other._base.p_mat, ret._base.p_mat, _ctInt(axis0len), _ctInt(axis1len))
                if _isTijmen: numTimeIncurred(self.size, 'eltwise bc axis 0')
                return ret
            #return self._new(( _cmType.add_col_vec if operatorName=='add' else _cmType.mult_by_col)(self._base_shaped(len(bci)), other._base_as_row(), self._new_cm(len(bci))))
        if bci == tuple(xrange(self.ndim-len(bci), self.ndim)): # handle case: only the last dims need broadcasting
            if _isTijmen: numTimeIncurred(self.size, 'eltwise bc axis -1')
            return self._new(( _cmType.add_row_vec if operatorName=='add' else _cmType.mult_by_row)(self._base_shaped(self.ndim-len(bci)), other._base_shaped(self.ndim-len(bci)), self._new_cm(self.ndim-len(bci))))
        # remaining case: broadcasting neither just the first dims nor just the last dims. this can be done very intelligently, but for now I won't bother
        if operatorName=='multiply' and len(bci)==1 and hasattr(_cudamat._cudamat, 'multiplyBcAxis1'): # special case: using optimized multiplyBcAxis1 (my cuda code)
            ret = empty(self.shape)
            axisI = bci[0]
            axis0len = _prodT(self.shape[:bci[0]])
            axis1len = self.shape[bci[0]]
            axis2len = _prodT(self.shape[bci[0]+1:])
            _cudamat._cudamat.multiplyBcAxis1.restype = _ctypes.c_int
            assert 0==_cudamat._cudamat.multiplyBcAxis1(_ctInt(__builtin__.min(512, axis2len)),
                                                        self._base.p_mat,
                                                        other._base.p_mat,
                                                        ret._base.p_mat,
                                                        _ctInt(axis0len),
                                                        _ctInt(axis1len),
                                                        _ctInt(axis2len),
                                                        )
            if _isTijmen: numTimeIncurred(self.size, 'eltwise bc axis 1')
            return ret
        # fall back: materialize the broadcast on the first broadcast axis, then retry
        return self._broadcastable_op(other._tile_to_broadcast(self.shape, bci[:1]), operatorName)

    def _elementwise_unary(self, handler):
        # applies the cudamat unary <handler> elementwise, into a fresh array of the same shape
        if _isTijmen: numTimeIncurred(self.size, handler.__name__)
        return _check_number_types(self._new(handler(self._base_as_row(), self._new_cm())))

    def _reduction__base(self, operatorInCm, axis):
        """ Internal. Shared implementation of the reductions; <operatorInCm> is the cudamat reduction method to use. """
        if axis==None and operatorInCm==_cmType.sum and self.size==0: return 0.0 # cudamat bug workaround
        if axis==None: return self.ravel()._reduction__base(operatorInCm, 0).item()
        if not type(axis) in _numberTypes: raise TypeError('the value %s is not appropriate for the "axis" parameter.' % str(axis))
        axis = int(axis)
        if axis < -self.ndim or axis>=self.ndim: raise ValueError('axis (%d) out of bounds for an array with %d axes.'
                                                              % (axis, self.ndim))
        axis %= self.ndim
        if axis==0 and operatorInCm==_cmType.max: # max over rows is not yet supported in cudamat
            return self.reshape_2d(1).T.max(1).reshape(self.shape[1:])
        if axis==0 and self.ndim==1 and self.size>5000 and operatorInCm==_cmType.sum: # optimization. apparently, cudamat is not maximally efficient.
            n = int(numpy.sqrt(self.size-1))
            # sum an n x n block two-dimensionally, plus the leftover tail
            return self[:n*n].reshape((n, n))._reduction__base(operatorInCm, 0)._reduction__base(operatorInCm, 0) + self[n*n:]._reduction__base(operatorInCm, 0)
        if operatorInCm==_cmType.sum:
            chunkSize = 1024*256 # sum over longer dimensions fails in cudamat
            nChunks = (self.shape[axis] + chunkSize-1) // chunkSize
            if nChunks>1:
                return reduceAdd( self[(slice(None),) * axis + (slice(chunkI*chunkSize, __builtin__.min(self.shape[axis], (chunkI+1)*chunkSize)),)]._reduction__base(operatorInCm, axis)
                                  for chunkI in range(nChunks))
        if self.shape[axis]==0:
            if operatorInCm==_cmType.max: raise ValueError('max over an axis of length 0 is undefined')
            assert operatorInCm==_cmType.sum, 'unexpected operator'
            return zeros(_deleteT2(self.shape, axis))
        if self.size==0 and self.shape[axis]!=0: return empty(_deleteT2(self.shape, axis))
        if axis==0: return _check_number_types(garray(operatorInCm(self._base_shaped(1), 1, _new_cm(_prodT(self.shape[1:]))), self.shape[1:], None))
        if axis==self.ndim-1:
            if self.ndim!=2: return self.reshape_2d(-1)._reduction__base(operatorInCm, 1).reshape(self.shape[:-1])
            if self.ndim==2:
                chunkSize = 2**16-1
                nChunks = (len(self) + chunkSize-1) // chunkSize
                if nChunks>1: # cudamat chokes on big arrays, so break it in pieces for cudamat
                    chunks = tuple([ self[chunkI*chunkSize : __builtin__.min((chunkI+1)*chunkSize, len(self))]
                                     for chunkI in range(nChunks)])
                    return concatenate([ chunk._reduction__base(operatorInCm, 1) for chunk in chunks])
                else: # small array
                    return _check_number_types(garray(operatorInCm(self._base_shaped(1), 0, _new_cm((len(self), 1))), (len(self),), None))
        # remaining case: a middle axis. rotate it to the front, reduce, and rotate back.
        return self.transpose_simple(axis)._reduction__base(operatorInCm, 0).transpose_simple(-axis)



    # ------------------------------------------------------------------------------- external misc non-numerical

    def __init__(self, data, copy=True, ndmin=0):
        """ the parameters mean the same as in numpy.array() """
        if type(data)!=_cmType: assert copy in (True, False) and type(ndmin) in _numberTypes, 'garray() parameters copy=%s, ndmin=%s are not of the right type' % (str(copy), str(ndmin))
        if type(data)==_cmType: # internal use only. the 3 arguments are, unlike their names suggest, the ._base, .shape, ._is_alias_of
            self._base = data
            self._set_shape_info(copy)
            self._is_alias_of = ndmin
            if self._is_alias_of==None and track_memory_usage:
                # record the calling source line so memory_allocators() can report it
                self.allocating_line = _calling_line()
                _memoryUsers[self.allocating_line] = (_memoryUsers[self.allocating_line][0]+1, _memoryUsers[self.allocating_line][1]+self.size*4)
        elif isinstance(data, garray):
            if ndmin>0: data = data._add_axes(ndmin)
            garray.__init__(self,
                            ( _new_cm(data.size).assign(data._base_as_row()) if copy else data._base),
                            data.shape,
                            ( None if copy else data))
        elif type(data) == types.GeneratorType: garray.__init__(self, tuple(data), ndmin=ndmin)
        elif _isSequence(data):
            if len(data)==0 or not _any2_(data, is_garray): garray.__init__(self, numpy.array(data, ndmin=ndmin), copy=False)
            else: garray.__init__(self, concatenate( as_garray(element)[None] for element in data), ndmin=ndmin) # no need to copy, because concat copies.
        else: # remaining cases. essentially init from numpy array.
            npa = numpy.array(data, copy=False) # in case data was a number
            if str(npa.dtype) in ('object', '|S3'): raise TypeError('Cannot convert "%s" to a garray.' % data)
            # we're not using the cudamat constructor, because that always allocs gpu mem, and this way the mem may come from re-use.
            cm = _new_cm(npa.size)
            if not hasattr(cm, 'numpy_array'):
                #cm.copy_to_host() # if cm was created using cudamat.empty, this is needed to associate cm with a numpy array
                # follows an inlined version of the relevant portion of cm.copy_to_host(). This is quicker because it doesn't actually copy.
                cm.numpy_array = numpy.empty((cm.mat.size[0], cm.mat.size[1]), dtype=numpy.float32, order='F')
                cm.mat.data_host = cm.numpy_array.ctypes.data_as(_ctypes.POINTER(_ctypes.c_float))
                cm.mat.on_host = 1
            if npa.size!=0: cm.numpy_array[:] = npa.reshape((-1, 1), order='C') # no cudamat.reformat is needed, because that's only dtype and order change, which are handled by the assignment anyway
            cm.copy_to_device()
            garray.__init__(self, cm, _extend_shape(npa.shape, ndmin), None)

    def __new__(cls, *args, **kwarg): return object.__new__(cls)

    def as_numpy_array(self, dtype=numpy.float64):
        """ Copies the data to the cpu and returns it as a numpy array of the given dtype. """
        if self.size==0: return numpy.zeros(self.shape, dtype)
        return numpy.array(self._base_as_row().asarray(), copy=True, order='C', dtype=dtype).reshape(self.shape)

    asarray = as_numpy_array # the cudamat name

    def astype(self, type): return self.asarray().astype(type)  # numpy-style conversion; note: returns a numpy array

    tile = tile  # the module-level tile() doubles as a method

    def ravel(self): return self.reshape(-1)

    def item(self): return self.as_numpy_array().item()

    def _add_axes(self, finalNdim): return self.reshape(_extend_shape(self.shape, finalNdim))

    def sort(self, axis=-1, kind='quicksort', order=None):
        """ like numpy.sort, this sorts in place and returns None.
        """
        # sorting happens on the cpu; the result is copied back to the gpu
        temp = self.as_numpy_array()
        temp.sort(axis, kind, order)
        self[:] = temp

    def reshape(self, *newShape):
        """ Returns an alias of self with the given shape; one axis may be given as -1 (inferred from the others). """
        if len(newShape)==1 and not type(newShape[0]) in _numberTypes: newShape = tuple(newShape[0])
        if not _all2_(newShape, _isNumber): raise TypeError('the parameters to reshape don\'t look like a valid shape')
        if -1 in newShape:
            if _prodT(newShape)==0: raise ValueError("-1 as a parameter to reshape is not allowed if one of the other parameters is zero.")
            newShape = _modifyT(newShape, operator.indexOf(newShape, -1), self.size//-_prodT(newShape))
        if _prodT(newShape) != self.size: raise ValueError('the total number of items cannot be changed in a reshape')
        return garray(self._base, newShape, self)

    def reshape_2d(self, n_dimensions_as_rows):
        """ reshapes to 2 axes. The first <n_dimensions_as_rows> axes of the array become the first axis of the returned value. The remaining ones form the second axis. """
        if n_dimensions_as_rows<0: n_dimensions_as_rows += self.ndim
        return self.reshape((_prodT(self.shape[:n_dimensions_as_rows]), _prodT(self.shape[n_dimensions_as_rows:])))

    @property
    def T(self):
        """ Transpose. Returns a new array, not an alias. """
        if self.ndim==2: # _base case
            if self.size==0: return self.reshape(tuple(reversed(self.shape))) # cudamat bug workaround
            if self.shape[1]>1e6: # cudamat bug workaround. with 2m columns it fails
                return concatenate([ self[:, i*10**6 : (i+1)*10**6].T for i in range((self.shape[1]+10**6-1)//10**6)])
            if self.shape[0]>1e6: # cudamat bug workaround. using concat is not an option, because that uses transpose.
                ret = empty(tuple(reversed(self.shape)))
                for i in range((self.shape[0]+10**6-1)//10**6):
                    ret[:, i*10**6 : (i+1)*10**6] = self[i*10**6 : (i+1)*10**6].T
                return ret
            return garray(self._base_as_2d().transpose(_new_cm(tuple(reversed(self.shape)))), tuple(reversed(self.shape)), None)
        else: return self.transpose()

    def transpose_simple(self, nDimsToGroup):
        """ shifts the first <nDimsToGroup> axes to the end, and the remaining ones to the start. This returns a new array, not an alias. """
        if nDimsToGroup<0: nDimsToGroup += self.ndim
        return self.reshape_2d(nDimsToGroup).T.reshape(self.shape[nDimsToGroup:] + self.shape[:nDimsToGroup])

    def transpose(self, *axes):
        """ like numpy.transpose, except that this doesn't return an alias, but rather a new array. """
        # This is not really supported by cudamat, so it takes creativity. I handle a variety of cases differently.
        if len(axes)==1 and not type(axes[0]) in _numberTypes: axes = tuple(axes[0])
        if axes==_t0: axes = tuple(reversed(tuple(xrange(self.ndim))))
        if axes == tuple(xrange(self.ndim)): return self.copy()
        if tuple(sorted(axes)) != tuple(xrange(self.ndim)): raise ValueError("%s is not a valid argument to transpose() of an array of %d axes" % (axes, self.ndim))
        for i in range(self.ndim-1):
            if axes[i+1]==axes[i]+1: return (self. # see if the task can be simplified by collapsing some axes that are kept adjacent
                reshape(self.shape[:axes[i]] + (_prodT(self.shape[axes[i]:axes[i]+2]),) + self.shape[axes[i]+2:]).
                transpose((originalAxisI-(originalAxisI>axes[i])) for originalAxisI in _deleteT2(axes, i+1)).
                reshape(self.shape[axisI] for axisI in axes))
        if self.ndim==3 and hasattr(_cudamat, '_cudamat') and hasattr(_cudamat._cudamat, 'transpose3') and self.size!=0:
            # 3d case handled by a dedicated cuda kernel, when available
            reorderingI = {(0, 2, 1): 0, (1, 0, 2): 1, (2, 1, 0): 2}[axes]
            ret = empty(tuple( self.shape[axisI] for axisI in axes))
            gridX, gridY = (self.size+511)//512, 1
            while gridX>65535: gridY*=2; gridX = (gridX+1)//2;  # stay within the cuda grid dimension limit
            _cudamat._cudamat.transpose3.restype = _ctypes.c_int
            assert 0==_cudamat._cudamat.transpose3(_ctInt(gridX), _ctInt(gridY), self._base.p_mat, ret._base.p_mat, _ctInt(self.shape[0]), _ctInt(self.shape[1]), _ctInt(self.shape[2]), _ctInt(reorderingI))
            return ret
        def shiftAxesRight(shiftN): return self.transpose_simple(-shiftN).transpose( (axisI+shiftN)%self.ndim for axisI in axes)
        for i in range(self.ndim-1): # see if the task can be simplified by rotating axes right by 1. if so, the loop before this one can simplify further
            if axes[i:i+2] == (self.ndim-1, 0): return shiftAxesRight(1)
        # no further simplifications can be done. we need to proceed with a loop over the first axis. First rotate the intended axis to position 0.
816 | if axes[0]!=0: return shiftAxesRight(-axes[0]) 817 | ret = empty( self.shape[axisI] for axisI in axes) 818 | for i in range(self.shape[0]): ret[i] = self[i].transpose( x-1 for x in axes[1:]) 819 | return ret 820 | 821 | def copy(self): return garray(self, copy=True) 822 | 823 | def diagflat(self, k=0): 824 | if self.ndim!=1: return self.ravel().diagflat(k) 825 | if k!=0: raise NotImplementedError('k!=0 for garray.diagflat') 826 | selfSize = self.size 827 | ret = zeros((selfSize, selfSize)) 828 | ret.ravel()[:-1].reshape((selfSize-1, selfSize+1))[:, 0] = self[:-1] 829 | if selfSize!=0: ret.ravel()[-1] = self[-1] 830 | return ret 831 | 832 | def diagonal(self): 833 | if self.ndim==1: return self.diagflat() 834 | if self.ndim==2: 835 | if self.shape[0] > self.shape[1]: return self[:self.shape[1]].diagonal() 836 | if self.shape[1] > self.shape[0]: return self[:, :self.shape[0]].diagonal() 837 | return self.ravel()[::self.shape[0]+1] 838 | raise NotImplementedError('garray.diagonal for arrays with ndim other than 1 or 2.') 839 | def diag(self): return self.diagonal() 840 | 841 | 842 | 843 | # ------------------------------------------------------------------------------- elementwise type checking 844 | 845 | def all_real(self): 846 | """ returns True iff all array elements are regular floats, as opposed to inf's, -inf's, and NaN's. """ 847 | return (self*0).sum()==0 848 | 849 | def isinf(self): 850 | """ elementwise, checking for inf or -inf. """ 851 | return 1 - self.isreal() - self.isnan() 852 | 853 | def isreal(self): 854 | """ elementwise, checking for real numbers. See also .all_real() """ 855 | return (self-numpy.inf) 856 | 857 | def isnan(self): 858 | """ elementwise, checking for NaN's. 
""" 859 | return (self>0) + (self<1) < .5 860 | 861 | def isnumber(self): 862 | """ elementwise, checking for anything other than NaN's """ 863 | return (self>0) + (self<1) > .5 864 | 865 | 866 | 867 | # ------------------------------------------------------------------------------- external misc numerical 868 | 869 | def __abs__(self): return self._elementwise_unary(_cudamat.abs) 870 | def abs(self): return __builtin__.abs(self) 871 | def as_bool(self): return self!=0 872 | def exp(self): return self._elementwise_unary(_cudamat.exp) 873 | def log(self): return self._elementwise_unary(_cudamat.log) 874 | def log_1_plus_exp(self): return self._elementwise_unary(_cudamat.log_1_plus_exp) 875 | def logistic(self): return self._elementwise_unary(_cudamat.sigmoid) 876 | sigmoid = logistic 877 | def sign(self): return self._elementwise_unary(_cmType.sign) 878 | def sqrt(self): return self._elementwise_unary(_cudamat.sqrt) 879 | def tanh(self): return self._elementwise_unary(_cudamat.tanh) 880 | 881 | 882 | def sum(self, axis=None): return self._reduction__base(_cmType.sum, axis) 883 | def mean(self, axis=None): return self.sum(axis) / ( self.size if axis==None else self.shape[axis]) 884 | def max(self, axis=None): 885 | if axis==0 and self.shape[0]>=1 and hasattr(_cudamat._cudamat, 'maxAxis0'): # my own fast implementation 886 | ret = empty(self.shape[1:]) 887 | _ctInt = _cudamat.ct.c_int 888 | nThreadsPerBlock = 32 889 | gridX, gridY = ((ret.size+nThreadsPerBlock-1)//nThreadsPerBlock), 1 890 | while gridX>65535: gridY*=2; gridX = (gridX+1)//2; 891 | _cudamat._cudamat.maxAxis0.restype = _ctypes.c_int 892 | assert 0==_cudamat._cudamat.maxAxis0(_ctInt(gridX), _ctInt(gridY), _ctInt(nThreadsPerBlock), self._base.p_mat, ret._base.p_mat, _ctInt(self.shape[0]), _ctInt(ret.size)) 893 | return ret 894 | if self.isnan().any2(): 895 | if axis==None: return self.asarray().max() 896 | else: return garray(self.asarray().max(axis)) 897 | #raise NotImplementedError('cudamat max fails 
with nans') 898 | return self._reduction__base(_cmType.max, axis) 899 | def argmax(self, axis=None): return numpy.argmax(self.asarray(), axis) 900 | def argmin(self, axis=None): return numpy.argmin(self.asarray(), axis) 901 | def min(self, axis=None): return -(-self).max(axis) 902 | def all(self, axis=None): return ( True if self.size==0 else (self.as_bool()).min()) 903 | def any(self, axis=None): return ( False if self.size==0 else (self.as_bool()).max()) 904 | 905 | def all2(self, axis=None): return 1-(1-self).any2(axis) # optimized for when I'm sure that the content is boolean 906 | def any2(self, axis=None): return self.sum(axis) > 0 # optimized for when I'm sure that the content is boolean 907 | 908 | def rand(self, distribution = 'uniform'): 909 | """ 910 | returns a new garray, of the same shape as self, filled with random numbers. 911 | can be either 'uniform' or 'normal'. 912 | """ 913 | return _rand__base(self.shape, distribution, False) 914 | 915 | def euclid_norm(self): return self._base.euclid_norm() 916 | 917 | dot = dot 918 | where = where 919 | nonzero = nonzero 920 | 921 | def __nonzero__(self): return self.size==1 and self.item()!=0 922 | 923 | 924 | # ------------------------------------------------------------------------------- operator overloads, numerical 925 | 926 | def __add__(self, other): return _check_number_types(self._broadcastable_op(as_garray_or_scalar(other), 'add')) 927 | def __mul__(self, other): return _check_number_types(self._broadcastable_op(as_garray_or_scalar(other), 'multiply')) 928 | def __or__(self, other): return (self.as_bool() + other.as_bool()).as_bool() 929 | def __and__(self, other): return self.as_bool() * other.as_bool() 930 | 931 | def __pow__(self, other, modulo=None): 932 | if modulo!=None: raise NotImplementedError('power with modulo') 933 | if type(other) in _numberTypes and other==2: return self*self # faster 934 | return self._broadcastable_op(as_garray_or_scalar(other), 'pow') 935 | 936 | 937 | # the 
following would be a lot simpler if I wouldn't have to deal with nans 938 | 939 | def __lt__(self, other): return _check_number_types(self._broadcastable_op(as_garray_or_scalar(other), 'less than')) 940 | 941 | def __gt__(self, other): return _check_number_types(self._broadcastable_op(as_garray_or_scalar(other), 'greater than')) 942 | 943 | def __le__(self, other): return self.isnumber() * as_garray(other).isnumber() * (1-(self>other)) 944 | 945 | def __ge__(self, other): return self.isnumber() * as_garray(other).isnumber() * (1-(self=other) if type(other) in _castableTypes else False) 950 | 951 | def eq2(self, other): 952 | """ 953 | Returns a boolean: True if self and other are the same (arrays with the same shape and contents); False otherwise. 954 | This is what == does on most Python objects (on arrays it's been strangely overloaded though). 955 | garrays compare equal to numpy arrays with the same contents, even if the data types differ. 956 | """ 957 | if self is other: return True 958 | if not is_array(other): return False 959 | if self.shape != other.shape: return False 960 | return all(self==other)==1 961 | 962 | def __sub__(self, other): 963 | if isinstance(other, garray) and other.shape==self.shape: # use specialized method 964 | return self._new(self._base_as_row().subtract(other._base_as_row(), self._new_cm())) 965 | else: return self + -as_garray(other) # if i need to broadcast, making use of the row add and col add methods is probably faster 966 | 967 | def __div__(self, other): 968 | if type(other) in _numberTypes: return self * (1./other) 969 | other = as_garray(other) 970 | return self * other._new(other._base_as_row().reciprocal(other._new_cm())) 971 | 972 | def __rmul__(self, other): return self*other 973 | def __radd__(self, other): return self+other 974 | def __rsub__(self, other): return other + -self 975 | def __rdiv__(self, other): return as_garray(other) / self 976 | def __rpow__(self, other): raise NotImplementedError('a**b where only b 
is a garray') 977 | 978 | def __pos__(self): return self 979 | def __neg__(self): return self*-1 980 | 981 | def __iadd__(self, other): self[_t0] = self+other; return self # not as direct as it might have been, but the effect is the same. "self[:]" doesn't work for 0das. 982 | def __imul__(self, other): self[_t0] = self*other; return self 983 | def __isub__(self, other): self[_t0] = self-other; return self 984 | def __idiv__(self, other): self[_t0] = self/other; return self 985 | def __imod__(self, other): self[_t0] = self%other; return self 986 | def __ipow__(self, other, modulo=None): self[_t0] = self.__pow__(other, modulo); return self 987 | 988 | 989 | 990 | # ------------------------------------------------------------------------------- operator overloads, non-numerical 991 | 992 | def __len__(self): 993 | if self.ndim==0: raise TypeError('len() of unsized object') 994 | return self.shape[0] 995 | 996 | def __getitem__(self, selectors): 997 | selectors = _nonSeqAsS(selectors) 998 | for i,sel in enumerate(selectors): # deal with newaxis and ellipsis 999 | if sel is Ellipsis: return self[selectors[:i] + (slice(None),)* (self.ndim - (__builtin__.sum( x != None for x in selectors)-1)) + selectors[i+1:]] # sel==Ellipsis is bad when sel is an array 1000 | if sel is newaxis: return self.reshape(_insertT(self.shape, i, (1,)))[_modifyT(selectors, i, slice(None))] 1001 | if len(selectors) > self.ndim: raise IndexError('more indices than axes') 1002 | if _all2_(selectors, _isFullSlice): return self 1003 | if reduce(operator.and_, ( _isSequence(sel) or is_array(sel) for sel in selectors), True) and len(selectors)>=2: 1004 | selectors = tuple(map(as_garray, selectors)) 1005 | if reduce(operator.or_, ( (sel < 0).sum() > 0 for sel in selectors), False): raise NotImplementedError('negative indices in index arrays, combined with having multiple indices arrays') 1006 | # ravel the first two dimensions into one, and translate the corresponding indices arrays into one 
accordingly 1007 | return self.reshape((self.shape[0]*self.shape[1],) + self.shape[2:])[(selectors[0]*self.shape[1]+selectors[1],) + selectors[2:]] 1008 | if __builtin__.sum( _isSequence(sel) or is_array(sel) for sel in selectors)>1: 1009 | raise NotImplementedError('slicing with more than one sequence/array among the indices, with also other kinds of values among the indices') 1010 | # handle the operations on different axes one by one; earlier axes are handled earlier 1011 | axisI = ( i for i, x in enumerate(selectors) if not _isFullSlice(x)).next() 1012 | axisLen = self.shape[axisI] 1013 | axisSelector = selectors[axisI] 1014 | if not _all2_(selectors[axisI+1:], _isFullSlice): return self[selectors[:axisI+1]][(slice(None),)*(axisI+(not type(axisSelector) in _numberTypes)) + selectors[axisI+1:]] # first select on axisI only; then do the further axes. 1015 | # from here, axisI is the only axis on which we don't take a full slice 1016 | if type(axisSelector) == types.SliceType and axisSelector.step not in (1, None): axisSelector = numpy.arange(axisLen)[axisSelector] 1017 | if type(axisSelector) in _numberTypes: # selecting a single location on axisI, and thus reducing the dimensionality by 1 1018 | ret = self[selectors[:axisI] + (_short_slice(_read_single_index(axisSelector, axisLen)),)] .reshape(_deleteT2(self.shape, axisI)) 1019 | return ( ret.item() if ret.shape==_t0 else ret) # exception, to have the same behavior as numpy 1020 | if _isSequence(axisSelector) or type(axisSelector) == numpy.ndarray: axisSelector = garray(axisSelector) 1021 | if isinstance(axisSelector, garray): 1022 | # a 1d index means re-arranging this axis. I.e. a number of length 1 selections on this axis, concatenated on this axis. 
1023 | # other dimensionality means using the raveled version, and then reshaping to reflect the selector dimensionality 1024 | if hasattr(_cmType, 'select_columns'): 1025 | if axisI==0: 1026 | if _doExpensiveCheck() and (axisSelector> len(self)-.01).sum() !=0: raise IndexError('index %d (found in an indices array) is too large, for an axis of length %d' % (max(axisSelector), len(self))) 1027 | if _doExpensiveCheck() and (axisSelector<-len(self)-.5).sum() !=0: raise IndexError('index %d (found in an indices array) is too small, for an axis of length %d' % (min(axisSelector), len(self))) 1028 | return garray(self._base_shaped(1).select_columns(axisSelector._base_shaped(axisSelector.ndim), _new_cm((axisSelector.size, self.size/self.shape[0]))), axisSelector.shape + self.shape[1:], None) 1029 | else: return self.transpose_simple(axisI)[axisSelector].transpose_simple(-axisI) 1030 | else: return (concatenate(tuple( self[_modifyT(selectors, axisI, slice(choiceOnThisAxis, choiceOnThisAxis+1))] for choiceOnThisAxis in axisSelector.ravel()), axisI) 1031 | .reshape(self.shape[:axisI] + axisSelector.shape + self.shape[axisI+1:])) 1032 | if not type(axisSelector) == types.SliceType: raise ValueError('index not understood: %s' % axisSelector) 1033 | # from here, selector is a simple slice 1034 | sFrom, sTo, sLen = _read_simple_slice(axisSelector, axisLen) 1035 | retShape = _modifyT(self.shape, axisI, sLen) 1036 | if _prodT(retShape)==0: return zeros(retShape) 1037 | if axisI==0: return garray(_cm_row_slice_read(self._base_shaped(1), sFrom, sTo), retShape, self) # slice on axis 0 is free, using _cm_row_slice_read 1038 | if axisI!=1: return self.reshape((_prodT(self.shape[:axisI]),) + self.shape[axisI:])[:, sFrom:sTo].reshape(retShape) # redirect: collapse earlier axes into one 1039 | if self.ndim != 2: return self.reshape_2d(1)[:, sFrom * _prodT(self.shape[axisI+1:]) : sTo * _prodT(self.shape[axisI+1:])].reshape(retShape) # redirect: use long elements 1040 | chunkSize = int(2e6) 
1041 | nChunks = (len(self) + chunkSize - 1) // chunkSize 1042 | if nChunks>1: return concatenate( tuple(self[chunkI*chunkSize : (chunkI+1)*chunkSize, sFrom:sTo] for chunkI in range(nChunks)), 0) # redirect in batches, bc cudamat chokes on big jobs 1043 | # _base case for column slice 1044 | retCm = _new_cm(retShape) 1045 | _cm_col_slice_read(self._base_shaped(1), sFrom, sTo, retCm) 1046 | return garray(retCm, retShape, None) 1047 | 1048 | def __iter__(self): 1049 | for i in tuple(xrange(len(self))): yield self[i] 1050 | 1051 | def __setitem__(self, selectors, other): 1052 | # this is different from getitem. There, I can handle the axes one at a time. Here, it's more integrated. 1053 | selectors = _nonSeqAsS(selectors) 1054 | for i,sel in enumerate(selectors): # deal with ellipsis 1055 | if sel is Ellipsis: return self.__setitem__(selectors[:i] + (slice(None),)* (self.ndim - (len(selectors)-1)) + selectors[i+1:], other) # sel==Ellipsis is bad when sel is an array 1056 | if len(selectors) > self.ndim: raise IndexError('more indices than axes') 1057 | if reduce(operator.and_, ( is_array(sel) or _isSequence(sel) for sel in selectors), True) and selectors!=_t0: 1058 | if len(selectors)==1: 1059 | if not hasattr(_cmType, 'set_selected_columns'): 1060 | raise NotImplementedError("slice assign with a sequence/array as index. Get the newest version of cudamat (or npmat if you're running on the cpu).") 1061 | sel = as_garray(selectors[0]) 1062 | if len(sel) != len(other): raise ValueError('number of rows to set != number of provided rows') 1063 | if other.shape[1:] != self.shape[1:]: raise ValueError('shape mismatch in assignment') 1064 | if sel.ndim!=1: raise NotImplementedError('assignment with as index an array of ndim!=1') 1065 | if sel.size==0: return # the current implementation of set_selected_columns doesn't handle that well 1066 | self._base_shaped(1).set_selected_columns(sel._base_shaped(1), other._base_shaped(1)) 1067 | else: # >1 selectors, all arrays/sequences. 
ravel the first dimension of self, and correspondingly unify the first two selectors 1068 | self.reshape((_prodT(self.shape[:2]),) + self.shape[2:])[(as_garray(selectors[0])*self.shape[1]+as_garray(selectors[1]),) + selectors[2:]] = as_garray(other) 1069 | return 1070 | if reduce(operator.or_, ( _isSequence(axisSel) or is_array(axisSel) for axisSel in selectors), False): raise NotImplementedError('slice assign with a sequence/array as index, as well as other indexing objects') 1071 | if reduce(operator.or_, ( type(axisSel) == types.SliceType and axisSel.step not in (1, None) for axisSel in selectors), False): raise NotImplementedError('slice assign with stride != 1') 1072 | if not reduce(operator.and_, ( type(axisSel) in _numberTypes or type(axisSel) == types.SliceType for axisSel in selectors), True): raise ValueError('index not understood, in slice assignment.') 1073 | selectors = selectors + (slice(None),)*(self.ndim-len(selectors)) 1074 | # now len(selectors) == ndim, and all selectors are single indices or simple slices 1075 | # task: broadcast other, and do shape check. 1076 | other = as_garray_or_scalar(other) 1077 | assignedShape = tuple( _read_simple_slice(axisSel, self.shape[axisI])[2] for axisI, axisSel in enumerate(selectors) if not type(axisSel) in _numberTypes) 1078 | if isinstance(other, garray): 1079 | if other.ndim < len(assignedShape): other = other._add_axes(len(assignedShape)) 1080 | if other.ndim > len(assignedShape): 1081 | if _prodT(other.shape[: other.ndim-len(assignedShape)]) != 1: raise ValueError('Incompatible shapes in slice assign: the assigned area has shape %s, and the incoming values have shape %s.' 
% (assignedShape, other.shape)) 1082 | other = other.reshape(other.shape[-len(assignedShape):]) 1083 | # now other.ndim == len(assignedShape) 1084 | if not reduce(operator.and_, ( other.shape[axisNr] in (1, assignedShape[axisNr]) for axisNr in tuple(xrange(len(assignedShape)))), True): 1085 | raise ValueError('Incompatible shapes in slice assign: the incoming values have shape %s, but the assigned area has shape %s.' % (other.shape, assignedShape)) 1086 | other = other._tile_to_broadcast(assignedShape) 1087 | # the only time I can use scalar assign is when I don't need cudamat's column assign at all. that only happens when all selectors other than optionally the first are full slices. 1088 | if _all2_(selectors[1:], _isFullSlice): 1089 | ( _cm_row_slice_read(self._base_shaped(1), _read_single_index(selectors[0], self.shape[0]), _read_single_index(selectors[0], self.shape[0])+1) 1090 | if self.ndim==1 and type(selectors[0]) in _numberTypes else 1091 | self[selectors[:1]]._base_as_row() # I want this to work even when selectors = _t0 1092 | ).assign( other if type(other) in _numberTypes else other._base_as_row()) 1093 | return 1094 | if type(other) in _numberTypes: other = garray(other)._add_axes(len(assignedShape))._tile_to_broadcast(assignedShape) 1095 | # now other is a garray of exactly the expected shape, and there are things other than complete slices beyond axis #0 so I'm going to need a col assign. 1096 | # task: get rid of single indices in selectors 1097 | for i in range(self.ndim): 1098 | if type(selectors[i]) in _numberTypes: 1099 | selectors = _modifyT(selectors, i, _short_slice(_read_single_index(selectors[i], self.shape[i]))) 1100 | other = other.reshape(_insertT(other.shape, i, (1,))) 1101 | if not _isFullSlice(selectors[0]): return self[selectors[0]].__setitem__((slice(None),) + selectors[1:], other) 1102 | # now all selectors are either full or simple slices; axis 0 is a full slice; and at least one other axis is a simple slice. 
1103 | axisI = ( i for i, x in enumerate(tuple( not _isFullSlice(sel) for sel in selectors)) if x).next() 1104 | if _all2_(selectors[axisI+1:], _isFullSlice): # then do a column slice assign directly using cudamat. 1105 | sFrom, sTo = _read_simple_slice(selectors[axisI], self.shape[axisI])[:2] 1106 | elementWidth = _prodT(self.shape[axisI+1:]) 1107 | if other.size!=0: # cudamat chokes on that 1108 | _cm_col_slice_write(self._base_shaped(axisI), sFrom*elementWidth, sTo*elementWidth, other._base_shaped(axisI)) 1109 | return 1110 | # remaining case: there are multiple non-full slices, and the slice on axis 0 is full. strategy: transpose to bring one of those non-full slices to the front. 1111 | selfT = self.transpose_simple(axisI) 1112 | selfT[selectors[axisI:] + selectors[:axisI]] = other.transpose_simple(axisI) 1113 | self._base_as_row().assign(selfT.transpose_simple(self.ndim-axisI)._base_as_row()) 1114 | 1115 | 1116 | 1117 | # ------------------------------------------------------------------------------- external, but not for user to see 1118 | 1119 | def __getstate__(self): 1120 | return (self.shape, self._base_as_row().asarray()) 1121 | 1122 | def __setstate__(self, state): 1123 | garray.__init__(self, state[1]) 1124 | self._set_shape_info(state[0]) 1125 | 1126 | def __array__(self, *dtype): 1127 | _envInstruction = _os.environ.get('GNUMPY_IMPLICIT_CONVERSION', 'refuse') 1128 | assert _envInstruction in ('allow', 'warn', 'refuse'), "environment variable GNUMPY_IMPLICIT_CONVERSION, if present, should be one of 'allow', 'warn', 'refuse'." 1129 | if _envInstruction=='refuse': raise TypeError("garray objects cannot be quietly converted to numpy arrays, because the environment variable GNUMPY_IMPLICIT_CONVERSION is set to 'refuse', or is not set at all (the default is 'refuse'). Set that variable to 'allow' or 'warn' if you wish to allow quiet conversion. 
garray's can always be explicitly converted using the .as_numpy_array() method.") 1130 | if _envInstruction=='warn': print "gnumpy: warning: a garray object is being quietly converted to a numpy array, and the environment variable GNUMPY_IMPLICIT_CONVERSION is set to 'warn'. garray objects can be explicitly converted using the .as_numpy_array() method." 1131 | return self.as_numpy_array().__array__(*dtype) 1132 | 1133 | def __repr__(self): return self.as_numpy_array().__repr__().replace('array(', 'garray(').replace('\n', '\n ').replace(', dtype=float32', '').replace(', dtype=float64', '') # 64 happens for empty arrays 1134 | 1135 | def __del__(self): 1136 | if not hasattr(self, '_is_alias_of'): 1137 | if _isTijmen: print 'gnumpy cleaning up an unfinished garray. mem counting may be off now.' 1138 | return # this object was never finished, because an exception (error or interrupt) occurred in the constructor. This check avoids error messages. 1139 | if self._is_alias_of is None: 1140 | # this is not true in one case: if a reference to self._base is stored somewhere explicitly (somewhere outside self but not in another garray). This happens internally sometimes. I saw it happening on the last line of setitem: a transpose is created (transposes own their mem, are not aliases), and then it's dropped but _base (obtained by _base_as_row) is still in use for a cm assign call. 
assert _sys.getrefcount(self._base)==2, _sys.getrefcount(self._base) 1141 | _cmsForReuse[self.size].append(self._base) 1142 | if track_memory_usage: _memoryUsers[self.allocating_line] = (_memoryUsers[self.allocating_line][0]-1, _memoryUsers[self.allocating_line][1]-self.size*4) 1143 | else: 1144 | assert type(self._is_alias_of).__name__ == 'garray', '_is_alias_of is of unexpected type, of which the str() is: "%s"' % str(type(self._is_alias_of)) 1145 | # del self._base # this is only to make the refcount assert not fail 1146 | 1147 | 1148 | 1149 | 1150 | _castableTypes = _numberTypes | set([tuple, list, numpy.array, garray]) 1151 | 1152 | --------------------------------------------------------------------------------