├── .gitignore ├── Makefile ├── README.md ├── Rmpi.R ├── doMPI.R ├── doSNOW.R ├── example-mpi.py ├── mpi.parSapply.R ├── mpiHello.c ├── parallel-dist.Rmd ├── parallel-dist.html ├── pbd-apply.R ├── pbd-construct.R ├── pbd-linalg.R ├── pbd-mpi.R ├── pi_code.py ├── python-pp.py ├── quad_mpi.c ├── quad_mpi.cpp ├── redistribute-test.R └── sockets.R /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.md 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | Rscript -e "library(knitr); knit2html('parallel-dist.Rmd')" 3 | 4 | clean: 5 | rm -f parallel-dist.{md,html} 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # tutorial-parallel-distributed 2 | Tutorial on parallelization tools for distributed computing (multiple computers or cluster nodes) in R, Python, Matlab, and C. 3 | 4 | Please see [the parallel-dist.html file](https://htmlpreview.github.io/?https://github.com/berkeley-scf/tutorial-parallel-distributed/blob/master/parallel-dist.html), which is generated dynamically from the underlying Markdown and various code files. 5 | -------------------------------------------------------------------------------- /Rmpi.R: -------------------------------------------------------------------------------- 1 | ## @knitr Rmpi 2 | 3 | # example syntax of standard MPI functions 4 | 5 | library(Rmpi) 6 | ## by default this should start one fewer workers than processes 7 | ## saving one for the master 8 | ## but on my system, this fails unless explicitly 9 | ## ask for one fewer slave than total number of slots across hosts 10 | mpi.spawn.Rslaves(nslaves = mpi.universe.size()-1) 11 | 12 | n = 5 13 | mpi.bcast.Robj2slave(n) 14 | mpi.bcast.cmd(id <- mpi.comm.rank()) 15 | mpi.bcast.cmd(x <- rnorm(id)) 16 | 17 | mpi.remote.exec(ls(.GlobalEnv)) 18 | 19 | mpi.bcast.cmd(y <- 2 * x) 20 | mpi.remote.exec(print(y)) 21 | 22 | objs <- as.list(c('x', 'n')) 23 | # next command sends value of objs on _master_ as argument to rm 24 | mpi.remote.exec(do.call, rm, objs) 25 | 26 | # verify that 'n' is gone: 27 | mpi.remote.exec(print(n)) 28 | 29 | # collect results back via send/recv 30 | mpi.remote.exec(mpi.send.Robj(y, dest = 0, tag = 1)) 31 | results = list() 32 | for(i in 1:(mpi.comm.size()-1)){ 33 | results[[i]] = mpi.recv.Robj(source = i, tag = 1) 34 | } 35 | 36 | print(results) 37 | 38 | mpi.close.Rslaves() 39 | mpi.quit() 40 | -------------------------------------------------------------------------------- /doMPI.R: -------------------------------------------------------------------------------- 1 | ## @knitr doMPI 2 | 3 | ## you should have invoked R as: 4 | ## mpirun -machinefile .hosts -np 1 R CMD BATCH --no-save doMPI.R doMPI.out 5 | ## unless running within a SLURM job, in which case you should do: 6 | ## mpirun R CMD BATCH --no-save file.R file.out 7 | 8 | library(Rmpi) 9 | library(doMPI) 10 | 11 | cl = startMPIcluster() # by default will start one fewer slave 12 | # than elements in .hosts 13 | 14 | registerDoMPI(cl) 15 | clusterSize(cl) # just to check 16 | 17 | results <- foreach(i = 1:200) %dopar% { 18 | out = mean(rnorm(1e6)) 19 | } 20 | 21 | closeCluster(cl) 22 | 23 | mpi.quit() 24 | -------------------------------------------------------------------------------- /doSNOW.R: 
-------------------------------------------------------------------------------- 1 | ## @knitr doSNOW 2 | 3 | library(doSNOW) 4 | machines = c(rep("beren.berkeley.edu", 1), 5 | rep("gandalf.berkeley.edu", 1), 6 | rep("arwen.berkeley.edu", 2)) 7 | 8 | cl = makeCluster(machines, type = "SOCK") 9 | cl 10 | 11 | registerDoSNOW(cl) 12 | 13 | fun = function(i) 14 | out = mean(rnorm(n)) 15 | 16 | nTasks <- 120 17 | 18 | print(system.time(out <- foreach(i = 1:nTasks) %dopar% { 19 | outSub <- fun() 20 | outSub # this will become part of the out object 21 | })) 22 | 23 | stopCluster(cl) # good practice, but not strictly necessary 24 | -------------------------------------------------------------------------------- /example-mpi.py: -------------------------------------------------------------------------------- 1 | ## @knitr mpi4py 2 | 3 | from mpi4py import MPI 4 | import numpy as np 5 | 6 | comm = MPI.COMM_WORLD 7 | 8 | # simple print out Rank & Size 9 | id = comm.Get_rank() 10 | print("Of ", comm.Get_size() , " workers, I am number " , id, ".") 11 | 12 | def f(id, n): 13 | np.random.seed(id) 14 | return(np.mean(np.random.normal(0, 1, n))) 15 | 16 | n = 1000000 17 | result = f(id, n) 18 | 19 | 20 | output = comm.gather(result, root = 0) 21 | 22 | if id == 0: 23 | print(output) 24 | -------------------------------------------------------------------------------- /mpi.parSapply.R: -------------------------------------------------------------------------------- 1 | ## @knitr mpi.parSapply 2 | 3 | ## you should have invoked R as: 4 | ## mpirun -machinefile .hosts -np 1 R CMD BATCH --no-save mpi.parSapply.R mpi.parSapply.out 5 | ## unless running within a SLURM job, in which case you should do: 6 | ## mpirun R CMD BATCH --no-save mpi.parSapply.R mpi.parSapply.out 7 | 8 | library(Rmpi) 9 | ## on my system, this fails unless explicitly 10 | ## ask for one fewer slave than total number of slots across hosts 11 | mpi.spawn.Rslaves(nslaves = mpi.universe.size()-1) 12 | 13 | myfun <- function(i) { 14 | set.seed(i) 15 | mean(rnorm(1e7)) 16 | } 17 | 18 | x <- seq_len(25) 19 | # parallel sapply-type calculations on a vector 20 | system.time(out <- mpi.parSapply(x, myfun)) 21 | system.time(out <- mpi.applyLB(x, myfun)) 22 | 23 | nrows <- 10000 24 | x <- matrix(rnorm(nrows*50), nrow = nrows) 25 | # parallel apply on a matrix 26 | out <- mpi.parApply(x, 1, mean) 27 | 28 | mpi.close.Rslaves() 29 | mpi.quit() 30 | -------------------------------------------------------------------------------- /mpiHello.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | // mpicxx mpiHello.c -o mpiHello 6 | 7 | int main(int argc, char** argv) { 8 | int myrank, nprocs, namelen; 9 | char processor_name[MPI_MAX_PROCESSOR_NAME]; 10 | 11 | MPI_Init(&argc, &argv); 12 | MPI_Comm_size(MPI_COMM_WORLD, &nprocs); 13 | MPI_Comm_rank(MPI_COMM_WORLD, &myrank); 14 | MPI_Get_processor_name(processor_name, &namelen); 15 | printf("Hello from processor %d of %d on %s\n", myrank, nprocs, processor_name); 16 | 17 | MPI_Finalize(); 18 | return 0; 19 | } 20 | -------------------------------------------------------------------------------- /parallel-dist.Rmd: -------------------------------------------------------------------------------- 1 | Parallel Processing for Distributed Computing in R, Python, MATLAB, and C 2 | ============================================================== 3 | Parallelization tools in a distributed memory (multiple machine) context 4 | -------------- 5 | 
6 | Chris Paciorek, Department of Statistics, UC Berkeley 7 | 8 | ```{r setup, include=FALSE} 9 | library(knitr) 10 | library(stringr) 11 | read_chunk('sockets.R') 12 | read_chunk('doMPI.R') 13 | read_chunk('doSNOW.R') 14 | read_chunk('mpi.parSapply.R') 15 | read_chunk('pbd-apply.R') 16 | read_chunk('pbd-linalg.R') 17 | read_chunk('pbd-mpi.R') 18 | read_chunk('Rmpi.R') 19 | read_chunk('pbd-construct.R') 20 | read_chunk('python-ipyparallel.py') 21 | read_chunk('python-pp.py') 22 | read_chunk('example-mpi.py') 23 | ``` 24 | 25 | # 0) This Tutorial 26 | 27 | This tutorial covers strategies for using parallel processing in R, Python, MATLAB (briefly), and C on multiple machines, in which the various processes must interact across a network linking the machines. 28 | 29 | This tutorial assumes you have access to two or more servers on which to parallelize your computation, potentially via a Linux cluster managed via scheduling software such as SLURM, and that MPI, R, and Python are installed on the machines. 30 | 31 | Alternatively, you may be able to start a virtual cluster on Amazon Web Services using CfnCluster. If using CfnCluster, we recommend using a virtual machine developed here at Berkeley, [the Berkeley Common Environment (BCE)](http://bce.berkeley.edu). BCE is a virtual Linux machine - basically it is a Linux computer that you can run within your own computer, regardless of whether you are using Windows, Mac, or Linux. This provides a common environment so that things behave the same for all of us. Please follow the instructions related to CfnCluster at the [BCE install page](http://bce.berkeley.edu/install.html). 32 | 33 | This tutorial assumes you have a working knowledge of either R, Python, or C. 34 | 35 | Materials for this tutorial, including the R markdown file and associated code files that were used to create this document are available on Github at (https://github.com/berkeley-scf/tutorial-parallel-distributed). You can download the files by doing a git clone from a terminal window on a UNIX-like machine, as follows: 36 | ```{r, clone, eval=FALSE} 37 | git clone https://github.com/berkeley-scf/tutorial-parallel-distributed 38 | ``` 39 | 40 | To create this HTML document, simply compile the corresponding R Markdown file in R as follows (the following will work from within BCE after cloning the repository as above). 41 | ```{r, build-html, eval=FALSE} 42 | Rscript -e "library(knitr); knit2html('parallel-dist.Rmd')" 43 | ``` 44 | This tutorial by Christopher Paciorek is licensed under a Creative Commons Attribution 3.0 Unported License. 45 | 46 | 47 | # 1) Types of parallel processing 48 | 49 | There are two basic flavors of parallel processing (leaving aside 50 | GPUs): distributed memory and shared memory. With shared memory, multiple 51 | processors (which I'll call cores) share the same memory. With distributed 52 | memory, you have multiple nodes, each with their own memory. You can 53 | think of each node as a separate computer connected by a fast network. 54 | 55 | ## 1.1) Some useful terminology: 56 | 57 | - *cores*: We'll use this term to mean the different processing 58 | units available on a single node. 59 | - *nodes*: We'll use this term to mean the different computers, 60 | each with their own distinct memory, that make up a cluster or supercomputer. 61 | - *processes*: computational tasks executing on a machine; multiple 62 | processes may be executing at once. A given program may start up multiple 63 | processes at once. 
Ideally we have no more processes than cores on 64 | a node. 65 | - *threads*: multiple paths of execution within a single process; 66 | the OS sees the threads as a single process, but one can think of 67 | them as 'lightweight' processes. Ideally when considering the processes 68 | and their threads, we would have no more processes and threads combined 69 | than cores on a node. 70 | - *forking*: child processes are spawned that are identical to 71 | the parent, but with different process IDs and their own memory. 72 | - *sockets*: some of R's parallel functionality involves creating 73 | new R processes (e.g., starting processes via *Rscript*) and 74 | communicating with them via a communication technology called sockets. 75 | 76 | 77 | ## 1.2) Distributed memory and an overview of the topics in this tutorial 78 | 79 | Parallel programming for distributed memory parallelism requires passing 80 | messages between the different nodes. The standard protocol for doing 81 | this is MPI, of which there are various versions, including *openMPI*, which we'll use here. 82 | 83 | The R package *Rmpi* implements MPI in R. The *pbdR* packages for R also implement MPI as well as distributed linear algebra. 84 | 85 | Python has a package *mpi4py* that allows use of MPI within Python. 86 | 87 | In both R and Python, there are also easy ways to do embarrassingly parallel calculations (such as simple parallel for loops) across multiple machines, with MPI and similar tools used behind the scenes to manage the worker processes. 88 | 89 | MATLAB has its own system for distributed computation, called the Distributed Computing Server (DCS), requiring additional licensing above the standard MATLAB installation. 90 | 91 | This tutorial will cover: 92 | - simple parallelization of embarrassingly parallel computations (in R, Python, and MATLAB) without writing code that explicitly uses MPI; 93 | - distributed linear algebra using the pbdR front-end to the *ScaLapack* package; and 94 | - using MPI explicitly (in R, Python and C). 95 | 96 | ## 1.3) Other type of parallel processing 97 | 98 | We won't cover any of these in this material. 99 | 100 | ### Shared memory parallelization 101 | 102 | For shared memory parallelism, each core is accessing the same memory 103 | so there is no need to pass information (in the form of messages) 104 | between different machines. But in some programming contexts one needs 105 | to be careful that activity on different cores doesn't mistakenly 106 | overwrite places in memory that are used by other cores. Threading is a form of shared memory parallelism. 107 | 108 | This tutorial will not cover shared memory parallelization, as it is covered in [a separate tutorial](https://github.com/berkeley-scf/tutorial-parallel-basics). 109 | 110 | For information about working with random numbers in a parallel computation, please see that same tutorial, as the discussion applies to both shared and distributed memory. 111 | 112 | ### GPUs 113 | 114 | GPUs (Graphics Processing Units) are processing units originally designed 115 | for rendering graphics on a computer quickly. This is done by having 116 | a large number of simple processing units for massively parallel calculation. 117 | The idea of general purpose GPU (GPGPU) computing is to exploit this 118 | capability for general computation. In spring 2016, I gave a [workshop on using GPUs](http://statistics.berkeley.edu/computing/gpu). 
119 | 120 | Most researchers don't program for a GPU directly but rather use software (often machine learning software such as Tensorflow or Caffe) that has been programmed to take advantage of a GPU if one is available. 121 | 122 | 123 | ### Spark and Hadoop 124 | 125 | Spark and Hadoop are systems for implementing computations in a distributed 126 | memory environment, using the MapReduce approach. 127 | 128 | # 2) Starting MPI-based jobs 129 | 130 | Code that explicitly uses MPI, as well as code using MPI under the hood, such as *foreach* with *doMPI* in R and pbdR, requires that you start your process(es) in a special way via the *mpirun* command. Note that *mpirun*, *mpiexec* and *orterun* are synonyms under *openMPI*. 131 | 132 | The basic requirements for starting such a job are that you specify the number of processes you want to run and that you indicate what machines those processes should run on. Those machines should be networked together such that MPI can ssh to the various machines without any password required. 133 | 134 | # 2.1) Running an MPI job under SLURM 135 | 136 | 137 | 138 | There are two ways to tell *mpirun* the machines on which to run the worker processes. 139 | 140 | First, we can pass the machine names directly, replicating the name 141 | if we want multiple processes on a single machine. In the example here, these are machines accessible to me, and you would need to replace those names with the names of machines you have access to. You'll need to [set up SSH keys](http://statistics.berkeley.edu/computing/sshkeys) so that you can access the machines without a password. 142 | 143 | 144 | ```{r, mpirun1, engine='bash'} 145 | mpirun --host smeagol,radagast,arwen,arwen -np 4 hostname 146 | ``` 147 | 148 | Alternatively, we can create a file with the relevant information. 149 | 150 | ```{r, mpirun2, engine='bash'} 151 | echo 'smeagol slots=1' > .hosts 152 | echo 'radagast slots=1' >> .hosts 153 | echo 'arwen slots=2' >> .hosts 154 | mpirun -machinefile .hosts -np 4 hostname 155 | ``` 156 | 157 | **If you are running your code as part of a job submitted to SLURM, you generally won't need to pass the *machinefile* or *np* arguments as MPI will get that information from SLURM.** So you can simply do: 158 | 159 | ``` 160 | mpirun hostname 161 | ``` 162 | 163 | Note that on a CfnCluster-based EC2 VM, you could run your job through SLURM, or you can directly use the node names, which can be seen by invoking `sinfo` and looking at the *NODELIST* column. 164 | 165 | 166 | To limit the number of threads for each process, we can tell *mpirun* 167 | to export the value of *OMP_NUM_THREADS* to the processes. E.g., calling a C program, *quad_mpi*: 168 | 169 | ``` 170 | export OMP_NUM_THREADS=2 171 | mpirun -machinefile .hosts -np 4 -x OMP_NUM_THREADS quad_mpi 172 | ``` 173 | 174 | In the examples above, I illustrated with a simple bash command (hostname) and with a compiled C program, but one would similarly 175 | use the -machinefile flag when starting R or Python or a C program via mpirun. 176 | 177 | There are additional details involved in carefully controlling how processes are allocated to nodes, but the default arguments for mpirun should do a reasonable job in many situations. 178 | 179 | Also, I've had inconsistent results in terms of having the correct number of workers start up on each of the machines specified, depending on whether I specify the number of workers implicitly via the hosts information (without specifying -np), explicitly via -np or both. 
You may want to check that the right number of workers is running on each host. 180 | 181 | # 3) Basic parallelization across nodes 182 | 183 | Here we'll see the use of high-level packages in R, Python, and MATLAB that hide the details of communication between nodes. 184 | 185 | ## 3.1) R 186 | 187 | ### 3.1.1) *foreach* with the *doMPI* and *doSNOW* backends 188 | 189 | Just as we used *foreach* in a shared memory context, we can 190 | use it in a distributed memory context as well, and R will handle 191 | everything behind the scenes for you. 192 | 193 | #### *doMPI* 194 | 195 | Start R through the *mpirun* command as discussed above, either 196 | as a batch job or for interactive use. We'll only ask for 1 process 197 | because the worker processes will be started automatically from within R (but using the machine names information passed to mpirun). 198 | 199 | ``` 200 | mpirun -machinefile .hosts -np 1 R CMD BATCH -q --no-save doMPI.R doMPI.out 201 | mpirun -machinefile .hosts -np 1 R --no-save 202 | ``` 203 | 204 | Here's R code for using *Rmpi* as the back-end to *foreach*. 205 | If you call *startMPIcluster* with no arguments, it will start 206 | up one fewer worker processes than the number of hosts times slots given to mpirun 207 | so your R code will be more portable. 208 | 209 | ```{r, doMPI, eval=FALSE, cache=TRUE} 210 | ``` 211 | 212 | ```{r, doMPI-test, engine='bash'} 213 | mpirun -machinefile .hosts -np 1 R CMD BATCH -q --no-save doMPI.R doMPI.out 214 | cat doMPI.out 215 | ``` 216 | 217 | A caution concerning Rmpi/doMPI: when you invoke `startMPIcluster()`, 218 | all the slave R processes become 100% active and stay active until 219 | the cluster is closed. In addition, when *foreach* is actually 220 | running, the master process also becomes 100% active. So using this 221 | functionality involves some inefficiency in CPU usage. This inefficiency 222 | is not seen with a sockets cluster (Section 3.1.4) nor when using other 223 | Rmpi functionality - i.e., starting slaves with *mpi.spawn.Rslaves* 224 | and then issuing commands to the slaves. 225 | 226 | If you specified `-np` with more than one process then as with the C-based 227 | MPI job above, you can control the threading via OMP_NUM_THREADS 228 | and the -x flag to *mpirun*. Note that this only works when the 229 | R processes are directly started by *mpirun*, which they are 230 | not if you set -np 1. The *maxcores* argument to *startMPIcluster()* 231 | does not seem to function (perhaps it does on other systems). 232 | 233 | Sidenote: You can use *doMPI* on a single node, which might be useful for avoiding 234 | some of the conflicts between R's forking functionality and openBLAS that 235 | can cause R to hang when using *foreach* with *doParallel*. 236 | 237 | #### *doSNOW* 238 | 239 | The *doSNOW* backend has the advantage that it doesn't need to have MPI installed on the system. MPI can be tricky to install and keep working, so this is an easy approach to using *foreach* across multiple machines. 240 | 241 | Simply start R as you usually would. 242 | 243 | Here's R code for using *doSNOW* as the back-end to *foreach*. Make sure to use the `type = "SOCK"` argument or *doSNOW* will actually use MPI behind the scenes. 
244 | 245 | ```{r, doSNOW, eval=FALSE, cache=TRUE} 246 | ``` 247 | 248 | #### Loading packages and accessing variables within your parallel tasks 249 | 250 | When using *foreach* with multiple machines, you need to use the *.packages* argument (or load the package in the code being run in parallel) to load any packages needed in the code. You do not need to explicitly export variables from the master process to the workers. Rather, *foreach* determines which variables in the global environment of the master process are used in the code being run in parallel and makes copies of those in each worker process. Note that these variables are read-only on the workers and cannot be modified (if you try to do so, you'll notice that *foreach* actually did not make copies of the variables that your code tries to modify). 251 | 252 | ### 3.1.2) Using pbdR 253 | 254 | There is a project to enhance R's capability for distributed 255 | memory processing called [pbdR](http://r-pbd.org). For an extensive tutorial, see the 256 | [pbdDEMO vignette](https://github.com/wrathematics/pbdDEMO/blob/master/inst/doc/pbdDEMO-guide.pdf?raw=true). 257 | *pbdR* is designed for 258 | SPMD processing in batch mode, which means that you start up multiple 259 | processes in a non-interactive fashion using mpirun. The same code 260 | runs in each R process so you need to have the code behavior depend 261 | on the process ID. 262 | 263 | *pbdR* provides the following capabilities: 264 | - the ability to do some parallel apply-style computations (this section), 265 | - the ability to do distributed linear algebra by interfacing to *ScaLapack* (see Section 4), and 266 | - an alternative to *Rmpi* for interfacing with MPI (see Section 5). 267 | 268 | 269 | Personally, I think the second of the three is the most exciting as 270 | it's a functionality not readily available in R or even more generally 271 | in other readily-accessible software. 272 | 273 | Let's see parallel-apply style computations in pbdR. 274 | 275 | Here's some basic syntax for doing a distributed *apply()* on 276 | a matrix that is on one of the workers. So in this case, the matrix is not initially distributed to the workers -- that is done as part of the *pbdApply* computation. (One can also use *pbdApply* on matrices that are already distributed, and this is of course recommended for large matrices -- see Section 4.) 277 | 278 | As mentioned above, pbdR code is always run in batch mode, with the same code running on all of the processes. This means that you often need to explicitly build in logic about which process should execute a given piece of code, including print statements. Here the check for `comm.rank() == 0` allows us to only create the matrix and call some print statements on the master node (rank 0). 279 | 280 | ```{r, pbd-apply, cache=TRUE, eval=FALSE} 281 | ``` 282 | 283 | 284 | 285 | ```{r, pbd-apply-example, engine='bash'} 286 | mpirun -machinefile .hosts -np 4 Rscript pbd-apply.R > pbd-apply.out 287 | cat pbd-apply.out 288 | ``` 289 | 290 | In this case it's a fair amount slower to parallelize the calculation than just to do it in R using *rowSums()*, because of the overhead of communication (including passing the data) with the workers. 291 | 292 | 293 | ### 3.1.3) Using parallel apply functionality in Rmpi 294 | 295 | *Rmpi* is a package that provides MPI capabilities from R, including low-level MPI type calls (see Section 5). 
It also provides high-level wrapper functions that use MPI behind the scenes, including parallel apply functionality for operating on lists (and vectors) with functions such as *mpi.parSapply*. 296 | 297 | The documentation (see `help(mpi.parSapply)`) documents a number of confusingly-named functions. It appears that they are basically multi-node versions of the analogous *parSapply* and related functions. 298 | 299 | ```{r, mpi.parSapply, eval=FALSE} 300 | ``` 301 | 302 | ```{r, mpi.parSapply-example, engine='bash'} 303 | mpirun -machinefile .hosts -np 1 R CMD BATCH -q --no-save mpi.parSapply.R mpi.parSapply.out 304 | cat mpi.parSapply.out 305 | ``` 306 | 307 | In some cases, it may be useful to specify *job.num* when the number of tasks is bigger than the number of worker processes to ensure load-balancing. 308 | 309 | ### 3.1.4) Using sockets 310 | 311 | One can also set up a cluster with the worker processes communicating via sockets. You just need to specify 312 | a character vector with the machine names as the input to *makeCluster()*. A nice thing about this is that it doesn't involve any of the complications of working with needing MPI installed. 313 | 314 | ```{r, sockets, cache=TRUE} 315 | ``` 316 | 317 | Note the use of *clusterExport*, needed to make variables in the master process available to the workers; this involves making a copy of each variable for each worker process. You'd also need to load any packages used in the code being run in parallel in that code. 318 | 319 | ### 3.1.5) The *partools* package 320 | 321 | *partools* is a somewhat new package developed by Norm Matloff at UC-Davis. He has the perspective that Spark/Hadoop are not the right tools in many cases when doing statistics-related work and has developed some simple tools for parallelizing computation across multiple nodes, also referred to as *Snowdoop*. The tools make use of the key idea in Hadoop of a distributed file system and distributed data objects but avoid the complications of trying to ensure fault tolerance, which is critical only on very large clusters of machines. 322 | 323 | I won't go into details, but *partools* allows you to split up your data across multiple nodes and then read the data into R in parallel across R sessions running on those nodes, all controlled from a single master R session. You can then do operations on the subsets and gather results back to the master session as needed. One point that confused me in the *partools* vignette is that it shows how to split up a dataset that you can read into your R session, but it's not clear what one does if the dataset is too big to read into a single R session. 324 | 325 | ## 3.2) Python 326 | 327 | ### 3.2.1) IPython parallel 328 | 329 | One can use IPython's parallelization tools in a context with multiple nodes, though the setup to get the worker processes is a bit more involved when you have multiple nodes. For details on using IPython parallel on a single node, see the [parallel basics tutorial appendix](https://github.com/berkeley-scf/tutorial-parallel-basics). 
330 | 331 | If we are using the SLURM scheduling software, here's how we start up the worker processes: 332 | 333 | ```{r, ipyparallel-setup, engine='bash', eval=FALSE} 334 | ipcontroller --ip='*' & 335 | sleep 25 336 | # next line will start as many ipengines as we have SLURM tasks 337 | # because srun is a SLURM command 338 | srun ipengine & 339 | sleep 45 # wait until all engines have successfully started 340 | ``` 341 | 342 | 343 | We can then run IPython to split up our computational tasks across the engines. 344 | 345 | ```{r, ipyparallel, engine='python', eval=FALSE} 346 | ``` 347 | 348 | To finish up, we need to shut down the cluster of workers: 349 | ```{r, engine='bash', eval=FALSE} 350 | ipcluster stop 351 | ``` 352 | 353 | To start the engines in a context outside of using slurm (provided all machines share a filesystem), you should be able ssh to each machine and run `ipengine &` for as many worker processes as you want to start as follows. In some, but not all cases (depending on how the network is set up) you may not need the `--location` flag. 354 | 355 | ```{r, ipyparallel-setup2, engine='bash', eval=FALSE} 356 | ipcontroller --ip='*' --location=URL_OF_THIS_MACHINE & 357 | sleep 25 358 | nengines=8 359 | ssh other_host "for (( i = 0; i < ${nengines}; i++ )); do ipengine & done" 360 | sleep 45 # wait until all engines have successfully started 361 | ``` 362 | 363 | ### 3.2.2) *pp* package 364 | 365 | Another way to parallelize across multiple nodes that uses more manual setup and doesn't integrate as well with scheduling software like SLURM is to use the pp package (also useful for parallelizing on a single machine as discussed in the [parallel basics tutorial appendix](https://github.com/berkeley-scf/tutorial-parallel-basics). 366 | 367 | Assuming that the pp package is installed on each node (e.g., `sudo apt-get install python-pp` on an Ubuntu machine), you need to start up a ppserver process on each node. E.g., if `$nodes` is a UNIX environment variable containing the names of the worker nodes and you want to start 2 workers per node: 368 | 369 | ```{r, pp-start, engine='bash', eval=FALSE} 370 | nodes='smeagol radagast beren arwen' 371 | for node in $nodes; do 372 | # cd /tmp is because of issue with starting ppserver in home directory 373 | # -w says how many workers to start on the node 374 | ssh $node "cd /tmp && ppserver -s mysecretphrase -t 120 -w 2 &" & 375 | done 376 | ``` 377 | 378 | Now in our Python code we create a server object and submit jobs to the server object, which manages the farming out of the tasks. Note that this will run interactively in IPython or as a script from UNIX, but there have been times where I was not able to run it interactively in the base Python interpreter. Also note that while we are illustrating this as basically another parallelized for loop, the individual jobs can be whatever calculations you want, so the function (in this case it's always *pi.sample*) could change from job to job. 
379 | 380 | ```{r, python-pp, engine='python', eval=FALSE} 381 | ``` 382 | 383 | ```{r, python-pp-example, engine='bash', eval=FALSE} 384 | python python-pp.py > python-pp.out 385 | cat python-pp.out 386 | ``` 387 | 388 | ``` 389 | ['smeagol', 'radagast', 'beren', 'arwen', 'smeagol', 'radagast', 'beren', 'arwen'] 390 | Pi is roughly 3.141567 391 | Time elapsed: 32.0389587879 392 | ``` 393 | 394 | The -t flag used when starting ppserver should ensure that the server processes are removed, but if you need to do it manually, this should work: 395 | 396 | ```{r, pp-stop, engine='bash', eval=FALSE} 397 | for node in $nodes; do 398 | killall ppserver 399 | done 400 | ``` 401 | 402 | 403 | ## 3.3) MATLAB 404 | 405 | To use MATLAB across multiple nodes, you need to have the MATLAB Distributed Computing Server (DCS). If it is installed, one can set up MATLAB so that *parfor* will distribute its work across multiple nodes. Details may vary depending on how DCS is installed on your system. 406 | 407 | # 4) Distributed linear algebra in R using pbdR 408 | 409 | ## 4.1) Distributed linear algebra example 410 | 411 | And here's how you would set up a distributed matrix and do linear 412 | algebra on it. Note that when working with large matrices, you would 413 | generally want to construct the matrices (or read from disk) in a 414 | parallel fashion rather than creating the full matrix on one worker. 415 | For simplicity in the example, I construct the matrix, *x*, on the master 416 | and then create the distributed version of the matrix, *dx*, with *as.ddmatrix*. 417 | 418 | Here's the code in *pbd-linalg.R*. 419 | 420 | ```{r, pbd-linalg, eval=FALSE} 421 | ``` 422 | 423 | As before we run the job in batch mode via mpirun: 424 | 425 | ```{r, pbd-linalg-example, engine='bash', cache=TRUE} 426 | export OMP_NUM_THREADS=1 427 | mpirun -machinefile .hosts -np 4 -x OMP_NUM_THREADS Rscript pbd-linalg.R > pbd-linalg.out 428 | cat pbd-linalg.out 429 | ``` 430 | 431 | 432 | You may want to set the *bldim* argument to *as.ddmatrix*. That determines 433 | the size of the submatrices (aka 'blocks') into which the overall matrix is split. Generally, multiple 434 | submatrices are owned by an individual worker process. For example, to use 100x100 435 | blocks, you'd have 436 | ``` 437 | dx <- as.ddmatrix(x, bldim = c(100, 100)) 438 | ``` 439 | In general, you don't 440 | want the blocks too big as the work may not be well load-balanced, or too small as 441 | that may have a higher computational cost in terms of latency and communication. 442 | My experiments suggest that it's worth exploring block sizes of 10x10 through 1000x1000 (if you have square matrices). 443 | 444 | As a quick, completely non-definitive point of comparison, doing the 445 | crossproduct and Cholesky for the 8192x8192 matrix on 3 EC2 nodes 446 | (2 cores per node) with -np 6 took 39 seconds for each operation, 447 | while doing with two threads on the master node took 64 seconds (crossproduct) 448 | and 23 seconds (Cholesky). While that is a single test, some other experiments 449 | I've done also haven't show much speedup in using multiple nodes with pbdR compared 450 | to simply using a threaded BLAS on one machine. So you may need to get fairly big matrices 451 | that won't fit in memory on a single machine before it's worthwhile 452 | to do the computation in distributed fashion using pbdR. 
453 | 454 | ## 4.2) Constructing a distributed matrix on parallel 455 | 456 | pbdR has functionality for reading in parallel from a parallel file 457 | system such as Lustre (available on Berkeley's Savio cluster). Things 458 | are bit more complicated if that's not the case. Here's some code that 459 | illustrates how to construct a distributed matrix from constituent column blocks. 460 | First create a distributed version of the 461 | matrix using a standard R matrix with each process owning a block of 462 | columns (I haven't yet gotten the syntax to work for blocks of rows). Then create a 463 | pbd version of that distributed matrix and finally convert the 464 | distributed matrix to a standard pbd block structure on which the 465 | linear algebra can be done efficiently. 466 | 467 | ```{r, pbd-construct, eval=FALSE} 468 | ``` 469 | 470 | The code above creates the submatrices within the R sessions, but one could also read in from separate files, one per process. 471 | 472 | The code in *redistribute-test.R* demonstrates that constructing the full matrix 473 | from column-wise blocks with this syntax works correctly. 474 | 475 | # 5) MPI 476 | 477 | ## 5.1) MPI Overview 478 | 479 | There are multiple MPI implementations, of which *openMPI* and 480 | *mpich* are very common. *openMPI* is quite common, and we'll use that. 481 | 482 | In MPI programming, the same code runs on all the machines. This is 483 | called SPMD (single program, multiple data). As we saw a bit with the pbdR code, one 484 | invokes the same code (same program) multiple times, but the behavior 485 | of the code can be different based on querying the rank (ID) of the 486 | process. Since MPI operates in a distributed fashion, any transfer 487 | of information between processes must be done explicitly via send 488 | and receive calls (e.g., *MPI_Send*, *MPI_Recv*, *MPI_Isend*, 489 | and *MPI_Irecv*). (The ``MPI_'' is for C code; C++ just has 490 | *Send*, *Recv*, etc.) 491 | 492 | The latter two of these functions (*MPI_Isend* and *MPI_Irecv*) 493 | are so-called non-blocking calls. One important concept to understand 494 | is the difference between blocking and non-blocking calls. Blocking 495 | calls wait until the call finishes, while non-blocking calls return 496 | and allow the code to continue. Non-blocking calls can be more efficient, 497 | but can lead to problems with synchronization between processes. 498 | 499 | In addition to send and receive calls to transfer to and from specific 500 | processes, there are calls that send out data to all processes (*MPI_Scatter*), 501 | gather data back (*MPI_Gather*) and perform reduction operations 502 | (*MPI_Reduce*). 503 | 504 | Debugging MPI code can be tricky because communication 505 | can hang, error messages from the workers may not be seen or readily 506 | accessible, and it can be difficult to assess the state of the worker 507 | processes. 508 | 509 | ## 5.2) Basic syntax for MPI in C 510 | 511 | 512 | Here's a basic hello world example The code is also in *mpiHello.c*. 
513 | 514 | ``` 515 | // see mpiHello.c 516 | #include 517 | #include 518 | #include 519 | 520 | int main(int argc, char* argv) { 521 | int myrank, nprocs, namelen; 522 | char process_name[MPI_MAX_PROCESSOR_NAME]; 523 | MPI_Init(&argc, &argv); 524 | MPI_Comm_size(MPI_COMM_WORLD, &nprocs); 525 | MPI_Comm_rank(MPI_COMM_WORLD, &myrank); 526 | MPI_Get_processor_name(process_name, &namelen); 527 | printf("Hello from process %d of %d on %s\n", 528 | myrank, nprocs, process_name); 529 | MPI_Finalize(); 530 | return 0; 531 | } 532 | ``` 533 | 534 | There are C (*mpicc*) and C++ (*mpic++*) compilers for MPI programs (*mpicxx* and *mpiCC* are synonyms). 535 | I'll use the MPI C++ compiler 536 | even though the code is all plain C code. 537 | 538 | ```{r, change-hosts1, engine='bash', include=FALSE} 539 | # change hosts back so pdf shows original hosts 540 | echo 'smeagol slots=1' > .hosts 541 | echo 'radagast slots=1' >> .hosts 542 | echo 'arwen slots=2' >> .hosts 543 | ``` 544 | 545 | 546 | 547 | ```{r, mpiHello, engine = 'bash'} 548 | mpicxx mpiHello.c -o mpiHello 549 | cat .hosts # what hosts do I expect it to run on? 550 | mpirun -machinefile .hosts -np 4 mpiHello 551 | ``` 552 | 553 | 554 | 555 | To actually write real MPI code, you'll need to go learn some of the 556 | MPI syntax. See *quad_mpi.c* and *quad_mpi.cpp*, which 557 | are example C and C++ programs (for approximating an integral via 558 | quadrature) that show some of the basic MPI functions. Compilation 559 | and running are as above: 560 | 561 | ```{r, quad_mpi, engine = 'bash'} 562 | mpicxx quad_mpi.cpp -o quad_mpi 563 | mpirun -machinefile .hosts -np 4 quad_mpi 564 | ``` 565 | 566 | 567 | ## 5.3) Using MPI from R via Rmpi or pbdR 568 | 569 | ### 5.3.1) Rmpi 570 | 571 | R users can use Rmpi to interface with MPI. 572 | 573 | Here's some example code that uses actual Rmpi syntax (as opposed 574 | to *foreach* with Rmpi as the back-end, where the use of Rmpi was hidden from us). 575 | The syntax is very similar to the MPI C syntax we've already seen. 576 | This code runs in a master-slave paradigm where the master starts 577 | the slaves and invokes commands on them. It may be possible to run 578 | Rmpi in a context where each process runs the same code based 579 | on invoking with Rmpi, but I haven't investigated this further. 580 | 581 | 582 | ```{r, Rmpi, eval=FALSE} 583 | ``` 584 | 585 | *mpi.bcast.cmd* and *mpi.remote.exec* are quite similar - they execute a function on the workers and can also use arguments on the master as inputs to the function evaluated on the workers (see the ... argument). *mpi.remote.exec* can return the results of the execution to the master. 586 | 587 | As before, we would start R via *mpirun*, requesting one process, since the workers are started within R via *mpi.spawn.Rslaves*. 588 | 589 | ```{r, Rmpi-example, engine='bash'} 590 | mpirun -machinefile .hosts -np 1 R CMD BATCH -q --no-save Rmpi.R Rmpi.out 591 | cat Rmpi.out 592 | ``` 593 | 594 | Note that if you do this in interactive mode, some of the usual functionality 595 | of command line R (tab completion, scrolling for history) is not enabled 596 | and errors will cause R to quit. This occurs because passing things 597 | through *mpirun* causes R to think it is not running interactively. 598 | 599 | Note: in some cases a cluster/supercomputer will be set up so that 600 | *Rmpi* is loaded and the worker processes are already started 601 | when you start R. In this case you wouldn't need to load *Rmpi* 602 | or use *mpi.spawn.Rslaves*. 
You can always run `mpi.comm.size()` to see how 603 | many workers are running. 604 | 605 | ### 5.3.2) pbdMPI in pbdR 606 | 607 | Here's an example of distributing an embarrassingly parallel calculation 608 | (estimating an integral via Monte Carlo - in this case estimating 609 | the value of pi). 610 | 611 | ```{r, pbd-mpi, eval=FALSE} 612 | ``` 613 | 614 | ```{r, pbd-mpi-example, engine='bash', cache=TRUE} 615 | mpirun -machinefile .hosts -np 4 Rscript pbd-mpi.R > pbd-mpi.out 616 | cat pbd-mpi.out 617 | ``` 618 | 619 | 620 | ## 5.4) Using MPI from Python via mpi4py 621 | 622 | Here's some basic use of MPI within Python. 623 | 624 | ```{r, mpi4py, engine='python', eval=FALSE} 625 | ``` 626 | 627 | To run the code, we start Python through the mpirun command as done previously. 628 | 629 | ```{r, mpi4py-example, engine = 'bash'} 630 | mpirun -machinefile .hosts -np 4 python example-mpi.py 631 | ``` 632 | 633 | More generally, you can send, receive, broadcast, gather, etc. as with MPI itself. 634 | 635 | *mpi4py* generally does not work interactively. 636 | 637 | # 6) Parallelization strategies 638 | 639 | The following are some basic principles/suggestions for how to parallelize 640 | your computation. 641 | 642 | Should I use one machine/node or many machines/nodes? 643 | 644 | - If you can do your computation on the cores of a single node using 645 | shared memory, that will be faster than using the same number of cores 646 | (or even somewhat more cores) across multiple nodes. Similarly, jobs 647 | with a lot of data/high memory requirements that one might think of 648 | as requiring Spark or Hadoop may in some cases be much faster if you can find 649 | a single machine with a lot of memory. 650 | - That said, if you would run out of memory on a single node, then you'll 651 | need to use distributed memory. 652 | 653 | What level or dimension should I parallelize over? 654 | 655 | - If you have nested loops, you generally only want to parallelize at 656 | one level of the code. That said, there may be cases in which it is 657 | helpful to do both. Keep in mind whether your linear algebra is being 658 | threaded. Often you will want to parallelize over a loop and not use 659 | threaded linear algebra. 660 | - Often it makes sense to parallelize the outer loop when you have nested 661 | loops. 662 | - You generally want to parallelize in such a way that your code is 663 | load-balanced and does not involve too much communication. 664 | 665 | How do I balance communication overhead with keeping my cores busy? 666 | 667 | - If you have very few tasks, particularly if the tasks take different 668 | amounts of time, often some of the processors will be idle and your code 669 | poorly load-balanced. 670 | - If you have very many tasks and each one takes little time, the communication 671 | overhead of starting and stopping the tasks will reduce efficiency. 672 | 673 | Should multiple tasks be pre-assigned to a process (i.e., a worker) (sometimes called *prescheduling*) or should tasks 674 | be assigned dynamically as previous tasks finish? 675 | 676 | - Basically if you have many tasks that each take similar time, you 677 | want to preschedule the tasks to reduce communication. If you have few tasks 678 | or tasks with highly variable completion times, you don't want to 679 | preschedule, to improve load-balancing. 680 | - For R in particular, some of R's parallel functions allow you to say whether the 681 | tasks should be prescheduled. 
E.g., `library(Rmpi); help(mpi.parSapply)` gives some information. 682 | 683 | 684 | -------------------------------------------------------------------------------- /parallel-dist.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Parallel Processing for Distributed Computing in R, Python, Matlab, and C 7 | 8 | 19 | 20 | 21 | 53 | 54 | 55 | 59 | 60 | 61 | 62 | 198 | 199 | 200 | 201 | 202 | 203 | 204 |

Parallel Processing for Distributed Computing in R, Python, Matlab, and C


Parallelization tools in a distributed memory (multiple machine) context


Chris Paciorek, Department of Statistics, UC Berkeley


0) This Tutorial


This tutorial covers strategies for using parallel processing in R, Python, Matlab (briefly), and C on multiple machines, in which the various processes must interact across a network linking the machines.


This tutorial assumes you have access to two or more servers on which to parallelize your computation, potentially via a Linux cluster managed via scheduling software such as SLURM, and that MPI, R, and Python are installed on the machines.


Alternatively, you may be able to start a virtual cluster on Amazon Web Services using CfnCluster. If using CfnCluster, we recommend using a virtual machine developed here at Berkeley, the Berkeley Common Environment (BCE). BCE is a virtual Linux machine - basically it is a Linux computer that you can run within your own computer, regardless of whether you are using Windows, Mac, or Linux. This provides a common environment so that things behave the same for all of us. Please follow the instructions related to CfnCluster at the BCE install page.


This tutorial assumes you have a working knowledge of either R, Python, or C.


Materials for this tutorial, including the R markdown file and associated code files that were used to create this document, are available on Github at (https://github.com/berkeley-scf/tutorial-parallel-distributed). You can download the files by doing a git clone from a terminal window on a UNIX-like machine, as follows:

git clone https://github.com/berkeley-scf/tutorial-parallel-distributed

To create this HTML document, simply compile the corresponding R Markdown file in R as follows (the following will work from within BCE after cloning the repository as above).

Rscript -e "library(knitr); knit2html('parallel-dist.Rmd')"

This tutorial by Christopher Paciorek is licensed under a Creative Commons Attribution 3.0 Unported License.


1) Types of parallel processing


There are two basic flavors of parallel processing (leaving aside GPUs): distributed memory and shared memory. With shared memory, multiple processors (which I'll call cores) share the same memory. With distributed memory, you have multiple nodes, each with their own memory. You can think of each node as a separate computer connected by a fast network.

1.1) Some useful terminology:

  • cores: We'll use this term to mean the different processing units available on a single node.
  • nodes: We'll use this term to mean the different computers, each with their own distinct memory, that make up a cluster or supercomputer.
  • processes: computational tasks executing on a machine; multiple processes may be executing at once. A given program may start up multiple processes at once. Ideally we have no more processes than cores on a node.
  • threads: multiple paths of execution within a single process; the OS sees the threads as a single process, but one can think of them as 'lightweight' processes. Ideally when considering the processes and their threads, we would have no more processes and threads combined than cores on a node.
  • forking: child processes are spawned that are identical to the parent, but with different process IDs and their own memory.
  • sockets: some of R's parallel functionality involves creating new R processes (e.g., starting processes via Rscript) and communicating with them via a communication technology called sockets.

1.2) Distributed memory and an overview of the topics in this tutorial


Parallel programming for distributed memory parallelism requires passing messages between the different nodes. The standard protocol for doing this is MPI, of which there are various versions, including openMPI, which we'll use here.

The R package Rmpi implements MPI in R. The pbdR packages for R also implement MPI as well as distributed linear algebra.


Python has a package mpi4py that allows use of MPI within Python.


In both R and Python, there are also easy ways to do embarrassingly parallel calculations (such as simple parallel for loops) across multiple machines, with MPI and similar tools used behind the scenes to manage the worker processes.


Matlab has its own system for distributed computation, called the Distributed Computing Server (DCS), requiring additional licensing above the standard Matlab installation.


This tutorial will cover:

  • simple parallelization of embarrassingly parallel computations (in R, Python, and Matlab) without writing code that explicitly uses MPI;
  • distributed linear algebra using the pbdR front-end to the ScaLapack package; and
  • using MPI explicitly (in R, Python and C).

1.3) Other types of parallel processing


We won't cover any of these in this material.


Shared memory parallelization


For shared memory parallelism, each core is accessing the same memory so there is no need to pass information (in the form of messages) between different machines. But in some programming contexts one needs to be careful that activity on different cores doesn't mistakenly overwrite places in memory that are used by other cores. Threading is a form of shared memory parallelism.

This tutorial will not cover shared memory parallelization, as it is covered in a separate tutorial.


For information about working with random numbers in a parallel computation, please see that same tutorial, as the discussion applies to both shared and distributed memory.


GPUs


GPUs (Graphics Processing Units) are processing units originally designed for rendering graphics on a computer quickly. This is done by having a large number of simple processing units for massively parallel calculation. The idea of general purpose GPU (GPGPU) computing is to exploit this capability for general computation. In spring 2016, I gave a workshop on using GPUs.

Most researchers don't program for a GPU directly but rather use software (often machine learning software such as Tensorflow or Caffe) that has been programmed to take advantage of a GPU if one is available.


Spark and Hadoop


Spark and Hadoop are systems for implementing computations in a distributed memory environment, using the MapReduce approach.

2) Starting MPI-based jobs


Code that explicitly uses MPI, as well as code using MPI under the hood, such as foreach with doMPI in R and pbdR, requires that you start your process(es) in a special way via the mpirun command. Note that mpirun, mpiexec and orterun are synonyms under openMPI.


The basic requirements for starting such a job are that you specify the number of processes you want to run and that you indicate what machines those processes should run on. Those machines should be networked together such that MPI can ssh to the various machines without any password required.


2.1) Running an MPI job under SLURM


There are two ways to tell mpirun the machines on which to run the worker processes.


First, we can pass the machine names directly, replicating the name if we want multiple processes on a single machine. In the example here, these are machines accessible to me, and you would need to replace those names with the names of machines you have access to. You'll need to set up SSH keys so that you can access the machines without a password.

mpirun --host smeagol,radagast,arwen,arwen -np 4 hostname

## smeagol
## radagast
## arwen
## arwen

Alternatively, we can create a file with the relevant information.

echo 'smeagol slots=1' > .hosts
echo 'radagast slots=1' >> .hosts
echo 'arwen slots=2' >> .hosts
mpirun -machinefile .hosts -np 4 hostname

## smeagol
## radagast
## arwen
## arwen

If you are running your code as part of a job submitted to SLURM, you generally won't need to pass the machinefile or np arguments as MPI will get that information from SLURM. So you can simply do:

mpirun hostname

Note that on a CfnCluster-based EC2 VM, you could run your job through SLURM, or you can directly use the node names, which can be seen by invoking sinfo and looking at the NODELIST column.


To limit the number of threads for each process, we can tell mpirun to export the value of OMP_NUM_THREADS to the processes. E.g., calling a C program, quad_mpi:

export OMP_NUM_THREADS=2
mpirun -machinefile .hosts -np 4 -x OMP_NUM_THREADS quad_mpi

In the examples above, I illustrated with a simple bash command (hostname) and with a compiled C program, but one would similarly use the -machinefile flag when starting R or Python or a C program via mpirun.

There are additional details involved in carefully controlling how processes are allocated to nodes, but the default arguments for mpirun should do a reasonable job in many situations.


Also, I've had inconsistent results in terms of having the correct number of workers start up on each of the machines specified, depending on whether I specify the number of workers implicitly via the hosts information (without specifying -np), explicitly via -np or both. You may want to check that the right number of workers is running on each host.


3) Basic parallelization across nodes


Here we'll see the use of high-level packages in R, Python, and Matlab that hide the details of communication between nodes.


3.1) R


3.1.1) foreach with the doMPI and doSNOW backends


Just as we used foreach in a shared memory context, we can use it in a distributed memory context as well, and R will handle everything behind the scenes for you.

doMPI


Start R through the mpirun command as discussed above, either as a batch job or for interactive use. We'll only ask for 1 process because the worker processes will be started automatically from within R (but using the machine names information passed to mpirun).

mpirun -machinefile .hosts -np 1 R CMD BATCH -q --no-save doMPI.R doMPI.out
mpirun -machinefile .hosts -np 1 R --no-save

Here's R code for using Rmpi as the back-end to foreach. If you call startMPIcluster with no arguments, it will start up one fewer worker process than the number of hosts times slots given to mpirun, so your R code will be more portable.

## you should have invoked R as:
## mpirun -machinefile .hosts -np 1 R CMD BATCH --no-save doMPI.R doMPI.out
## unless running within a SLURM job, in which case you should do:
## mpirun R CMD BATCH --no-save file.R file.out

library(Rmpi)
library(doMPI)

cl = startMPIcluster()  # by default will start one fewer slave
# than elements in .hosts

registerDoMPI(cl)
clusterSize(cl) # just to check

results <- foreach(i = 1:200) %dopar% {
  out = mean(rnorm(1e6))
}

closeCluster(cl)

mpi.quit()

mpirun -machinefile .hosts -np 1 R CMD BATCH -q --no-save doMPI.R doMPI.out
cat doMPI.out

## > ## @knitr doMPI
## > 
## > ## you should have invoked R as:
## > ## mpirun -machinefile .hosts -np 1 R CMD BATCH --no-save doMPI.R doMPI.out
## > ## unless running within a SLURM job, in which case you should do:
## > ## mpirun R CMD BATCH --no-save file.R file.out
## > 
## > library(Rmpi)
## > library(doMPI)
## Loading required package: foreach
## Loading required package: iterators
## > 
## > cl = startMPIcluster()  # by default will start one fewer slave
##  3 slaves are spawned successfully. 0 failed.
## > # than elements in .hosts
## > 
## > registerDoMPI(cl)
## > clusterSize(cl) # just to check
## [1] 3
## > 
## > results <- foreach(i = 1:200) %dopar% {
## +   out = mean(rnorm(1e6))
## + }
## > 
## > closeCluster(cl)
## > 
## > mpi.quit()
455 | 456 |

A caution concerning Rmpi/doMPI: when you invoke startMPIcluster(), all the slave R processes become 100% active and stay active until the cluster is closed. In addition, when foreach is actually running, the master process also becomes 100% active. So using this functionality involves some inefficiency in CPU usage. This inefficiency is not seen with a sockets cluster (Section 3.1.4) nor when using other Rmpi functionality - i.e., starting slaves with mpi.spawn.Rslaves and then issuing commands to the slaves.

If you specified -np with more than one process then as with the C-based MPI job above, you can control the threading via OMP_NUM_THREADS and the -x flag to mpirun. Note that this only works when the R processes are directly started by mpirun, which they are not if you set -np 1. The maxcores argument to startMPIcluster() does not seem to function (perhaps it does on other systems).
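
As a rough alternative that works regardless of how the R processes were launched (a sketch, assuming the RhpcBLASctl package is installed on the workers), you can cap BLAS threading from inside each task:

## hypothetical sketch: assumes the doMPI cluster from above is registered
results <- foreach(i = 1:200) %dopar% {
    RhpcBLASctl::blas_set_num_threads(1)  # limit threaded BLAS within this worker
    mean(rnorm(1e6))
}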

Sidenote: You can use doMPI on a single node, which might be useful for avoiding some of the conflicts between R's forking functionality and openBLAS that can cause R to hang when using foreach with doParallel.
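
For example, a minimal sketch (assuming MPI and doMPI are installed on the machine; startMPIcluster can spawn a fixed number of workers locally):

library(doMPI)
cl <- startMPIcluster(count = 4)   # spawn 4 workers on this node
registerDoMPI(cl)
res <- foreach(i = 1:100) %dopar% mean(rnorm(1e5))
closeCluster(cl)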

doSNOW

The doSNOW backend has the advantage that it doesn't need to have MPI installed on the system. MPI can be tricky to install and keep working, so this is an easy approach to using foreach across multiple machines.

Simply start R as you usually would.

Here's R code for using doSNOW as the back-end to foreach. Make sure to use the type = "SOCK" argument or doSNOW will actually use MPI behind the scenes.

library(doSNOW)
machines = c(rep("beren.berkeley.edu", 1),
    rep("gandalf.berkeley.edu", 1),
    rep("arwen.berkeley.edu", 2))

cl = makeCluster(machines, type = "SOCK")
cl

registerDoSNOW(cl)

n = 1e7   # sample size for each task

fun = function(i, n)
  out = mean(rnorm(n))

nTasks <- 120

print(system.time(out <- foreach(i = 1:nTasks) %dopar% {
    outSub <- fun(i, n)
    outSub # this will become part of the out object
}))

stopCluster(cl)  # good practice, but not strictly necessary

Loading packages and accessing variables within your parallel tasks

When using foreach with multiple machines, you need to use the .packages argument (or load the package in the code being run in parallel) to load any packages needed in the code. You do not need to explicitly export variables from the master process to the workers. Rather, foreach determines which variables in the global environment of the master process are used in the code being run in parallel and makes copies of those in each worker process. Note that these variables are read-only on the workers and cannot be modified (if you try to do so, you'll notice that foreach actually did not make copies of the variables that your code tries to modify).
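
For instance, a small sketch using the doSNOW cluster registered above (the MASS package and the nSim variable are just for illustration):

nSim <- 1000
results <- foreach(i = 1:20, .packages = c('MASS')) %dopar% {
    # MASS is loaded on each worker because of .packages;
    # nSim is found in the master's global environment and copied automatically
    mean(mvrnorm(nSim, mu = 0, Sigma = matrix(1)))
}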

3.1.2) Using pbdR

There is a project to enhance R's capability for distributed memory processing called pbdR. For an extensive tutorial, see the pbdDEMO vignette. pbdR is designed for SPMD processing in batch mode, which means that you start up multiple processes in a non-interactive fashion using mpirun. The same code runs in each R process so you need to have the code behavior depend on the process ID.

pbdR provides the following capabilities:

  • the ability to do some parallel apply-style computations (this section),
  • the ability to do distributed linear algebra by interfacing to ScaLapack (see Section 4), and
  • an alternative to Rmpi for interfacing with MPI (see Section 5).

Personally, I think the second of the three is the most exciting, as it's functionality not readily available in R or, more generally, in other readily accessible software.

Let's see parallel-apply style computations in pbdR.

Here's some basic syntax for doing a distributed apply() on a matrix that is on one of the workers. So in this case, the matrix is not initially distributed to the workers – that is done as part of the pbdApply computation. (One can also use pbdApply on matrices that are already distributed, and this is of course recommended for large matrices – see Section 4.)

As mentioned above, pbdR code is always run in batch mode, with the same code running on all of the processes. This means that you often need to explicitly build in logic about which process should execute a given piece of code, including print statements. Here the check for comm.rank() == 0 allows us to only create the matrix and call some print statements on the master node (rank 0).

## you should have invoked R as:
## mpirun -machinefile .hosts -np 4 R CMD BATCH --no-save pbd-apply.R pbd-apply.out
## unless running within a SLURM job, in which case you should do:
## mpirun R CMD BATCH --no-save pbd-apply.R pbd-apply.out

library(pbdMPI, quiet = TRUE )
init()

nrows <- 1e6

if(comm.rank()==0) {
    x <- matrix(rnorm(nrows*50), nrow = nrows)
}

sm <- comm.timer(out <- pbdApply(x, 1, mean, pbd.mode = 'mw', rank.source = 0))
if(comm.rank()==0) {
    print(out[1:5])
    print(sm)
}

finalize()

mpirun -machinefile .hosts -np 4 Rscript pbd-apply.R > pbd-apply.out
cat pbd-apply.out

## [1]  0.17686351 -0.12216986  0.04345966 -0.06581673  0.07439472
##      min     mean      max 
##  7.81000 12.99175 14.74900

In this case it's a fair amount slower to parallelize the calculation than just to do it in R using rowMeans(), because of the overhead of communication (including passing the data) with the workers.

3.1.3) Using parallel apply functionality in Rmpi

Rmpi is a package that provides MPI capabilities from R, including low-level MPI type calls (see Section 5). It also provides high-level wrapper functions that use MPI behind the scenes, including parallel apply functionality for operating on lists (and vectors) with functions such as mpi.parSapply.

The documentation (see help(mpi.parSapply)) describes a number of confusingly-named functions. They appear to be essentially multi-node versions of the analogous parSapply and related functions.

## you should have invoked R as:
## mpirun -machinefile .hosts -np 1 R CMD BATCH --no-save mpi.parSapply.R mpi.parSapply.out
## unless running within a SLURM job, in which case you should do:
## mpirun R CMD BATCH --no-save mpi.parSapply.R mpi.parSapply.out

library(Rmpi)
## on my system, this fails unless explicitly
## ask for one fewer slave than total number of slots across hosts
mpi.spawn.Rslaves(nslaves = mpi.universe.size()-1)

myfun <- function(i) {
      set.seed(i)
      mean(rnorm(1e7))
}

x <- seq_len(25)
# parallel sapply-type calculations on a vector 
system.time(out <- mpi.parSapply(x, myfun))
system.time(out <- mpi.applyLB(x, myfun))

nrows <- 10000
x <- matrix(rnorm(nrows*50), nrow = nrows)
# parallel apply on a matrix
out <- mpi.parApply(x, 1, mean)

mpi.close.Rslaves()
mpi.quit()

mpirun -machinefile .hosts -np 1 R CMD BATCH -q --no-save mpi.parSapply.R mpi.parSapply.out
cat mpi.parSapply.out

## > ## @knitr mpi.parSapply
## > 
## > ## you should have invoked R as:
## > ## mpirun -machinefile .hosts -np 1 R CMD BATCH --no-save mpi.parSapply.R mpi.parSapply.out
## > ## unless running within a SLURM job, in which case you should do:
## > ## mpirun R CMD BATCH --no-save mpi.parSapply.R mpi.parSapply.out
## > 
## > library(Rmpi)
## > ## on my system, this fails unless explicitly
## > ## ask for one fewer slave than total number of slots across hosts
## > mpi.spawn.Rslaves(nslaves = mpi.universe.size()-1)
##  3 slaves are spawned successfully. 0 failed.
## master (rank 0, comm 1) of size 4 is running on: smeagol 
## slave1 (rank 1, comm 1) of size 4 is running on: radagast 
## slave2 (rank 2, comm 1) of size 4 is running on: arwen 
## slave3 (rank 3, comm 1) of size 4 is running on: arwen 
## > 
## > myfun <- function(i) {
## +       set.seed(i)
## +       mean(rnorm(1e7))
## + }
## > 
## > x <- seq_len(25)
## > # parallel sapply-type calculations on a vector 
## > system.time(out <- mpi.parSapply(x, myfun))
##    user  system elapsed 
##   5.612   8.032  13.644 
## > system.time(out <- mpi.applyLB(x, myfun))
##    user  system elapsed 
##   5.400   7.044  12.441 
## > 
## > nrows <- 10000
## > x <- matrix(rnorm(nrows*50), nrow = nrows)
## > # parallel apply on a matrix
## > out <- mpi.parApply(x, 1, mean)
## > 
## > mpi.close.Rslaves()
## [1] 1
## > mpi.quit()

In some cases, it may be useful to specify job.num when the number of tasks is bigger than the number of worker processes to ensure load-balancing.
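
For example (a sketch continuing the example above; the specific numbers are arbitrary), with a handful of workers and 100 quick tasks you might split the work into more jobs than workers but fewer jobs than tasks:

out <- mpi.parSapply(seq_len(100), myfun, job.num = 12)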

3.1.4) Using sockets

One can also set up a cluster with the worker processes communicating via sockets. You just need to specify a character vector with the machine names as the input to makeCluster(). A nice thing about this is that it doesn't involve any of the complications of needing MPI installed.

library(parallel)

machines = c(rep("beren.berkeley.edu", 1),
    rep("gandalf.berkeley.edu", 1),
    rep("arwen.berkeley.edu", 2))
cl = makeCluster(machines)
cl

## socket cluster with 4 nodes on hosts 'beren.berkeley.edu', 'gandalf.berkeley.edu', 'arwen.berkeley.edu'
n = 1e7
clusterExport(cl, c('n'))

fun = function(i)
  out = mean(rnorm(n))

result <- parSapply(cl, 1:20, fun)

result[1:5]

## [1]  1.431600e-04  6.146156e-04 -8.718859e-05  8.976951e-05  1.152365e-04
stopCluster(cl) # not strictly necessary

Note the use of clusterExport, needed to make variables in the master process available to the workers; this involves making a copy of each variable for each worker process. You'd also need to load any packages used in the code being run in parallel in that code.
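
For example, to load a package on every worker of the sockets cluster (MASS here is just an illustration):

clusterEvalQ(cl, library(MASS))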

3.1.5) The partools package

partools is a somewhat new package developed by Norm Matloff at UC-Davis. He has the perspective that Spark/Hadoop are not the right tools in many cases when doing statistics-related work and has developed some simple tools for parallelizing computation across multiple nodes, also referred to as Snowdoop. The tools make use of the key idea in Hadoop of a distributed file system and distributed data objects but avoid the complications of trying to ensure fault tolerance, which is critical only on very large clusters of machines.

I won't go into details, but partools allows you to split up your data across multiple nodes and then read the data into R in parallel across R sessions running on those nodes, all controlled from a single master R session. You can then do operations on the subsets and gather results back to the master session as needed. One point that confused me in the partools vignette is that it shows how to split up a dataset that you can read into your R session, but it's not clear what one does if the dataset is too big to read into a single R session.
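
As a rough sketch of the flavor of the package (assuming partools is installed; 'mydata' is a hypothetical data frame sitting on the master):

library(partools)
cls <- makeCluster(4)        # or a vector of machine names, as above
setclsinfo(cls)              # partools bookkeeping for the cluster
distribsplit(cls, "mydata")  # split the rows of 'mydata' across the workers
# operate on each chunk and gather the pieces back on the master
res <- clusterEvalQ(cls, colMeans(mydata))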

3.2) Python

3.2.1) IPython parallel

One can use IPython's parallelization tools in a context with multiple nodes, though the setup to get the worker processes is a bit more involved when you have multiple nodes. For details on using IPython parallel on a single node, see the parallel basics tutorial appendix.

If we are using the SLURM scheduling software, here's how we start up the worker processes:

ipcontroller --ip='*' &
sleep 25
# next line will start as many ipengines as we have SLURM tasks 
#   because srun is a SLURM command
srun ipengine &  
sleep 45  # wait until all engines have successfully started

We can then run IPython to split up our computational tasks across the engines.

import numpy as np
np.random.seed(0)
n = 500
p = 50
X = np.random.normal(0, 1, size = (n, p))
Y = X[: , 0] + pow(abs(X[:,1] * X[:,2]), 0.5) + X[:,1] - X[:,2] + np.random.normal(0, 1, n)

def looFit(index, Ylocal, Xlocal):
    rf = rfr(n_estimators=100)
    fitted = rf.fit(np.delete(Xlocal, index, axis = 0), np.delete(Ylocal, index))
    pred = rf.predict(np.array([Xlocal[index, :]]))
    return(pred[0])

from ipyparallel import Client
c = Client()
c.ids

dview = c[:]
dview.block = True
dview.apply(lambda : "Hello, World")

lview = c.load_balanced_view()
lview.block = True

dview.execute('from sklearn.ensemble import RandomForestRegressor as rfr')
dview.execute('import numpy as np')
mydict = dict(X = X, Y = Y, looFit = looFit)
dview.push(mydict)

nSub = 50  # for illustration only do a subset

# need a wrapper function because map() only operates on one argument
def wrapper(i):
    return(looFit(i, Y, X))

import time
time.time()
pred = lview.map(wrapper, range(nSub))
time.time()

print(pred[0:10])

# import pylab
# import matplotlib.pyplot as plt
# plt.plot(Y, pred, '.')
# pylab.show()

To finish up, we need to shut down the cluster of workers:

ipcluster stop

To start the engines in a context outside of using SLURM (provided all machines share a filesystem), you should be able to ssh to each machine and run ipengine & once for each worker process you want to start, as follows. In some, but not all, cases (depending on how the network is set up) you may not need the --location flag.

ipcontroller --ip='*' --location=URL_OF_THIS_MACHINE &
sleep 25
nengines=8
ssh other_host "for (( i = 0; i < ${nengines}; i++ )); do ipengine & done"
sleep 45  # wait until all engines have successfully started

3.2.2) pp package

Another way to parallelize across multiple nodes that uses more manual setup and doesn't integrate as well with scheduling software like SLURM is to use the pp package (also useful for parallelizing on a single machine, as discussed in the parallel basics tutorial appendix).

Assuming that the pp package is installed on each node (e.g., sudo apt-get install python-pp on an Ubuntu machine), you need to start up a ppserver process on each node. E.g., if $nodes is a UNIX environment variable containing the names of the worker nodes and you want to start 2 workers per node:

nodes='smeagol radagast beren arwen'
for node in $nodes; do
# cd /tmp is because of issue with starting ppserver in home directory
# -w says how many workers to start on the node
    ssh $node "cd /tmp && ppserver -s mysecretphrase -t 120 -w 2 &" & 
done

Now in our Python code we create a server object and submit jobs to the server object, which manages the farming out of the tasks. Note that this will run interactively in IPython or as a script from UNIX, but there have been times where I was not able to run it interactively in the base Python interpreter. Also note that while we are illustrating this as basically another parallelized for loop, the individual jobs can be whatever calculations you want, so the function (in this case it's always pi.sample) could change from job to job.

import numpy.random
import pp
import time
import pi_code # provided in pi_code.py

samples_per_slice = 10000000
num_slices = 24*20

# remember to start ppserver on worker nodes

# assume 'hosts' contains the names of the nodes on which you 
# started ppserver
nprocsPerNode = 2
hosts = ['smeagol', 'radagast', 'beren', 'arwen']
ppservers = hosts * nprocsPerNode

print ppservers
# put ncpus=0 here or it will start workers locally too
job_server = pp.Server(ncpus = 0, ppservers = tuple(ppservers), secret = 'mysecretphrase')

inputs = [(i, samples_per_slice) for i in xrange(num_slices)]

t0 = time.time()
jobs = [job_server.submit(pi_code.sample, invalue, modules = ('numpy.random',)) for invalue in inputs]
results = [job() for job in jobs]
t1 = time.time()

print "Pi is roughly %f" % (4.0 * sum(results) / (num_slices*samples_per_slice))
print "Time elapsed: ", t1 - t0

python python-pp.py > python-pp.out
cat python-pp.out

['smeagol', 'radagast', 'beren', 'arwen', 'smeagol', 'radagast', 'beren', 'arwen']
Pi is roughly 3.141567
Time elapsed:  32.0389587879

The -t flag used when starting ppserver should ensure that the server processes are removed, but if you need to do it manually, this should work:

for node in $nodes; do
    killall ppserver
done

3.3) Matlab

To use Matlab across multiple nodes, you need to have the Matlab Distributed Computing Server (DCS). If it is installed, one can set up Matlab so that parfor will distribute its work across multiple nodes. Details may vary depending on how DCS is installed on your system.

4) Distributed linear algebra in R using pbdR

4.1) Distributed linear algebra example

And here's how you would set up a distributed matrix and do linear algebra on it. Note that when working with large matrices, you would generally want to construct the matrices (or read from disk) in a parallel fashion rather than creating the full matrix on one worker. For simplicity in the example, I construct the matrix, x, on the master and then create the distributed version of the matrix, dx, with as.ddmatrix.

Here's the code in pbd-linalg.R.

library(pbdDMAT, quiet = TRUE )

n <- 4096*2

# if you are putting multiple processes on node
# you may want to prevent threading of the linear algebra:
# library(RhpcBLASctl)
# blas_set_num_threads(1)
# (or do by passing OMP_NUM_THREADS to mpirun)

init.grid()

if(comm.rank()==0) print(date())

# pbd allows for parallel I/O, but here
# we keep things simple and distribute
# an object from one process
if(comm.rank() == 0) {
    x <- rnorm(n^2)
    dim(x) <- c(n, n)
} else x <- NULL
dx <- as.ddmatrix(x)

timing <- comm.timer(sigma <- crossprod(dx))

if(comm.rank()==0) {
    print(date())
    print(timing)
}

timing <- comm.timer(out <- chol(sigma))

if(comm.rank()==0) {
    print(date())
    print(timing)
}

finalize()

As before we run the job in batch mode via mpirun:

export OMP_NUM_THREADS=1
mpirun -machinefile .hosts -np 4 -x OMP_NUM_THREADS Rscript pbd-linalg.R > pbd-linalg.out
cat pbd-linalg.out

## Using 2x2 for the default grid size
## 
## [1] "Sat Oct 17 12:05:38 2015"
## [1] "Sat Oct 17 12:06:54 2015"
##    min   mean    max 
## 48.086 50.806 52.585 
## [1] "Sat Oct 17 12:08:10 2015"
##      min     mean      max 
## 76.47000 76.51125 76.53300

You may want to set the bldim argument to as.ddmatrix. That determines the size of the submatrices (aka 'blocks') into which the overall matrix is split. Generally, multiple submatrices are owned by an individual worker process. For example, to use 100x100 blocks, you'd have

dx <- as.ddmatrix(x, bldim = c(100, 100))

In general, you don't want the blocks too big, as the work may not be well load-balanced, or too small, as that may have a higher computational cost in terms of latency and communication. My experiments suggest that it's worth exploring block sizes of 10x10 through 1000x1000 (if you have square matrices).

As a quick, completely non-definitive point of comparison, doing the crossproduct and Cholesky for the 8192x8192 matrix on 3 EC2 nodes (2 cores per node) with -np 6 took 39 seconds for each operation, while doing it with two threads on the master node took 64 seconds (crossproduct) and 23 seconds (Cholesky). While that is a single test, some other experiments I've done also haven't shown much speedup in using multiple nodes with pbdR compared to simply using a threaded BLAS on one machine. So you may need to get fairly big matrices that won't fit in memory on a single machine before it's worthwhile to do the computation in distributed fashion using pbdR.

4.2) Constructing a distributed matrix in parallel

pbdR has functionality for reading in parallel from a parallel file system such as Lustre (available on Berkeley's Savio cluster). Things are a bit more complicated if that's not the case. Here's some code that illustrates how to construct a distributed matrix from constituent column blocks. First create a distributed version of the matrix using a standard R matrix with each process owning a block of columns (I haven't yet gotten the syntax to work for blocks of rows). Then create a pbd version of that distributed matrix and finally convert the distributed matrix to a standard pbd block structure on which the linear algebra can be done efficiently.

library(pbdDMAT, quiet = TRUE)
init.grid()

nprocs <- comm.size()

nrows <- 10000
ncolsPerBlock <- nrows/nprocs

# each process has a block of columns as an R matrix
subdata <- matrix(rnorm(nrows * ncolsPerBlock), ncol = ncolsPerBlock)

# now construct the distributed matrix object
tmp <- ddmatrix(data = subdata, nrow = nrows, ncol = nrows,
               bldim = c(nrows, ncolsPerBlock), ICTXT = 1)
# now rearrange the blocks for better linear algebra performance
dx <- redistribute(tmp, bldim = c(100, 100), ICTXT = 0)

finalize()

The code above creates the submatrices within the R sessions, but one could also read in from separate files, one per process.

The code in redistribute-test.R demonstrates that constructing the full matrix from column-wise blocks with this syntax works correctly.

5) MPI

5.1) MPI Overview

There are multiple MPI implementations, of which openMPI and mpich are the most common; we'll use openMPI here.

In MPI programming, the same code runs on all the machines. This is called SPMD (single program, multiple data). As we saw a bit with the pbdR code, one invokes the same code (same program) multiple times, but the behavior of the code can be different based on querying the rank (ID) of the process. Since MPI operates in a distributed fashion, any transfer of information between processes must be done explicitly via send and receive calls (e.g., MPI_Send, MPI_Recv, MPI_Isend, and MPI_Irecv). (The "MPI_" prefix is for C code; C++ just has Send, Recv, etc.)

The latter two of these functions (MPI_Isend and MPI_Irecv) are so-called non-blocking calls. One important concept to understand is the difference between blocking and non-blocking calls. Blocking calls wait until the call finishes, while non-blocking calls return and allow the code to continue. Non-blocking calls can be more efficient, but can lead to problems with synchronization between processes.

In addition to send and receive calls to transfer to and from specific processes, there are calls that send out data to all processes (MPI_Scatter), gather data back (MPI_Gather) and perform reduction operations (MPI_Reduce).

Debugging MPI code can be tricky because communication can hang, error messages from the workers may not be seen or readily accessible, and it can be difficult to assess the state of the worker processes.

5.2) Basic syntax for MPI in C

Here's a basic hello world example. The code is also in mpiHello.c.

// see mpiHello.c
#include <stdio.h>
#include <math.h>
#include <mpi.h>

int main(int argc, char** argv) {
    int myrank, nprocs, namelen;
    char process_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Get_processor_name(process_name, &namelen);
    printf("Hello from processor %d of %d on %s\n",
        myrank, nprocs, process_name);
    MPI_Finalize();
    return 0;
}

There are C (mpicc) and C++ (mpic++) compilers for MPI programs (mpicxx and mpiCC are synonyms). I'll use the MPI C++ compiler even though the code is all plain C code.

mpicxx mpiHello.c -o mpiHello
cat .hosts # what hosts do I expect it to run on?
mpirun -machinefile .hosts -np 4 mpiHello

## smeagol slots=1
## radagast slots=1
## arwen slots=2
## Hello from processor 0 of 4 on smeagol
## Hello from processor 1 of 4 on radagast
## Hello from processor 2 of 4 on arwen
## Hello from processor 3 of 4 on arwen

To actually write real MPI code, you'll need to go learn some of the MPI syntax. See quad_mpi.c and quad_mpi.cpp, which are example C and C++ programs (for approximating an integral via quadrature) that show some of the basic MPI functions. Compilation and running are as above:

mpicxx quad_mpi.cpp -o quad_mpi
mpirun -machinefile .hosts -np 4 quad_mpi

## 27 September 2017 06:52:43 PM
## 
## QUAD_MPI
##   C++/MPI version
##   Estimate an integral of f(x) from A to B.
##   f(x) = 50 / (pi * ( 2500 * x * x + 1 ) )
## 
##   A = 0
##   B = 10
##   N = 999999999
##   EXACT =       0.4993633810764567
## 
##   Use MPI to divide the computation among 4 total processes,
##   of which one is the master and does not do core computations.
##   Process 1 contributed MY_TOTAL = 0.49809
##   Process 3 contributed MY_TOTAL = 0.000318308
## 
##   Estimate =       0.4993634591634721
##   Error = 7.808701535383378e-08
##   Time = 10.03146505355835
##   Process 2 contributed MY_TOTAL = 0.00095491
## 
## QUAD_MPI:
##   Normal end of execution.
## 
## 27 September 2017 06:52:53 PM

5.3) Using MPI from R via Rmpi or pbdR

5.3.1) Rmpi

R users can use Rmpi to interface with MPI.

Here's some example code that uses actual Rmpi syntax (as opposed to foreach with Rmpi as the back-end, where the use of Rmpi was hidden from us). The syntax is very similar to the MPI C syntax we've already seen. This code runs in a master-slave paradigm where the master starts the slaves and invokes commands on them. It may be possible to run Rmpi in a context where each process runs the same code based on invoking with Rmpi, but I haven't investigated this further.

# example syntax of standard MPI functions

library(Rmpi)
## by default this should start one fewer workers than processes
## saving one for the master
## but on my system, this fails unless explicitly
## ask for one fewer slave than total number of slots across hosts
mpi.spawn.Rslaves(nslaves = mpi.universe.size()-1)

n = 5
mpi.bcast.Robj2slave(n)
mpi.bcast.cmd(id <- mpi.comm.rank())
mpi.bcast.cmd(x <- rnorm(id))

mpi.remote.exec(ls(.GlobalEnv))

mpi.bcast.cmd(y <- 2 * x)
mpi.remote.exec(print(y))

objs <- as.list(c('x', 'n'))
# next command sends value of objs on _master_ as argument to rm
mpi.remote.exec(do.call, rm, objs)

# verify that 'n' is gone:
mpi.remote.exec(print(n))

# collect results back via send/recv
mpi.remote.exec(mpi.send.Robj(y, dest = 0, tag = 1))
results = list()
for(i in 1:(mpi.comm.size()-1)){
  results[[i]] = mpi.recv.Robj(source = i, tag = 1)
}

print(results)

mpi.close.Rslaves()
mpi.quit()

mpi.bcast.cmd and mpi.remote.exec are quite similar - they execute a function on the workers and can also use arguments on the master as inputs to the function evaluated on the workers (see the … argument). mpi.remote.exec can return the results of the execution to the master.
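
For example (a small sketch continuing the code above), the master's value of n is passed as an argument to rnorm, which each worker then evaluates and returns:

mpi.remote.exec(rnorm, n)   # each slave runs rnorm(5), using the master's n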

As before, we would start R via mpirun, requesting one process, since the workers are started within R via mpi.spawn.Rslaves.

mpirun -machinefile .hosts -np 1 R CMD BATCH -q --no-save Rmpi.R Rmpi.out
cat Rmpi.out

## > ## @knitr Rmpi
## > 
## > # example syntax of standard MPI functions
## > 
## > library(Rmpi)
## > ## by default this should start one fewer workers than processes
## > ## saving one for the master
## > ## but on my system, this fails unless explicitly
## > ## ask for one fewer slave than total number of slots across hosts
## > mpi.spawn.Rslaves(nslaves = mpi.universe.size()-1)
##  3 slaves are spawned successfully. 0 failed.
## master (rank 0, comm 1) of size 4 is running on: smeagol 
## slave1 (rank 1, comm 1) of size 4 is running on: radagast 
## slave2 (rank 2, comm 1) of size 4 is running on: arwen 
## slave3 (rank 3, comm 1) of size 4 is running on: arwen 
## > 
## > n = 5
## > mpi.bcast.Robj2slave(n)
## > mpi.bcast.cmd(id <- mpi.comm.rank())
## > mpi.bcast.cmd(x <- rnorm(id))
## > 
## > mpi.remote.exec(ls(.GlobalEnv))
## $slave1
## [1] "id" "n"  "x" 
## 
## $slave2
## [1] "id" "n"  "x" 
## 
## $slave3
## [1] "id" "n"  "x" 
## 
## > 
## > mpi.bcast.cmd(y <- 2 * x)
## > mpi.remote.exec(print(y))
## $slave1
## [1] -1.358116
## 
## $slave2
## [1] -0.8357533 -0.3354547
## 
## $slave3
## [1] -2.750742  1.510654 -1.412469
## 
## > 
## > objs <- as.list(c('x', 'n'))
## > # next command sends value of objs on _master_ as argument to rm
## > mpi.remote.exec(do.call, rm, objs)
## $slave3
## [1] 0
## 
## > 
## > # verify that 'n' is gone:
## > mpi.remote.exec(print(n))
## $slave1
## [1] "Error in print(n) : object 'n' not found\n"
## attr(,"class")
## [1] "try-error"
## attr(,"condition")
## <simpleError in print(n): object 'n' not found>
## 
## $slave2
## [1] "Error in print(n) : object 'n' not found\n"
## attr(,"class")
## [1] "try-error"
## attr(,"condition")
## <simpleError in print(n): object 'n' not found>
## 
## $slave3
## [1] "Error in print(n) : object 'n' not found\n"
## attr(,"class")
## [1] "try-error"
## attr(,"condition")
## <simpleError in print(n): object 'n' not found>
## 
## > 
## > # collect results back via send/recv
## > mpi.remote.exec(mpi.send.Robj(y, dest = 0, tag = 1))
## $slave1
##          used (Mb) gc trigger (Mb) max used (Mb)
## Ncells 327615 17.5     592000 31.7   550748 29.5
## Vcells 600412  4.6    1308461 10.0   786430  6.0
## 
## $slave2
##          used (Mb) gc trigger (Mb) max used (Mb)
## Ncells 327615 17.5     592000 31.7   550748 29.5
## Vcells 600412  4.6    1308461 10.0   786430  6.0
## 
## $slave3
##          used (Mb) gc trigger (Mb) max used (Mb)
## Ncells 327615 17.5     592000 31.7   550710 29.5
## Vcells 600414  4.6    1308461 10.0   786430  6.0
## 
## > results = list()
## > for(i in 1:(mpi.comm.size()-1)){
## +   results[[i]] = mpi.recv.Robj(source = i, tag = 1)
## + }
## > 
## > print(results)
## [[1]]
## [1] -1.358116
## 
## [[2]]
## [1] -0.8357533 -0.3354547
## 
## [[3]]
## [1] -2.750742  1.510654 -1.412469
## 
## > 
## > mpi.close.Rslaves()
## [1] 1
## > mpi.quit()

Note that if you do this in interactive mode, some of the usual functionality of command line R (tab completion, scrolling for history) is not enabled and errors will cause R to quit. This occurs because passing things through mpirun causes R to think it is not running interactively.

Note: in some cases a cluster/supercomputer will be set up so that Rmpi is loaded and the worker processes are already started when you start R. In this case you wouldn't need to load Rmpi or use mpi.spawn.Rslaves. You can always run mpi.comm.size() to see how many workers are running.

5.3.2) pbdMPI in pbdR

Here's an example of distributing an embarrassingly parallel calculation (estimating an integral via Monte Carlo - in this case estimating the value of pi).

library(pbdMPI, quiet = TRUE )
init()

myRank <- comm.rank() # comm index starts at 0, not 1
comm.print(myRank, all.rank=TRUE)
node <- system("cat /etc/hostname", intern = TRUE) # Sys.getenv("HOSTNAME")
if(myRank == 0) {
    comm.print(paste0("hello, world from ", myRank, " ", node), all.rank=TRUE)
} else comm.print(paste0("goodbye from ", myRank, " ", node), all.rank=TRUE)

if(comm.rank() == 0) print(date())
set.seed(myRank)  # see parallel basics tutorial for more on parallel random number generation
N.gbd <- 1e7
X.gbd <- matrix(runif(N.gbd * 2), ncol = 2)
r.gbd <- sum(rowSums(X.gbd^2) <= 1)
ret <- allreduce(c(N.gbd, r.gbd), op = "sum")
PI <- 4 * ret[2] / ret[1]
comm.print(paste0("Pi is roughly: ", PI))
if(comm.rank() == 0) print(date())

finalize()

mpirun -machinefile .hosts -np 4 Rscript pbd-mpi.R > pbd-mpi.out
cat pbd-mpi.out

## COMM.RANK = 2
## [1] 2
## COMM.RANK = 3
## [1] 3
## COMM.RANK = 1
## [1] 1
## COMM.RANK = 0
## [1] 0
## COMM.RANK = 0
## [1] "hello, world from 0 scf-sm10"
## [1] "Sat Oct 17 12:21:28 2015"
## COMM.RANK = 3
## [1] "goodbye from 3 scf-sm11"
## COMM.RANK = 1
## [1] "goodbye from 1 scf-sm10"
## COMM.RANK = 2
## [1] "goodbye from 2 scf-sm11"
## COMM.RANK = 0
## [1] "Pi is roughly: 3.1421032"
## [1] "Sat Oct 17 12:21:31 2015"

5.4) Using MPI from Python via mpi4py

Here's some basic use of MPI within Python.

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD

# simple print out Rank & Size
id = comm.Get_rank()
print("Of ", comm.Get_size() , " workers, I am number " , id, ".")

def f(id, n):
    np.random.seed(id)
    return(np.mean(np.random.normal(0, 1, n)))

n = 1000000
result = f(id, n)

output = comm.gather(result, root = 0)

if id == 0:
    print(output)

To run the code, we start Python through the mpirun command as done previously.

mpirun -machinefile .hosts -np 4 python example-mpi.py 
## Of  4  workers, I am number  0 .
## Of  4  workers, I am number  3 .
## Of  4  workers, I am number  2 .
## Of  4  workers, I am number  1 .
## [0.0015121465155362318, 0.00065180430801923422, -0.000977212317921356, 0.001958404534987673]

More generally, you can send, receive, broadcast, gather, etc. as with MPI itself.

mpi4py generally does not work interactively.

6) Parallelization strategies

The following are some basic principles/suggestions for how to parallelize your computation.

Should I use one machine/node or many machines/nodes?

  • If you can do your computation on the cores of a single node using shared memory, that will be faster than using the same number of cores (or even somewhat more cores) across multiple nodes. Similarly, jobs with a lot of data/high memory requirements that one might think of as requiring Spark or Hadoop may in some cases be much faster if you can find a single machine with a lot of memory.
  • That said, if you would run out of memory on a single node, then you'll need to use distributed memory.

What level or dimension should I parallelize over?

  • If you have nested loops, you generally only want to parallelize at one level of the code. That said, there may be cases in which it is helpful to do both. Keep in mind whether your linear algebra is being threaded. Often you will want to parallelize over a loop and not use threaded linear algebra.
  • Often it makes sense to parallelize the outer loop when you have nested loops.
  • You generally want to parallelize in such a way that your code is load-balanced and does not involve too much communication.

How do I balance communication overhead with keeping my cores busy?

  • If you have very few tasks, particularly if the tasks take different amounts of time, often some of the processors will be idle and your code poorly load-balanced.
  • If you have very many tasks and each one takes little time, the communication overhead of starting and stopping the tasks will reduce efficiency.

Should multiple tasks be pre-assigned to a process (i.e., a worker) (sometimes called prescheduling) or should tasks be assigned dynamically as previous tasks finish?

  • Basically if you have many tasks that each take similar time, you want to preschedule the tasks to reduce communication. If you have few tasks or tasks with highly variable completion times, you don't want to preschedule, to improve load-balancing.
  • For R in particular, some of R's parallel functions allow you to say whether the tasks should be prescheduled. E.g., library(Rmpi); help(mpi.parSapply) gives some information. See the sketch after this list for an example with the parallel package.
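
As a small sketch with the parallel package's socket cluster and fun() from Section 3.1.4 (the number of tasks here is arbitrary), parSapply preschedules the tasks in contiguous chunks, while parSapplyLB hands them out dynamically as workers finish:

res1 <- parSapply(cl, 1:100, fun)     # tasks prescheduled across the workers
res2 <- parSapplyLB(cl, 1:100, fun)   # tasks assigned dynamically (load-balanced)
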
1427 | 1428 | 1429 | 1430 | 1431 | -------------------------------------------------------------------------------- /pbd-apply.R: -------------------------------------------------------------------------------- 1 | ## @knitr pbd-apply 2 | 3 | ## you should have invoked R as: 4 | ## mpirun -machinefile .hosts -np 4 R CMD BATCH --no-save pbd-apply.R pbd-apply.out 5 | ## unless running within a SLURM job, in which case you should do: 6 | ## mpirun R CMD BATCH --no-save pbd-apply.R pbd-apply.out 7 | 8 | library(pbdMPI, quiet = TRUE ) 9 | init() 10 | 11 | nrows <- 1e6 12 | 13 | if(comm.rank()==0) { 14 | x <- matrix(rnorm(nrows*50), nrow = nrows) 15 | } 16 | 17 | sm <- comm.timer(out <- pbdApply(x, 1, mean, pbd.mode = 'mw', rank.source = 0)) 18 | if(comm.rank()==0) { 19 | print(out[1:5]) 20 | print(sm) 21 | } 22 | 23 | finalize() 24 | -------------------------------------------------------------------------------- /pbd-construct.R: -------------------------------------------------------------------------------- 1 | ## @knitr pbd-construct 2 | library(pbdDMAT, quiet = TRUE) 3 | init.grid() 4 | 5 | nprocs <- comm.size() 6 | 7 | nrows <- 10000 8 | ncolsPerBlock <- nrows/nprocs 9 | 10 | # each process has a block of columns as an R matrix 11 | subdata <- matrix(rnorm(nrows * ncolsPerBlock), ncol = ncols) 12 | 13 | # now construct the distributed matrix object 14 | tmp <- ddmatrix(data = subdata, nrow = nrows, ncol = nrows, 15 | bldim = c(nrows, ncolsPerBlock), ICTXT = 1) 16 | # now rearrange the blocks for better linear algebra performance 17 | dx <- redistribute(tmp, bldim = c(100, 100), ICTXT = 0) 18 | 19 | finalize () 20 | -------------------------------------------------------------------------------- /pbd-linalg.R: -------------------------------------------------------------------------------- 1 | ## @knitr pbd-linalg 2 | 3 | library(pbdDMAT, quiet = TRUE ) 4 | 5 | n <- 4096*2 6 | 7 | # if you are putting multiple processes on node 8 | # you may want to prevent threading of the linear algebra: 9 | # library(RhpcBLASctl) 10 | # blas_set_num_threads(1) 11 | # (or do by passing OMP_NUM_THREADS to mpirun) 12 | 13 | init.grid() 14 | 15 | if(comm.rank()==0) print(date()) 16 | 17 | # pbd allows for parallel I/O, but here 18 | # we keep things simple and distribute 19 | # an object from one process 20 | if(comm.rank() == 0) { 21 | x <- rnorm(n^2) 22 | dim(x) <- c(n, n) 23 | } else x <- NULL 24 | dx <- as.ddmatrix(x) 25 | 26 | timing <- comm.timer(sigma <- crossprod(dx)) 27 | 28 | if(comm.rank()==0) { 29 | print(date()) 30 | print(timing) 31 | } 32 | 33 | timing <- comm.timer(out <- chol(sigma)) 34 | 35 | if(comm.rank()==0) { 36 | print(date()) 37 | print(timing) 38 | } 39 | 40 | finalize() 41 | -------------------------------------------------------------------------------- /pbd-mpi.R: -------------------------------------------------------------------------------- 1 | ## @knitr pbd-mpi 2 | 3 | 4 | library(pbdMPI, quiet = TRUE ) 5 | init() 6 | 7 | myRank <- comm.rank() # comm index starts at 0 , not 1 8 | comm.print(myRank , all.rank=TRUE) 9 | node <- system("cat /etc/hostname", intern = TRUE) # Sys.getenv("HOSTNAME") 10 | if(myRank == 0) { 11 | comm.print(paste0("hello, world from ", myRank, " ", node), all.rank=TRUE) 12 | } else comm.print(paste0("goodbye from ", myRank, " ", node), all.rank=TRUE) 13 | 14 | if(comm.rank() == 0) print(date()) 15 | set.seed(myRank) # see parallel basics tutorial for more on parallel random number generation 16 | N.gbd <- 1e7 17 | X.gbd <- matrix(runif(N.gbd * 2), ncol 
= 2) 18 | r.gbd <- sum(rowSums(X.gbd^2) <= 1) 19 | ret <- allreduce(c(N.gbd,r.gbd), op = "sum") 20 | PI <- 4 * ret [2] / ret [1] 21 | comm.print(paste0("Pi is roughly: ", PI)) 22 | if(comm.rank() == 0) print(date()) 23 | 24 | finalize() 25 | -------------------------------------------------------------------------------- /pi_code.py: -------------------------------------------------------------------------------- 1 | import numpy.random 2 | 3 | def sample(p, nSamples): 4 | numpy.random.seed(p) 5 | x, y = numpy.random.random(nSamples), numpy.random.random(nSamples) 6 | return numpy.sum(x*x + y*y < 1) 7 | -------------------------------------------------------------------------------- /python-pp.py: -------------------------------------------------------------------------------- 1 | ## @knitr python-pp 2 | 3 | import numpy.random 4 | import pp 5 | import time 6 | import pi_code # provided in pi_code.py 7 | 8 | samples_per_slice = 10000000 9 | num_slices = 24*20 10 | 11 | # remember to start ppserver on worker nodes 12 | 13 | # assume 'hosts' contains the names of the nodes on which you 14 | # started ppserver 15 | nprocsPerNode = 2 16 | hosts = ['smeagol', 'radagast', 'beren', 'arwen'] 17 | ppservers = hosts * nprocsPerNode 18 | 19 | print ppservers 20 | # put ncpus=0 here or it will start workers locally too 21 | job_server = pp.Server(ncpus = 0, ppservers = tuple(ppservers), secret = 'mysecretphrase') 22 | 23 | inputs = [(i, samples_per_slice) for i in xrange(num_slices)] 24 | 25 | t0 = time.time() 26 | jobs = [job_server.submit(pi_code.sample, invalue, modules = ('numpy.random',)) for invalue in inputs] 27 | results = [job() for job in jobs] 28 | t1 = time.time() 29 | 30 | print "Pi is roughly %f" % (4.0 * sum(results) / (num_slices*samples_per_slice)) 31 | print "Time elapsed: ", t1 - t0 32 | 33 | -------------------------------------------------------------------------------- /quad_mpi.c: -------------------------------------------------------------------------------- 1 | # include 2 | # include 3 | # include 4 | # include 5 | 6 | # include "mpi.h" 7 | 8 | int main ( int argc, char *argv[] ); 9 | double f ( double x ); 10 | void timestamp ( void ); 11 | 12 | /******************************************************************************/ 13 | 14 | int main ( int argc, char *argv[] ) 15 | 16 | /******************************************************************************/ 17 | /* 18 | Purpose: 19 | 20 | MAIN is the main program for QUAD_MPI. 21 | 22 | Licensing: 23 | 24 | This code is distributed under the GNU LGPL license. 25 | 26 | Modified: 27 | 28 | 19 July 2010 29 | 30 | Author: 31 | 32 | John Burkardt 33 | */ 34 | { 35 | double a; 36 | double b; 37 | double error; 38 | double exact; 39 | int i; 40 | int master = 0; 41 | double my_a; 42 | double my_b; 43 | int my_id; 44 | int my_n; 45 | double my_total; 46 | int n; 47 | int p; 48 | int p_num; 49 | int source; 50 | MPI_Status status; 51 | int tag; 52 | int target; 53 | double total; 54 | double wtime; 55 | double x; 56 | 57 | a = 0.0; 58 | b = 10.0; 59 | n = 10000000; 60 | exact = 0.49936338107645674464; 61 | /* 62 | Initialize MPI. 63 | */ 64 | MPI_Init ( &argc, &argv ); 65 | /* 66 | Get this processor's ID. 67 | */ 68 | MPI_Comm_rank ( MPI_COMM_WORLD, &my_id ); 69 | /* 70 | Get the number of processes. 71 | */ 72 | MPI_Comm_size ( MPI_COMM_WORLD, &p_num ); 73 | 74 | if ( my_id == master ) 75 | { 76 | /* 77 | We want N to be the total number of evaluations. 78 | If necessary, we adjust N to be divisible by the number of processes. 
79 | */ 80 | my_n = n / ( p_num - 1 ); 81 | n = ( p_num - 1 ) * my_n; 82 | 83 | wtime = MPI_Wtime ( ); 84 | 85 | timestamp ( ); 86 | printf ( "\n" ); 87 | printf ( "QUAD_MPI\n" ); 88 | printf ( " C/MPI version\n" ); 89 | printf ( " Estimate an integral of f(x) from A to B.\n" ); 90 | printf ( " f(x) = 50 / (pi * ( 2500 * x * x + 1 ) )\n" ); 91 | printf ( "\n" ); 92 | printf ( " A = %f\n", a ); 93 | printf ( " B = %f\n", b ); 94 | printf ( " N = %d\n", n ); 95 | printf ( " EXACT = %24.16f\n", exact ); 96 | printf ( "\n" ); 97 | printf ( " Use MPI to divide the computation among\n" ); 98 | printf ( " multiple processes.\n" ); 99 | } 100 | 101 | source = master; 102 | MPI_Bcast ( &my_n, 1, MPI_INT, source, MPI_COMM_WORLD ); 103 | /* 104 | Process 0 assigns each process a subinterval of [A,B]. 105 | */ 106 | if ( my_id == master ) 107 | { 108 | for ( p = 1; p <= p_num - 1; p++ ) 109 | { 110 | my_a = ( ( double ) ( p_num - p ) * a 111 | + ( double ) ( p - 1 ) * b ) 112 | / ( double ) ( p_num - 1 ); 113 | 114 | target = p; 115 | tag = 1; 116 | MPI_Send ( &my_a, 1, MPI_DOUBLE, target, tag, MPI_COMM_WORLD ); 117 | 118 | my_b = ( ( double ) ( p_num - p - 1 ) * a 119 | + ( double ) ( p ) * b ) 120 | / ( double ) ( p_num - 1 ); 121 | 122 | target = p; 123 | tag = 2; 124 | MPI_Send ( &my_b, 1, MPI_DOUBLE, target, tag, MPI_COMM_WORLD ); 125 | } 126 | total = 0.0; 127 | my_total = 0.0; 128 | } 129 | /* 130 | Processes receive MY_A, MY_B, and compute their part of the integral. 131 | */ 132 | else 133 | { 134 | source = master; 135 | tag = 1; 136 | MPI_Recv ( &my_a, 1, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &status ); 137 | 138 | source = master; 139 | tag = 2; 140 | MPI_Recv ( &my_b, 1, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &status ); 141 | 142 | my_total = 0.0; 143 | for ( i = 1; i <= my_n; i++ ) 144 | { 145 | x = ( ( double ) ( my_n - i ) * my_a 146 | + ( double ) ( i - 1 ) * my_b ) 147 | / ( double ) ( my_n - 1 ); 148 | my_total = my_total + f ( x ); 149 | } 150 | 151 | my_total = ( my_b - my_a ) * my_total / ( double ) ( my_n ); 152 | 153 | printf ( " Process %d contributed MY_TOTAL = %f\n", my_id, my_total ); 154 | } 155 | /* 156 | Each process sends its value to the master process. 157 | */ 158 | MPI_Reduce ( &my_total, &total, 1, MPI_DOUBLE, MPI_SUM, master, MPI_COMM_WORLD ); 159 | /* 160 | Compute the weighted estimate. 161 | */ 162 | if ( my_id == master ) 163 | { 164 | error = fabs ( total - exact ); 165 | wtime = MPI_Wtime ( ) - wtime; 166 | 167 | printf ( "\n" ); 168 | printf ( " Estimate = %24.16f\n", total ); 169 | printf ( " Error = %e\n\n", error ); 170 | printf ( " Time = %f\n\n", wtime ); 171 | } 172 | /* 173 | Terminate MPI. 174 | */ 175 | MPI_Finalize ( ); 176 | /* 177 | Terminate. 178 | */ 179 | if ( my_id == master ) 180 | { 181 | printf ( "\n" ); 182 | printf ( "QUAD_MPI:\n" ); 183 | printf ( " Normal end of execution.\n" ); 184 | printf ( "\n" ); 185 | timestamp ( ); 186 | } 187 | 188 | return 0; 189 | } 190 | /******************************************************************************/ 191 | 192 | double f ( double x ) 193 | 194 | /******************************************************************************/ 195 | /* 196 | Purpose: 197 | 198 | F evaluates the function. 
199 | */ 200 | { 201 | double pi; 202 | double value; 203 | 204 | pi = 3.141592653589793; 205 | value = 50.0 / ( pi * ( 2500.0 * x * x + 1.0 ) ); 206 | 207 | return value; 208 | } 209 | /******************************************************************************/ 210 | 211 | void timestamp ( void ) 212 | 213 | /******************************************************************************/ 214 | /* 215 | Purpose: 216 | 217 | TIMESTAMP prints the current YMDHMS date as a time stamp. 218 | 219 | Example: 220 | 221 | 31 May 2001 09:45:54 AM 222 | 223 | Licensing: 224 | 225 | This code is distributed under the GNU LGPL license. 226 | 227 | Modified: 228 | 229 | 24 September 2003 230 | 231 | Author: 232 | 233 | John Burkardt 234 | 235 | Parameters: 236 | 237 | None 238 | */ 239 | { 240 | # define TIME_SIZE 40 241 | 242 | static char time_buffer[TIME_SIZE]; 243 | const struct tm *tm; 244 | time_t now; 245 | 246 | now = time ( NULL ); 247 | tm = localtime ( &now ); 248 | 249 | strftime ( time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm ); 250 | 251 | printf ( "%s\n", time_buffer ); 252 | 253 | return; 254 | # undef TIME_SIZE 255 | } 256 | 257 | -------------------------------------------------------------------------------- /quad_mpi.cpp: -------------------------------------------------------------------------------- 1 | # include 2 | # include 3 | # include 4 | # include 5 | # include 6 | # include 7 | # include 8 | # include 9 | 10 | using namespace std; 11 | 12 | # include "mpi.h" 13 | 14 | int main ( int argc, char *argv[] ); 15 | double f ( double x ); 16 | void timestamp ( ); 17 | 18 | //****************************************************************************80 19 | 20 | int main ( int argc, char *argv[] ) 21 | 22 | //****************************************************************************80 23 | // 24 | // Purpose: 25 | // 26 | // MAIN is the main program for QUAD_MPI. 27 | // 28 | // Licensing: 29 | // 30 | // This code is distributed under the GNU LGPL license. 31 | // 32 | // Modified: 33 | // 34 | // 19 July 2010 35 | // 36 | // Author: 37 | // 38 | // John Burkardt 39 | // 40 | { 41 | double a; 42 | double b; 43 | double error; 44 | double exact; 45 | int i; 46 | int id; 47 | double my_a; 48 | double my_b; 49 | int my_n; 50 | double my_total; 51 | int n; 52 | int p; 53 | int q; 54 | int source; 55 | MPI::Status status; 56 | int tag; 57 | int target; 58 | double total; 59 | double wtime; 60 | double x; 61 | 62 | a = 0.0; 63 | b = 10.0; 64 | n = 1000000000; 65 | exact = 0.49936338107645674464; 66 | // 67 | // Initialize MPI. 68 | // 69 | MPI::Init ( argc, argv ); 70 | // 71 | // Get this processor's ID. 72 | // 73 | id = MPI::COMM_WORLD.Get_rank ( ); 74 | // 75 | // Get the number of processors. 76 | // 77 | p = MPI::COMM_WORLD.Get_size ( ); 78 | 79 | if ( id == 0 ) 80 | { 81 | // 82 | // We want N to be the total number of evaluations. 83 | // If necessary, we adjust N to be divisible by the number of processors. 
84 | // 85 | my_n = n / ( p - 1 ); 86 | n = ( p - 1 ) * my_n; 87 | 88 | wtime = MPI::Wtime ( ); 89 | 90 | timestamp ( ); 91 | cout << "\n"; 92 | cout << "QUAD_MPI\n"; 93 | cout << " C++/MPI version\n"; 94 | cout << " Estimate an integral of f(x) from A to B.\n"; 95 | cout << " f(x) = 50 / (pi * ( 2500 * x * x + 1 ) )\n"; 96 | cout << "\n"; 97 | cout << " A = " << a << "\n"; 98 | cout << " B = " << b << "\n"; 99 | cout << " N = " << n << "\n"; 100 | cout << " EXACT = " << setw(24) << setprecision(16) << exact << "\n"; 101 | cout << "\n"; 102 | cout << " Use MPI to divide the computation among " << p << " total processes,\n"; 103 | cout << " of which one is the master and does not do core computations.\n"; 104 | } 105 | 106 | source = 0; 107 | MPI::COMM_WORLD.Bcast ( &my_n, 1, MPI::INT, source ); 108 | // 109 | // Process 0 assigns each process a subinterval of [A,B]. 110 | // 111 | if ( id == 0 ) 112 | { 113 | for ( q = 1; q <= p - 1; q++ ) 114 | { 115 | my_a = ( ( double ) ( p - q ) * a 116 | + ( double ) ( q - 1 ) * b ) 117 | / ( double ) ( p - 1 ); 118 | 119 | target = q; 120 | tag = 1; 121 | MPI::COMM_WORLD.Send ( &my_a, 1, MPI::DOUBLE, target, tag ); 122 | 123 | my_b = ( ( double ) ( p - q - 1 ) * a 124 | + ( double ) ( q ) * b ) 125 | / ( double ) ( p - 1 ); 126 | 127 | target = q; 128 | tag = 2; 129 | MPI::COMM_WORLD.Send ( &my_b, 1, MPI::DOUBLE, target, tag ); 130 | } 131 | total = 0.0; 132 | my_total = 0.0; 133 | } 134 | // 135 | // Processes receive MY_A, MY_B, and compute their part of the integral. 136 | // 137 | else 138 | { 139 | source = 0; 140 | tag = 1; 141 | MPI::COMM_WORLD.Recv ( &my_a, 1, MPI::DOUBLE, source, tag, status ); 142 | 143 | source = 0; 144 | tag = 2; 145 | MPI::COMM_WORLD.Recv ( &my_b, 1, MPI::DOUBLE, source, tag, status ); 146 | 147 | my_total = 0.0; 148 | for ( i = 1; i <= my_n; i++ ) 149 | { 150 | x = ( ( double ) ( my_n - i ) * my_a 151 | + ( double ) ( i - 1 ) * my_b ) 152 | / ( double ) ( my_n - 1 ); 153 | my_total = my_total + f ( x ); 154 | } 155 | 156 | my_total = ( my_b - my_a ) * my_total / ( double ) ( my_n ); 157 | 158 | cout << " Process " << id << " contributed MY_TOTAL = " 159 | << my_total << "\n"; 160 | } 161 | // 162 | // Each process sends its value to the master process. 163 | // 164 | MPI::COMM_WORLD.Reduce ( &my_total, &total, 1, MPI::DOUBLE, MPI::SUM, 0 ); 165 | // 166 | // Compute the weighted estimate. 167 | // 168 | if ( id == 0 ) 169 | { 170 | error = fabs ( total - exact ); 171 | wtime = MPI::Wtime ( ) - wtime; 172 | 173 | cout << "\n"; 174 | cout << " Estimate = " << setw(24) << setprecision(16) << total << "\n"; 175 | cout << " Error = " << error << "\n"; 176 | cout << " Time = " << wtime << "\n"; 177 | } 178 | // 179 | // Terminate MPI. 180 | // 181 | MPI::Finalize ( ); 182 | // 183 | // Terminate. 184 | // 185 | if ( id == 0 ) 186 | { 187 | cout << "\n"; 188 | cout << "QUAD_MPI:\n"; 189 | cout << " Normal end of execution.\n"; 190 | cout << "\n"; 191 | timestamp ( ); 192 | } 193 | return 0; 194 | } 195 | //****************************************************************************80 196 | 197 | double f ( double x ) 198 | 199 | //****************************************************************************80 200 | // 201 | // Purpose: 202 | // 203 | // F evaluates the function. 
204 | // 205 | { 206 | double pi; 207 | double value; 208 | 209 | pi = 3.141592653589793; 210 | value = 50.0 / ( pi * ( 2500.0 * x * x + 1.0 ) ); 211 | 212 | return value; 213 | } 214 | //****************************************************************************80 215 | 216 | void timestamp ( ) 217 | 218 | //****************************************************************************80 219 | // 220 | // Purpose: 221 | // 222 | // TIMESTAMP prints the current YMDHMS date as a time stamp. 223 | // 224 | // Example: 225 | // 226 | // 31 May 2001 09:45:54 AM 227 | // 228 | // Licensing: 229 | // 230 | // This code is distributed under the GNU LGPL license. 231 | // 232 | // Modified: 233 | // 234 | // 08 July 2009 235 | // 236 | // Author: 237 | // 238 | // John Burkardt 239 | // 240 | // Parameters: 241 | // 242 | // None 243 | // 244 | { 245 | # define TIME_SIZE 40 246 | 247 | static char time_buffer[TIME_SIZE]; 248 | const struct std::tm *tm_ptr; 249 | size_t len; 250 | std::time_t now; 251 | 252 | now = std::time ( NULL ); 253 | tm_ptr = std::localtime ( &now ); 254 | 255 | len = std::strftime ( time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm_ptr ); 256 | 257 | std::cout << time_buffer << "\n"; 258 | 259 | return; 260 | # undef TIME_SIZE 261 | } 262 | -------------------------------------------------------------------------------- /redistribute-test.R: -------------------------------------------------------------------------------- 1 | library(pbdDMAT, quiet = TRUE) 2 | init.grid() 3 | 4 | data = matrix(2*comm.rank()+1, nrow = 10, ncol = 2) 5 | data[,2] <- 2*comm.rank()+2 6 | data[10,2] <- rnorm(1) 7 | 8 | dx <- ddmatrix ( data = data, nrow = 10, ncol = 10, bldim = c(10, 2), ICTXT = 1) 9 | dx2 <- redistribute(dx, bldim = c(2, 2), ICTXT = 0) 10 | 11 | # matrix should have all 1s in first column, all 2s in second, all 3s in third, etc, but with the last row for the 2nd, 4th, 6th, 8th, 10th columns having random normals 12 | 13 | print(dx) 14 | x <- as.matrix(dx) 15 | comm.print(x) 16 | print(dx2) 17 | x2 <- as.matrix(dx2) 18 | comm.print(x2) 19 | 20 | finalize() 21 | -------------------------------------------------------------------------------- /sockets.R: -------------------------------------------------------------------------------- 1 | ## @knitr sockets 2 | 3 | library(parallel) 4 | 5 | machines = c(rep("beren.berkeley.edu", 1), 6 | rep("gandalf.berkeley.edu", 1), 7 | rep("arwen.berkeley.edu", 2)) 8 | cl = makeCluster(machines) 9 | cl 10 | 11 | n = 1e7 12 | clusterExport(cl, c('n')) 13 | 14 | fun = function(i) 15 | out = mean(rnorm(n)) 16 | 17 | result <- parSapply(cl, 1:20, fun) 18 | 19 | result[1:5] 20 | 21 | stopCluster(cl) # not strictly necessary 22 | 23 | --------------------------------------------------------------------------------