├── .gitignore
├── 1a.c
├── 1b.c
├── 1c.c
├── 2a.c
├── 2b.c
├── 3a.c
├── 3b.c
├── 3c.c
├── 4a.c
├── 4b.c
├── 5a.c
├── 5b.c
├── 6a.c
├── 6b.c
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
*.out
target
--------------------------------------------------------------------------------
/1a.c:
--------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define ARRAY_SIZE 8
#define NUM_THREADS 4

int main(int argc, char *argv[])
{
    int *a;
    int *b;
    int *c;

    int n = ARRAY_SIZE;
    int n_per_thread;
    int total_threads = NUM_THREADS;
    int i;

    // allocate space for the arrays
    a = (int *)malloc(sizeof(int) * n);
    b = (int *)malloc(sizeof(int) * n);
    c = (int *)malloc(sizeof(int) * n);

    for (i = 0; i < n; i++)
    {
        a[i] = i;
    }
    for (i = 0; i < n; i++)
    {
        b[i] = i;
    }

    omp_set_num_threads(total_threads);

    n_per_thread = n / total_threads;

    #pragma omp parallel for shared(a, b, c) private(i) schedule(static, n_per_thread)
    for (i = 0; i < n; i++)
    {
        c[i] = a[i] + b[i];
        printf("Thread %d works on element %d\n", omp_get_thread_num(), i);
    }

    printf("i\ta[i]\t+\tb[i]\t=\tc[i]\n");
    for (i = 0; i < n; i++)
    {
        printf("%d\t%d\t\t%d\t\t%d\n", i, a[i], b[i], c[i]);
    }

    free(a);
    free(b);
    free(c);

    return 0;
}
--------------------------------------------------------------------------------
/1b.c:
--------------------------------------------------------------------------------
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 10

int main(int argc, char *argv[])
{
    int nthreads, tid, i, chunk;
    float a[N], b[N], c[N];

    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;
    chunk = CHUNKSIZE;

    #pragma omp parallel shared(a, b, c, nthreads, chunk) private(i, tid)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n", tid);

        #pragma omp for schedule(dynamic, chunk)
        for (i = 0; i < N; i++)
        {
            c[i] = a[i] + b[i];
            printf("Thread %d: c[%d]= %f\n", tid, i, c[i]);
        }
    }
    return 0;
}
--------------------------------------------------------------------------------
/1c.c:
--------------------------------------------------------------------------------
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 10
int main(int argc, char *argv[])
{
    int i, nthreads, tid;
    float a[N], b[N], c[N], d[N];

    for (i = 0; i < N; i++)
    {
        a[i] = i * 1.5;
        b[i] = i + 22.35;
        c[i] = d[i] = 0.0;
    }
    #pragma omp parallel shared(a, b, c, d, nthreads) private(i, tid)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n", tid);
        #pragma omp sections nowait
        {
            #pragma omp section
            {
                printf("Thread %d doing section 1\n", tid);
                for (i = 0; i < N; i++)
                {
                    c[i] = a[i] + b[i];
                    printf("Thread %d: c[%d]= %f\n", tid, i, c[i]);
                }
            }
            #pragma omp section
            {
                printf("Thread %d doing section 2\n", tid);
                for (i = 0; i < N; i++)
                {
                    d[i] = a[i] * b[i];
                    printf("Thread %d: d[%d]= %f\n", tid, i, d[i]);
                }
            }
        }
        printf("Thread %d done.\n", tid);
    }
    return 0;
}
--------------------------------------------------------------------------------
/2a.c:
--------------------------------------------------------------------------------
#include <stdio.h>
#include <omp.h>
int main()
{
    int n, a[100], i;
    omp_set_num_threads(2);
    printf("Enter the number of terms up to which you want to generate the Fibonacci series\n");
    scanf("%d", &n);
    a[0] = 0;
    a[1] = 1;
    #pragma omp parallel
    {
        #pragma omp single
        for (i = 2; i < n; i++)
        {
            a[i] = a[i - 2] + a[i - 1];
            printf("Id of thread involved in the computation of fib number %d is=%d\n", i + 1, omp_get_thread_num());
        }
        #pragma omp barrier
        #pragma omp single
        {
            printf("The series is:\n");
            for (i = 0; i < n; i++)
                printf("%d \t Id of the thread displaying this number is = %d\n", a[i], omp_get_thread_num());
        }
    }
    return 0;
}
--------------------------------------------------------------------------------
/2b.c:
--------------------------------------------------------------------------------
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define NRA 20
#define NCA 20
#define NCB 20
int main(int argc, char *argv[])
{
    int tid, nthreads, i, j, k, chunk;
    double a[NRA][NCA], b[NCA][NCB], c[NRA][NCB];
    chunk = 5;
    #pragma omp parallel shared(a, b, c, nthreads, chunk) private(tid, i, j, k)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Starting matrix multiply example with %d threads\n", nthreads);
            printf("Initializing matrices...\n");
        }
        #pragma omp for schedule(static, chunk)
        for (i = 0; i < NRA; i++)
            for (j = 0; j < NCA; j++)
                a[i][j] = i + j;
        #pragma omp for schedule(static, chunk)
        for (i = 0; i < NCA; i++)
            for (j = 0; j < NCB; j++)
                b[i][j] = i * j;
        #pragma omp for schedule(static, chunk)
        for (i = 0; i < NRA; i++)
            for (j = 0; j < NCB; j++)
                c[i][j] = 0;

        printf("Thread %d starting matrix multiply...\n", tid);
        #pragma omp for schedule(static, chunk)
        for (i = 0; i < NRA; i++)
        {
            printf("Thread=%d did row=%d\n", tid, i);
            for (j = 0; j < NCB; j++)
                for (k = 0; k < NCA; k++)
                    c[i][j] += a[i][k] * b[k][j];
        }
    }

    printf("******************************************************\n");
    printf("Result Matrix:\n");
    for (i = 0; i < NRA; i++)
    {
        for (j = 0; j < NCB; j++)
            printf("%.0lf ", c[i][j]);
        printf("\n");
    }
    printf("******************************************************\n");
    printf("Done.\n");
    return 0;
}
--------------------------------------------------------------------------------
/3a.c:
--------------------------------------------------------------------------------
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[])
{
    int i, n, tid;
    float a[100], b[100], sum;
    n = 12;
    for (i = 0; i < n; i++)
        a[i] = b[i] = i * 1.0;
    sum = 0.0;
    #pragma omp parallel for private(tid) reduction(+ : sum)
    for (i = 0; i < n; i++)
    {
        tid = omp_get_thread_num();
        sum = sum + (a[i] * b[i]);
        printf(" tid= %d i=%d\n", tid, i);
    }
    printf(" Sum = %f\n", sum);
    return 0;
}
--------------------------------------------------------------------------------
/3b.c:
--------------------------------------------------------------------------------
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 12
float a[VECLEN], b[VECLEN], sum;
void dotprod(void)
{
    int i, tid;
    tid = omp_get_thread_num();
    #pragma omp for reduction(+ : sum)
    for (i = 0; i < VECLEN; i++)
    {
        sum = sum + (a[i] * b[i]);
        printf(" tid= %d i=%d\n", tid, i);
    }
}
int main(int argc, char *argv[])
{
    int i;
    for (i = 0; i < VECLEN; i++)
        a[i] = b[i] = 1.0 * i;
    sum = 0.0;
    #pragma omp parallel
    dotprod();
    printf("Sum = %f\n", sum);
    return 0;
}
--------------------------------------------------------------------------------
/3c.c:
--------------------------------------------------------------------------------
#include <stdio.h>
#include <omp.h>
#include <limits.h>

int main()
{
    int arr[10];
    omp_set_num_threads(4);
    int max_val = INT_MIN;
    int min_val = INT_MAX;
    int i;
    printf("Enter 10 values: ");
    for (i = 0; i < 10; i++)
        scanf("%d", &arr[i]);
    #pragma omp parallel for reduction(max : max_val)
    for (i = 0; i < 10; i++)
    {
        printf("Thread id = %d and i = %d\n", omp_get_thread_num(), i);
        if (arr[i] > max_val)
        {
            max_val = arr[i];
        }
    }
    #pragma omp parallel for reduction(min : min_val)
    for (i = 0; i < 10; i++)
    {
        if (arr[i] < min_val)
        {
            min_val = arr[i];
        }
    }
    printf("\nmax_val = %d", max_val);
    printf("\nmin_val = %d\n", min_val);
    return 0;
}
--------------------------------------------------------------------------------
/4a.c:
--------------------------------------------------------------------------------
#include "stdio.h"
#include "stdlib.h"
#include "mpi.h"
#include "math.h"

int main(int argc, char **argv)
{
    int myid, numprocs;
    int tag, source, destination, count;
    int buffer;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    tag = 1;
    source = 0;
    destination = 1;
    count = 1;
    if (myid == source)
    {
        buffer = 100;
        MPI_Send(&buffer, count, MPI_INT, destination, tag, MPI_COMM_WORLD);
        printf("processor %d sent %d\n", myid, buffer);
    }
    if (myid == destination)
    {
        MPI_Recv(&buffer, count, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
        printf("processor %d got %d\n", myid, buffer);
    }
    MPI_Finalize();
    return 0;
}
--------------------------------------------------------------------------------
/4b.c:
--------------------------------------------------------------------------------
#include "stdio.h"
#include "mpi.h"
#include "stdlib.h"

int main(int argc, char *argv[])
{
    const int tag = 42;           /* Message tag */
    int id, ntasks, source_id, dest_id, err, i;
    MPI_Status status;
    int msg[2];                   /* Message array */
    err = MPI_Init(&argc, &argv); /* Initialize MPI */
    if (err != MPI_SUCCESS)
    {
        printf("MPI initialization failed!\n");
        exit(1);
    }
    err = MPI_Comm_size(MPI_COMM_WORLD, &ntasks); /* Get nr of tasks */
    err = MPI_Comm_rank(MPI_COMM_WORLD, &id);     /* Get id of this process */
    if (ntasks < 2)
    {
        printf("You have to use at least 2 processors to run this program\n");
        MPI_Finalize(); /* Quit if there is only one processor */
        exit(0);
    }

    if (id == 0)
    {
        /* Process 0 (the receiver) does this */
        for (i = 1; i < ntasks; i++)
        {
            err = MPI_Recv(msg, 2, MPI_INT, MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status); /* Receive a message */
            source_id = status.MPI_SOURCE;                                                 /* Get id of sender */
            printf("Received message %d %d from process %d\n", msg[0], msg[1], source_id);
        }
    }
    else
    {
        /* Processes 1 to N-1 (the senders) do this */
        msg[0] = id;     /* Put own identifier in the message */
        msg[1] = ntasks; /* and total number of processes */
        dest_id = 0;     /* Destination address */
        err = MPI_Send(msg, 2, MPI_INT, dest_id, tag, MPI_COMM_WORLD);
    }
    if (id == 0)
        printf("Ready\n");
    err = MPI_Finalize(); /* Terminate MPI */
    return 0;
}
--------------------------------------------------------------------------------
/5a.c:
--------------------------------------------------------------------------------
#include "stdio.h"
#include "stdlib.h"
#include "mpi.h"

void my_bcast(void *data, int count, MPI_Datatype datatype, int root, MPI_Comm communicator)
{
    int world_rank;
    MPI_Comm_rank(communicator, &world_rank);
    int world_size;
    MPI_Comm_size(communicator, &world_size);

    if (world_rank == root)
    {
        // If we are the root process, send our data to everyone
        int i;
        for (i = 0; i < world_size; i++)
        {
            if (i != world_rank)
            {
                MPI_Send(data, count, datatype, i, 0, communicator);
            }
        }
    }
    else
    {
        // If we are a receiver process, receive the data from the root
        MPI_Recv(data, count, datatype, root, 0, communicator, MPI_STATUS_IGNORE);
    }
}
int main(int argc, char **argv)
{
    MPI_Init(NULL, NULL);
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int data;
    if (world_rank == 0)
    {
        data = 101;
        printf("Process 0 broadcasting data %d\n", data);
        my_bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }
    else
    {
        my_bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);
        printf("Process %d received data %d from root process\n",
               world_rank, data);
    }
    MPI_Finalize();
    return 0;
}
--------------------------------------------------------------------------------
/5b.c:
--------------------------------------------------------------------------------
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

/* Define length of dot product vectors */
#define VECLEN 10

int main(int argc, char *argv[])
{
    int i, myid, numprocs, len = VECLEN;
    double *a, *b;
    double mysum, allsum;
    /* MPI Initialization */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    /* Each MPI task performs the dot product, obtains its partial sum,
       and then calls MPI_Reduce to obtain the global sum. */
    if (myid == 0)
        printf("Starting omp_dotprod_mpi. Using %d tasks...\n", numprocs);

    /* Assign storage for dot product vectors */
    a = (double *)malloc(len * sizeof(double));
    b = (double *)malloc(len * sizeof(double));
    /* Initialize dot product vectors */
    for (i = 0; i < len; i++)
    {
        a[i] = 1.0;
        b[i] = a[i];
    }
    /* Perform the dot product */
    mysum = 0.0;
    for (i = 0; i < len; i++)
    {
        mysum += a[i] * b[i];
    }

    printf("Task %d partial sum = %f\n", myid, mysum);
    /* After the dot product, perform a summation of results on each node */
    MPI_Reduce(&mysum, &allsum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (myid == 0)
        printf("Done. MPI version: global sum = %f \n", allsum);
    free(a);
    free(b);
    MPI_Finalize();
    return 0;
}
--------------------------------------------------------------------------------
/6a.c:
--------------------------------------------------------------------------------
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>

#define NRA 62        /* number of rows in matrix A */
#define NCA 15        /* number of columns in matrix A */
#define NCB 7         /* number of columns in matrix B */
#define MASTER 0      /* taskid of first task */
#define FROM_MASTER 1 /* setting a message type */
#define FROM_WORKER 2 /* setting a message type */

int main(int argc, char *argv[])
{
    int numtasks,              /* number of tasks in partition */
        taskid,                /* a task identifier */
        numworkers,            /* number of worker tasks */
        source,                /* task id of message source */
        dest,                  /* task id of message destination */
        mtype,                 /* message type */
        rows,                  /* rows of matrix A sent to each worker */
        averow, extra, offset, /* used to determine rows sent to each worker */
        i, j, k;               /* misc */
    double a[NRA][NCA],        /* matrix A to be multiplied */
        b[NCA][NCB],           /* matrix B to be multiplied */
        c[NRA][NCB];           /* result matrix C */
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    if (numtasks < 2)
    {
        printf("Need at least two MPI tasks. Quitting...\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
        exit(1);
    }
    numworkers = numtasks - 1;

    /**************************** master task ************************************/
    if (taskid == MASTER)
    {
        printf("mpi_mm has started with %d tasks.\n", numtasks);
        printf("Initializing arrays...\n");
        for (i = 0; i < NRA; i++)
            for (j = 0; j < NCA; j++)
                a[i][j] = i + j;
        for (i = 0; i < NCA; i++)
            for (j = 0; j < NCB; j++)
                b[i][j] = i * j;

        /* Send matrix data to the worker tasks */
        averow = NRA / numworkers;
        extra = NRA % numworkers;
        offset = 0;
        mtype = FROM_MASTER;
        for (dest = 1; dest <= numworkers; dest++)
        {
            rows = (dest <= extra) ? averow + 1 : averow;
            printf("Sending %d rows to task %d offset=%d\n", rows, dest, offset);
            MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&a[offset][0], rows * NCA, MPI_DOUBLE, dest, mtype,
                     MPI_COMM_WORLD);
            MPI_Send(&b, NCA * NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
            offset = offset + rows;
        }

        /* Receive results from worker tasks */
        mtype = FROM_WORKER;
        for (i = 1; i <= numworkers; i++)
        {
            source = i;
            MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
            MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
            MPI_Recv(&c[offset][0], rows * NCB, MPI_DOUBLE, source, mtype,
                     MPI_COMM_WORLD, &status);
            printf("Received results from task %d\n", source);
        }

        /* Print results */
        printf("******************************************************\n");
        printf("Result Matrix:\n");
        for (i = 0; i < NRA; i++)
        {
            printf("\n");
            for (j = 0; j < NCB; j++)
                printf("%6.2f ", c[i][j]);
        }
        printf("\n******************************************************\n");
        printf("Done.\n");
    }

    /**************************** worker task ************************************/
    if (taskid > MASTER)
    {
        mtype = FROM_MASTER;
        MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&a, rows * NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&b, NCA * NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);

        for (k = 0; k < NCB; k++)
            for (i = 0; i < rows; i++)
            {
                c[i][k] = 0.0;
                for (j = 0; j < NCA; j++)
                    c[i][k] = c[i][k] + a[i][j] * b[j][k];
            }
        mtype = FROM_WORKER;
        MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
        MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
        MPI_Send(&c, rows * NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
    }
    MPI_Finalize();
    return 0;
}
--------------------------------------------------------------------------------
/6b.c:
--------------------------------------------------------------------------------
#include <mpi.h>
#include <stdio.h>
#include <math.h>

int main(int argc, char *argv[])
{
    int done = 0, n, myid, numprocs, i;
    double PI25DT = 3.141592653589793238462643;
    double mypi, pi, h, sum, x;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    while (!done)
    {
        if (myid == 0)
        {
            printf("Enter the number of intervals: (0 quits) ");
            scanf("%d", &n);
        }
        MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
        if (n == 0)
            break;

        /* Midpoint rule for pi = integral from 0 to 1 of 4/(1+x^2) dx;
           each rank sums every numprocs-th subinterval. */
        h = 1.0 / (double)n;
        sum = 0.0;
        for (i = myid + 1; i <= n; i += numprocs)
        {
            x = h * ((double)i - 0.5);
            sum += 4.0 / (1.0 + x * x);
        }
        mypi = h * sum;

        MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0,
                   MPI_COMM_WORLD);

        if (myid == 0)
            printf("pi is approximately %.16f, Error is %.16f\n",
                   pi, fabs(pi - PI25DT));
    }
    MPI_Finalize();
    return 0;
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Parallel & Distributed Computing Programs
Programs written in C with OpenMP or MPI.

**To run OpenMP programs**

```bash
$ gcc <filename>.c -fopenmp
$ ./a.out
```

**To run MPI programs**
```bash
$ mpicc -o <executable> <filename>.c
$ mpirun -np <number of processes> ./<executable>
```

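For example, to build and run one OpenMP program and one MPI program from this repo (this assumes `gcc` with OpenMP support and an MPI implementation such as Open MPI or MPICH providing `mpicc`/`mpirun`; the process count of 2 is just an example):

```bash
# OpenMP: compile 1a.c with OpenMP enabled, then run the binary
$ gcc 1a.c -fopenmp -o 1a.out
$ ./1a.out

# MPI: compile 4a.c, then launch it with 2 processes
# (4a.c sends from rank 0 to rank 1, so it needs at least 2)
$ mpicc -o 4a.out 4a.c
$ mpirun -np 2 ./4a.out
```

The `.out` binaries are ignored by git via the `*.out` rule in `.gitignore`.
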
### Questions:

1. a) Using OpenMP, design, develop and run a multi-threaded program to perform and print vector addition.

   b) Using OpenMP, design, develop and run a multi-threaded program to perform loop work-sharing.

   c) Using OpenMP, design, develop and run a multi-threaded program to perform sections work-sharing.

2. a) Using OpenMP, design, develop and run a multi-threaded program to generate and print the Fibonacci series. One thread has to generate the numbers up to the specified limit and another thread has to print them.

   b) Using OpenMP, design, develop and run a multi-threaded program to perform matrix multiplication.

3. a) Using OpenMP, design, develop and run a multi-threaded program to perform a combined parallel loop reduction.

   b) Using OpenMP, design, develop and run a multi-threaded program to perform an orphaned parallel loop reduction.

   c) Write a parallel loop that computes the maximum and minimum values in an array.

4. a) Using MPI, design, develop and run a simple send/receive communication program: initialize MPI, transfer the data from source to destination, then finalize (quit) MPI.

   b) Using MPI in Visual Studio, design, develop and run message-passing mechanisms.

5. a) Using MPI, design, develop and run broadcast communication (MPI_Bcast) using MPI_Send and MPI_Recv.

   b) Using MPI, design, develop and run reduce communication for vector addition (MPI_Reduce) using MPI_Send and MPI_Recv.

6. a) Using MPI, design, develop and run matrix multiplication using MPI_Send and MPI_Recv. In this code, the master task distributes a matrix multiply operation to numtasks-1 worker tasks.

   b) Using MPI, design, develop and run a program that computes the value of pi using MPI_Send and MPI_Recv.

--------------------------------------------------------------------------------