├── GraphGeneratorExternal.cpp
├── PBFSWithGraphGeneration.c
├── ParallelBFS_usingMPI_Parallel_IO.c
├── README.md
└── SequentialBFS.c

/GraphGeneratorExternal.cpp:
--------------------------------------------------------------------------------
/* Erdos-Renyi graph generator: writes the n x n adjacency matrix of a random
   G(n, p) graph to a binary file, one byte per entry (0x01 edge, 0x00 no edge),
   one vertex row at a time.  Header names and template arguments were stripped
   from this copy of the file and are reconstructed from usage. */
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/erdos_renyi_generator.hpp>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <random>
#include <tuple>
#include <vector>


int main(int argc, char* argv[]) {
    /* bidirectionalS is assumed because in_edges() is used below */
    typedef boost::adjacency_list<boost::vecS, boost::vecS, boost::bidirectionalS> Graph;
    typedef boost::erdos_renyi_iterator<std::mt19937, Graph> erg;

    if (argc < 3) return -1;

    long int n = std::atoi(argv[1]);    // number of vertices
    double eps = std::atof(argv[2]);    // expected average degree; edge probability p = eps / n

    std::random_device rd;
    int seed = -1;

    long int s = 0;
    if (argc > 3) seed = std::atoi(argv[3]);
    if (argc == 5) s = std::atol(argv[4]);

    if (seed == -1) seed = rd();
    double p = (eps / n);
    std::ofstream myfile;
    myfile.open(argv[5], std::ios::binary);   // output file path (note: using argv[5] requires at least 6 arguments)

    std::mt19937 rng(seed);
    Graph G(erg(rng, n, p), erg(), n);

    boost::graph_traits<Graph>::vertex_iterator e, end;
    std::tie(e, end) = boost::vertices(G);

    boost::graph_traits<Graph>::in_edge_iterator inEdge, inEdgeEnd;

    boost::graph_traits<Graph>::edge_iterator temp, temp_end;
    std::tie(temp, temp_end) = boost::edges(G);
    int *buf = new int[n];
    int k = 0, pos = 0;
    std::vector<char> buffer(32 * 1024 * 1024 * 50);   // staging buffer for file output
    char one[1], zero[1];
    one[0] = '\x1';
    zero[0] = '\x0';
    long long int edgeCount = 0;
    for (; e != end; ++e) {
        std::fill_n(buf, n, 0);
        std::tie(inEdge, inEdgeEnd) = boost::in_edges(*e, G);
        for (; inEdge != inEdgeEnd; ++inEdge) {
            int index_of_source = boost::source(*inEdge, G);
            buf[index_of_source] = 1;
        }
        /* The body of this loop is missing from this copy; reconstructed from the
           surrounding variables: emit one byte per matrix entry and flush the
           buffer to the file when it is nearly full. */
        for (k = 0; k < n; k++) {
            if (buf[k] == 1) {
                buffer[pos] = one[0];
                edgeCount++;
            } else {
                buffer[pos] = zero[0];
            }
            pos++;
            if (pos >= buffer.size() - 8) {
                myfile.write(buffer.data(), pos * sizeof(char));
                pos = 0;
            }
        }
    }
    myfile.write(reinterpret_cast<char*>(buffer.data()), pos * sizeof(char));
    myfile.close();
    std::cout << edgeCount << std::endl;   /* reconstructed: report the number of edges written */
    return 0;
}
--------------------------------------------------------------------------------
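The generator above streams the whole n x n adjacency matrix as one byte per entry, row by row, so the byte for entry (i, j) sits at offset i*n + j in the output file (that layout is partly reconstructed above, so treat it as an assumption). The short C program below is not part of the repository; it is a minimal reader under that assumed layout that fetches a single matrix row and prints its degree. The program name and its arguments are placeholders.

#include <stdio.h>
#include <stdlib.h>

/* Minimal sanity check for the generator's output, assuming one byte per
 * adjacency-matrix entry, stored row-major.  Usage: ./readrow <file> <n> <row> */
int main(int argc, char *argv[]) {
    if (argc < 4) { fprintf(stderr, "usage: %s <file> <n> <row>\n", argv[0]); return 1; }

    long long n   = atoll(argv[2]);
    long long row = atoll(argv[3]);
    FILE *fp = fopen(argv[1], "rb");
    if (!fp) { perror("fopen"); return 1; }

    unsigned char *buf = malloc((size_t) n);
    if (!buf) { perror("malloc"); return 1; }

    /* Row i starts at byte offset i*n because every entry is exactly one byte wide.
       For matrices larger than 2 GiB, fseeko() would be needed instead of fseek(). */
    if (fseek(fp, (long)(row * n), SEEK_SET) != 0) { perror("fseek"); return 1; }
    if (fread(buf, 1, (size_t) n, fp) != (size_t) n) { fprintf(stderr, "short read\n"); return 1; }

    long long degree = 0;
    for (long long j = 0; j < n; j++)
        if (buf[j] == 1) degree++;        /* the generator writes 0x01 for an edge */
    printf("row %lld has %lld neighbours\n", row, degree);

    free(buf);
    fclose(fp);
    return 0;
}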
/PBFSWithGraphGeneration.c:
--------------------------------------------------------------------------------
/* Parallel BFS with in-process graph generation: the adjacency matrix is 2-D
   partitioned across a sqrt(P) x sqrt(P) processor grid and every processor fills
   its own sub-matrix with random edges before the traversal starts.  Large parts
   of this file are missing from this copy; the gaps are marked below.  Header
   names were stripped and are reconstructed from usage. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include "math.h"
#include <stdbool.h>

bool isClear(int[], long int, long int, long int);
long long int getIndex(long long int i, long long int j, long long int rowSize);

int main (int argc, char *argv[])
{
  unsigned long long int rowNo, columnNo;          // row and column of this processor in the 2-D partition.
  unsigned long long int noofPRows;                // Number of processor rows in the 2-D partition.
  unsigned long long int noofVertices;             // Given input vertex count of the graph.
  unsigned long long int noofVerticesPerProcessor; // Number of vertices a processor owns after 2-D partitioning (size of the distributed frontier vector).
  unsigned long long int NVertices;                // Normalized vertex count for distributing the input matrix uniformly across processors.
  int rank, numtasks;                              // rank of the processor in MPI_COMM_WORLD.

  MPI_Init(&argc, &argv);                          // Start of parallel execution
  double t1, t2;                                   // used to track execution time
  t1 = MPI_Wtime();                                // start of the data set-up phase
  MPI_Status status;
  MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  noofPRows = (int) sqrt(numtasks);                // number of processor rows in the 2-D partition; also equal to the number of processor columns.
  noofVertices = atoll(argv[1]);                   // size of the input graph (vertex count)
  NVertices = ceil(noofVertices*1.0/numtasks) * numtasks; // normalize the vertex count so the adjacency matrix divides evenly across processors.

  int dens = atoi(argv[3]);                        // desired density of the graph to be generated
  noofVerticesPerProcessor = (int) ceil(NVertices/(noofPRows*noofPRows)); // number of vertices in the sub adjacency matrix stored at each processor.

  /* Initializing all variables for graph traversal */
  unsigned long long int i, j, k;
  int* F;                                          // Global Frontier Vector.
  F = (int *) malloc(sizeof(int)*NVertices);
  for(i=0; i<NVertices; i++)                       /* loop bound and body reconstructed */
      F[i] = 0;

  /* ... missing in this copy: the remaining vector initialization, the 2-D process-grid
     set-up (rowNo/columnNo and the row/column communicators) and the allocation of the
     local sub adjacency matrix ... */

  if(columnNo >= rowNo){                           /* condition reconstructed from a surviving ">= rowNo" fragment */
      for(i2=0; i2< noofVerticesinRowofProcessor*noofVerticesinRowofProcessor; i2++){
          int rand1 = rand()%100;
          if(rand1 < dens){                        /* comparison reconstructed: keep an entry with probability dens/100 */

  /* ... missing in this copy: the rest of the generation block, a block guarded by
     "rowNo > columnNo" that loops over r, the remaining set-up and the start of the
     BFS level loop ... */

          if(rowNo > columnNo) {                   /* condition reconstructed from a surviving "> columnNo" fragment */
              MPI_Send(send_buffer, noofVerticesPerProcessor, MPI_INT, (columnNo-1)*noofPRows+rowNo-1, 123, MPI_COMM_WORLD);
              MPI_Recv(Fij, noofVerticesPerProcessor, MPI_INT, (columnNo-1)*noofPRows+rowNo-1, 123, MPI_COMM_WORLD, &status);
          } else {
              MPI_Recv(Fij, noofVerticesPerProcessor, MPI_INT, (columnNo-1)*noofPRows+rowNo-1, 123, MPI_COMM_WORLD, &status);
              MPI_Send(send_buffer, noofVerticesPerProcessor, MPI_INT, (columnNo-1)*noofPRows+rowNo-1, 123, MPI_COMM_WORLD);
          }
      }
      MPI_Allgather(Fij, noofVerticesPerProcessor, MPI_INT, rec_buffer, noofVerticesPerProcessor, MPI_INT, colComm);

      // computing the next frontier; the algorithm is explained in detail in the description and the report.
      int val=0;

  /* ... missing in this copy: the loop over the local sub-matrix that multiplies it with
     the gathered frontier and accumulates val for each local column j ... */

              if(val > 0){                         /* condition reconstructed */
                  Tij[j]=1;
              }
          }
      }

  /* ... the remainder of the file is missing in this copy, including the frontier/parent
     update, the isClear() termination test, the timing output, MPI_Finalize and the
     definitions of isClear() and getIndex() ... */
--------------------------------------------------------------------------------
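The per-level frontier exchange in the file above (and repeated in the next file) sends the local frontier slice to the processor holding the transposed block of the matrix, rank (columnNo-1)*noofPRows+rowNo-1, and orders the paired MPI_Send/MPI_Recv by grid position so the two sides do not block each other. The code that derives rowNo and columnNo sits in one of the missing regions, so the mapping in the sketch below is an assumption: a 1-based, row-major rank layout, which is the layout consistent with that target-rank formula (under a column-major numbering the formula would address the sender itself). The sketch is not part of the repository.

#include <stdio.h>
#include <math.h>

/* Sketch of the rank <-> grid mapping implied by the exchange step above.
 * Assumption (the real set-up code is missing from this copy): ranks are laid out
 * row-major with 1-based grid coordinates, i.e. rank = (rowNo-1)*noofPRows + (columnNo-1).
 * Under that layout, (columnNo-1)*noofPRows + (rowNo-1) is exactly the rank that
 * owns the transposed block, and diagonal processors are their own partner. */
int main(void) {
    int numtasks = 16;                              /* example: a 4 x 4 processor grid */
    int noofPRows = (int) sqrt(numtasks);

    for (int rank = 0; rank < numtasks; rank++) {
        int rowNo    = rank / noofPRows + 1;        /* assumed 1-based row index */
        int columnNo = rank % noofPRows + 1;        /* assumed 1-based column index */
        int partner  = (columnNo - 1) * noofPRows + (rowNo - 1);   /* target rank used in the MPI_Send above */
        printf("rank %2d -> (row %d, col %d), transpose partner %2d\n",
               rank, rowNo, columnNo, partner);
    }
    return 0;
}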
/ParallelBFS_usingMPI_Parallel_IO.c:
--------------------------------------------------------------------------------
/* Parallel BFS over a pre-generated graph read with MPI parallel I/O.  Large parts of
   this file are missing from this copy; the gaps are marked below.  Header names were
   stripped and are reconstructed from usage. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include "math.h"
#include <stdbool.h>

/*
 * This method checks the frontier vector F for all 0's, which indicates that the
 * entire graph has been traversed.
 */
bool isClear(int[], long int, long int, long int);

/*
 * Main start of the program.
 * Notation used in the code below:
 * F -> Frontier vector identifying the nodes to be traversed in the next level.
 * T -> Temporary vector holding the next level's nodes.
 * P -> Parent vector holding the nodes already visited.
 * Aij -> Adjacency sub-matrix local to each processor.
 */
int main (int argc, char *argv[])
{
  unsigned long long int rowNo, columnNo;
  unsigned long long int noofPRows, noofVertices, noofVerticesPerProcessor;
  double t1, t2;
  int rank, numtasks, size;
  MPI_Init(&argc, &argv);
  t1 = MPI_Wtime();

  MPI_Status status;
  MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  noofPRows = (int) sqrt(numtasks);
  noofVertices = atoll(argv[2]);
  noofVerticesPerProcessor = (int) ceil(atoi(argv[2])/(noofPRows*noofPRows));

  unsigned long long int i, j, k;
  int* F;
  F = (int *) malloc(sizeof(int)*noofVertices);
  for(i=0; i<noofVertices; i++)                    /* loop bound and body reconstructed */
      F[i] = 0;

  /* ... missing in this copy: the remaining set-up, the parallel read of the local
     adjacency sub-matrix, and the start of the BFS level loop ... */

          if(rowNo > columnNo) {                   /* condition reconstructed from a surviving "> columnNo" fragment */
              MPI_Send(send_buffer, noofVerticesPerProcessor, MPI_INT, (columnNo-1)*noofPRows+rowNo-1, 123, MPI_COMM_WORLD);
              MPI_Recv(Fij, noofVerticesPerProcessor, MPI_INT, (columnNo-1)*noofPRows+rowNo-1, 123, MPI_COMM_WORLD, &status);
          } else {
              MPI_Recv(Fij, noofVerticesPerProcessor, MPI_INT, (columnNo-1)*noofPRows+rowNo-1, 123, MPI_COMM_WORLD, &status);
              MPI_Send(send_buffer, noofVerticesPerProcessor, MPI_INT, (columnNo-1)*noofPRows+rowNo-1, 123, MPI_COMM_WORLD);
          }
      }
      MPI_Allgather(Fij, noofVerticesPerProcessor, MPI_INT, rec_buffer, noofVerticesPerProcessor, MPI_INT, colComm);

      int val=0;

  /* ... missing in this copy: the loop over the local sub-matrix that accumulates val
     for each local column j ... */

              if(val > 0){                         /* condition reconstructed */
                  Tij[j]=1;
              }
          }
      }

  /* ... the remainder of the file is missing in this copy ... */
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
(The README content is missing from this copy of the repository dump.)
--------------------------------------------------------------------------------
/SequentialBFS.c:
--------------------------------------------------------------------------------
/* Sequential BFS baseline over a randomly generated MAX x MAX adjacency matrix.
   Header names were stripped from this copy and are reconstructed from usage. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX 262144

typedef enum boolean{false, true} bool;
char** adj;
bool visited[ MAX ];

void create_graph();
void bfs(int source);
void display();

int main(){
    int i, source = 1;
    create_graph();
    for ( i = 0; i < MAX; i++ )
        visited[ i ] = false;
    clock_t start = clock(), diff;
    bfs(source);
    diff = clock() - start;
    int msec = diff * 1000 / CLOCKS_PER_SEC;
    printf("Time taken %d seconds %d milliseconds", msec/1000, msec%1000);
    return 0;
} /*End of main()*/

void create_graph(){
    adj = (char **) malloc(sizeof(char*)*MAX);
    int ind=0;
    for(;ind<MAX;ind++)                            /* loop bound reconstructed */
        adj[ind] = (char *) malloc(sizeof(char)*MAX);   /* reconstructed: allocate one matrix row per vertex */

    /* ... missing in this copy: the declarations of i2, j2, noofOnes and dens, and the
       nested loops over the matrix entries that draw rand1 = rand()%100 ... */

            if(rand1 >= dens){
                adj[i2][j2] = 1;
                adj[j2][i2] = 1;
                noofOnes++;
            }else{
                adj[i2][j2] = 0;
                adj[j2][i2] = 0;
            }
        }
    }
} /*End of create_graph()*/

void display()
{
    int i, j;
    printf("inside display\n");
    for ( i = 0; i < MAX; i++ )
    {
        for ( j = 0; j < MAX; j++ )
            printf( "%4d", adj[ i ][ j ] );

        printf( "\n" );
    }
} /*End of display()*/


void bfs( int v )
{
    long long int i, front, rear;
    int* que = (int*) malloc(sizeof(int)*MAX);
    front = rear = -1;
    printf( "%d ", v );
    visited[ v ] = true;
    rear++;
    front++;
    que[ rear ] = v;
    while ( front <= rear ){
        v = que[ front ]; /* delete from queue */
        front++;
        for ( i = 0; i < MAX; i++ ){
            /* Check for adjacent unvisited nodes */
            if ( adj[ v ][ i ] == 1 && visited[ i ] == false ){
                visited[ i ] = true;
                rear++;
                que[ rear ] = i;
            }
        }
    }
} /*End of bfs()*/