├── .gitignore ├── LICENSE ├── README.md ├── simpleProblem.mps └── src ├── cudaCheck.cuh ├── lpProblem.cu ├── lpProblem.cuh ├── main.cu ├── print.cu ├── print.cuh ├── simplex.cu └── simplex.cuh /.gitignore: -------------------------------------------------------------------------------- 1 | # build 2 | *.d 3 | *.i 4 | *.o 5 | *.ii 6 | *.gpu 7 | *.ptx 8 | *.cubin 9 | *.fatbin 10 | 11 | # IDE 12 | .project 13 | .cproject 14 | Debug/ 15 | Release/ 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 
31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. 
You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 
108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 
145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cuda-revised-simplex 2 | 3 | An implementation of the revised simplex algorithm in CUDA for solving linear optimization problems in the form `max{c*x | A*x=b, l<=x<=u}`. The LP problem is read from an MPS file by GLPK. 4 | 5 | The implementation uses the following data structures. The coefficient matrix and objective function values are split into two parts. One contains the basic variables and the other the nonbasic variables. The matrices are stored in array format and CSR format. 
The CSR format is used for calculations and the array format for copying of columns. The CSR representation is updated at the end of each iteration. Temporary variables are allocated once at the beginning and reused throughout the iterations. All values are stored in double precision. 6 | 7 | The LP problem is transformed into standard form. Lower bounds are eliminated via a shift. Upper bounds are integrated by addition of new equations. Negative right hand sides are eliminated by multiplication of the row with -1. 8 | 9 | The implemented simplex algorithm is as follows. The current basis solution is calculated by solving the system of linear equations `A_B*x_B=b` with a QR decomposition from the cuSolver library. The basis solution can also be calculated by updating the old basis solution via `x_B_i=x_B_i-s_i*x_B_row/s_row, x_B_row=x_B_row/s_row`. Afterwards reduced costs are calculated in two steps. First the system of linear equations `A_B^T*s=c_B` is solved with QR decomposition. The routine can't handle matrix transpose implicitly, so the CSR matrix is transposed explicitly by converting it into a CSC format and reinterpreting it as CSR format. Afterwards the reduced costs are calculated with a matrix-vector product with cuSparse library `g=A_NB^T*s-c_NB`. The entering variable is chosen via Dantzig's or steepest-edge rule. Dantzig's rule chooses the variable with the most negative reduced cost `column={i|min{g_i}, g_i<0}`. This is implemented as a parallel reduction over the values of `g`. The values aren't compared directly to zero but rather to a small tolerance like `-10^-12`. This is necessary because of numerical inaccuracy, whereby very small values can arise that would be zero when solved exactly. The steepest-edge rule scales the values of `g` with the norm of the corresponding column of the nonbasic matrix `g_i=g_i/||A_NB_i||`. The norm is calculated as the square root of the sum of squares `||v||=sqrt(sum_i v_i^2)` with the CSR representation. 
For this purpose the square of each element is added atomically on the corresponding position. After all elements are processed the values of the vector `g` are divided by the square root of the calculated values. The steepest edge rule has in general a better convergence behaviour and therefore takes fewer iterations. Here the smallest negative value is also used which is calculated by a parallel reduction. The current solution is optimal if such a value isn't found. The leaving variable is calculated with the minimum ratio test. Therefore the equation system `A_B*s=A_NB_column` is solved with a QR decomposition from the cuSolver library. The needed column of the nonbasic matrix is copied from the array representation. The leaving row is calculated by a parallel reduction of the ratio of basic variable and the corresponding values of `s`: `row={i|min{x_B_i/s_i}, s_i>0}`. The values of `s` are also not directly checked for positivity but rather if they are above a certain tolerance like `10^-12`. The LP problem is unbounded if such a value isn't found. After choosing entering and leaving variable, the data structures need to be updated. For this the corresponding columns in the array format of basic and nonbasic matrix are swapped. Afterwards the CSR representation is updated. Also the corresponding entries in basic and nonbasic objective function values are swapped. 10 | 11 | ## Build 12 | 13 | ``` 14 | nvcc -o cuda-revised-simplex src/*.cu -lcusolver -lcusparse -lglpk -arch=sm_35 --relocatable-device-code=true -O3 15 | ``` 16 | 17 | ## Run 18 | 19 | ``` 20 | ./cuda-revised-simplex simpleProblem.mps 21 | ``` 22 | 23 | The solution for the simple example should be 3.3333 for X8 (index [7]) (the objective function). Different values are possible for other variables because the problem has multiple optimal solutions. 
24 | -------------------------------------------------------------------------------- /simpleProblem.mps: -------------------------------------------------------------------------------- 1 | NAME MINIEXAMPLE 2 | ROWS 3 | N COST 4 | E EQ1 5 | E EQ2 6 | E EQ3 7 | E EQ4 8 | COLUMNS 9 | X1 EQ1 1 10 | X2 EQ1 -2 EQ2 1 11 | X3 EQ2 -1 EQ4 1 12 | X4 EQ1 -2 EQ3 1 13 | X5 EQ3 -1 EQ4 1 14 | X6 EQ3 1 EQ4 -1 15 | X7 EQ4 1 16 | X8 COST 1 EQ1 -1 17 | X8 EQ3 -2 EQ4 -2 18 | RHS 19 | BOUNDS 20 | UP BND1 X1 10 21 | UP BND1 X2 1000 22 | UP BND1 X3 1000 23 | UP BND1 X4 1000 24 | UP BND1 X5 1000 25 | LO BND1 X6 -1000 26 | UP BND1 X6 1000 27 | LO BND1 X7 -1000 28 | UP BND1 X7 10 29 | UP BND1 X8 1000 30 | ENDATA 31 | -------------------------------------------------------------------------------- /src/cudaCheck.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Defines CUDA runtime error checking 3 | */ 4 | 5 | #ifndef CUDACHECK_CUH_ 6 | #define CUDACHECK_CUH_ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | //#define NDEBUG // include to remove asserts and cudaCheck 14 | #define cudaCheck(call) __cudaCheck(call, __FILE__, __LINE__) 15 | 16 | inline void __cudaCheck(cudaError err, const char* file, int line) { 17 | #ifndef NDEBUG 18 | if (err != cudaSuccess) { 19 | fprintf(stderr, "%s(%d): CUDA error: %s\n", file, line, 20 | cudaGetErrorString(err)); 21 | exit(EXIT_FAILURE); 22 | } 23 | #endif 24 | } 25 | 26 | #define cuSparseCheck(call) __cuSparseCheck(call, __FILE__, __LINE__) 27 | 28 | inline void __cuSparseCheck(cusparseStatus_t err, const char* file, int line) { 29 | #ifndef NDEBUG 30 | if (err != CUSPARSE_STATUS_SUCCESS) { 31 | fprintf(stderr, "%s(%d): CUSPARSE error: %s\n", file, line, err); 32 | exit(EXIT_FAILURE); 33 | } 34 | #endif 35 | } 36 | 37 | #define cuSolverCheck(call) __cuSolverCheck(call, __FILE__, __LINE__) 38 | 39 | inline void __cuSolverCheck(cusolverStatus_t err, const char* file, int line) { 40 | 
#ifndef NDEBUG 41 | if (err != CUSOLVER_STATUS_SUCCESS) { 42 | fprintf(stderr, "%s(%d): CUSOLVER error: %s\n", file, line, err); 43 | exit(EXIT_FAILURE); 44 | } 45 | #endif 46 | } 47 | 48 | #endif /* CUDACHECK_CUH_ */ 49 | -------------------------------------------------------------------------------- /src/lpProblem.cu: -------------------------------------------------------------------------------- 1 | /** 2 | * Implements LP problem 3 | */ 4 | 5 | #include 6 | #include 7 | 8 | #include "cudaCheck.cuh" 9 | #include "lpProblem.cuh" 10 | #include "print.cuh" 11 | 12 | void readMPS(char *mpsFile, LPProblem *lpProblem) { 13 | glp_prob *lp = glp_create_prob(); 14 | glp_read_mps(lp, GLP_MPS_FILE, NULL, mpsFile); 15 | lpProblem->rows = glp_get_num_rows(lp); 16 | lpProblem->columns = glp_get_num_cols(lp); 17 | lpProblem->nnz = glp_get_num_nz(lp); 18 | lpProblem->isBasisAllocated = false; 19 | 20 | 21 | cudaCheck( 22 | cudaMallocManaged(&lpProblem->A, lpProblem->rows * lpProblem->columns * sizeof(double))); 23 | cudaCheck(cudaMallocManaged(&lpProblem->b, lpProblem->rows * sizeof(double))); 24 | cudaCheck(cudaMallocManaged(&lpProblem->c, lpProblem->columns * sizeof(double))); 25 | cudaCheck( 26 | cudaMallocManaged(&lpProblem->lowerBound, 27 | lpProblem->columns * sizeof(double))); 28 | cudaCheck( 29 | cudaMallocManaged(&lpProblem->upperBound, 30 | lpProblem->columns * sizeof(double))); 31 | 32 | for (int32_t i = 0; i < lpProblem->rows * lpProblem->columns; i++) { 33 | lpProblem->A[i] = 0.; 34 | } 35 | int32_t *indices = (int32_t *) malloc(lpProblem->columns * sizeof(int32_t)); 36 | double *values = (double *) malloc(lpProblem->columns * sizeof(double)); 37 | // for glpk i + 1 (indices one-based) 38 | for (int32_t i = 0; i < lpProblem->rows; i++) { 39 | if (glp_get_row_type(lp, i + 1) == GLP_FR) { 40 | // ignore cost row 41 | printf("shouldn't be here!!\n"); 42 | continue; 43 | } 44 | int32_t numberValues = glp_get_mat_row(lp, i + 1, indices, values); 45 | for (uint32_t j = 
0; j < numberValues; j++) { 46 | lpProblem->A[i * lpProblem->columns + indices[j + 1] - 1] = values[j + 1]; 47 | } 48 | } 49 | free(indices); 50 | free(values); 51 | 52 | for (int32_t i = 0; i < lpProblem->rows; i++) { 53 | // constraints are expected to be in form A*x=b 54 | if (glp_get_row_type(lp, i + 1) == GLP_FX) { 55 | lpProblem->b[i] = glp_get_row_lb(lp, i + 1); 56 | } else { 57 | printf("Can only handle constraints in form A*x=b!"); 58 | exit(EXIT_FAILURE); 59 | } 60 | } 61 | 62 | for (uint32_t i = 0; i < lpProblem->columns; i++) { 63 | lpProblem->c[i] = glp_get_obj_coef(lp, i + 1); 64 | } 65 | 66 | for (int32_t i = 0; i < lpProblem->columns; i++) { 67 | lpProblem->lowerBound[i] = glp_get_col_lb(lp, i + 1); 68 | lpProblem->upperBound[i] = glp_get_col_ub(lp, i + 1); 69 | } 70 | 71 | glp_delete_prob(lp); 72 | glp_free_env(); 73 | } 74 | 75 | void convertToStandardform(LPProblem *source, LPProblem *converted) { 76 | converted->isBasisAllocated = source->isBasisAllocated; 77 | converted->rows= source->rows + source->columns; 78 | converted->columns = source->columns * 2; 79 | converted->nnz = source->nnz + source->columns * 2; 80 | cudaCheck(cudaMallocManaged(&converted->A, converted->rows * converted->columns * sizeof(double))); 81 | for (int32_t i = 0; i < converted->rows; i++) { 82 | for (int32_t j = 0; j < converted->columns; j++) { 83 | if (i < source->rows && j < source->columns) { 84 | converted->A[i * converted->columns + j] = source->A[i * source->columns + j]; 85 | } else if(i < source->rows) { 86 | converted->A[i * converted->columns + j] = 0; 87 | } else if(j < source->columns) { 88 | if (i - source->rows == j) { 89 | converted->A[i * converted->columns + j] = 1; 90 | } else { 91 | converted->A[i * converted->columns + j] = 0; 92 | } 93 | } else { 94 | if (i - source->rows == j - source->columns) { 95 | converted->A[i * converted->columns + j] = 1; 96 | } else { 97 | converted->A[i * converted->columns + j] = 0; 98 | } 99 | } 100 | } 101 | } 102 | 
cudaCheck(cudaMallocManaged(&converted->b, converted->rows * sizeof(double))); 103 | for (int32_t i = 0; i < converted->rows; i++) { 104 | if (i < source->rows) { 105 | converted->b[i] = source->b[i]; 106 | for (int32_t j = 0; j < source->columns; j++) { 107 | converted->b[i] -= source->A[i * source->columns + j] * source->lowerBound[j]; 108 | } 109 | } else { 110 | converted->b[i] = source->upperBound[i - source->rows] - source->lowerBound[i - source->rows]; 111 | } 112 | if (converted->b[i] < 0) { 113 | converted->b[i] = -converted->b[i]; 114 | for (int j = 0; j < converted->columns; j++) { 115 | converted->A[i * converted->columns + j] = -converted->A[i * converted->columns + j]; 116 | } 117 | } 118 | } 119 | cudaCheck(cudaMallocManaged(&converted->c, converted->columns * sizeof(double))); 120 | for (int32_t i = 0; i < converted->columns; i++) { 121 | if (i < source->columns) { 122 | converted->c[i] = source->c[i]; 123 | } else { 124 | converted->c[i] = 0; 125 | } 126 | } 127 | } 128 | 129 | void copyLPProblem(LPProblem *source, LPProblem *destination) { 130 | if (source->isBasisAllocated) { 131 | initializeLPProblem(destination, source->rows, source->columns, source->nnz); 132 | destination->isBasisAllocated = true; 133 | destination->rows = source->rows; 134 | destination->columns = source->columns; 135 | destination->nnz = source->nnz; 136 | cudaCheck(cudaMemcpy(destination->A, source->A, destination->rows * destination->columns * sizeof(double), cudaMemcpyDeviceToDevice)); 137 | cudaCheck(cudaMemcpy(destination->b, source->b, destination->rows * sizeof(double), cudaMemcpyDeviceToDevice)); 138 | cudaCheck(cudaMemcpy(destination->c, source->c, destination->columns * sizeof(double), cudaMemcpyDeviceToDevice)); 139 | 140 | destination->nnzAB = source->nnzAB; 141 | cudaCheck(cudaMemcpy(destination->AB, source->AB, destination->rows * destination->rows * sizeof(double), cudaMemcpyDeviceToDevice)); 142 | cudaCheck(cudaMemcpy(destination->ABRowPointer, 
source->ABRowPointer, (destination->rows + 1) * sizeof(int32_t), cudaMemcpyDeviceToDevice)); 143 | cudaCheck(cudaMemcpy(destination->ABColumnIndices, source->ABColumnIndices, destination->nnz * sizeof(int32_t), cudaMemcpyDeviceToDevice)); 144 | cudaCheck(cudaMemcpy(destination->ABValues, source->ABValues, destination->nnz * sizeof(double), cudaMemcpyDeviceToDevice)); 145 | 146 | cudaCheck(cudaMemcpy(destination->ABTRowPointer, source->ABTRowPointer, (destination->rows + 1) * sizeof(int32_t), cudaMemcpyDeviceToDevice)); 147 | cudaCheck(cudaMemcpy(destination->ABTColumnIndices, source->ABTColumnIndices, destination->nnz * sizeof(int32_t), cudaMemcpyDeviceToDevice)); 148 | cudaCheck(cudaMemcpy(destination->ABTValues, source->ABTValues, destination->nnz * sizeof(double), cudaMemcpyDeviceToDevice)); 149 | 150 | destination->nnzANB = source->nnzANB; 151 | cudaCheck(cudaMemcpy(destination->ANB, source->ANB, destination->rows * (destination->columns - destination->rows) * sizeof(double), cudaMemcpyDeviceToDevice)); 152 | cudaCheck(cudaMemcpy(destination->ANBRowPointer, source->ANBRowPointer, (destination->rows + 1) * sizeof(int32_t), cudaMemcpyDeviceToDevice)); 153 | cudaCheck(cudaMemcpy(destination->ANBColumnIndices, source->ANBColumnIndices, destination->nnz * sizeof(int32_t), cudaMemcpyDeviceToDevice)); 154 | cudaCheck(cudaMemcpy(destination->ANBValues, source->ANBValues, destination->nnz * sizeof(double), cudaMemcpyDeviceToDevice)); 155 | 156 | cudaCheck(cudaMemcpy(destination->cB, source->cB, destination->rows * sizeof(double), cudaMemcpyDeviceToDevice)); 157 | cudaCheck(cudaMemcpy(destination->cBIndex, source->cBIndex, destination->rows * sizeof(int32_t), cudaMemcpyDeviceToDevice)); 158 | cudaCheck(cudaMemcpy(destination->cNB, source->cNB, (destination->columns - destination->rows) * sizeof(double), cudaMemcpyDeviceToDevice)); 159 | cudaCheck(cudaMemcpy(destination->cNBIndex, source->cNBIndex, (destination->columns - destination->rows) * sizeof(int32_t), 
cudaMemcpyDeviceToDevice)); 160 | 161 | 162 | cudaCheck(cudaMemcpy(destination->xB, source->xB, destination->rows * sizeof(double), cudaMemcpyDeviceToDevice)); 163 | cudaCheck(cudaMemcpy(destination->xIndex, source->xIndex, destination->columns * sizeof(int32_t), cudaMemcpyDeviceToDevice)); 164 | cudaCheck(cudaMemcpy(destination->lowerBound, source->lowerBound, destination->columns * sizeof(double), cudaMemcpyDeviceToDevice)); 165 | cudaCheck(cudaMemcpy(destination->upperBound, source->upperBound, destination->columns * sizeof(double), cudaMemcpyDeviceToDevice)); 166 | } else { 167 | destination->isBasisAllocated = false; 168 | destination->rows = source->rows; 169 | destination->columns = source->columns; 170 | destination->nnz = source->nnz; 171 | cudaCheck(cudaMallocManaged(&destination->A, destination->rows * destination->columns * sizeof(double))); 172 | cudaCheck(cudaMemcpy(destination->A, source->A, destination->rows * destination->columns * sizeof(double), cudaMemcpyDeviceToDevice)); 173 | cudaCheck(cudaMallocManaged(&destination->b, destination->rows * sizeof(double))); 174 | cudaCheck(cudaMemcpy(destination->b, source->b, destination->rows * sizeof(double), cudaMemcpyDeviceToDevice)); 175 | cudaCheck(cudaMallocManaged(&destination->c, destination->columns * sizeof(double))); 176 | cudaCheck(cudaMemcpy(destination->c, source->c, destination->columns * sizeof(double), cudaMemcpyDeviceToDevice)); 177 | } 178 | } 179 | 180 | void deleteLPProblem(LPProblem *lpProblem) { 181 | cudaFree(lpProblem->A); 182 | cudaFree(lpProblem->b); 183 | cudaFree(lpProblem->c); 184 | cudaFree(lpProblem->lowerBound); 185 | cudaFree(lpProblem->upperBound); 186 | if (lpProblem->isBasisAllocated) { 187 | cudaFree(lpProblem->AB); 188 | cudaFree(lpProblem->ABRowPointer); 189 | cudaFree(lpProblem->ABColumnIndices); 190 | cudaFree(lpProblem->ABValues); 191 | cudaFree(lpProblem->ABTRowPointer); 192 | cudaFree(lpProblem->ABTColumnIndices); 193 | cudaFree(lpProblem->ABTValues); 194 | 
cudaFree(lpProblem->ANB); 195 | cudaFree(lpProblem->ANBRowPointer); 196 | cudaFree(lpProblem->ANBColumnIndices); 197 | cudaFree(lpProblem->ANBValues); 198 | cudaFree(lpProblem->cB); 199 | cudaFree(lpProblem->cBIndex); 200 | cudaFree(lpProblem->cNB); 201 | cudaFree(lpProblem->cNBIndex); 202 | cudaFree(lpProblem->xB); 203 | cudaFree(lpProblem->xIndex); 204 | cudaFree(lpProblem->s); 205 | cudaFree(lpProblem->g); 206 | cudaFree(lpProblem->gTemp); 207 | cudaFree(lpProblem->ANBColumn); 208 | cudaFree(lpProblem->nnzPerRow); 209 | cudaFree(lpProblem->row); 210 | cudaFree(lpProblem->column); 211 | } 212 | free(lpProblem); 213 | } 214 | 215 | void initializeLPProblem(LPProblem *lpProblem, int32_t rows, int32_t columns, 216 | int32_t nnz) { 217 | lpProblem->isBasisAllocated = true; 218 | lpProblem->rows = rows; 219 | lpProblem->columns = columns; 220 | lpProblem->nnz = nnz; 221 | cudaCheck( 222 | cudaMallocManaged(&lpProblem->A, rows * columns * sizeof(double))); 223 | cudaCheck(cudaMallocManaged(&lpProblem->b, rows * sizeof(double))); 224 | cudaCheck(cudaMallocManaged(&lpProblem->c, columns * sizeof(double))); 225 | cudaCheck( 226 | cudaMallocManaged(&lpProblem->lowerBound, 227 | columns * sizeof(double))); 228 | cudaCheck( 229 | cudaMallocManaged(&lpProblem->upperBound, 230 | columns * sizeof(double))); 231 | 232 | cudaCheck(cudaMallocManaged(&lpProblem->AB, rows * rows * sizeof(double))); 233 | cudaCheck( 234 | cudaMallocManaged(&lpProblem->ABRowPointer, 235 | (rows + 1) * sizeof(int32_t))); 236 | cudaCheck( 237 | cudaMallocManaged(&lpProblem->ABColumnIndices, 238 | nnz * sizeof(int32_t))); 239 | cudaCheck(cudaMallocManaged(&lpProblem->ABValues, nnz * sizeof(double))); 240 | 241 | cudaCheck( 242 | cudaMallocManaged(&lpProblem->ABTRowPointer, 243 | (rows + 1) * sizeof(int32_t))); 244 | cudaCheck( 245 | cudaMallocManaged(&lpProblem->ABTColumnIndices, 246 | nnz * sizeof(int32_t))); 247 | cudaCheck(cudaMallocManaged(&lpProblem->ABTValues, nnz * sizeof(double))); 248 | 249 | 
cudaCheck( 250 | cudaMallocManaged(&lpProblem->ANB, 251 | rows * (columns - rows) * sizeof(double))); 252 | cudaCheck( 253 | cudaMallocManaged(&lpProblem->ANBRowPointer, 254 | (rows + 1) * sizeof(int32_t))); 255 | cudaCheck( 256 | cudaMallocManaged(&lpProblem->ANBColumnIndices, 257 | nnz * sizeof(int32_t))); 258 | cudaCheck(cudaMallocManaged(&lpProblem->ANBValues, nnz * sizeof(double))); 259 | 260 | cudaCheck(cudaMallocManaged(&lpProblem->cB, rows * sizeof(double))); 261 | cudaCheck(cudaMallocManaged(&lpProblem->cBIndex, rows * sizeof(int32_t))); 262 | cudaCheck( 263 | cudaMallocManaged(&lpProblem->cNB, 264 | (columns - rows) * sizeof(double))); 265 | cudaCheck( 266 | cudaMallocManaged(&lpProblem->cNBIndex, 267 | (columns - rows) * sizeof(int32_t))); 268 | 269 | cudaCheck(cudaMallocManaged(&lpProblem->xB, rows * sizeof(double))); 270 | cudaCheck(cudaMallocManaged(&lpProblem->xIndex, columns * sizeof(int32_t))); 271 | cudaCheck(cudaMallocManaged(&lpProblem->s, rows * sizeof(double))); 272 | cudaCheck( 273 | cudaMallocManaged(&lpProblem->g, 274 | (columns - rows) * sizeof(double))); 275 | cudaCheck( 276 | cudaMallocManaged(&lpProblem->gTemp, 277 | (columns - rows) * sizeof(double))); 278 | cudaCheck(cudaMallocManaged(&lpProblem->ANBColumn, rows * sizeof(double))); 279 | cudaCheck(cudaMallocManaged(&lpProblem->nnzPerRow, rows * sizeof(int32_t))); 280 | 281 | cudaCheck(cudaMallocManaged(&lpProblem->row, sizeof(int32_t))); 282 | cudaCheck(cudaMallocManaged(&lpProblem->column, sizeof(int32_t))); 283 | } 284 | -------------------------------------------------------------------------------- /src/lpProblem.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Defines LP Problem. 
3 | */ 4 | 5 | #ifndef LPPROBLEM_CUH_ 6 | #define LPPROBLEM_CUH_ 7 | 8 | #include 9 | #include 10 | 11 | typedef struct LPProblem { 12 | bool isBasisAllocated; 13 | bool isOptimal; 14 | bool isUnbounded; 15 | bool isSolution; 16 | 17 | int32_t rows; 18 | int32_t columns; 19 | int32_t nnz; 20 | double *A; 21 | 22 | int32_t nnzAB; 23 | double *AB; 24 | int32_t *ABRowPointer; 25 | int32_t *ABColumnIndices; 26 | double *ABValues; 27 | 28 | int32_t *ABTRowPointer; 29 | int32_t *ABTColumnIndices; 30 | double *ABTValues; 31 | 32 | int32_t nnzANB; 33 | double *ANB; 34 | int32_t *ANBRowPointer; 35 | int32_t *ANBColumnIndices; 36 | double *ANBValues; 37 | 38 | double *b; 39 | 40 | double *c; 41 | double *cB; 42 | int32_t *cBIndex; 43 | double *cNB; 44 | int32_t *cNBIndex; 45 | 46 | double *xB; 47 | int32_t *xIndex; 48 | double *lowerBound; 49 | double *upperBound; 50 | 51 | double *s; 52 | double *g; 53 | double *gTemp; 54 | double *ANBColumn; 55 | int32_t *nnzPerRow; 56 | int32_t *row; 57 | int32_t *column; 58 | } LPProblem; 59 | 60 | void readMPS(char *mpsFile, LPProblem *lpProblem); 61 | 62 | void convertToStandardform(LPProblem *source, LPProblem *converted); 63 | 64 | void copyLPProblem(LPProblem *source, LPProblem *destination); 65 | 66 | void deleteLPProblem(LPProblem *lpProblem); 67 | 68 | void initializeLPProblem(LPProblem *lpProblem, int32_t rows, int32_t columns, 69 | int32_t nnz); 70 | 71 | #endif /* LPPROBLEM_CUH_ */ 72 | -------------------------------------------------------------------------------- /src/main.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "cudaCheck.cuh" 6 | #include "lpProblem.cuh" 7 | #include "print.cuh" 8 | #include "simplex.cuh" 9 | 10 | int32_t main(int32_t argc, char *argv[]) { 11 | cusolverSpHandle_t cusolverHandle; 12 | cuSolverCheck(cusolverSpCreate(&cusolverHandle)); 13 | cusparseHandle_t cusparseHandle; 14 | 
cuSparseCheck(cusparseCreate(&cusparseHandle)); 15 | 16 | cusparseMatDescr_t matrixDescriptor; 17 | cuSparseCheck(cusparseCreateMatDescr(&matrixDescriptor)); 18 | cuSparseCheck( 19 | cusparseSetMatType(matrixDescriptor, CUSPARSE_MATRIX_TYPE_GENERAL)); 20 | cuSparseCheck( 21 | cusparseSetMatIndexBase(matrixDescriptor, 22 | CUSPARSE_INDEX_BASE_ZERO)); 23 | 24 | LPProblem *lpProblem = (LPProblem *) malloc(sizeof(LPProblem)); 25 | readMPS(argv[1], lpProblem); 26 | LPProblem *lpProblemMod = (LPProblem *) malloc(sizeof(LPProblem)); 27 | convertToStandardform(lpProblem, lpProblemMod); 28 | findBFS(lpProblemMod, cusolverHandle, cusparseHandle, matrixDescriptor); 29 | LPProblem *lpProblemCopy = (LPProblem *) malloc(sizeof(LPProblem)); 30 | copyLPProblem(lpProblemMod, lpProblemCopy); 31 | deleteLPProblem(lpProblem); 32 | deleteLPProblem(lpProblemMod); 33 | deleteLPProblem(lpProblemCopy); 34 | 35 | cudaCheck(cudaDeviceReset()); 36 | 37 | return EXIT_SUCCESS; 38 | } 39 | -------------------------------------------------------------------------------- /src/print.cu: -------------------------------------------------------------------------------- 1 | /** 2 | * Implements print. 
3 | */ 4 | 5 | #include 6 | 7 | void printIntArray(int32_t *array, int32_t length) { 8 | for (int32_t i = 0; i < length; i++) { 9 | printf("%d,", array[i]); 10 | } 11 | printf("\n"); 12 | } 13 | 14 | void printDoubleArray(double *array, int32_t length) { 15 | for (int32_t i = 0; i < length; i++) { 16 | printf("%g,", array[i]); 17 | } 18 | printf("\n"); 19 | } 20 | 21 | void printCSRMatrix(int32_t *RowPointer, int32_t *ColumnIndices, double *Values, 22 | int32_t rows) { 23 | for (int32_t i = 0; i <= rows; i++) { 24 | printf("%d,", RowPointer[i]); 25 | } 26 | printf("\n"); 27 | for (int32_t i = 0; i < RowPointer[rows]; i++) { 28 | printf("%d,", ColumnIndices[i]); 29 | } 30 | printf("\n"); 31 | for (int32_t i = 0; i < RowPointer[rows]; i++) { 32 | printf("%g,", Values[i]); 33 | } 34 | printf("\n"); 35 | } 36 | 37 | void printMatrix(double *matrix, int32_t rows, int32_t columns) { 38 | for (int32_t row = 0; row < rows; row++) { 39 | for (int32_t column = 0; column < columns; column++) { 40 | printf("%g,", matrix[row * columns + column]); 41 | } 42 | printf("\n"); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/print.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Defines print. 3 | */ 4 | 5 | #ifndef PRINT_CUH_ 6 | #define PRINT_CUH_ 7 | 8 | void printIntArray(int32_t *array, int32_t length); 9 | 10 | void printDoubleArray(double *array, int32_t length); 11 | 12 | void printCSRMatrix(int32_t *RowPointer, int32_t *ColumnIndices, double *Values, 13 | int32_t rows); 14 | 15 | void printMatrix(double *matrix, int32_t rows, int32_t columns); 16 | 17 | #endif /* PRINT_CUH_ */ 18 | -------------------------------------------------------------------------------- /src/simplex.cu: -------------------------------------------------------------------------------- 1 | /** 2 | * Implements simplex algorithm. 
3 | */ 4 | 5 | #include "simplex.cuh" 6 | 7 | #include 8 | #include 9 | 10 | #include "cudaCheck.cuh" 11 | #include "print.cuh" 12 | 13 | __device__ double atomicAdd(double *address, double val) { 14 | unsigned long long int* address_as_ull = (unsigned long long int*) address; 15 | unsigned long long int old = *address_as_ull, assumed; 16 | if (val == 0.0) 17 | return __longlong_as_double(old); 18 | do { 19 | assumed = old; 20 | old = atomicCAS(address_as_ull, assumed, 21 | __double_as_longlong(val + __longlong_as_double(assumed))); 22 | } while (assumed != old); 23 | return __longlong_as_double(old); 24 | } 25 | 26 | __device__ void minimumIndex(double *value1, int32_t *index1, double value2, 27 | int32_t index2) { 28 | if (*value1 > value2) { 29 | *value1 = value2; 30 | *index1 = index2; 31 | } 32 | } 33 | 34 | __device__ void warpReduceMinIndex(double *value, int32_t *index) { 35 | // see https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ 36 | for (int32_t offset = warpSize / 2; offset > 0; offset /= 2) { 37 | double shuffleValue = __shfl_down(*value, offset, warpSize); 38 | double shuffleIndex = __shfl_down(*index, offset, warpSize); 39 | minimumIndex(value, index, shuffleValue, shuffleIndex); 40 | }; 41 | } 42 | 43 | __device__ void blockReduceMinGIndex(double *g, int32_t columns, 44 | double *blockData, int32_t *blockIndex, double *localData, 45 | int32_t *localIndex) { 46 | // see https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ 47 | int32_t warpIndex = threadIdx.x / warpSize; 48 | int32_t warpLane = threadIdx.x % warpSize; 49 | 50 | *localData = DBL_MAX; 51 | *localIndex = -1; 52 | for (int32_t i = threadIdx.x; i < columns; i += blockDim.x) { 53 | // not 0 because of numerical errors 54 | if (g[i] < -1.e-4) { 55 | minimumIndex(localData, localIndex, g[i], i); 56 | } 57 | } 58 | warpReduceMinIndex(localData, localIndex); 59 | if (warpLane == 0) { 60 | blockData[warpIndex] = *localData; 61 | 
blockIndex[warpIndex] = *localIndex; 62 | } 63 | 64 | __syncthreads(); 65 | 66 | if (threadIdx.x < blockDim.x / warpSize) { 67 | *localData = blockData[warpLane]; 68 | *localIndex = blockIndex[warpLane]; 69 | } else { 70 | *localData = DBL_MAX; 71 | *localIndex = -1; 72 | } 73 | if (warpIndex == 0) { 74 | warpReduceMinIndex(localData, localIndex); 75 | } 76 | } 77 | 78 | __device__ void blockReduceMinSIndex(double *s, double *xB, int32_t rows, 79 | double *blockData, int32_t *blockIndex, double *localData, 80 | int32_t *localIndex) { 81 | // see https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ 82 | int32_t warpIndex = threadIdx.x / warpSize; 83 | int32_t warpLane = threadIdx.x % warpSize; 84 | 85 | *localData = DBL_MAX; 86 | *localIndex = -1; 87 | for (int32_t i = threadIdx.x; i < rows; i += blockDim.x) { 88 | // not 0 because of numerical errors 89 | if (s[i] > 1.e-4) { 90 | minimumIndex(localData, localIndex, xB[i] / s[i], i); 91 | } 92 | } 93 | warpReduceMinIndex(localData, localIndex); 94 | if (warpLane == 0) { 95 | blockData[warpIndex] = *localData; 96 | blockIndex[warpIndex] = *localIndex; 97 | } 98 | 99 | __syncthreads(); 100 | 101 | if (threadIdx.x < blockDim.x / warpSize) { 102 | *localData = blockData[warpLane]; 103 | *localIndex = blockIndex[warpLane]; 104 | } else { 105 | *localData = DBL_MAX; 106 | *localIndex = -1; 107 | } 108 | if (warpIndex == 0) { 109 | warpReduceMinIndex(localData, localIndex); 110 | } 111 | } 112 | 113 | __global__ void copyColumnDouble(double *fromMatrix, double *to, int32_t rows, 114 | int32_t columns, int32_t column) { 115 | int32_t id = blockIdx.x * blockDim.x + threadIdx.x; 116 | for (int32_t i = id; i < rows; i += gridDim.x * blockDim.x) { 117 | to[i] = fromMatrix[i * columns + column]; 118 | } 119 | } 120 | 121 | __global__ void copyColumnDouble2(double *fromANB, double *toANB, int32_t rows, 122 | int32_t fromColumns, int32_t toColumns, double *fromCNB, double *toCNB, 123 | int32_t 
*fromCNBIndex, int32_t *toCNBIndex, int32_t fromColumn, int32_t toColumn) { 124 | int32_t id = blockIdx.x * blockDim.x + threadIdx.x; 125 | for (int32_t i = id; i < rows; i += gridDim.x * blockDim.x) { 126 | toANB[i * toColumns + toColumn] = fromANB[i * fromColumns + fromColumn]; 127 | } 128 | if (id == 0) { 129 | toCNB[toColumn] = fromCNB[fromColumn]; 130 | toCNBIndex[toColumn] = fromCNBIndex[fromColumn]; 131 | } 132 | } 133 | 134 | __global__ void copyNegativeDouble(double *from, double *to, int32_t elements) { 135 | int32_t id = blockIdx.x * blockDim.x + threadIdx.x; 136 | for (int32_t i = id; i < elements; i += gridDim.x * blockDim.x) { 137 | to[i] = -from[i]; 138 | } 139 | } 140 | 141 | __global__ void minG(double *g, int32_t columns, int32_t *column) { 142 | extern __shared__ double blockData[]; 143 | int32_t *blockIndex = (int32_t *) &blockData[blockDim.x / 32]; 144 | 145 | double localData; 146 | int32_t localIndex; 147 | blockReduceMinGIndex(g, columns, blockData, blockIndex, &localData, 148 | &localIndex); 149 | 150 | if (threadIdx.x == 0) { 151 | *column = localIndex; 152 | } 153 | } 154 | 155 | __global__ void minS(double *s, double *xB, int32_t columns, int32_t *row) { 156 | extern __shared__ double blockData[]; 157 | int32_t *blockIndex = (int32_t *) &blockData[blockDim.x / 32]; 158 | 159 | double localData; 160 | int32_t localIndex; 161 | blockReduceMinSIndex(s, xB, columns, blockData, blockIndex, &localData, 162 | &localIndex); 163 | 164 | if (threadIdx.x == 0) { 165 | *row = localIndex; 166 | } 167 | } 168 | 169 | __global__ void steepestEdge(double *gTemp, int32_t *ANBColumnIndices, 170 | double *ANBValues, int32_t columns, int32_t nnz) { 171 | int32_t id = blockIdx.x * blockDim.x + threadIdx.x; 172 | for (int32_t i = id; i < nnz; i += gridDim.x * blockDim.x) { 173 | atomicAdd(&gTemp[ANBColumnIndices[i]], 174 | ANBValues[ANBColumnIndices[i]] 175 | * ANBValues[ANBColumnIndices[i]]); 176 | } 177 | } 178 | 179 | __global__ void steepestEdge2(double 
*g, double *gTemp, int32_t columns) { 180 | int32_t id = blockIdx.x * blockDim.x + threadIdx.x; 181 | for (int32_t i = id; i < columns; i += gridDim.x * blockDim.x) { 182 | g[i] *= rsqrt(gTemp[i]); 183 | } 184 | } 185 | 186 | __global__ void swapColumnDouble(double *AB, double *ANB, int32_t rows, 187 | int32_t columnsAB, int32_t columnsANB, int32_t columnAB, int32_t columnANB, 188 | double *cB, int32_t *cBIndex, double *cNB, int32_t *cNBIndex, 189 | int32_t *xIndex) { 190 | int32_t id = blockIdx.x * blockDim.x + threadIdx.x; 191 | for (int32_t i = id; i < rows; i += gridDim.x * blockDim.x) { 192 | double temp = AB[i * columnsAB + columnAB]; 193 | AB[i * columnsAB + columnAB] = ANB[i * columnsANB + columnANB]; 194 | ANB[i * columnsANB + columnANB] = temp; 195 | } 196 | 197 | if (id == 0) { 198 | double tempValue = cB[columnAB]; 199 | cB[columnAB] = cNB[columnANB]; 200 | cNB[columnANB] = tempValue; 201 | 202 | int32_t tempIndex = cBIndex[columnAB]; 203 | cBIndex[columnAB] = cNBIndex[columnANB]; 204 | cNBIndex[columnANB] = tempIndex; 205 | 206 | tempIndex = xIndex[columnAB + columnsANB]; 207 | xIndex[columnAB + columnsANB] = xIndex[columnANB]; 208 | xIndex[columnANB] = tempIndex; 209 | } 210 | } 211 | 212 | __global__ void updateXB(double *xB, double *s, int32_t rows, int32_t row) { 213 | int32_t id = blockIdx.x * blockDim.x + threadIdx.x; 214 | //xBi=xBi-si*xBrow/srow xBrow=xBrow/srow 215 | for (int32_t i = id; i < rows; i += gridDim.x * blockDim.x) { 216 | if (i == row) { 217 | xB[i] /= s[i]; 218 | } else { 219 | xB[i] -= s[i] * xB[row] / s[row]; 220 | } 221 | } 222 | } 223 | 224 | __global__ void zero(double *pointer, int32_t length) { 225 | int32_t id = blockIdx.x * blockDim.x + threadIdx.x; 226 | for (int32_t i = id; i < length; i += gridDim.x * blockDim.x) { 227 | pointer[i] = 0.; 228 | } 229 | } 230 | 231 | void findBFS(LPProblem *lpProblem, cusolverSpHandle_t cusolverHandle, 232 | cusparseHandle_t cusparseHandle, cusparseMatDescr_t matrixDescriptor) { 233 | 
LPProblem *lpTemp = (LPProblem *) malloc(sizeof(LPProblem)); 234 | initializeLPProblem(lpTemp, lpProblem->rows, lpProblem->columns + lpProblem->rows, lpProblem->nnz + lpProblem->rows); 235 | 236 | lpTemp->isBasisAllocated = false; 237 | lpTemp->rows = lpProblem->rows; 238 | lpTemp->columns = lpProblem->columns + lpProblem->rows; 239 | lpTemp->nnz = lpProblem->nnz + lpProblem->rows; 240 | cudaCheck(cudaMemcpy(lpTemp->ANB, lpProblem->A, lpTemp->rows * (lpTemp->columns - lpTemp->rows)* sizeof(double), cudaMemcpyDeviceToDevice)); 241 | cuSparseCheck( 242 | cusparseDnnz(cusparseHandle, CUSPARSE_DIRECTION_COLUMN, lpTemp->columns - lpTemp->rows, lpTemp->rows, 243 | matrixDescriptor, lpTemp->ANB, lpTemp->columns - lpTemp->rows, lpTemp->nnzPerRow, 244 | &lpTemp->nnzANB)); 245 | cuSparseCheck( 246 | cusparseDdense2csc(cusparseHandle, lpTemp->columns - lpTemp->rows, lpTemp->rows, 247 | matrixDescriptor, lpTemp->ANB, lpTemp->columns - lpTemp->rows, lpTemp->nnzPerRow, lpTemp->ANBValues, 248 | lpTemp->ANBColumnIndices, lpTemp->ANBRowPointer)); 249 | cudaCheck(cudaDeviceSynchronize()); 250 | for (int32_t i = 0; i < lpTemp->rows; i++) { 251 | for (int32_t j = 0; j < lpTemp->rows; j++) { 252 | if (i == j) { 253 | lpTemp->AB[i * lpTemp->rows + j] = 1; 254 | } else { 255 | lpTemp->AB[i * lpTemp->rows + j] = 0; 256 | } 257 | } 258 | } 259 | cuSparseCheck( 260 | cusparseDnnz(cusparseHandle, CUSPARSE_DIRECTION_COLUMN, lpTemp->rows, lpTemp->rows, 261 | matrixDescriptor, lpTemp->AB, lpTemp->rows, lpTemp->nnzPerRow, &lpTemp->nnzAB)); 262 | cuSparseCheck( 263 | cusparseDdense2csc(cusparseHandle, lpTemp->rows, lpTemp->rows, matrixDescriptor, 264 | lpTemp->AB, lpTemp->rows, lpTemp->nnzPerRow, lpTemp->ABValues, lpTemp->ABColumnIndices, 265 | lpTemp->ABRowPointer)); 266 | cudaCheck(cudaMemcpy(lpTemp->b, lpProblem->b, lpTemp->rows * sizeof(double), cudaMemcpyDeviceToDevice)); 267 | cudaCheck(cudaMemcpy(lpTemp->cNB, lpProblem->c, (lpTemp->columns - lpTemp->rows) * sizeof(double), 
cudaMemcpyDeviceToDevice)); 268 | for (int32_t i = 0; i < lpTemp->columns - lpTemp->rows; i++) { 269 | lpTemp->cNBIndex[i] = i; 270 | } 271 | for (int32_t i = 0; i < lpTemp->rows; i++) { 272 | lpTemp->cBIndex[i] = i + lpTemp->columns - lpTemp->rows; 273 | lpTemp->cB[i] = -1000; 274 | } 275 | for (int32_t i = 0; i < lpTemp->columns; i++) { 276 | lpTemp->xIndex[i] = i; 277 | } 278 | 279 | simplex(cusolverHandle, cusparseHandle, matrixDescriptor, lpTemp->rows, 280 | lpTemp->columns, lpTemp->nnz, &lpTemp->nnzAB, lpTemp->AB, 281 | lpTemp->ABRowPointer, lpTemp->ABColumnIndices, lpTemp->ABValues, 282 | lpTemp->ABTRowPointer, lpTemp->ABTColumnIndices, lpTemp->ABTValues, 283 | &lpTemp->nnzANB, lpTemp->ANB, lpTemp->ANBRowPointer, 284 | lpTemp->ANBColumnIndices, lpTemp->ANBValues, lpTemp->b, lpTemp->cB, 285 | lpTemp->cBIndex, lpTemp->cNB, lpTemp->cNBIndex, lpTemp->xB, 286 | lpTemp->xIndex, lpTemp->s, lpTemp->g, lpTemp->gTemp, 287 | lpTemp->ANBColumn, lpTemp->nnzPerRow, lpTemp->row, lpTemp->column); 288 | 289 | for (int32_t i = 0; i < lpTemp->rows; i++) { 290 | if (lpTemp->cBIndex[i] < lpProblem->columns) { 291 | printf("[%d]%g(%d),", lpTemp->cBIndex[i], lpTemp->xB[i], lpProblem->columns); 292 | } 293 | } 294 | printf("\n"); 295 | 296 | lpProblem->isSolution = true; 297 | for (int32_t i = 0; i < lpTemp->rows; i++) { 298 | if (lpTemp->cBIndex[i] >= lpProblem->columns) { 299 | lpProblem->isSolution = false; 300 | break; 301 | } 302 | } 303 | 304 | deleteLPProblem(lpTemp); 305 | } 306 | 307 | void simplex(cusolverSpHandle_t cusolverHandle, cusparseHandle_t cusparseHandle, 308 | cusparseMatDescr_t matrixDescriptor, int32_t rows, int32_t columns, 309 | int32_t nnz, int32_t *nnzAB, double *AB, int32_t *ABRowPointer, 310 | int32_t *ABColumnIndices, double *ABValues, int32_t *ABTRowPointer, 311 | int32_t *ABTColumnIndices, double *ABTValues, int32_t *nnzANB, 312 | double *ANB, int32_t *ANBRowPointer, int32_t *ANBColumnIndices, 313 | double *ANBValues, double *b, double *cB, int32_t 
*cBIndex, double *cNB, 314 | int32_t *cNBIndex, double *xB, int32_t *xIndex, double *s, double *g, 315 | double *gTemp, double *ANBColumn, int32_t *nnzPerRow, int32_t *row, 316 | int32_t *column) { 317 | double tolerance = 1.e-15; 318 | int32_t reorder = 0; 319 | int32_t singularity; 320 | double one = 1.; 321 | 322 | int32_t blockSize = 384; 323 | int32_t gridSize = 16; 324 | 325 | int32_t iteration = 0; 326 | while (true) { 327 | // AB*xB=b or xB_i=xB_i-s_i*xB_row/s_row xB_row=xB_row/s_row 328 | if (1) {//iteration % 10 == 0) { 329 | cuSolverCheck( 330 | cusolverSpDcsrlsvqr(cusolverHandle, rows, *nnzAB, 331 | matrixDescriptor, ABValues, ABRowPointer, 332 | ABColumnIndices, b, tolerance, reorder, xB, 333 | &singularity)); 334 | if (singularity != -1) { 335 | printf("singularity at %d\n", singularity); 336 | } 337 | } else { 338 | cudaDeviceSynchronize(); 339 | updateXB<<>>(xB, s, rows, *row); 340 | } 341 | 342 | // y*AB=cB (als ABT*s=cB) 343 | cuSparseCheck( 344 | cusparseDcsr2csc(cusparseHandle, rows, rows, *nnzAB, ABValues, 345 | ABRowPointer, ABColumnIndices, ABTValues, 346 | ABTColumnIndices, ABTRowPointer, 347 | CUSPARSE_ACTION_NUMERIC, 348 | cusparseGetMatIndexBase(matrixDescriptor))); 349 | cudaDeviceSynchronize(); 350 | cuSolverCheck( 351 | cusolverSpDcsrlsvqr(cusolverHandle, rows, *nnzAB, 352 | matrixDescriptor, ABTValues, ABTRowPointer, 353 | ABTColumnIndices, cB, tolerance, reorder, s, 354 | &singularity)); 355 | if (singularity != -1) { 356 | printf("singularity at %d\n", singularity); 357 | break; 358 | } 359 | 360 | // g=y*ANB-cNB (als g=ANBT*s-cNB) 361 | copyNegativeDouble<<>>(cNB, g, columns - rows); 362 | cuSparseCheck( 363 | cusparseDcsrmv(cusparseHandle, CUSPARSE_OPERATION_TRANSPOSE, 364 | rows, columns - rows, *nnzANB, &one, matrixDescriptor, 365 | ANBValues, ANBRowPointer, ANBColumnIndices, s, &one, 366 | g)); 367 | // steepest edge g_i=g_i/||ANB_i|| 368 | zero<<>>(gTemp, columns - rows); 369 | steepestEdge<<>>(gTemp, ANBColumnIndices, 370 | 
ANBValues, columns - rows, *nnzANB); 371 | steepestEdge2<<>>(g, gTemp, columns - rows); 372 | 373 | // column={i|min{g_i},g_i<0} 374 | minG<<<1, blockSize, blockSize / 32 * 2 * sizeof(double)>>>(g, 375 | columns - rows, column); 376 | cudaCheck(cudaDeviceSynchronize()); 377 | 378 | // !column -> optimal 379 | if (*column == -1) { 380 | printf("optimal\n"); 381 | break; 382 | } 383 | 384 | // AB*s=ANB_column 385 | copyColumnDouble<<>>(ANB, ANBColumn, rows, 386 | columns - rows, *column); 387 | cuSolverCheck( 388 | cusolverSpDcsrlsvqr(cusolverHandle, rows, *nnzAB, 389 | matrixDescriptor, ABValues, ABRowPointer, 390 | ABColumnIndices, ANBColumn, tolerance, reorder, s, 391 | &singularity)); 392 | if (singularity != -1) { 393 | printf("singularity at %d\n", singularity); 394 | break; 395 | } 396 | 397 | // row={i|min{xB_i/s_i},s_i>0} 398 | minS<<<1, blockSize, blockSize / 32 * 2 * sizeof(double)>>>(s, xB, rows, 399 | row); 400 | cudaCheck(cudaDeviceSynchronize()); 401 | 402 | // !row -> unbounded 403 | if (*row == -1) { 404 | printf("unbounded\n"); 405 | break; 406 | } 407 | 408 | // swap/update variables 409 | // dense matrix is assumed to be stored in column-major format, need to transpose (implicitly via conversion to CSC format and reinterpreting as CSR) 410 | swapColumnDouble<<>>(AB, ANB, rows, rows, columns - rows, *row, *column, cB, cBIndex, cNB, cNBIndex,xIndex); 411 | cuSparseCheck( 412 | cusparseDnnz(cusparseHandle, CUSPARSE_DIRECTION_COLUMN, rows, rows, 413 | matrixDescriptor, AB, rows, nnzPerRow, nnzAB)); 414 | cuSparseCheck( 415 | cusparseDdense2csc(cusparseHandle, rows, rows, matrixDescriptor, 416 | AB, rows, nnzPerRow, ABValues, ABColumnIndices, 417 | ABRowPointer)); 418 | cuSparseCheck( 419 | cusparseDnnz(cusparseHandle, CUSPARSE_DIRECTION_COLUMN, columns - rows, rows, 420 | matrixDescriptor, ANB, columns - rows, nnzPerRow, 421 | nnzANB)); 422 | cuSparseCheck( 423 | cusparseDdense2csc(cusparseHandle, columns - rows, rows, 424 | matrixDescriptor, ANB, 
columns - rows, nnzPerRow, ANBValues, 425 | ANBColumnIndices, ANBRowPointer)); 426 | 427 | iteration++; 428 | } 429 | 430 | // solve exact 431 | cuSolverCheck( 432 | cusolverSpDcsrlsvqr(cusolverHandle, rows, *nnzAB, 433 | matrixDescriptor, ABValues, ABRowPointer, 434 | ABColumnIndices, b, tolerance, reorder, xB, 435 | &singularity)); 436 | if (singularity != -1) { 437 | printf("singularity at %d\n", singularity); 438 | } 439 | } 440 | -------------------------------------------------------------------------------- /src/simplex.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Defines simplex algorithm. 3 | */ 4 | 5 | #ifndef SIMPLEX_CUH_ 6 | #define SIMPLEX_CUH_ 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "lpProblem.cuh" 13 | 14 | __device__ double atomicAdd(double *address, double val); 15 | 16 | __device__ void minimumIndex(double *value1, int32_t *index1, double value2, 17 | int32_t index2); 18 | 19 | __device__ void warpReduceMinIndex(double *value, int32_t *index); 20 | 21 | __device__ void blockReduceMinGIndex(double *g, int32_t columns, 22 | double *blockData, int32_t *blockIndex, double *localData, 23 | int32_t *localIndex); 24 | 25 | __device__ void blockReduceMinSIndex(double *s, double *xB, int32_t rows, 26 | double *blockData, int32_t *blockIndex, double *localData, 27 | int32_t *localIndex); 28 | 29 | __global__ void copyColumnDouble(double *fromMatrix, double *to, int32_t rows, 30 | int32_t columns, int32_t column); 31 | 32 | __global__ void copyColumnDouble2(double *fromANB, double *toANB, int32_t rows, 33 | int32_t fromColumns, int32_t toColumns, double *fromCNB, double *toCNB, 34 | double *fromCNBIndex, double *toCNBIndex, int32_t fromColumn, int32_t toColumn); 35 | 36 | __global__ void copyNegativeDouble(double *from, double *to, int32_t elements); 37 | 38 | __global__ void minG(double *g, int32_t columns, int32_t *column); 39 | 40 | __global__ void minS(double *s, double *xB, 
int32_t columns, int32_t *row); 41 | 42 | __global__ void steepestEdge(double *gTemp, int32_t *ANBColumnIndices, 43 | double *ANBValues, int32_t columns, int32_t nnz); 44 | 45 | __global__ void steepestEdge2(double *g, double *gTemp, int32_t columns); 46 | 47 | __global__ void swapColumnDouble(double *AB, double *ANB, int32_t rows, 48 | int32_t columnsAB, int32_t columnsANB, int32_t columnAB, int32_t columnANB, 49 | double *cB, int32_t *cBIndex, double *cNB, int32_t *cNBIndex, 50 | int32_t *xIndex); 51 | 52 | __global__ void updateXB(double *xB, double *s, int32_t rows, int32_t row); 53 | 54 | __global__ void zero(double *pointer, int32_t length); 55 | 56 | void findBFS(LPProblem *lpProblem, cusolverSpHandle_t cusolverHandle, 57 | cusparseHandle_t cusparseHandle, cusparseMatDescr_t matrixDescriptor); 58 | 59 | void simplex(cusolverSpHandle_t cusolverHandle, cusparseHandle_t cusparseHandle, 60 | cusparseMatDescr_t matrixDescriptor, int32_t rows, int32_t columns, 61 | int32_t nnz, int32_t *nnzAB, double *AB, int32_t *ABRowPointer, 62 | int32_t *ABColumnIndices, double *ABValues, int32_t *ABTRowPointer, 63 | int32_t *ABTColumnIndices, double *ABTValues, int32_t *nnzANB, 64 | double *ANB, int32_t *ANBRowPointer, int32_t *ANBColumnIndices, 65 | double *ANBValues, double *b, double *cB, int32_t *cBIndex, double *cNB, 66 | int32_t *cNBIndex, double *xB, int32_t *xIndex, double *s, double *g, 67 | double *gTemp, double *ANBColumn, int32_t *nnzPerRow, int32_t *row, 68 | int32_t *column); 69 | 70 | #endif /* SIMPLEX_CUH_ */ 71 | --------------------------------------------------------------------------------