├── .clang-format
├── .gitmodules
├── gemm_naive.cu
├── CMakeLists.txt
├── README.md
├── gemm_use_128.cu
├── util.cuh
├── first_attempt.cu
├── gemm_use_tile.cu
├── fc_relu.cu
├── gemm_use_smem.cu
├── gemm_transpose_smem.cu
├── gemm_hide_smem_latency.cu
├── gemm_final.cu
└── gemm.cu

/.clang-format:
--------------------------------------------------------------------------------
1 | BasedOnStyle: Google
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "third_party/cutlass"]
2 | path = third_party/cutlass
3 | url = git@github.com:NVIDIA/cutlass.git
4 | shallow = true
5 |
--------------------------------------------------------------------------------
/gemm_naive.cu:
--------------------------------------------------------------------------------
1 | #include "util.cuh"
2 |
3 | namespace {
4 | __global__ void gemmKernel(const float *__restrict__ A,
5 |                            const float *__restrict__ B, float *__restrict__ C,
6 |                            float alpha, float beta, unsigned M, unsigned N,
7 |                            unsigned K) {
8 |   unsigned int m = threadIdx.x + blockDim.x * blockIdx.x;
9 |   unsigned int n = threadIdx.y + blockDim.y * blockIdx.y;
10 |   float c = 0;
11 |   openmlsys::Tensor2D<const float> pA{A, M, K};
12 |   openmlsys::Tensor2D<const float> pB{B, K, N};
13 |   openmlsys::Tensor2D<float> pC{C, M, N};
14 |   if (!pC.validOffset(m, n)) return;
15 |   for (unsigned k = 0; k < K; ++k) {
16 |     c += pA(m, k) * pB(k, n);
17 |   }
18 |   c = c * alpha;
19 |   float result = c;
20 |   if (beta != 0) {
21 |     result = result + pC(m, n) * beta;
22 |   }
23 |   pC(m, n) = result;
24 | }
25 | }  // namespace
26 |
27 | void gemmNaive(const float *deviceAPtr, const float *deviceBPtr,
28 |                float *deviceCPtr, float alpha, float beta, unsigned M,
29 |                unsigned N, unsigned K) {
30 |   dim3 block(16, 16);
31 |   dim3 grid((M + block.x - 1) / block.x, (N + block.y - 1) / block.y);
32 |
33 |   gemmKernel<<<grid, block>>>(deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta,
34 |                               M, N, K);
35 | }
36 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.12.4)
2 | project(openmlsys-cuda CXX CUDA)
3 |
4 | set(CMAKE_CUDA_STANDARD 17)
5 |
6 | find_package(CUDA)
7 | include(FindCUDA/select_compute_arch)
8 | CUDA_DETECT_INSTALLED_GPUS(INSTALLED_GPU_CCS_1)
9 | string(STRIP "${INSTALLED_GPU_CCS_1}" INSTALLED_GPU_CCS_2)
10 | string(REPLACE " " ";" INSTALLED_GPU_CCS_3 "${INSTALLED_GPU_CCS_2}")
11 | string(REPLACE "."
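# drop the "." from each detected compute capability (e.g. 7.5 -> 75) so the
# result can be used directly as CMAKE_CUDA_ARCHITECTURES below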
"" CUDA_ARCH_LIST "${INSTALLED_GPU_CCS_3}") 12 | message("-- nvcc generates code for arch ${CUDA_ARCH_LIST}") 13 | SET(CMAKE_CUDA_ARCHITECTURES ${CUDA_ARCH_LIST}) 14 | 15 | find_package(Eigen3 REQUIRED) 16 | find_package(gflags REQUIRED) 17 | include_directories(${EIGEN3_INCLUDE_DIRS}) 18 | include_directories(${gflags_INCLUDE_DIR}) 19 | 20 | find_package(OpenMP REQUIRED) 21 | set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --use_fast_math -Xcompiler -fopenmp") 22 | if (${CMAKE_BUILD_TYPE} MATCHES "Debug") 23 | set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -O0 -G") 24 | else () 25 | set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -O3") 26 | endif () 27 | 28 | add_executable(gemm gemm.cu gemm_final.cu gemm_hide_smem_latency.cu gemm_transpose_smem.cu gemm_use_smem.cu gemm_use_tile.cu gemm_use_128.cu gemm_naive.cu) 29 | target_link_libraries(gemm ${CUDA_cublas_LIBRARY} OpenMP::OpenMP_CXX ${gflags_LIBRARIES}) 30 | 31 | add_executable(first_attempt first_attempt.cu) 32 | target_link_libraries(first_attempt OpenMP::OpenMP_CXX) 33 | 34 | set(CUTLASS_INCLUDE_DIR ./third_party/cutlass/include) 35 | add_executable(fc_relu fc_relu.cu) 36 | target_link_libraries(fc_relu PRIVATE OpenMP::OpenMP_CXX ${gflags_LIBRARIES}) 37 | target_include_directories(fc_relu PRIVATE ${CUTLASS_INCLUDE_DIR}) 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # openmlsys-cuda 2 | 3 | Examples for beginners to write your own high-performance AI operators. We introduced optimizations tricks like using shared memory and pipeline rearrangement to maximize the throughput. We also provided an example for using CUTLASS to implement an FC + ReLU fused operator. 4 | 5 | ## Dependencies 6 | 7 | - Eigen: CPU linear algebra template library 8 | - OpenMP: Enable multi-threads acceleration on CPU 9 | - CUDA toolkit: Compile GPU kernels and analyse GPU executions 10 | - Gflags: Commandline flags library released by Google 11 | - CUTLASS: GPU GEMM template library 12 | 13 | ### Installation Hints 14 | 15 | - Eigen: Use package manager, e.g. `apt install libeigen3-dev`, or download from 16 | the [official website](https://eigen.tuxfamily.org/) and build from source. 17 | - OpenMP: Most time the compilers have already integrated with OpenMP. If your compiler does not support OpenMP, 18 | try `apt install libgomp-dev` or `apt install libomp-dev` for GCC or Clang separately. 19 | - CUDA toolkit: It's recommended to install following 20 | the [official instructions](https://developer.nvidia.com/cuda-toolkit). 21 | - Gflags: Use package manager, e.g. `apt install libgflags-dev`, or download from 22 | the [official website](https://gflags.github.io/gflags/) and build from source. 23 | - CUTLASS: We have registered it to our git module, so you do not have to install by yourself. 24 | 25 | ## Compilation 26 | 27 | Once you have installed the dependencies, you can use the following instruction to compile the project: 28 | 29 | ```bash 30 | git clone git@github.com:openmlsys/openmlsys-cuda.git 31 | cd openmlsys-cuda 32 | git submodule init && git submodule sync 33 | mkdir build && cd build 34 | cmake .. 
35 | make -j4 36 | ``` 37 | 38 | ## Examples 39 | 40 | - `first_attempt`: The naive implementation 41 | - `gemm`: Collection of implementations using different optimization tricks 42 | - `fc_relu`: Example for fusing FC and ReLU by using CUTLASS 43 | -------------------------------------------------------------------------------- /gemm_use_128.cu: -------------------------------------------------------------------------------- 1 | #include "util.cuh" 2 | 3 | namespace { 4 | __global__ void gemmKernel(const float *__restrict__ A, 5 | const float *__restrict__ B, float *__restrict__ C, 6 | float alpha, float beta, unsigned M, unsigned N, 7 | unsigned K) { 8 | constexpr unsigned ratio = sizeof(openmlsys::float4) / sizeof(float); 9 | unsigned int m = (threadIdx.x + blockDim.x * blockIdx.x) * ratio; 10 | unsigned int n = (threadIdx.y + blockDim.y * blockIdx.y) * ratio; 11 | openmlsys::Tensor2D pA{A, M, K}; 12 | pA.addOffset(m, 0); 13 | openmlsys::Tensor2D pB{B, K, N / ratio}; 14 | pB.addOffset(0, n / ratio); 15 | openmlsys::Tensor2D pC{C, M, N / ratio}; 16 | pC.addOffset(m, n / ratio); 17 | if (!pC.validOffset(0, 0)) return; 18 | 19 | openmlsys::float4 c[4]; 20 | memset(c, 0, sizeof(c)); 21 | for (unsigned k = 0; k < K; ++k) { 22 | openmlsys::float4 fragmentA{}; 23 | #pragma unroll 24 | for (unsigned i = 0; i < ratio; ++i) { 25 | fragmentA[i] = pA(i, k); 26 | } 27 | openmlsys::float4 fragmentB = pB(k, 0); 28 | 29 | #pragma unroll 30 | for (unsigned i = 0; i < ratio; ++i) { 31 | c[i] = c[i] + fragmentB * fragmentA[i]; 32 | } 33 | } 34 | 35 | #pragma unroll 36 | for (auto &a : c) { 37 | a = a * alpha; 38 | } 39 | 40 | #pragma unroll 41 | for (unsigned i = 0; i < ratio; ++i) { 42 | openmlsys::float4 result = c[i]; 43 | if (beta != 0) { 44 | result = c[i] + pC(i, 0) * beta; 45 | } 46 | pC(i, 0) = result; 47 | } 48 | } 49 | } // namespace 50 | 51 | void gemmUse128(const float *deviceAPtr, const float *deviceBPtr, 52 | float *deviceCPtr, float alpha, float beta, unsigned M, 53 | unsigned N, unsigned K) { 54 | dim3 block(16, 16); 55 | dim3 grid((M / 4 - 1) / block.x + 1, (N / 4 - 1) / block.y + 1); 56 | 57 | gemmKernel<<>>(deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta, 58 | M, N, K); 59 | } 60 | -------------------------------------------------------------------------------- /util.cuh: -------------------------------------------------------------------------------- 1 | #ifndef GEMM_UTIL_CUH 2 | #define GEMM_UTIL_CUH 3 | namespace openmlsys { 4 | template 5 | struct Layout { 6 | static constexpr int m = _m; 7 | static constexpr int n = _n; 8 | static constexpr int k = _k; 9 | }; 10 | 11 | struct __device_builtin__ __builtin_align__(16) float4 { 12 | float data[4]; 13 | 14 | __host__ __device__ float operator[](unsigned idx) const { return data[idx]; } 15 | 16 | __host__ __device__ float &operator[](unsigned idx) { return data[idx]; } 17 | 18 | __host__ __device__ float4 operator*(float other) const { 19 | return float4{data[0] * other, data[1] * other, data[2] * other, 20 | data[3] * other}; 21 | } 22 | 23 | __host__ __device__ float4 operator+(const float4 &other) const { 24 | return float4{data[0] + other.data[0], data[1] + other.data[1], 25 | data[2] + other.data[2], data[3] + other.data[3]}; 26 | } 27 | }; 28 | 29 | template 30 | struct __device_builtin__ Tensor2D { 31 | T *const __restrict__ ptr; 32 | const unsigned rows, cols; 33 | int _rowOffset{0}, _colOffset{0}; 34 | 35 | template 36 | __host__ __device__ Tensor2D(t &&ptr, unsigned rows, unsigned cols) 37 | : ptr{reinterpret_cast(ptr)}, 
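// Tensor2D is a lightweight 2-D view over a raw pointer: it keeps the extents plus a running
// row/column offset, and operator() below indexes ptr[_colOffset + col + (row + _rowOffset) * cols]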
rows{rows}, cols{cols} {}; 38 | 39 | template 40 | __host__ __device__ void addOffset(int rowOffset, int colOffset) { 41 | _rowOffset += rowOffset; 42 | _colOffset += colOffset * sizeof(t) / sizeof(T); 43 | } 44 | 45 | __host__ __device__ bool validRowOffset(int rowOffset) const { 46 | return (_rowOffset + rowOffset) < rows; 47 | } 48 | 49 | __host__ __device__ bool validColOffset(int colOffset) const { 50 | return (_colOffset + colOffset) < cols; 51 | } 52 | 53 | __host__ __device__ bool validOffset(int rowOffset, int colOffset) const { 54 | return validRowOffset(rowOffset) && validColOffset(colOffset); 55 | } 56 | 57 | __host__ __device__ T &operator()(int row, int col) const { 58 | return ptr[_colOffset + col + (row + _rowOffset) * cols]; 59 | } 60 | }; 61 | } // namespace openmlsys 62 | #endif // GEMM_UTIL_CUH 63 | -------------------------------------------------------------------------------- /first_attempt.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | __global__ void gemmKernel(const float *A, const float *B, float *C, 7 | float alpha, float beta, unsigned M, unsigned N, 8 | unsigned K) { 9 | unsigned int m = threadIdx.x + blockDim.x * blockIdx.x; 10 | unsigned int n = threadIdx.y + blockDim.y * blockIdx.y; 11 | if (m >= M || n >= N) return; 12 | float c = 0; 13 | for (unsigned k = 0; k < K; ++k) { 14 | c += A[m * K + k] * B[k * N + n]; 15 | } 16 | c = c * alpha; 17 | float result = c; 18 | if (beta != 0) { 19 | result = result + C[m * N + n] * beta; 20 | } 21 | C[m * N + n] = result; 22 | } 23 | 24 | void gemmNaive(const float *A, const float *B, float *C, float alpha, 25 | float beta, unsigned M, unsigned N, unsigned K) { 26 | dim3 block(32, 32); 27 | dim3 grid((M - 1) / block.x + 1, (N - 1) / block.y + 1); 28 | 29 | gemmKernel<<>>(A, B, C, alpha, beta, M, N, K); 30 | } 31 | 32 | int main() { 33 | int gpu_rank = 0; 34 | cudaDeviceProp deviceProp{}; 35 | cudaGetDeviceProperties(&deviceProp, gpu_rank); 36 | cudaSetDevice(gpu_rank); 37 | printf("GPU %s status: ", deviceProp.name); 38 | double boostFrequency = deviceProp.clockRate / 1e6; 39 | int fp32CoresNum = 640; 40 | double peakPerformance = boostFrequency * fp32CoresNum * 2; 41 | printf( 42 | "clock rate %.3f GHz, FP32 cores num %d, FP32 peak throughput %.3f " 43 | "GFLOPS\n", 44 | boostFrequency, fp32CoresNum, peakPerformance); 45 | omp_set_num_threads(omp_get_num_procs()); 46 | unsigned M = 1024, N = 1024, K = 1024; 47 | float alpha = 1., beta = 0.; 48 | float *deviceAPrt, *deviceBPtr, *deviceCPtr; 49 | Eigen::Matrix A{M, K}, 50 | B{K, N}, C{M, N}; 51 | A.setRandom(); 52 | B.setRandom(); 53 | C.setRandom(); 54 | cudaMalloc(&deviceAPrt, M * K * sizeof(float)); 55 | cudaMemcpy(deviceAPrt, A.data(), M * K * sizeof(float), 56 | cudaMemcpyHostToDevice); 57 | cudaMalloc(&deviceBPtr, K * N * sizeof(float)); 58 | cudaMemcpy(deviceBPtr, B.data(), K * N * sizeof(float), 59 | cudaMemcpyHostToDevice); 60 | cudaMalloc(&deviceCPtr, M * N * sizeof(float)); 61 | cudaMemcpy(deviceCPtr, C.data(), M * N * sizeof(float), 62 | cudaMemcpyHostToDevice); 63 | cudaEvent_t startEvent, stopEvent; 64 | cudaEventCreate(&startEvent); 65 | cudaEventCreate(&stopEvent); 66 | cudaEventRecord(startEvent); 67 | gemmNaive(deviceAPrt, deviceBPtr, deviceCPtr, alpha, beta, M, N, K); 68 | cudaEventRecord(stopEvent); 69 | cudaEventSynchronize(stopEvent); 70 | float milliseconds = 0; 71 | cudaEventElapsedTime(&milliseconds, startEvent, stopEvent); 72 | printf("GPU use: %.3f(ms)\n", 
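// the elapsed kernel time is taken from the cudaEvent pair recorded around the gemmNaive launch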
milliseconds); 73 | cudaEventDestroy(stopEvent); 74 | cudaEventDestroy(startEvent); 75 | Eigen::Matrix 76 | hostResult{M, N}, deviceResult{M, N}; 77 | clock_t begin, end; 78 | begin = clock(); 79 | hostResult = alpha * (A * B) + beta * C; 80 | end = clock(); 81 | printf("CPU use: %.3f(ms)\n", double(end - begin) / CLOCKS_PER_SEC * 1e3); 82 | cudaMemcpy(deviceResult.data(), deviceCPtr, M * N * sizeof(float), 83 | cudaMemcpyDeviceToHost); 84 | cudaDeviceSynchronize(); 85 | Eigen::Array diffArray = 86 | (hostResult - deviceResult).array().abs(); 87 | printf("Max Error: %f\n", diffArray.maxCoeff()); 88 | 89 | double GFLOPS = 2 * 1e-9 * M * N * K / (milliseconds * 1e-3); 90 | printf("GPU Throughput: %.3f GFLOPS\n", GFLOPS); 91 | } 92 | -------------------------------------------------------------------------------- /gemm_use_tile.cu: -------------------------------------------------------------------------------- 1 | #include "util.cuh" 2 | 3 | namespace { 4 | template 5 | __global__ void gemmKernel(const float *__restrict__ A, 6 | const float *__restrict__ B, float *__restrict__ C, 7 | float alpha, float beta, unsigned M, unsigned N, 8 | unsigned K) { 9 | constexpr unsigned ratio = sizeof(openmlsys::float4) / sizeof(float); 10 | unsigned int m = threadIdx.x * LayoutThread::m + LayoutTile::m * blockIdx.x; 11 | unsigned int n = threadIdx.y * LayoutThread::n + LayoutTile::n * blockIdx.y; 12 | openmlsys::Tensor2D pA{A, M, K}; 13 | pA.addOffset(m, 0); 14 | openmlsys::Tensor2D pB{B, K, N / ratio}; 15 | pB.addOffset(0, n / ratio); 16 | openmlsys::Tensor2D pC{C, M, N / ratio}; 17 | pC.addOffset(m, n / ratio); 18 | 19 | const unsigned iterationA = LayoutTile::m / LayoutBlock::m / LayoutThread::m; 20 | const unsigned iterationB = LayoutTile::n / LayoutBlock::n / LayoutThread::n; 21 | const unsigned intervalA = LayoutTile::m / iterationA; 22 | const unsigned intervalB = LayoutTile::n / iterationB; 23 | 24 | bool validLoadTileA[iterationA]; 25 | bool validLoadTileB[iterationB]; 26 | 27 | #pragma unroll 28 | for (unsigned i = 0; i < iterationA; ++i) { 29 | validLoadTileA[i] = pA.validRowOffset(i * intervalA); 30 | } 31 | 32 | #pragma unroll 33 | for (unsigned i = 0; i < iterationB; ++i) { 34 | validLoadTileB[i] = pB.validColOffset(i * intervalB / ratio); 35 | } 36 | constexpr openmlsys::float4 float4Zero{0.f, 0.f, 0.f, 0.f}; 37 | 38 | openmlsys::float4 c[iterationA][iterationB][4]; 39 | memset(c, 0, sizeof(c)); 40 | for (unsigned k = 0; k < K; ++k) { 41 | #pragma unroll 42 | for (unsigned iterA = 0; iterA < iterationA; ++iterA) { 43 | openmlsys::float4 fragmentA{}; 44 | validLoadTileA[iterA] &= pA.validColOffset(k); 45 | #pragma unroll 46 | for (unsigned i = 0; i < ratio; ++i) { 47 | fragmentA[i] = validLoadTileA[iterA] ? pA(i + iterA * intervalA, k) : 0; 48 | } 49 | #pragma unroll 50 | for (unsigned iterB = 0; iterB < iterationB; ++iterB) { 51 | validLoadTileB[iterB] &= pB.validRowOffset(k); 52 | openmlsys::float4 fragmentB = validLoadTileB[iterB] 53 | ? 
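// guarded load: a position that falls outside B yields float4Zero instead of reading out-of-range memory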
pB(k, iterB * intervalB / ratio) 54 | : float4Zero; 55 | 56 | #pragma unroll 57 | for (unsigned i = 0; i < ratio; ++i) { 58 | c[iterA][iterB][i] = c[iterA][iterB][i] + fragmentB * fragmentA[i]; 59 | } 60 | } 61 | } 62 | } 63 | 64 | #pragma unroll 65 | for (auto &termA : c) { 66 | #pragma unroll 67 | for (auto &termB : termA) { 68 | #pragma unroll 69 | for (auto &term : termB) { 70 | term = term * alpha; 71 | } 72 | } 73 | } 74 | 75 | #pragma unroll 76 | for (unsigned iterA = 0; iterA < iterationA; ++iterA) { 77 | #pragma unroll 78 | for (unsigned iterB = 0; iterB < iterationB; ++iterB) { 79 | #pragma unroll 80 | for (unsigned i = 0; i < ratio; ++i) { 81 | openmlsys::float4 result{c[iterA][iterB][i]}; 82 | if (beta != 0) { 83 | result = result + 84 | pC(i + iterA * intervalA, iterB * intervalB / ratio) * beta; 85 | } 86 | pC(i + iterA * intervalA, iterB * intervalB / ratio) = result; 87 | } 88 | } 89 | } 90 | } 91 | } // namespace 92 | 93 | void gemmUseTile(const float *deviceAPtr, const float *deviceBPtr, 94 | float *deviceCPtr, float alpha, float beta, unsigned M, 95 | unsigned N, unsigned K) { 96 | using LayoutTile = openmlsys::Layout<128, 128, 16>; 97 | using LayoutBlock = openmlsys::Layout<16, 16>; 98 | using LayoutThread = openmlsys::Layout<4, 4>; 99 | 100 | dim3 block(LayoutBlock::m, LayoutBlock::n); 101 | dim3 grid((M * LayoutBlock::m / LayoutTile::m - 1) / LayoutBlock::m + 1, 102 | (N * LayoutBlock::n / LayoutTile::n - 1) / LayoutBlock::n + 1); 103 | 104 | gemmKernel<<>>( 105 | deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta, M, N, K); 106 | } 107 | -------------------------------------------------------------------------------- /fc_relu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | #include "cutlass/cutlass.h" 9 | #include "cutlass/gemm/device/gemm.h" 10 | 11 | template 13 | struct ReLUEpilogue { 14 | constexpr static int kCount = 1; 15 | using ElementOutput = ElementOutput_; 16 | using ElementAccumulator = ElementAccumulator_; 17 | using FragmentOutput = cutlass::Array; 18 | using FragmentAccumulator = cutlass::Array; 19 | using FragmentCompute = cutlass::Array; 20 | 21 | struct Params {}; 22 | 23 | CUTLASS_DEVICE 24 | explicit ReLUEpilogue(const Params &) {} 25 | 26 | CUTLASS_DEVICE 27 | constexpr static bool is_source_needed() { return true; } 28 | 29 | CUTLASS_DEVICE 30 | void set_k_partition(int, int) {} 31 | 32 | CUTLASS_DEVICE 33 | FragmentOutput operator()(const FragmentAccumulator &) const { 34 | return FragmentOutput{}; 35 | } 36 | 37 | CUTLASS_DEVICE 38 | FragmentOutput operator()( 39 | const FragmentCompute &fragmentCompute, 40 | const FragmentAccumulator &fragmentAccumulator) const { 41 | FragmentOutput output; 42 | #pragma unroll 43 | for (unsigned i = 0; i < kCount; ++i) { 44 | output[i] = 45 | ::max(ElementOutput(0), fragmentCompute[i] + fragmentAccumulator[i]); 46 | } 47 | return output; 48 | } 49 | }; 50 | 51 | DEFINE_int32(cpu_procs, omp_get_num_procs(), "processor num used of CPU"); 52 | DEFINE_int32(in_dim, 512, "input dim of FC"); 53 | DEFINE_int32(out_dim, 1024, "output dim of FC"); 54 | DEFINE_int32(batch_size, 128, "batch size"); 55 | 56 | int main(int argc, char *argv[]) { 57 | GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true); 58 | const int outDim = FLAGS_out_dim; 59 | const int batchSize = FLAGS_batch_size; 60 | const int inDim = FLAGS_in_dim; 61 | 62 | printf( 63 | "Starting the problem with batch size: %d, input dim: %d, 
output dim: " 64 | "%d\n", 65 | batchSize, inDim, outDim); 66 | 67 | using ElementAccumulator = float; 68 | using ElementComputeEpilogue = float; 69 | using ElementInputA = float; 70 | using ElementInputB = float; 71 | using ElementOutput = float; 72 | 73 | using RowMajor = cutlass::layout::RowMajor; 74 | 75 | using OperatorClass = cutlass::arch::OpClassSimt; 76 | using ArchTag = cutlass::arch::Sm50; 77 | 78 | using DefaultGemmConfiguration = 79 | cutlass::gemm::device::DefaultGemmConfiguration< 80 | OperatorClass, ArchTag, ElementInputA, ElementInputB, 81 | ElementComputeEpilogue, ElementAccumulator>; 82 | 83 | using ThreadblockShape = DefaultGemmConfiguration::ThreadblockShape; 84 | using WarpShape = DefaultGemmConfiguration::WarpShape; 85 | using InstructionShape = DefaultGemmConfiguration::InstructionShape; 86 | 87 | using Gemm = cutlass::gemm::device::Gemm< 88 | ElementInputA, RowMajor, ElementInputB, RowMajor, ElementOutput, RowMajor, 89 | ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, 90 | InstructionShape, 91 | ReLUEpilogue>; 92 | 93 | omp_set_num_threads(FLAGS_cpu_procs); 94 | 95 | Eigen::Matrix x, 96 | weight, outEigen, outCUTLASS; 97 | Eigen::Matrix bias; 98 | weight.resize(inDim, outDim); 99 | x.resize(batchSize, inDim); 100 | bias.resize(outDim); 101 | 102 | weight.setRandom(); 103 | x.setRandom(); 104 | bias.setRandom(); 105 | outEigen = ((x * weight).array().rowwise() + bias.array()).cwiseMax(0); 106 | outCUTLASS.resize(outEigen.rows(), outEigen.cols()); 107 | 108 | float *xDevPtr, *weightDevPtr, *biasDevPtr, *outDevPtr; 109 | cudaMalloc(&xDevPtr, x.size() * sizeof(float)); 110 | cudaMemcpy(xDevPtr, x.data(), x.size() * sizeof(float), 111 | cudaMemcpyHostToDevice); 112 | 113 | cudaMalloc(&weightDevPtr, weight.size() * sizeof(float)); 114 | cudaMemcpy(weightDevPtr, weight.data(), weight.size() * sizeof(float), 115 | cudaMemcpyHostToDevice); 116 | 117 | cudaMalloc(&biasDevPtr, bias.size() * sizeof(float)); 118 | cudaMemcpy(biasDevPtr, bias.data(), bias.size() * sizeof(float), 119 | cudaMemcpyHostToDevice); 120 | 121 | cudaMalloc(&outDevPtr, outEigen.size() * sizeof(float)); 122 | 123 | Gemm::Arguments args({batchSize, outDim, inDim}, {xDevPtr, inDim}, 124 | {weightDevPtr, outDim}, {biasDevPtr, 0}, 125 | {outDevPtr, outDim}, {}); 126 | Gemm gemm_op; 127 | gemm_op(args); 128 | cudaDeviceSynchronize(); 129 | cudaMemcpy(outCUTLASS.data(), outDevPtr, outCUTLASS.size() * sizeof(float), 130 | cudaMemcpyDeviceToHost); 131 | printf("Max error: %f\n", (outEigen - outCUTLASS).cwiseAbs().maxCoeff()); 132 | cudaFree(xDevPtr); 133 | cudaFree(weightDevPtr); 134 | cudaFree(biasDevPtr); 135 | cudaFree(outDevPtr); 136 | return 0; 137 | } 138 | -------------------------------------------------------------------------------- /gemm_use_smem.cu: -------------------------------------------------------------------------------- 1 | #include "util.cuh" 2 | 3 | namespace { 4 | template 5 | __global__ void gemmKernel(const float *__restrict__ A, 6 | const float *__restrict__ B, float *__restrict__ C, 7 | float alpha, float beta, unsigned M, unsigned N, 8 | unsigned K) { 9 | constexpr unsigned ratio = sizeof(openmlsys::float4) / sizeof(float); 10 | using LayoutTileT = 11 | openmlsys::Layout; 13 | using LayoutThreadT = 14 | openmlsys::Layout; 15 | constexpr unsigned blockSize = LayoutBlock::m * LayoutBlock::n; 16 | constexpr openmlsys::float4 float4Zero{0.f, 0.f, 0.f, 0.f}; 17 | 18 | __shared__ openmlsys::float4 tileA[LayoutTile::m][LayoutTileT::k]; 19 | __shared__ 
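// each thread block stages a LayoutTile::m x LayoutTile::k tile of A and a LayoutTile::k x LayoutTile::n
// tile of B in shared memory, so every element fetched from global memory is reused by many threads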
openmlsys::float4 tileB[LayoutTile::k][LayoutTileT::n]; 20 | 21 | const unsigned nInTileC = threadIdx.x % LayoutBlock::m; 22 | const unsigned mInTileC = threadIdx.x / LayoutBlock::m; 23 | 24 | const unsigned kInTileA = threadIdx.x % LayoutTileT::k; 25 | const unsigned mInTileA = threadIdx.x / LayoutTileT::k; 26 | 27 | const unsigned nInTileB = threadIdx.x % LayoutTileT::n; 28 | const unsigned kinTileB = threadIdx.x / LayoutTileT::n; 29 | 30 | openmlsys::Tensor2D pA{A, M, K / ratio}; 31 | pA.addOffset(LayoutTile::m * blockIdx.y + mInTileA, kInTileA); 32 | openmlsys::Tensor2D pB{B, K, N / ratio}; 33 | pB.addOffset(kinTileB, 34 | LayoutTileT::n * blockIdx.x + nInTileB * LayoutThreadT::n); 35 | openmlsys::Tensor2D pC{C, M, N / ratio}; 36 | pC.addOffset(LayoutTile::m * blockIdx.y + mInTileC * LayoutThread::m, 37 | LayoutTileT::n * blockIdx.x + nInTileC * LayoutThreadT::n); 38 | 39 | constexpr unsigned tileSizeA = LayoutTile::m * LayoutTile::k; 40 | constexpr unsigned tileSizeB = LayoutTile::n * LayoutTile::k; 41 | constexpr unsigned tileIterationsA = tileSizeA / blockSize / ratio; 42 | constexpr unsigned tileGlobalIntervalA = blockSize / LayoutTileT::k; 43 | constexpr unsigned tileComputeIterationsA = LayoutTileT::m / LayoutBlock::m; 44 | constexpr unsigned tileSharedIntervalA = 45 | LayoutTile::m / tileComputeIterationsA; 46 | constexpr unsigned tileIterationsB = tileSizeB / blockSize / ratio; 47 | constexpr unsigned tileGlobalIntervalB = blockSize / LayoutTileT::n; 48 | constexpr unsigned tileComputeIterationsB = LayoutTileT::n / LayoutBlock::n; 49 | constexpr unsigned tileSharedIntervalBT = 50 | LayoutTileT::n / tileComputeIterationsB; 51 | 52 | openmlsys::float4 bufferA[tileIterationsA]; 53 | openmlsys::float4 bufferB[tileIterationsB]; 54 | bool validLoadTileA[tileIterationsA]; 55 | bool validLoadTileB[tileIterationsB]; 56 | 57 | #pragma unroll 58 | for (unsigned i = 0; i < tileIterationsA; ++i) { 59 | validLoadTileA[i] = pA.validRowOffset(i * tileGlobalIntervalA); 60 | } 61 | 62 | #pragma unroll 63 | for (unsigned i = 0; i < tileIterationsB; ++i) { 64 | validLoadTileB[i] = pB.validColOffset(0); 65 | } 66 | 67 | openmlsys::float4 c[tileComputeIterationsA * LayoutThread::m] 68 | [tileComputeIterationsB * LayoutThreadT::n]; 69 | memset(c, 0, sizeof(c)); 70 | 71 | openmlsys::float4 fragmentA[tileComputeIterationsA * LayoutThreadT::m]; 72 | openmlsys::float4 fragmentB[tileComputeIterationsB * LayoutThreadT::n]; 73 | 74 | for (unsigned i = 0; i < K; i += LayoutTile::k) { 75 | #pragma unroll 76 | for (unsigned j = 0; j < tileIterationsA; ++j) { 77 | validLoadTileA[j] &= pA.validColOffset(0); 78 | bufferA[j] = 79 | validLoadTileA[j] ? pA(j * tileGlobalIntervalA, 0) : float4Zero; 80 | } 81 | 82 | #pragma unroll 83 | for (unsigned j = 0; j < tileIterationsB; ++j) { 84 | validLoadTileB[j] &= pB.validRowOffset(j * tileGlobalIntervalB); 85 | bufferB[j] = 86 | validLoadTileB[j] ? 
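// each thread reads one float4 of B from global memory into registers (zeros when out of range)
// before it is copied into the shared tileB after the next __syncthreads()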
pB(j * tileGlobalIntervalB, 0) : float4Zero; 87 | } 88 | 89 | __syncthreads(); 90 | #pragma unroll 91 | for (unsigned a = 0; a < tileIterationsA; ++a) { 92 | tileA[mInTileA + a * tileGlobalIntervalA][kInTileA] = bufferA[a]; 93 | } 94 | 95 | #pragma unroll 96 | for (unsigned a = 0; a < tileIterationsB; ++a) { 97 | tileB[kinTileB + a * tileGlobalIntervalB][nInTileB] = bufferB[a]; 98 | } 99 | __syncthreads(); 100 | 101 | #pragma unroll 102 | for (unsigned j = 0; j < LayoutTile::k; j++) { 103 | #pragma unroll 104 | for (unsigned a = 0; a < tileComputeIterationsA; ++a) { 105 | #pragma unroll 106 | for (unsigned b = 0; b < LayoutThread::m; ++b) { 107 | fragmentA[a][b] = 108 | tileA[a * tileSharedIntervalA + mInTileC * LayoutThread::m + b] 109 | [j / ratio][j % ratio]; 110 | } 111 | } 112 | #pragma unroll 113 | for (unsigned a = 0; a < tileComputeIterationsB; ++a) { 114 | fragmentB[a] = tileB[j][a * tileSharedIntervalBT + nInTileC]; 115 | } 116 | #pragma unroll 117 | for (unsigned d = 0; d < tileComputeIterationsA * LayoutThread::m; ++d) { 118 | #pragma unroll 119 | for (unsigned e = 0; e < tileComputeIterationsB * LayoutThreadT::n; 120 | ++e) { 121 | c[d][e] = 122 | c[d][e] + fragmentB[e] * 123 | fragmentA[d / LayoutThread::m][d % LayoutThread::m]; 124 | } 125 | } 126 | } 127 | pA.addOffset(0, LayoutTileT::k); 128 | pB.addOffset(LayoutTile::k, 0); 129 | } 130 | 131 | #pragma unroll 132 | for (auto &a : c) { 133 | #pragma unroll 134 | for (auto &b : a) { 135 | b = b * alpha; 136 | } 137 | } 138 | 139 | #pragma unroll 140 | for (unsigned i = 0; i < tileComputeIterationsA; ++i) { 141 | #pragma unroll 142 | for (unsigned a = 0; a < LayoutThread::m; a++) { 143 | const bool mValid = pC.validRowOffset(a); 144 | #pragma unroll 145 | for (unsigned b = 0; b < tileComputeIterationsB; b++) { 146 | const bool nValid = pC.validColOffset(b * tileSharedIntervalBT); 147 | if (mValid && nValid) { 148 | openmlsys::float4 result{c[a + i * LayoutThread::m][b]}; 149 | if (beta != 0) { 150 | result = result + pC(a, b * tileSharedIntervalBT) * beta; 151 | } 152 | pC(a, b * tileSharedIntervalBT) = result; 153 | } 154 | } 155 | } 156 | pC.addOffset(tileSharedIntervalA, 0); 157 | } 158 | } 159 | } // namespace 160 | 161 | void gemmUseSmem(const float *deviceAPtr, const float *deviceBPtr, 162 | float *deviceCPtr, float alpha, float beta, unsigned M, 163 | unsigned N, unsigned K) { 164 | using LayoutTile = openmlsys::Layout<128, 128, 16>; 165 | using LayoutBlock = openmlsys::Layout<16, 16>; 166 | using LayoutThread = openmlsys::Layout<4, 4>; 167 | 168 | dim3 block(LayoutBlock::m * LayoutBlock::n); 169 | dim3 grid((M - 1) / LayoutTile::m + 1, (N - 1) / LayoutTile::n + 1); 170 | 171 | gemmKernel<<>>( 172 | deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta, M, N, K); 173 | } 174 | -------------------------------------------------------------------------------- /gemm_transpose_smem.cu: -------------------------------------------------------------------------------- 1 | #include "util.cuh" 2 | 3 | namespace { 4 | template 5 | __global__ void gemmKernel(const float *__restrict__ A, 6 | const float *__restrict__ B, float *__restrict__ C, 7 | float alpha, float beta, unsigned M, unsigned N, 8 | unsigned K) { 9 | constexpr unsigned ratio = sizeof(openmlsys::float4) / sizeof(float); 10 | using LayoutTileT = 11 | openmlsys::Layout; 13 | using LayoutThreadT = 14 | openmlsys::Layout; 15 | constexpr unsigned blockSize = LayoutBlock::m * LayoutBlock::n; 16 | constexpr openmlsys::float4 float4Zero{0.f, 0.f, 0.f, 0.f}; 17 | 18 | __shared__ 
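// unlike gemm_use_smem.cu, A's shared tile below is stored k-major (transposed), so the compute loop
// can read four consecutive m-elements of A as a single float4 instead of four strided scalar loads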
openmlsys::float4 tileA[LayoutTile::k][LayoutTileT::m]; 19 | __shared__ openmlsys::float4 tileB[LayoutTile::k][LayoutTileT::n]; 20 | 21 | const unsigned nInTileC = threadIdx.x % LayoutBlock::m; 22 | const unsigned mInTileC = threadIdx.x / LayoutBlock::m; 23 | 24 | const unsigned kInTileA = threadIdx.x % LayoutTileT::k; 25 | const unsigned mInTileA = threadIdx.x / LayoutTileT::k; 26 | 27 | const unsigned nInTileB = threadIdx.x % LayoutTileT::n; 28 | const unsigned kinTileB = threadIdx.x / LayoutTileT::n; 29 | 30 | openmlsys::Tensor2D pA{A, M, K / ratio}; 31 | pA.addOffset(LayoutTile::m * blockIdx.y + mInTileA, kInTileA); 32 | openmlsys::Tensor2D pB{B, K, N / ratio}; 33 | pB.addOffset(kinTileB, 34 | LayoutTileT::n * blockIdx.x + nInTileB * LayoutThreadT::n); 35 | openmlsys::Tensor2D pC{C, M, N / ratio}; 36 | pC.addOffset(LayoutTile::m * blockIdx.y + mInTileC * LayoutThread::m, 37 | LayoutTileT::n * blockIdx.x + nInTileC * LayoutThreadT::n); 38 | 39 | constexpr unsigned tileSizeA = LayoutTile::m * LayoutTile::k; 40 | constexpr unsigned tileSizeB = LayoutTile::n * LayoutTile::k; 41 | constexpr unsigned tileIterationsA = tileSizeA / blockSize / ratio; 42 | constexpr unsigned tileGlobalIntervalA = blockSize / LayoutTileT::k; 43 | constexpr unsigned tileComputeIterationsA = LayoutTileT::m / LayoutBlock::m; 44 | constexpr unsigned tileSharedIntervalAT = 45 | LayoutTileT::m / tileComputeIterationsA; 46 | constexpr unsigned tileIterationsB = tileSizeB / blockSize / ratio; 47 | constexpr unsigned tileGlobalIntervalB = blockSize / LayoutTileT::n; 48 | constexpr unsigned tileComputeIterationsB = LayoutTileT::n / LayoutBlock::n; 49 | constexpr unsigned tileSharedIntervalBT = 50 | LayoutTileT::n / tileComputeIterationsB; 51 | 52 | openmlsys::float4 bufferA[tileIterationsA]; 53 | openmlsys::float4 bufferB[tileIterationsB]; 54 | bool validLoadTileA[tileIterationsA]; 55 | bool validLoadTileB[tileIterationsB]; 56 | 57 | #pragma unroll 58 | for (unsigned i = 0; i < tileIterationsA; ++i) { 59 | validLoadTileA[i] = pA.validRowOffset(i * tileGlobalIntervalA); 60 | } 61 | 62 | #pragma unroll 63 | for (unsigned i = 0; i < tileIterationsB; ++i) { 64 | validLoadTileB[i] = pB.validColOffset(0); 65 | } 66 | 67 | openmlsys::float4 c[tileComputeIterationsA * LayoutThread::m] 68 | [tileComputeIterationsB * LayoutThreadT::n]; 69 | memset(c, 0, sizeof(c)); 70 | 71 | openmlsys::float4 fragmentA[tileComputeIterationsA * LayoutThreadT::m]; 72 | openmlsys::float4 fragmentB[tileComputeIterationsB * LayoutThreadT::n]; 73 | 74 | for (unsigned i = 0; i < K; i += LayoutTile::k) { 75 | #pragma unroll 76 | for (unsigned j = 0; j < tileIterationsA; ++j) { 77 | validLoadTileA[j] &= pA.validColOffset(0); 78 | bufferA[j] = 79 | validLoadTileA[j] ? pA(j * tileGlobalIntervalA, 0) : float4Zero; 80 | } 81 | 82 | #pragma unroll 83 | for (unsigned j = 0; j < tileIterationsB; ++j) { 84 | validLoadTileB[j] &= pB.validRowOffset(j * tileGlobalIntervalB); 85 | bufferB[j] = 86 | validLoadTileB[j] ? 
pB(j * tileGlobalIntervalB, 0) : float4Zero; 87 | } 88 | 89 | __syncthreads(); 90 | #pragma unroll 91 | for (unsigned a = 0; a < tileIterationsA; ++a) { 92 | #pragma unroll 93 | for (unsigned j = 0; j < LayoutThread::m; ++j) { 94 | tileA[kInTileA * ratio + j] 95 | [(a * tileGlobalIntervalA + mInTileA) / ratio] 96 | [(a * tileGlobalIntervalA + mInTileA) % ratio] = bufferA[a][j]; 97 | } 98 | } 99 | 100 | #pragma unroll 101 | for (unsigned a = 0; a < tileIterationsB; ++a) { 102 | tileB[kinTileB + a * tileGlobalIntervalB][nInTileB] = bufferB[a]; 103 | } 104 | __syncthreads(); 105 | 106 | #pragma unroll 107 | for (unsigned j = 0; j < LayoutTile::k; j++) { 108 | #pragma unroll 109 | for (unsigned a = 0; a < tileComputeIterationsA; ++a) { 110 | fragmentA[a] = tileA[j][a * tileSharedIntervalAT + mInTileC]; 111 | } 112 | #pragma unroll 113 | for (unsigned a = 0; a < tileComputeIterationsB; ++a) { 114 | fragmentB[a] = tileB[j][a * tileSharedIntervalBT + nInTileC]; 115 | } 116 | #pragma unroll 117 | for (unsigned d = 0; d < tileComputeIterationsA * LayoutThread::m; ++d) { 118 | #pragma unroll 119 | for (unsigned e = 0; e < tileComputeIterationsB * LayoutThreadT::n; 120 | ++e) { 121 | c[d][e] = 122 | c[d][e] + fragmentB[e] * 123 | fragmentA[d / LayoutThread::m][d % LayoutThread::m]; 124 | } 125 | } 126 | } 127 | pA.addOffset(0, LayoutTileT::k); 128 | pB.addOffset(LayoutTile::k, 0); 129 | } 130 | 131 | #pragma unroll 132 | for (auto &a : c) { 133 | #pragma unroll 134 | for (auto &b : a) { 135 | b = b * alpha; 136 | } 137 | } 138 | 139 | #pragma unroll 140 | for (unsigned i = 0; i < tileComputeIterationsA; ++i) { 141 | #pragma unroll 142 | for (unsigned a = 0; a < LayoutThread::m; a++) { 143 | const bool mValid = pC.validRowOffset(a); 144 | #pragma unroll 145 | for (unsigned b = 0; b < tileComputeIterationsB; b++) { 146 | const bool nValid = pC.validColOffset(b * tileSharedIntervalBT); 147 | if (mValid && nValid) { 148 | openmlsys::float4 result{c[a + i * LayoutThread::m][b]}; 149 | if (beta != 0) { 150 | result = result + pC(a, b * tileSharedIntervalBT) * beta; 151 | } 152 | pC(a, b * tileSharedIntervalBT) = result; 153 | } 154 | } 155 | } 156 | pC.addOffset(tileSharedIntervalAT * ratio, 0); 157 | } 158 | } 159 | } // namespace 160 | 161 | void gemmTransposeSmem(const float *deviceAPtr, const float *deviceBPtr, 162 | float *deviceCPtr, float alpha, float beta, unsigned M, 163 | unsigned N, unsigned K) { 164 | using LayoutTile = openmlsys::Layout<128, 128, 16>; 165 | using LayoutBlock = openmlsys::Layout<16, 16>; 166 | using LayoutThread = openmlsys::Layout<4, 4>; 167 | 168 | dim3 block(LayoutBlock::m * LayoutBlock::n); 169 | dim3 grid((M - 1) / LayoutTile::m + 1, (N - 1) / LayoutTile::n + 1); 170 | 171 | gemmKernel<<>>( 172 | deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta, M, N, K); 173 | } 174 | -------------------------------------------------------------------------------- /gemm_hide_smem_latency.cu: -------------------------------------------------------------------------------- 1 | #include "util.cuh" 2 | 3 | namespace { 4 | template 5 | __global__ void gemmKernel(const float *__restrict__ A, 6 | const float *__restrict__ B, float *__restrict__ C, 7 | float alpha, float beta, unsigned M, unsigned N, 8 | unsigned K) { 9 | constexpr unsigned ratio = sizeof(openmlsys::float4) / sizeof(float); 10 | using LayoutTileT = 11 | openmlsys::Layout; 13 | using LayoutThreadT = 14 | openmlsys::Layout; 15 | constexpr unsigned blockSize = LayoutBlock::m * LayoutBlock::n; 16 | constexpr openmlsys::float4 
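// in this variant the per-thread fragmentA/fragmentB arrays are double buffered ([2][...]):
// the shared-memory reads for step j+1 are issued while the FMAs for step j execute,
// which hides part of the shared-memory access latency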
float4Zero{0.f, 0.f, 0.f, 0.f}; 17 | 18 | __shared__ openmlsys::float4 tileA[LayoutTile::k][LayoutTileT::m]; 19 | __shared__ openmlsys::float4 tileB[LayoutTile::k][LayoutTileT::n]; 20 | 21 | const unsigned nInTileC = threadIdx.x % LayoutBlock::m; 22 | const unsigned mInTileC = threadIdx.x / LayoutBlock::m; 23 | 24 | const unsigned kInTileA = threadIdx.x % LayoutTileT::k; 25 | const unsigned mInTileA = threadIdx.x / LayoutTileT::k; 26 | 27 | const unsigned nInTileB = threadIdx.x % LayoutTileT::n; 28 | const unsigned kinTileB = threadIdx.x / LayoutTileT::n; 29 | 30 | openmlsys::Tensor2D pA{A, M, K / ratio}; 31 | pA.addOffset(LayoutTile::m * blockIdx.y + mInTileA, kInTileA); 32 | openmlsys::Tensor2D pB{B, K, N / ratio}; 33 | pB.addOffset(kinTileB, 34 | LayoutTileT::n * blockIdx.x + nInTileB * LayoutThreadT::n); 35 | openmlsys::Tensor2D pC{C, M, N / ratio}; 36 | pC.addOffset(LayoutTile::m * blockIdx.y + mInTileC * LayoutThread::m, 37 | LayoutTileT::n * blockIdx.x + nInTileC * LayoutThreadT::n); 38 | 39 | constexpr unsigned tileSizeA = LayoutTile::m * LayoutTile::k; 40 | constexpr unsigned tileSizeB = LayoutTile::n * LayoutTile::k; 41 | constexpr unsigned tileIterationsA = tileSizeA / blockSize / ratio; 42 | constexpr unsigned tileGlobalIntervalA = blockSize / LayoutTileT::k; 43 | constexpr unsigned tileComputeIterationsA = LayoutTileT::m / LayoutBlock::m; 44 | constexpr unsigned tileSharedIntervalAT = 45 | LayoutTileT::m / tileComputeIterationsA; 46 | constexpr unsigned tileIterationsB = tileSizeB / blockSize / ratio; 47 | constexpr unsigned tileGlobalIntervalB = blockSize / LayoutTileT::n; 48 | constexpr unsigned tileComputeIterationsB = LayoutTileT::n / LayoutBlock::n; 49 | constexpr unsigned tileSharedIntervalBT = 50 | LayoutTileT::n / tileComputeIterationsB; 51 | 52 | openmlsys::float4 bufferA[tileIterationsA]; 53 | openmlsys::float4 bufferB[tileIterationsB]; 54 | bool validLoadTileA[tileIterationsA]; 55 | bool validLoadTileB[tileIterationsB]; 56 | 57 | #pragma unroll 58 | for (unsigned i = 0; i < tileIterationsA; ++i) { 59 | validLoadTileA[i] = pA.validRowOffset(i * tileGlobalIntervalA); 60 | } 61 | 62 | #pragma unroll 63 | for (unsigned i = 0; i < tileIterationsB; ++i) { 64 | validLoadTileB[i] = pB.validColOffset(0); 65 | } 66 | 67 | openmlsys::float4 c[tileComputeIterationsA * LayoutThread::m] 68 | [tileComputeIterationsB * LayoutThreadT::n]; 69 | memset(c, 0, sizeof(c)); 70 | 71 | openmlsys::float4 fragmentA[2][tileComputeIterationsA * LayoutThreadT::m]; 72 | openmlsys::float4 fragmentB[2][tileComputeIterationsB * LayoutThreadT::n]; 73 | 74 | for (unsigned i = 0; i < K; i += LayoutTile::k) { 75 | #pragma unroll 76 | for (unsigned j = 0; j < tileIterationsA; ++j) { 77 | validLoadTileA[j] &= pA.validColOffset(0); 78 | bufferA[j] = 79 | validLoadTileA[j] ? pA(j * tileGlobalIntervalA, 0) : float4Zero; 80 | } 81 | 82 | #pragma unroll 83 | for (unsigned j = 0; j < tileIterationsB; ++j) { 84 | validLoadTileB[j] &= pB.validRowOffset(j * tileGlobalIntervalB); 85 | bufferB[j] = 86 | validLoadTileB[j] ? 
pB(j * tileGlobalIntervalB, 0) : float4Zero; 87 | } 88 | 89 | __syncthreads(); 90 | #pragma unroll 91 | for (unsigned a = 0; a < tileIterationsA; ++a) { 92 | #pragma unroll 93 | for (unsigned j = 0; j < LayoutThread::m; ++j) { 94 | tileA[kInTileA * ratio + j] 95 | [(a * tileGlobalIntervalA + mInTileA) / ratio] 96 | [(a * tileGlobalIntervalA + mInTileA) % ratio] = bufferA[a][j]; 97 | } 98 | } 99 | 100 | #pragma unroll 101 | for (unsigned a = 0; a < tileIterationsB; ++a) { 102 | tileB[kinTileB + a * tileGlobalIntervalB][nInTileB] = bufferB[a]; 103 | } 104 | __syncthreads(); 105 | 106 | #pragma unroll 107 | for (unsigned a = 0; a < tileComputeIterationsA; ++a) { 108 | fragmentA[0][a] = tileA[0][a * tileSharedIntervalAT + mInTileC]; 109 | } 110 | #pragma unroll 111 | for (unsigned a = 0; a < tileComputeIterationsB; ++a) { 112 | fragmentB[0][a] = tileB[0][a * tileSharedIntervalBT + nInTileC]; 113 | } 114 | 115 | #pragma unroll 116 | for (unsigned j = 0; j < LayoutTile::k; j++) { 117 | #pragma unroll 118 | for (unsigned a = 0; a < tileComputeIterationsA; ++a) { 119 | fragmentA[(j + 1) % 2][a] = 120 | tileA[j + 1][a * tileSharedIntervalAT + mInTileC]; 121 | } 122 | #pragma unroll 123 | for (unsigned a = 0; a < tileComputeIterationsB; ++a) { 124 | fragmentB[(j + 1) % 2][a] = 125 | tileB[j + 1][a * tileSharedIntervalBT + nInTileC]; 126 | } 127 | #pragma unroll 128 | for (unsigned d = 0; d < tileComputeIterationsA * LayoutThread::m; ++d) { 129 | #pragma unroll 130 | for (unsigned e = 0; e < tileComputeIterationsB * LayoutThreadT::n; 131 | ++e) { 132 | c[d][e] = 133 | c[d][e] + 134 | fragmentB[j % 2][e] * 135 | fragmentA[j % 2][d / LayoutThread::m][d % LayoutThread::m]; 136 | } 137 | } 138 | } 139 | pA.addOffset(0, LayoutTileT::k); 140 | pB.addOffset(LayoutTile::k, 0); 141 | } 142 | 143 | #pragma unroll 144 | for (auto &a : c) { 145 | #pragma unroll 146 | for (auto &b : a) { 147 | b = b * alpha; 148 | } 149 | } 150 | 151 | #pragma unroll 152 | for (unsigned i = 0; i < tileComputeIterationsA; ++i) { 153 | #pragma unroll 154 | for (unsigned a = 0; a < LayoutThread::m; a++) { 155 | const bool mValid = pC.validRowOffset(a); 156 | #pragma unroll 157 | for (unsigned b = 0; b < tileComputeIterationsB; b++) { 158 | const bool nValid = pC.validColOffset(b * tileSharedIntervalBT); 159 | if (mValid && nValid) { 160 | openmlsys::float4 result{c[a + i * LayoutThread::m][b]}; 161 | if (beta != 0) { 162 | result = result + pC(a, b * tileSharedIntervalBT) * beta; 163 | } 164 | pC(a, b * tileSharedIntervalBT) = result; 165 | } 166 | } 167 | } 168 | pC.addOffset(tileSharedIntervalAT * ratio, 0); 169 | } 170 | } 171 | } // namespace 172 | 173 | void gemmHideSmemLatency(const float *deviceAPtr, const float *deviceBPtr, 174 | float *deviceCPtr, float alpha, float beta, unsigned M, 175 | unsigned N, unsigned K) { 176 | using LayoutTile = openmlsys::Layout<128, 128, 16>; 177 | using LayoutBlock = openmlsys::Layout<16, 16>; 178 | using LayoutThread = openmlsys::Layout<4, 4>; 179 | 180 | dim3 block(LayoutBlock::m * LayoutBlock::n); 181 | dim3 grid((M - 1) / LayoutTile::m + 1, (N - 1) / LayoutTile::n + 1); 182 | 183 | gemmKernel<<>>( 184 | deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta, M, N, K); 185 | } 186 | -------------------------------------------------------------------------------- /gemm_final.cu: -------------------------------------------------------------------------------- 1 | #include "util.cuh" 2 | 3 | namespace { 4 | template 5 | __global__ void gemmKernel(const float *__restrict__ A, 6 | const float 
*__restrict__ B, float *__restrict__ C, 7 | float alpha, float beta, unsigned M, unsigned N, 8 | unsigned K) { 9 | constexpr unsigned ratio = sizeof(openmlsys::float4) / sizeof(float); 10 | using LayoutTileT = 11 | openmlsys::Layout; 13 | using LayoutThreadT = 14 | openmlsys::Layout; 15 | constexpr unsigned blockSize = LayoutBlock::m * LayoutBlock::n; 16 | constexpr openmlsys::float4 float4Zero{0.f, 0.f, 0.f, 0.f}; 17 | 18 | __shared__ openmlsys::float4 tileA[2][LayoutTile::k][LayoutTileT::m]; 19 | __shared__ openmlsys::float4 tileB[2][LayoutTile::k][LayoutTileT::n]; 20 | 21 | const unsigned nInTileC = threadIdx.x % LayoutBlock::m; 22 | const unsigned mInTileC = threadIdx.x / LayoutBlock::m; 23 | 24 | const unsigned kInTileA = threadIdx.x % LayoutTileT::k; 25 | const unsigned mInTileA = threadIdx.x / LayoutTileT::k; 26 | 27 | const unsigned nInTileB = threadIdx.x % LayoutTileT::n; 28 | const unsigned kinTileB = threadIdx.x / LayoutTileT::n; 29 | 30 | openmlsys::Tensor2D pA{A, M, K / ratio}; 31 | pA.addOffset(LayoutTile::m * blockIdx.y + mInTileA, kInTileA); 32 | openmlsys::Tensor2D pB{B, K, N / ratio}; 33 | pB.addOffset(kinTileB, 34 | LayoutTileT::n * blockIdx.x + nInTileB * LayoutThreadT::n); 35 | openmlsys::Tensor2D pC{C, M, N / ratio}; 36 | pC.addOffset(LayoutTile::m * blockIdx.y + mInTileC * LayoutThread::m, 37 | LayoutTileT::n * blockIdx.x + nInTileC * LayoutThreadT::n); 38 | 39 | constexpr unsigned tileSizeA = LayoutTile::m * LayoutTile::k; 40 | constexpr unsigned tileSizeB = LayoutTile::n * LayoutTile::k; 41 | constexpr unsigned tileIterationsA = tileSizeA / blockSize / ratio; 42 | constexpr unsigned tileGlobalIntervalA = blockSize / LayoutTileT::k; 43 | constexpr unsigned tileComputeIterationsA = LayoutTileT::m / LayoutBlock::m; 44 | constexpr unsigned tileSharedIntervalAT = 45 | LayoutTileT::m / tileComputeIterationsA; 46 | constexpr unsigned tileIterationsB = tileSizeB / blockSize / ratio; 47 | constexpr unsigned tileGlobalIntervalB = blockSize / LayoutTileT::n; 48 | constexpr unsigned tileComputeIterationsB = LayoutTileT::n / LayoutBlock::n; 49 | constexpr unsigned tileSharedIntervalBT = 50 | LayoutTileT::n / tileComputeIterationsB; 51 | 52 | openmlsys::float4 bufferA[tileIterationsA]; 53 | openmlsys::float4 bufferB[tileIterationsB]; 54 | bool validLoadTileA[tileIterationsA]; 55 | bool validLoadTileB[tileIterationsB]; 56 | 57 | #pragma unroll 58 | for (unsigned i = 0; i < tileIterationsA; ++i) { 59 | validLoadTileA[i] = 60 | pA.validRowOffset(i * tileGlobalIntervalA) && pA.validColOffset(0); 61 | bufferA[i] = 62 | validLoadTileA[i] ? pA(i * tileGlobalIntervalA, 0) : float4Zero; 63 | } 64 | 65 | #pragma unroll 66 | for (unsigned i = 0; i < tileIterationsB; ++i) { 67 | validLoadTileB[i] = 68 | pB.validColOffset(0) && pB.validRowOffset(i * tileGlobalIntervalB); 69 | bufferB[i] = 70 | validLoadTileB[i] ? 
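// prologue: the first K-tile of A and B is fetched into registers here and then staged into
// buffer 0 of the double-buffered shared-memory tiles before the main loop starts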
pB(i * tileGlobalIntervalB, 0) : float4Zero; 71 | } 72 | 73 | openmlsys::float4 c[tileComputeIterationsA * LayoutThread::m] 74 | [tileComputeIterationsB * LayoutThreadT::n]; 75 | memset(c, 0, sizeof(c)); 76 | bool writeStageIdx = false; 77 | #pragma unroll 78 | for (unsigned i = 0; i < tileIterationsA; ++i) { 79 | #pragma unroll 80 | for (unsigned j = 0; j < LayoutThread::m; ++j) { 81 | tileA[writeStageIdx][kInTileA * ratio + j] 82 | [(i * tileGlobalIntervalA + mInTileA) / ratio] 83 | [(i * tileGlobalIntervalA + mInTileA) % ratio] = bufferA[i][j]; 84 | } 85 | } 86 | 87 | #pragma unroll 88 | for (unsigned i = 0; i < tileIterationsB; ++i) { 89 | tileB[writeStageIdx][kinTileB + i * tileGlobalIntervalB][nInTileB] = 90 | bufferB[i]; 91 | } 92 | 93 | writeStageIdx = !writeStageIdx; 94 | 95 | __syncthreads(); 96 | 97 | openmlsys::float4 fragmentA[2][tileComputeIterationsA * LayoutThreadT::m]; 98 | openmlsys::float4 fragmentB[2][tileComputeIterationsB * LayoutThreadT::n]; 99 | 100 | #pragma unroll 101 | for (unsigned i = 0; i < tileComputeIterationsA; ++i) { 102 | fragmentA[0][i] = 103 | tileA[!writeStageIdx][0][i * tileSharedIntervalAT + mInTileC]; 104 | } 105 | #pragma unroll 106 | for (unsigned i = 0; i < tileComputeIterationsB; ++i) { 107 | fragmentB[0][i] = 108 | tileB[!writeStageIdx][0][i * tileSharedIntervalBT + nInTileC]; 109 | } 110 | 111 | for (unsigned i = LayoutTile::k; i < K + LayoutTile::k; i += LayoutTile::k) { 112 | pA.addOffset(0, LayoutTileT::k); 113 | pB.addOffset(LayoutTile::k, 0); 114 | #pragma unroll 115 | for (unsigned j = 0; j < tileIterationsA; ++j) { 116 | validLoadTileA[j] &= pA.validColOffset(0); 117 | bufferA[j] = 118 | validLoadTileA[j] ? pA(j * tileGlobalIntervalA, 0) : float4Zero; 119 | } 120 | 121 | #pragma unroll 122 | for (unsigned j = 0; j < tileIterationsB; ++j) { 123 | validLoadTileB[j] &= pB.validRowOffset(j * tileGlobalIntervalB); 124 | bufferB[j] = 125 | validLoadTileB[j] ? 
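// these global loads fetch the *next* K-tile while the current one (already in shared memory)
// is being consumed; the buffers are written to the other shared-memory stage once j reaches
// LayoutTile::k - 1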
pB(j * tileGlobalIntervalB, 0) : float4Zero; 126 | } 127 | 128 | #pragma unroll 129 | for (unsigned j = 0; j < LayoutTile::k; j++) { 130 | if ((i < K) && (j == LayoutTile::k - 1)) { 131 | #pragma unroll 132 | for (unsigned d = 0; d < tileIterationsA; ++d) { 133 | #pragma unroll 134 | for (unsigned e = 0; e < LayoutThread::m; ++e) { 135 | tileA[writeStageIdx][kInTileA * ratio + e] 136 | [(d * tileGlobalIntervalA + mInTileA) / ratio] 137 | [(d * tileGlobalIntervalA + mInTileA) % ratio] = bufferA[d][e]; 138 | } 139 | } 140 | #pragma unroll 141 | for (unsigned a = 0; a < tileIterationsB; ++a) { 142 | tileB[writeStageIdx][kinTileB + a * tileGlobalIntervalB][nInTileB] = 143 | bufferB[a]; 144 | } 145 | writeStageIdx = !writeStageIdx; 146 | __syncthreads(); 147 | } 148 | #pragma unroll 149 | for (unsigned a = 0; a < tileComputeIterationsA; ++a) { 150 | fragmentA[(j + 1) % 2][a] = 151 | tileA[!writeStageIdx][(j + 1) % LayoutTile::k] 152 | [a * tileSharedIntervalAT + mInTileC]; 153 | } 154 | #pragma unroll 155 | for (unsigned a = 0; a < tileComputeIterationsB; ++a) { 156 | fragmentB[(j + 1) % 2][a] = 157 | tileB[!writeStageIdx][(j + 1) % LayoutTile::k] 158 | [a * tileSharedIntervalBT + nInTileC]; 159 | } 160 | #pragma unroll 161 | for (unsigned d = 0; d < tileComputeIterationsA * LayoutThread::m; ++d) { 162 | #pragma unroll 163 | for (unsigned e = 0; e < tileComputeIterationsB * LayoutThreadT::n; 164 | ++e) { 165 | c[d][e] = 166 | c[d][e] + 167 | fragmentB[j % 2][e] * 168 | fragmentA[j % 2][d / LayoutThread::m][d % LayoutThread::m]; 169 | } 170 | } 171 | } 172 | } 173 | 174 | #pragma unroll 175 | for (auto &a : c) { 176 | #pragma unroll 177 | for (auto &b : a) { 178 | b = b * alpha; 179 | } 180 | } 181 | 182 | #pragma unroll 183 | for (unsigned i = 0; i < tileComputeIterationsA; ++i) { 184 | #pragma unroll 185 | for (unsigned a = 0; a < LayoutThread::m; a++) { 186 | const bool mValid = pC.validRowOffset(a); 187 | #pragma unroll 188 | for (unsigned b = 0; b < tileComputeIterationsB; b++) { 189 | const bool nValid = pC.validColOffset(b * tileSharedIntervalBT); 190 | if (mValid && nValid) { 191 | openmlsys::float4 result{c[a + i * LayoutThread::m][b]}; 192 | if (beta != 0) { 193 | result = result + pC(a, b * tileSharedIntervalBT) * beta; 194 | } 195 | pC(a, b * tileSharedIntervalBT) = result; 196 | } 197 | } 198 | } 199 | pC.addOffset(tileSharedIntervalAT * ratio, 0); 200 | } 201 | } 202 | } // namespace 203 | 204 | void gemmFinal(const float *deviceAPtr, const float *deviceBPtr, 205 | float *deviceCPtr, float alpha, float beta, unsigned M, 206 | unsigned N, unsigned K) { 207 | using LayoutTile = openmlsys::Layout<128, 128, 16>; 208 | using LayoutBlock = openmlsys::Layout<16, 16>; 209 | using LayoutThread = openmlsys::Layout<4, 4>; 210 | 211 | dim3 block(LayoutBlock::m * LayoutBlock::n); 212 | dim3 grid((M - 1) / LayoutTile::m + 1, (N - 1) / LayoutTile::n + 1); 213 | 214 | gemmKernel<<>>( 215 | deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta, M, N, K); 216 | } 217 | -------------------------------------------------------------------------------- /gemm.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #define declGemmFn(name) \ 12 | void name(const float *deviceAPtr, const float *deviceBPtr, \ 13 | float *deviceCPtr, float alpha, float beta, unsigned M, \ 14 | unsigned N, unsigned K) 15 | 16 | declGemmFn(gemmFinal); 17 | declGemmFn(gemmUse128); 18 | 
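// declGemmFn only forward-declares the launcher defined in each variant's .cu file;
// all variants share this signature so GemmTester can benchmark them uniformly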
declGemmFn(gemmUseTile); 19 | declGemmFn(gemmNaive); 20 | declGemmFn(gemmHideSmemLatency); 21 | declGemmFn(gemmTransposeSmem); 22 | declGemmFn(gemmUseSmem); 23 | 24 | class GemmTester { 25 | class cuTimer { 26 | cudaEvent_t startEvent{}, stopEvent{}; 27 | 28 | public: 29 | cuTimer() { 30 | cudaEventCreate(&startEvent); 31 | cudaEventCreate(&stopEvent); 32 | } 33 | ~cuTimer() { 34 | cudaEventDestroy(stopEvent); 35 | cudaEventDestroy(startEvent); 36 | } 37 | 38 | void start() { cudaEventRecord(startEvent); } 39 | 40 | float end() { 41 | cudaEventRecord(stopEvent); 42 | auto error = cudaEventSynchronize(stopEvent); 43 | if (error != cudaSuccess) { 44 | throw std::runtime_error(cudaGetErrorString(error)); 45 | } 46 | float milliseconds = 0; 47 | cudaEventElapsedTime(&milliseconds, startEvent, stopEvent); 48 | 49 | return milliseconds; 50 | } 51 | }; 52 | 53 | cuTimer timer{}; 54 | Eigen::Matrix hostC; 55 | Eigen::Matrix 56 | deviceCCopied; 57 | const float *deviceAPtr, *deviceBPtr; 58 | float *deviceCPtr; 59 | const float *deviceCInitPtr; 60 | float alpha, beta; 61 | unsigned M, N, K; 62 | int iteration; 63 | 64 | void tearUp() { 65 | cudaMemcpy(deviceCPtr, deviceCInitPtr, M * N * sizeof(float), 66 | cudaMemcpyDeviceToDevice); 67 | } 68 | 69 | void checkValue() const { 70 | printf("Max Error: %f\n", (hostC - deviceCCopied).cwiseAbs().maxCoeff()); 71 | } 72 | 73 | template 74 | void profile(Function &&gemmFunction) { 75 | double elapsedTime = 0; 76 | for (int i = 0; i < iteration; ++i) { 77 | tearUp(); 78 | timer.start(); 79 | gemmFunction(deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta, M, N, K); 80 | elapsedTime += timer.end(); 81 | } 82 | elapsedTime /= iteration; 83 | double GFLOPS = 2 * 1e-9 * M * N * K / (elapsedTime * 1e-3); 84 | printf("Average Time: %.3f ms, Throughput: %.3f GFLOPS\n", elapsedTime, 85 | GFLOPS); 86 | } 87 | 88 | public: 89 | explicit GemmTester(float alpha, float beta, unsigned M, unsigned N, 90 | unsigned K, int iteration) 91 | : hostC{M, N}, 92 | deviceCCopied{M, N}, 93 | alpha(alpha), 94 | beta(beta), 95 | M(M), 96 | N(N), 97 | K(K), 98 | iteration{iteration} { 99 | Eigen::Matrix A{M, 100 | K}; 101 | Eigen::Matrix B{K, 102 | N}; 103 | A.setRandom(); 104 | B.setRandom(); 105 | hostC.setRandom(); 106 | 107 | float *_deviceCPtr, *_deviceCInitPtr; 108 | cudaMalloc(&_deviceCPtr, M * N * sizeof(float)); 109 | cudaMalloc(&_deviceCInitPtr, M * N * sizeof(float)); 110 | deviceCPtr = _deviceCPtr; 111 | deviceCInitPtr = _deviceCInitPtr; 112 | cudaMemcpy(_deviceCInitPtr, hostC.data(), M * N * sizeof(float), 113 | cudaMemcpyHostToDevice); 114 | cudaDeviceSynchronize(); 115 | 116 | clock_t begin, end; 117 | begin = clock(); 118 | hostC = alpha * (A * B) + beta * hostC; 119 | end = clock(); 120 | printf("CPU use: %.3f ms\n", double(end - begin) / CLOCKS_PER_SEC * 1e3); 121 | 122 | float *_deviceAPtr, *_deviceBPtr; 123 | cudaMalloc(&_deviceAPtr, M * K * sizeof(float)); 124 | cudaMalloc(&_deviceBPtr, K * N * sizeof(float)); 125 | cudaMemcpy(_deviceAPtr, A.data(), M * K * sizeof(float), 126 | cudaMemcpyHostToDevice); 127 | cudaMemcpy(_deviceBPtr, B.data(), K * N * sizeof(float), 128 | cudaMemcpyHostToDevice); 129 | cudaDeviceSynchronize(); 130 | 131 | deviceAPtr = _deviceAPtr; 132 | deviceBPtr = _deviceBPtr; 133 | } 134 | ~GemmTester() { 135 | cudaFree((void *)deviceAPtr); 136 | cudaFree((void *)deviceBPtr); 137 | cudaFree(deviceCPtr); 138 | } 139 | 140 | template 141 | void evaluate(Function &&gemmFunction, const char *name) { 142 | tearUp(); 143 | 
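// evaluate(): restore C, run the kernel once and compare it against the Eigen reference
// (checkValue), then average the runtime over `iteration` repetitions in profile()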
printf("-----------------------------------\n"); 144 | printf("Evaluating %s\n", name); 145 | gemmFunction(deviceAPtr, deviceBPtr, deviceCPtr, alpha, beta, M, N, K); 146 | cudaMemcpy(deviceCCopied.data(), deviceCPtr, M * N * sizeof(float), 147 | cudaMemcpyDeviceToHost); 148 | cudaDeviceSynchronize(); 149 | checkValue(); 150 | profile(std::forward(gemmFunction)); 151 | printf("-----------------------------------\n"); 152 | } 153 | }; 154 | 155 | class gemmCuBlas { 156 | cublasHandle_t handle{nullptr}; 157 | 158 | public: 159 | gemmCuBlas() { cublasCreate(&handle); } 160 | ~gemmCuBlas() { cublasDestroy(handle); } 161 | 162 | void operator()(const float *A, const float *B, float *C, float &alpha, 163 | float &beta, int M, int N, int K) const { 164 | int lda = N, ldb = K, ldc = N; 165 | cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, B, lda, A, 166 | ldb, &beta, C, ldc); 167 | } 168 | }; 169 | 170 | int getSPcores(cudaDeviceProp devProp) { 171 | int cores = 0; 172 | int mp = devProp.multiProcessorCount; 173 | switch (devProp.major) { 174 | case 2: // Fermi 175 | if (devProp.minor == 1) 176 | cores = mp * 48; 177 | else 178 | cores = mp * 32; 179 | break; 180 | case 3: // Kepler 181 | cores = mp * 192; 182 | break; 183 | case 5: // Maxwell 184 | cores = mp * 128; 185 | break; 186 | case 6: // Pascal 187 | if ((devProp.minor == 1) || (devProp.minor == 2)) 188 | cores = mp * 128; 189 | else if (devProp.minor == 0) 190 | cores = mp * 64; 191 | else 192 | throw std::runtime_error("Unknown device type"); 193 | break; 194 | case 7: // Volta and Turing 195 | if ((devProp.minor == 0) || (devProp.minor == 5)) 196 | cores = mp * 64; 197 | else 198 | throw std::runtime_error("Unknown device type"); 199 | break; 200 | case 8: // Ampere 201 | if (devProp.minor == 0) 202 | cores = mp * 64; 203 | else if (devProp.minor == 6) 204 | cores = mp * 128; 205 | else 206 | throw std::runtime_error("Unknown device type"); 207 | break; 208 | default: 209 | throw std::runtime_error("Unknown device type"); 210 | } 211 | return cores; 212 | } 213 | 214 | DEFINE_int32(cpu_procs, omp_get_num_procs(), "processor num used of CPU"); 215 | DEFINE_int32(gpu_rank, 0, "the used GPU rank"); 216 | DEFINE_int32(repeat_iterations, 10, 217 | "repeat iteration numbers and average the result"); 218 | DEFINE_double(alpha, 1., "alpha"); 219 | DEFINE_double(beta, 1., "beta"); 220 | DEFINE_uint32(M, 2048, "M"); 221 | DEFINE_uint32(N, 2048, "N"); 222 | DEFINE_uint32(K, 1024, "K"); 223 | 224 | int main(int argc, char *argv[]) { 225 | GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true); 226 | 227 | printf("Program start with %d CPU processes on the %d-th GPU\n", 228 | FLAGS_cpu_procs, FLAGS_gpu_rank); 229 | omp_set_num_threads(FLAGS_cpu_procs); 230 | cudaDeviceProp deviceProp{}; 231 | cudaGetDeviceProperties(&deviceProp, FLAGS_gpu_rank); 232 | cudaSetDevice(FLAGS_gpu_rank); 233 | printf("GPU %s status: ", deviceProp.name); 234 | double boostFrequency = deviceProp.clockRate / 1e6; 235 | int fp32CoresNum = getSPcores(deviceProp); 236 | double peakPerformance = boostFrequency * fp32CoresNum * 2; 237 | printf( 238 | "clock rate %.3f GHz, FP32 cores num %d, FP32 peak throughput %.3f " 239 | "GFLOPS\n", 240 | boostFrequency, fp32CoresNum, peakPerformance); 241 | printf("A: %d x %d, B: %d x %d, C: %d x %d\n", FLAGS_M, FLAGS_K, FLAGS_K, 242 | FLAGS_N, FLAGS_M, FLAGS_N); 243 | 244 | GemmTester tester{ 245 | (float)FLAGS_alpha, (float)FLAGS_beta, FLAGS_M, FLAGS_N, FLAGS_K, 246 | FLAGS_repeat_iterations}; 247 | 
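// cuBLAS is measured first as the throughput reference, followed by each hand-written kernel
// from naive to final; a typical invocation using the flags above is e.g.
//   ./gemm --M=2048 --N=2048 --K=1024 --alpha=1 --beta=1 --repeat_iterations=10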
tester.evaluate(gemmCuBlas{}, "cuBlas"); 248 | tester.evaluate(gemmNaive, "Naive"); 249 | tester.evaluate(gemmUse128, "Use128"); 250 | tester.evaluate(gemmUseTile, "UseTile"); 251 | tester.evaluate(gemmUseSmem, "UseSmem"); 252 | tester.evaluate(gemmTransposeSmem, "TransposeSmem"); 253 | tester.evaluate(gemmHideSmemLatency, "HideSmemLatency"); 254 | tester.evaluate(gemmFinal, "Final"); 255 | 256 | GFLAGS_NAMESPACE::ShutDownCommandLineFlags(); 257 | return 0; 258 | } 259 | --------------------------------------------------------------------------------