#include <cstdio>
#include <cstdlib>
#include <memory>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <cudnn.h>

static int checkCudaError(cudaError_t code, const char* expr, const char* file, int line) {
    if (code) {
        printf("CUDA error at %s:%d, code=%d (%s) in '%s'\n",
               file, line, (int)code, cudaGetErrorString(code), expr);
        return 1;
    }
    return 0;
}
#define checkCudaErr(...)                                                        \
    do {                                                                         \
        int err = checkCudaError(__VA_ARGS__, #__VA_ARGS__, __FILE__, __LINE__); \
        if (err) return err;                                                     \
    } while (0)

static int checkCudnnError(cudnnStatus_t code, const char* expr, const char* file, int line) {
    if (code) {
        printf("CUDNN error at %s:%d, code=%d (%s) in '%s'\n",
               file, line, (int)code, cudnnGetErrorString(code), expr);
        return 1;
    }
    return 0;
}
#define checkCudnnErr(...)                                                        \
    do {                                                                          \
        int err = checkCudnnError(__VA_ARGS__, #__VA_ARGS__, __FILE__, __LINE__); \
        if (err) return err;                                                      \
    } while (0)

int conv_op_process(int& batch, int& in_channel, int& height, int& width,
                    int& kernel_size, int& num_filter, int& stride,
                    int& group, int& iter, bool& half);

int main(int argc, char** argv) {
    if (argc != 10) {
        printf("Usage: ./conv_op in_channel height width num_filter kernel_size stride group iter half/float\n");
        return -1;
    }
    int batch       = 1;
    int in_channel  = atoi(argv[1]);
    int height      = atoi(argv[2]);
    int width       = atoi(argv[3]);
    int num_filter  = atoi(argv[4]);
    int kernel_size = atoi(argv[5]);
    int stride      = atoi(argv[6]);
    int group       = atoi(argv[7]);
    int iter        = atoi(argv[8]);
    bool half = true;
    if (atoi(argv[9]) == 32) {
        half = false;
        printf("Using FP32\n");
    }
    conv_op_process(batch, in_channel, height, width, kernel_size, num_filter,
                    stride, group, iter, half);
    return 0;
}

using perf_t = cudnnConvolutionFwdAlgoPerf_t;

int conv_op_process(int& batch, int& in_channel, int& height, int& width,
                    int& kernel_size, int& num_filter, int& stride,
                    int& group, int& iter, bool& half) {
    int input_size = batch * in_channel * height * width;
    float* A = (float*)malloc(sizeof(float) * input_size);
    int kernel = kernel_size * kernel_size * in_channel * num_filter;
    float* W = (float*)malloc(sizeof(float) * kernel);
    int output_size = batch * num_filter * height * width / stride / stride;
    float* B = (float*)malloc(sizeof(float) * output_size);

    // Fill the host buffers with deterministic data (the integer division is
    // intentional; only timing is measured, not numerical accuracy).
    for (int i = 0; i < input_size; i++)  A[i] = i / 10;
    for (int i = 0; i < kernel; i++)      W[i] = i / 15;
    for (int i = 0; i < output_size; i++) B[i] = 1;

    // Allocate device memory.
    void* d_A;
    void* d_W;
    void* d_B;
    checkCudaErr(cudaMalloc(&d_A, sizeof(float) * input_size));
    checkCudaErr(cudaMalloc(&d_W, sizeof(float) * kernel));
    checkCudaErr(cudaMalloc(&d_B, sizeof(float) * output_size));

    // Separate buffers for warm-up runs.
    void* d_A_w;
    void* d_W_w;
    void* d_B_w;
    checkCudaErr(cudaMalloc(&d_A_w, sizeof(float) * input_size));
    checkCudaErr(cudaMalloc(&d_W_w, sizeof(float) * kernel));
    checkCudaErr(cudaMalloc(&d_B_w, sizeof(float) * output_size));

    // Copy the inputs to the device.
    checkCudaErr(cudaMemcpy(d_A, A, sizeof(float) * input_size, cudaMemcpyHostToDevice));
    checkCudaErr(cudaMemcpy(d_W, W, sizeof(float) * kernel, cudaMemcpyHostToDevice));
    checkCudaErr(cudaMemcpy(d_B, B, sizeof(float) * output_size, cudaMemcpyHostToDevice));
    checkCudaErr(cudaDeviceSynchronize());  // wait for the copies to finish

    struct timeval start, handle_create, end;

    // cuDNN setup. Handle creation is expensive, so it is kept outside the timed region.
    cudnnHandle_t handle;
    cudnnTensorDescriptor_t input_desc;
    cudnnTensorDescriptor_t output_desc;
    cudnnFilterDescriptor_t filter_desc;
    cudnnConvolutionDescriptor_t conv_op_desc;
    checkCudnnErr(cudnnCreate(&handle));
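    // Shape note (matching cuDNN's documented output-size formula):
    //   out_dim = 1 + (in_dim + 2*pad - ((k - 1)*dilation + 1)) / stride
    // With dilation = 1 and pad = (k - 1)/2 (odd k), this reduces to
    // out_dim = in_dim / stride whenever stride divides in_dim, which is why
    // the output descriptor below uses height/stride and width/stride.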
    gettimeofday(&start, NULL);
    checkCudnnErr(cudnnCreateTensorDescriptor(&input_desc));
    checkCudnnErr(cudnnCreateTensorDescriptor(&output_desc));
    checkCudnnErr(cudnnCreateFilterDescriptor(&filter_desc));
    checkCudnnErr(cudnnCreateConvolutionDescriptor(&conv_op_desc));
    checkCudnnErr(cudnnSetConvolutionGroupCount(conv_op_desc, group));

    cudnnTensorFormat_t infer_type = CUDNN_TENSOR_NCHW;
    int pad = (kernel_size - 1) / 2;  // "same" padding for odd kernel sizes

    if (half) {
        printf("Using FP16\n");
        // Bind shapes and dtypes to the descriptors. The host buffers are
        // allocated as float, so they are large enough for either precision;
        // the FP16 path reinterprets the bytes, which is fine for timing.
        checkCudnnErr(cudnnSetTensor4dDescriptor(input_desc, infer_type, CUDNN_DATA_HALF,
                                                 batch, in_channel, height, width));
        checkCudnnErr(cudnnSetTensor4dDescriptor(output_desc, infer_type, CUDNN_DATA_HALF,
                                                 batch, num_filter, height / stride, width / stride));
        checkCudnnErr(cudnnSetFilter4dDescriptor(filter_desc, CUDNN_DATA_HALF, infer_type,
                                                 num_filter, in_channel / group,
                                                 kernel_size, kernel_size));
        if (group != 1) {
            // CUDNN_CROSS_CORRELATION with FP32 compute is very important in this config.
            checkCudnnErr(cudnnSetConvolution2dDescriptor(conv_op_desc, pad, pad, stride, stride,
                                                          1, 1, CUDNN_CROSS_CORRELATION,
                                                          CUDNN_DATA_FLOAT));
        } else {
            checkCudnnErr(cudnnSetConvolution2dDescriptor(conv_op_desc, pad, pad, stride, stride,
                                                          1, 1, CUDNN_CROSS_CORRELATION,
                                                          CUDNN_DATA_HALF));
        }
        // Allow Tensor Core kernels.
        checkCudnnErr(cudnnSetConvolutionMathType(conv_op_desc, CUDNN_TENSOR_OP_MATH));
    } else {
        // Same descriptor setup in FP32.
        checkCudnnErr(cudnnSetTensor4dDescriptor(input_desc, infer_type, CUDNN_DATA_FLOAT,
                                                 batch, in_channel, height, width));
        checkCudnnErr(cudnnSetTensor4dDescriptor(output_desc, infer_type, CUDNN_DATA_FLOAT,
                                                 batch, num_filter, height / stride, width / stride));
        checkCudnnErr(cudnnSetFilter4dDescriptor(filter_desc, CUDNN_DATA_FLOAT, infer_type,
                                                 num_filter, in_channel / group,
                                                 kernel_size, kernel_size));
        checkCudnnErr(cudnnSetConvolution2dDescriptor(conv_op_desc, pad, pad, stride, stride,
                                                      1, 1, CUDNN_CROSS_CORRELATION,
                                                      CUDNN_DATA_FLOAT));
    }
    printf("conv info %d %d %d %d %d %d %d %d\n", in_channel, height, width,
           num_filter, kernel_size, pad, stride, group);

    // Algorithm selection. The heuristic/search APIs are kept below for
    // reference; this benchmark pins IMPLICIT_PRECOMP_GEMM instead.
    std::unique_ptr<perf_t[]> perf_algos(new perf_t[1]);
    int num_algo = 1;
    int num_algo_get = 0;
    void* workspace_data_temp;
    size_t workspace_data_temp_size = 30 * 1024 * 1024;  // scratch for the (disabled) search below
    checkCudaErr(cudaMalloc(&workspace_data_temp, workspace_data_temp_size));

    cudnnConvolutionFwdAlgo_t algo;
    // checkCudnnErr(cudnnGetConvolutionForwardAlgorithm_v7(handle, input_desc, filter_desc,
    //     conv_op_desc, output_desc, num_algo, &num_algo_get, perf_algos.get()));
    // checkCudnnErr(cudnnFindConvolutionForwardAlgorithmEx(handle, input_desc, d_A_w,
    //     filter_desc, d_W_w, conv_op_desc, output_desc, d_B_w, num_algo, &num_algo_get,
    //     perf_algos.get(), workspace_data_temp, workspace_data_temp_size));
    // algo = perf_algos[0].algo;
    algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
    printf("Algo: %d\n", algo);

    size_t workspace_size = 0;
    checkCudnnErr(cudnnGetConvolutionForwardWorkspaceSize(handle, input_desc, filter_desc,
                                                          conv_op_desc, output_desc, algo,
                                                          &workspace_size));
    void* workspace;
    checkCudaErr(cudaMalloc(&workspace, workspace_size));

    float alpha = 1;
    float beta = 0;

    // Warm-up (disabled by default; set warmup > 0 to enable).
    int warmup = 0;
    for (int i = 0; i < warmup; i++) {
        checkCudnnErr(cudnnConvolutionForward(handle, &alpha, input_desc, d_A_w,
                                              filter_desc, d_W_w, conv_op_desc, algo,
                                              workspace, workspace_size, &beta,
                                              output_desc, d_B_w));
        checkCudaErr(cudaDeviceSynchronize());
    }
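    // A possible alternative (an assumption, not part of the original code):
    // CUDA events measure elapsed device time directly and avoid counting the
    // per-iteration host synchronization in the result, e.g.:
    //   cudaEvent_t t0, t1;
    //   checkCudaErr(cudaEventCreate(&t0));
    //   checkCudaErr(cudaEventCreate(&t1));
    //   checkCudaErr(cudaEventRecord(t0));
    //   /* ...launch the iter convolutions... */
    //   checkCudaErr(cudaEventRecord(t1));
    //   checkCudaErr(cudaEventSynchronize(t1));
    //   float ms = 0.0f;
    //   checkCudaErr(cudaEventElapsedTime(&ms, t0, t1));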
    // Timed region: iter forward convolutions, each followed by a sync.
    gettimeofday(&handle_create, NULL);
    for (int i = 0; i < iter; i++) {
        checkCudnnErr(cudnnConvolutionForward(handle, &alpha, input_desc, d_A,
                                              filter_desc, d_W, conv_op_desc, algo,
                                              workspace, workspace_size, &beta,
                                              output_desc, d_B));
        checkCudaErr(cudaDeviceSynchronize());
    }
    gettimeofday(&end, NULL);

    int time = 0;
    printf("========== Time ==========\n");
    // time = 1000000 * (handle_create.tv_sec - start.tv_sec) + (handle_create.tv_usec - start.tv_usec);
    // printf("Handle Create: %d us\n", time);
    time = 1000000 * (end.tv_sec - handle_create.tv_sec) + (end.tv_usec - handle_create.tv_usec);
    printf("Conv Op Run: %d us\n", time / iter);

    // Copy the results back to the host.
    checkCudaErr(cudaMemcpy(B, d_B, sizeof(float) * output_size, cudaMemcpyDeviceToHost));
    checkCudaErr(cudaMemcpy(A, d_A, sizeof(float) * input_size, cudaMemcpyDeviceToHost));
    checkCudaErr(cudaDeviceSynchronize());

    // for (int i = 0; i < output_size; i++) printf("%d -> %f\n", i, B[i]);
    return 0;
}
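// Build and run (an assumed toolchain invocation; adjust cuDNN paths and GPU
// architecture flags to your environment):
//   nvcc -o conv_op conv_op.cpp -lcudnn
//   ./conv_op 64 56 56 64 3 1 1 100 16
// Arguments: in_channel height width num_filter kernel_size stride group iter
// precision, where 32 selects FP32 and any other value selects FP16.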