Anyone who has worked with OpenCL knows that the clGetDeviceInfo API can query almost every attribute of the current compute device, including the number of compute units, the maximum work-group size, the local memory size, and so on. However, these values reflect the maximum capabilities of the device itself, not the execution context of a particular kernel. How complex a kernel is determines how much of each resource it can actually use. For example, the register pool of a compute unit has a fixed size, so if we assign more work-items to a work-group, each work-item gets fewer registers; conversely, if we assign fewer work-items to a work-group, each work-item gets more registers. Therefore, to let the device execute a kernel as efficiently as possible, we should generally size our resource allocations according to the attribute values queried from the kernel object itself.
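For comparison, a minimal sketch of the device-level queries mentioned above might look like the following (assuming a device_id has already been obtained, as in the full listing further down); these values describe the device's overall capabilities, not any particular kernel:

// A sketch: device-wide attribute queries via clGetDeviceInfo
cl_uint numComputeUnits = 0;
clGetDeviceInfo(device_id, CL_DEVICE_MAX_COMPUTE_UNITS,
                sizeof(numComputeUnits), &numComputeUnits, NULL);

size_t maxDeviceWorkGroupSize = 0;
clGetDeviceInfo(device_id, CL_DEVICE_MAX_WORK_GROUP_SIZE,
                sizeof(maxDeviceWorkGroupSize), &maxDeviceWorkGroupSize, NULL);

cl_ulong localMemSize = 0;
clGetDeviceInfo(device_id, CL_DEVICE_LOCAL_MEM_SIZE,
                sizeof(localMemSize), &localMemSize, NULL);

printf("Compute units: %u, max work-group size: %zu, local memory: %llu bytes\n",
       numComputeUnits, maxDeviceWorkGroupSize, (unsigned long long)localMemSize);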
In OpenCL, we can use the clGetKernelWorkGroupInfo API to query how many work-items can be dispatched per work-group for the current kernel, and then derive from that how many work-items should be used in total. For brevity, the example below sets the global work size equal to the work-group size, so only a single work-group is used.
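For a real workload with more elements than one work-group covers, the derivation mentioned above might look like the following sketch (totalElements is a hypothetical problem size introduced only for illustration; the full listing below deliberately sticks to a single work-group):

// A sketch: round the global size up to a multiple of the queried work-group size.
size_t totalElements = 1000;        // hypothetical problem size
size_t localSize = workgroupSize;   // from CL_KERNEL_WORK_GROUP_SIZE
size_t globalSize = (totalElements + localSize - 1) / localSize * localSize;

// The kernel would then guard against the padded extra work-items, e.g.:
// if(get_global_id(0) >= totalElements) return;
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
                             (const size_t[]){globalSize},
                             (const size_t[]){localSize}, 0, NULL, NULL);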
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif

#define var __auto_type

int main(void)
{
    cl_platform_id platform_id = NULL;
    cl_device_id device_id = NULL;
    cl_context context = NULL;
    cl_command_queue command_queue = NULL;
    cl_mem memObj = NULL;
    char *kernelSource = NULL;
    cl_program program = NULL;
    cl_kernel kernel = NULL;
    cl_int ret;

    // Obtain an OpenCL platform
    clGetPlatformIDs(1, &platform_id, NULL);
    if(platform_id == NULL)
    {
        puts("Get OpenCL platform failed!");
        goto FINISH;
    }

    // Obtain an OpenCL compute device; here we use a GPU-type device
    clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, NULL);
    if(device_id == NULL)
    {
        puts("No GPU available as a compute device!");
        goto FINISH;
    }

    // Create a context from the device ID
    context = clCreateContext(NULL, 1, (const cl_device_id[]){device_id}, NULL, NULL, &ret);
    if(context == NULL)
    {
        puts("Context not established!");
        goto FINISH;
    }

    // Create a command queue from the context and device ID
    command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
    if(command_queue == NULL)
    {
        puts("Command queue cannot be created!");
        goto FINISH;
    }

    // Allocate a host array as test data and initialize it
    float hostBuffer[64];
    for(int i = 0; i < 64; i++)
        hostBuffer[i] = 1.0f;

    memObj = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(hostBuffer), NULL, &ret);
    if(memObj == NULL)
    {
        puts("memory object failed to create!");
        goto FINISH;
    }

    // Transfer the data to memObj
    ret = clEnqueueWriteBuffer(command_queue, memObj, CL_TRUE, 0, sizeof(hostBuffer),
                               hostBuffer, 0, NULL, NULL);
    if(ret != CL_SUCCESS)
    {
        puts("Data transfer failed");
        goto FINISH;
    }

    // Path to the kernel source file; change it to match your own environment.
    // An absolute path is used here to avoid calling different APIs on different
    // systems to obtain the current directory.
    var pFileName = "/Users/zennychen/Desktop/test.cl";

    // Read the contents of the kernel source file
    var fp = fopen(pFileName, "r");
    if (fp == NULL)
    {
        puts("The specified kernel source file cannot be opened!");
        goto FINISH;
    }
    fseek(fp, 0, SEEK_END);
    const long kernelLength = ftell(fp);
    fseek(fp, 0, SEEK_SET);

    kernelSource = malloc(kernelLength);
    fread(kernelSource, 1, kernelLength, fp);
    fclose(fp);

    // Create the program object
    program = clCreateProgramWithSource(context, 1, (const char*[]){kernelSource},
                                        (const size_t[]){kernelLength}, &ret);

    // Build the program
    ret = clBuildProgram(program, 1, (const cl_device_id[]){device_id}, NULL, NULL, NULL);
    if (ret != CL_SUCCESS)
    {
        size_t len = 0;
        char buffer[8 * 1024];

        printf("Error: Failed to build program executable!\n");
        clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG,
                              sizeof(buffer), buffer, &len);
        printf("%s\n", buffer);
        goto FINISH;
    }

    // kernelSource is no longer needed, so it can be released right away
    free(kernelSource);
    kernelSource = NULL;

    // Create the kernel object
    kernel = clCreateKernel(program, "test", &ret);
    if(kernel == NULL)
    {
        puts("Kernel failed to create!");
        goto FINISH;
    }

    // Query the maximum work-group size available in the current execution context
    size_t workgroupSize = 0;
    ret = clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_WORK_GROUP_SIZE,
                                   sizeof(workgroupSize), &workgroupSize, NULL);
    if(ret != CL_SUCCESS)
    {
        puts("Query max workgroup size failed!");
        goto FINISH;
    }
    printf("Current work-group size: %zu\n", workgroupSize);

    // Query the private memory size used in the current execution context
    cl_ulong privateMemSize = 0;
    ret = clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_PRIVATE_MEM_SIZE,
                                   sizeof(privateMemSize), &privateMemSize, NULL);
    if(ret != CL_SUCCESS)
    {
        puts("Query private memory size failed!");
        goto FINISH;
    }
    printf("Current private memory size: %llu bytes\n", (unsigned long long)privateMemSize);

    // Argument 0 is the read-write buffer object; argument 1 is local memory of the
    // specified size; argument 2 is the private memory size
    ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void*)&memObj);
    ret |= clSetKernelArg(kernel, 1, sizeof(float) * 4 * 16, NULL);
    ret |= clSetKernelArg(kernel, 2, sizeof(privateMemSize), &privateMemSize);
    if(ret != CL_SUCCESS)
    {
        puts("Set arguments error!");
        goto FINISH;
    }

    // Enqueue the kernel execution command
    ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
                                 (const size_t[]){workgroupSize},
                                 (const size_t[]){workgroupSize}, 0, NULL, NULL);
    if(ret != CL_SUCCESS)
    {
        puts("kernel1 execution failed");
        goto FINISH;
    }

    // Use clFinish to synchronize on command completion
    clFinish(command_queue);

    // Read back the result
    float result = 0.0f;
    ret = clEnqueueReadBuffer(command_queue, memObj, CL_TRUE, 0, sizeof(result),
                              &result, 0, NULL, NULL);
    if(ret != CL_SUCCESS)
        puts("Fetch result failed!");
    else
        printf("Result = %f\n", result);

FINISH:
    if(kernelSource != NULL)
        free(kernelSource);
    if(memObj != NULL)
        clReleaseMemObject(memObj);
    if(kernel != NULL)
        clReleaseKernel(kernel);
    if(program != NULL)
        clReleaseProgram(program);
    if(command_queue != NULL)
        clReleaseCommandQueue(command_queue);
    if(context != NULL)
        clReleaseContext(context);

    puts("Program complete");

    return 0;
}
The kernel source file, test.cl, is given below. Note that after saving your own test.cl, you need to change the test.cl path in the code above to the path of test.cl in your own environment.
kernel void test(global float4 *pMemBuf, local float4 *pLocalMem, ulong privateSize)
{
    float4 v1, v2, v3, v4, v5, v6, v7, v8;
    float4 v9, v10, v11, v12, v13, v14, v15, v16;

    v1 = pMemBuf[0];    v2 = pMemBuf[1];    v3 = pMemBuf[2];    v4 = pMemBuf[3];
    v5 = pMemBuf[4];    v6 = pMemBuf[5];    v7 = pMemBuf[6];    v8 = pMemBuf[7];
    v9 = pMemBuf[8];    v10 = pMemBuf[9];   v11 = pMemBuf[10];  v12 = pMemBuf[11];
    v13 = pMemBuf[12];  v14 = pMemBuf[13];  v15 = pMemBuf[14];  v16 = pMemBuf[15];

    for(int i = 0; i < 2; i++)
    {
        v1 = fma(v1, v1, v1);       v2 = fma(v2, v2, v2);
        v3 = fma(v3, v3, v3);       v4 = fma(v4, v4, v4);
        v5 = fma(v5, v5, v5);       v6 = fma(v6, v6, v6);
        v7 = fma(v7, v7, v7);       v8 = fma(v8, v8, v8);
        v9 = fma(v9, v9, v9);       v10 = fma(v10, v10, v10);
        v11 = fma(v11, v11, v11);   v12 = fma(v12, v12, v12);
        v13 = fma(v13, v13, v13);   v14 = fma(v14, v14, v14);
        v15 = fma(v15, v15, v15);   v16 = fma(v16, v16, v16);
    }

    pLocalMem[0] = v1;    pLocalMem[1] = v2;    pLocalMem[2] = v3;    pLocalMem[3] = v4;
    pLocalMem[4] = v5;    pLocalMem[5] = v6;    pLocalMem[6] = v7;    pLocalMem[7] = v8;
    pLocalMem[8] = v9;    pLocalMem[9] = v10;   pLocalMem[10] = v11;  pLocalMem[11] = v12;
    pLocalMem[12] = v13;  pLocalMem[13] = v14;  pLocalMem[14] = v15;  pLocalMem[15] = v16;

    barrier(CLK_LOCAL_MEM_FENCE);

    float4 sum = 0.0f;
    for(int i = 0; i < 16; i++)
        sum += pLocalMem[i];

    pMemBuf[0] = sum;
}
Now let's look at how the Metal API handles this. Metal is a lower-level API than OpenCL and provides a very direct interface for accessing the GPU. However, whereas OpenCL can target all kinds of compute devices, the Metal API works only with GPUs. In Metal, we query the maxTotalThreadsPerThreadgroup property of an id<MTLComputePipelineState> object to find out how many threads a threadgroup can hold at most in the current execution context.
//
//  main.m
//  MetalTest
//
//  Created by Zenny Chen on 2018/2/12.
//  Copyright © 2018 GreenGames Studio. All rights reserved.
//

@import Foundation;
@import Metal;

#define var __auto_type

int main(int argc, const char * argv[])
{
    @autoreleasepool
    {
        // Create the default compute device
        var device = MTLCreateSystemDefaultDevice();

        // Create the library
        var library = device.newDefaultLibrary;

        // Create the compute function
        var function = [library newFunctionWithName:@"test"];
        [library release];

        // Create the compute pipeline
        var pipelineState = [device newComputePipelineStateWithFunction:function error:NULL];
        [function release];

        // Get the maximum number of threads a threadgroup can hold in the current context
        const var threadgroupSize = pipelineState.maxTotalThreadsPerThreadgroup;
        NSLog(@"Current threadgroup size: %tu", threadgroupSize);

        // Create the command queue
        var commandQueue = device.newCommandQueue;

        // Initialize the data
        float hostBuffer[64];
        for(int i = 0; i < 64; i++)
            hostBuffer[i] = 1.0f;

        // Create the buffer object
        var memBuffer = [device newBufferWithBytes:hostBuffer length:sizeof(hostBuffer)
                                           options:MTLResourceStorageModeShared];

        // Get the command buffer
        var commandBuffer = commandQueue.commandBuffer;

        // Get the command encoder and set its pipeline state
        var commandEncoder = commandBuffer.computeCommandEncoder;
        [commandEncoder setComputePipelineState:pipelineState];

        // Set the arguments on the command encoder.
        // The argument order seen in the Metal shading file follows this order.
        [commandEncoder setBuffer:memBuffer offset:0 atIndex:0];
        [commandEncoder setThreadgroupMemoryLength:sizeof(float) * 4 * 16 atIndex:0];

        MTLSize threadsPerGroup = {threadgroupSize, 1, 1};
        MTLSize nThreadgroups = {1, 1, 1};

        // Dispatch the compute threads
        [commandEncoder dispatchThreadgroups:nThreadgroups threadsPerThreadgroup:threadsPerGroup];
        [commandEncoder endEncoding];

        // Commit the command buffer
        [commandBuffer commit];

        // Block the current thread until the commands have fully completed,
        // then continue with the subsequent instructions
        [commandBuffer waitUntilCompleted];

        NSLog(@"The value is: %f", *(float*)memBuffer.contents);

        // Release resources
        [memBuffer release];
        [pipelineState release];
        [commandQueue release];
        [device release];
    }
    return 0;
}
Finally, here is the code of the Metal shader file:
#include <metal_stdlib>
using namespace metal;

kernel void test(device float4 *memBuffer [[ buffer(0) ]],
                 threadgroup float4 *localBuffer [[ threadgroup(0) ]])
{
    auto v1 = memBuffer[0];    auto v2 = memBuffer[1];
    auto v3 = memBuffer[2];    auto v4 = memBuffer[3];
    auto v5 = memBuffer[4];    auto v6 = memBuffer[5];
    auto v7 = memBuffer[6];    auto v8 = memBuffer[7];
    auto v9 = memBuffer[8];    auto v10 = memBuffer[9];
    auto v11 = memBuffer[10];  auto v12 = memBuffer[11];
    auto v13 = memBuffer[12];  auto v14 = memBuffer[13];
    auto v15 = memBuffer[14];  auto v16 = memBuffer[15];

    for(int i = 0; i < 2; i++)
    {
        v1 = fma(v1, v1, v1);       v2 = fma(v2, v2, v2);
        v3 = fma(v3, v3, v3);       v4 = fma(v4, v4, v4);
        v5 = fma(v5, v5, v5);       v6 = fma(v6, v6, v6);
        v7 = fma(v7, v7, v7);       v8 = fma(v8, v8, v8);
        v9 = fma(v9, v9, v9);       v10 = fma(v10, v10, v10);
        v11 = fma(v11, v11, v11);   v12 = fma(v12, v12, v12);
        v13 = fma(v13, v13, v13);   v14 = fma(v14, v14, v14);
        v15 = fma(v15, v15, v15);   v16 = fma(v16, v16, v16);
    }

    localBuffer[0] = v1;    localBuffer[1] = v2;    localBuffer[2] = v3;    localBuffer[3] = v4;
    localBuffer[4] = v5;    localBuffer[5] = v6;    localBuffer[6] = v7;    localBuffer[7] = v8;
    localBuffer[8] = v9;    localBuffer[9] = v10;   localBuffer[10] = v11;  localBuffer[11] = v12;
    localBuffer[12] = v13;  localBuffer[13] = v14;  localBuffer[14] = v15;  localBuffer[15] = v16;

    threadgroup_barrier(mem_flags::mem_threadgroup);

    float4 sum = 0.0f;
    for(int i = 0; i < 16; i++)
        sum += localBuffer[i];

    memBuffer[0] = sum;
}
By now, we should have a grasp of the basic, correct approach to using OpenCL and the Metal API for high-performance computing. That said, don't apply this one pattern blindly; adapting it to the situation at hand matters more.