// MP 3: Tiled matrix multiplication (C = A * B) using shared memory.
// MP 3: Due Sunday, Dec 30, 2012 at 11:59 p.m. PST
#include <wb.h>
// Check a CUDA runtime call: on failure, log the failing statement together
// with the decoded CUDA error string, then bail out of the calling function.
// (The original logged only the statement text, hiding the actual error.)
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error: ", cudaGetErrorString(err)); \
return -1; \
} \
} while(0)
#define TILE_WIDTH 16
#define TILE_HEIGHT 16
// Compute C = A * B
// Compute C = A * B with shared-memory tiling.
// Launch config: blockDim = (TILE_WIDTH, TILE_HEIGHT); one thread per element
// of C; gridDim covers ceil(numCColumns/TILE_WIDTH) x ceil(numCRows/TILE_HEIGHT).
// NOTE: this loading scheme requires TILE_WIDTH == TILE_HEIGHT (both 16 here),
// because the same thread coordinates index the k dimension of both tiles.
// Out-of-range tile elements are zero-padded, so arbitrary (non-multiple-of-
// tile) matrix dimensions are handled correctly.
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns) {
    // Ads: [row-in-tile][k-in-tile] of A; Bds: [k-in-tile][col-in-tile] of B.
    __shared__ float Ads[TILE_HEIGHT][TILE_WIDTH];
    __shared__ float Bds[TILE_WIDTH][TILE_WIDTH];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = blockIdx.y * TILE_HEIGHT + ty;  // row of C this thread computes
    int Col = blockIdx.x * TILE_WIDTH + tx;   // column of C this thread computes

    float Pvalue = 0.0f;

    // Exact integer ceil-division for the tile count (the original used a
    // float ceil(), which risks rounding issues for large dimensions).
    int numTiles = (numAColumns + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int m = 0; m < numTiles; ++m) {
        // Cooperatively stage one tile of A and one tile of B, zero-padding
        // out-of-range elements so the inner product needs no per-k guards.
        int aCol = m * TILE_WIDTH + tx;
        Ads[ty][tx] = (Row < numARows && aCol < numAColumns)
                          ? A[Row * numAColumns + aCol]
                          : 0.0f;
        int bRow = m * TILE_WIDTH + ty;
        Bds[ty][tx] = (bRow < numBRows && Col < numBColumns)
                          ? B[bRow * numBColumns + Col]
                          : 0.0f;
        __syncthreads();  // tiles fully written before any thread reads them

        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Ads[ty][k] * Bds[k][tx];
        }
        __syncthreads();  // all reads done before the next iteration overwrites
    }

    // Single guarded store after the full dot product. (The original wrote a
    // partial Pvalue to global memory on every tile iteration — the final
    // value was correct, but all earlier stores were redundant traffic.)
    if (Row < numCRows && Col < numCColumns) {
        C[Row * numCColumns + Col] = Pvalue;
    }
}
// Host driver: import A and B, run the tiled matrix-multiply kernel on the
// GPU, and hand the result C to the wb framework for checking.
int main(int argc, char ** argv) {
    wbArg_t args;
    float * hostA;   // The A matrix
    float * hostB;   // The B matrix
    float * hostC;   // The output C matrix
    float * deviceA;
    float * deviceB;
    float * deviceC;
    int numARows;    // number of rows in the matrix A
    int numAColumns; // number of columns in the matrix A
    int numBRows;    // number of rows in the matrix B
    int numBColumns; // number of columns in the matrix B
    int numCRows;    // number of rows in the matrix C
    int numCColumns; // number of columns in the matrix C

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
    hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
    // C = A * B, so C is numARows x numBColumns (assumes numAColumns == numBRows).
    numCRows = numARows;
    numCColumns = numBColumns;
    // size_t arithmetic avoids int overflow for large matrices.
    size_t sizeA = (size_t) numARows * numAColumns * sizeof(float);
    size_t sizeB = (size_t) numBRows * numBColumns * sizeof(float);
    size_t sizeC = (size_t) numCRows * numCColumns * sizeof(float);
    hostC = (float *) malloc(sizeC);
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
    wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);

    wbTime_start(GPU, "Allocating GPU memory.");
    // Every CUDA call is routed through wbCheck (the original defined the
    // macro but never used it, so failures went undetected).
    wbCheck(cudaMalloc((void**)&deviceA, sizeA));
    wbCheck(cudaMalloc((void**)&deviceB, sizeB));
    wbCheck(cudaMalloc((void**)&deviceC, sizeC));
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    wbCheck(cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice));
    wbCheck(cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice));
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    // Block x spans columns (TILE_WIDTH), block y spans rows (TILE_HEIGHT) —
    // the original swapped these (harmless only because the tiles are square).
    // Integer ceil-division replaces the float ceil() for the grid dimensions.
    dim3 dimBlock(TILE_WIDTH, TILE_HEIGHT, 1);
    dim3 dimGrid((numCColumns + TILE_WIDTH - 1) / TILE_WIDTH,
                 (numCRows + TILE_HEIGHT - 1) / TILE_HEIGHT, 1);

    wbTime_start(Compute, "Performing CUDA computation");
    matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,
                                                numARows, numAColumns,
                                                numBRows, numBColumns,
                                                numCRows, numCColumns);
    wbCheck(cudaGetLastError());        // catch launch-configuration errors
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() also
    // surfaces asynchronous kernel-execution errors through wbCheck.
    wbCheck(cudaDeviceSynchronize());
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    wbCheck(cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost));
    wbTime_stop(Copy, "Copying output memory to the CPU");

    wbTime_start(GPU, "Freeing GPU Memory");
    wbCheck(cudaFree(deviceA));
    wbCheck(cudaFree(deviceB));
    wbCheck(cudaFree(deviceC));
    wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostC, numCRows, numCColumns);

    free(hostA);
    free(hostB);
    free(hostC);
    return 0;
}