diff --git a/README.md b/README.md
index 0e38ddb..665a0fc 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,85 @@ CUDA Stream Compaction
 
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Xiao Wei
+* Tested on: Windows 10, i9-9900k @ 3.6GHz 16.0GB, RTX 2080 SUPER 16GB
 
-### (TODO: Your README)
-
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+Features
+======================
+* CPU scan and stream compaction
+* Naive GPU scan
+* Work-efficient GPU scan and stream compaction
+* Thrust scan
+
+Performance Analysis
+======================
+![PROJECT2](https://user-images.githubusercontent.com/66859615/135018239-b5681125-c5f1-414e-8d9b-87430e9eecd0.jpg)
+
+The measurements show that runtime grows more slowly with array size for the GPU methods than for the CPU method, so the GPU implementations should pull ahead once the array becomes very large.
+
+Nsight profiling shows that memory I/O is the main bottleneck; the Thrust implementation handles this better than my own kernels.
+
+![Nsight profiling](https://user-images.githubusercontent.com/66859615/135020062-e14f2ec4-ba5f-4e27-8364-695a14b27ab8.png)
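+
+As a quick illustration of what these operations compute (a small made-up example, not taken from the test output below): an exclusive scan replaces each element with the sum of everything before it, and stream compaction keeps only the non-zero elements, packed together in order.
+
+```
+input:           [ 3 1 7  0  4  1  6  3 ]
+exclusive scan:  [ 0 3 4 11 11 15 16 22 ]
+compacted (!=0): [ 3 1 7  4  1  6  3 ]
+```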
+
+Output example:
+```
+****************
+** SCAN TESTS **
+****************
+    [  15  15  16   4  18  27   3  37   8  13  32  30  16 ...  46   0 ]
+==== cpu scan, power-of-two ====
+   elapsed time: 0.0014ms    (std::chrono Measured)
+    [   0  15  30  46  50  68  95  98 135 143 156 188 218 ... 25300 25346 ]
+==== cpu scan, non-power-of-two ====
+   elapsed time: 0.0016ms    (std::chrono Measured)
+    [   0  15  30  46  50  68  95  98 135 143 156 188 218 ... 25262 25280 ]
+    passed
+==== naive scan, power-of-two ====
+   elapsed time: 0.020672ms    (CUDA Measured)
+    passed
+==== naive scan, non-power-of-two ====
+   elapsed time: 0.018944ms    (CUDA Measured)
+    passed
+==== work-efficient scan, power-of-two ====
+   elapsed time: 0.053248ms    (CUDA Measured)
+    passed
+==== work-efficient scan, non-power-of-two ====
+   elapsed time: 0.05184ms    (CUDA Measured)
+    passed
+==== thrust scan, power-of-two ====
+   elapsed time: 0.044032ms    (CUDA Measured)
+    passed
+==== thrust scan, non-power-of-two ====
+   elapsed time: 0.044896ms    (CUDA Measured)
+    passed
+
+*****************************
+** STREAM COMPACTION TESTS **
+*****************************
+    [   3   1   0   0   0   3   1   3   0   3   2   2   0 ...   0   0 ]
+==== cpu compact without scan, power-of-two ====
+   elapsed time: 0.0022ms    (std::chrono Measured)
+    [   3   1   3   1   3   3   2   2   1   1   2   1   1 ...   3   2 ]
+    passed
+==== cpu compact without scan, non-power-of-two ====
+   elapsed time: 0.0017ms    (std::chrono Measured)
+    [   3   1   3   1   3   3   2   2   1   1   2   1   1 ...   3   2 ]
+    passed
+==== cpu compact with scan ====
+   elapsed time: 0.0044ms    (std::chrono Measured)
+    [   3   1   3   1   3   3   2   2   1   1   2   1   1 ...   3   2 ]
+    passed
+==== work-efficient compact, power-of-two ====
+   elapsed time: 0.058144ms    (CUDA Measured)
+    passed
+==== work-efficient compact, non-power-of-two ====
+   elapsed time: 0.057856ms    (CUDA Measured)
+```
diff --git a/src/main.cpp b/src/main.cpp
index 896ac2b..c158c99 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -13,7 +13,7 @@
 #include <stream_compaction/thrust.h>
 #include "testing_helpers.hpp"
 
-const int SIZE = 1 << 8; // feel free to change the size of array
+const int SIZE = 1 << 10; // feel free to change the size of array
 const int NPOT = SIZE - 3; // Non-Power-Of-Two
 int *a = new int[SIZE];
 int *b = new int[SIZE];
diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu
index 2ed6d63..4b33675 100644
--- a/stream_compaction/common.cu
+++ b/stream_compaction/common.cu
@@ -24,6 +24,18 @@ namespace StreamCompaction {
          */
         __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
             // TODO
+            int index = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (index >= n) {
+                return;
+            }
+
+            // 1 if the element should be kept (non-zero), 0 otherwise.
+            if (idata[index] == 0) {
+                bools[index] = 0;
+            }
+            else {
+                bools[index] = 1;
+            }
         }
 
         /**
@@ -33,6 +45,15 @@ namespace StreamCompaction {
         __global__ void kernScatter(int n, int *odata, const int *idata,
                 const int *bools, const int *indices) {
             // TODO
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+
+            // Write each kept element to its compacted position from the scan.
+            if (bools[index] == 1) {
+                odata[indices[index]] = idata[index];
+            }
         }
 
     }
diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu
index 719fa11..3214cf5 100644
--- a/stream_compaction/cpu.cu
+++ b/stream_compaction/cpu.cu
@@ -20,6 +20,14 @@ namespace StreamCompaction {
         void scan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
             // TODO
+            if (n <= 0) {
+                timer().endCpuTimer();
+                return;
+            }
+            // Exclusive scan: odata[i] is the sum of idata[0..i-1].
+            odata[0] = 0;
+            for (int i = 1; i < n; i++) {
+                odata[i] = idata[i - 1] + odata[i - 1];
+            }
             timer().endCpuTimer();
         }
 
@@ -31,8 +39,17 @@ namespace StreamCompaction {
         int compactWithoutScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
             // TODO
+            // Copy every non-zero element forward, tracking the next write index.
+            int writeIndex = 0;
+            for (int i = 0; i < n; i++) {
+                if (idata[i] != 0) {
+                    odata[writeIndex] = idata[i];
+                    writeIndex++;
+                }
+            }
             timer().endCpuTimer();
-            return -1;
+            return writeIndex;
         }
 
         /**
@@ -43,8 +60,37 @@ namespace StreamCompaction {
         int compactWithScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
             // TODO
+            // Map: 1 for elements to keep, 0 for elements to discard.
+            int* tempArr = new int[n];
+            for (int i = 0; i < n; i++) {
+                if (idata[i] == 0) {
+                    tempArr[i] = 0;
+                }
+                else {
+                    tempArr[i] = 1;
+                }
+            }
+
+            // Exclusive scan of the flags gives each kept element's output index.
+            int* scanned = new int[n];
+            scanned[0] = 0;
+            for (int i = 1; i < n; i++) {
+                scanned[i] = scanned[i - 1] + tempArr[i - 1];
+            }
+
+            // Scatter the kept elements to their scanned positions.
+            int result = 0;
+            for (int i = 0; i < n; i++) {
+                if (tempArr[i] == 1) {
+                    odata[scanned[i]] = idata[i];
+                    result++;
+                }
+            }
+            delete[] tempArr;
+            delete[] scanned;
+
             timer().endCpuTimer();
-            return -1;
+            return result;
         }
     }
 }
diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu
index 2db346e..ffa8ce4 100644
--- a/stream_compaction/efficient.cu
+++ b/stream_compaction/efficient.cu
@@ -12,13 +12,95 @@ namespace StreamCompaction {
             return timer;
         }
 
+        // Up-sweep (reduce) phase of the work-efficient scan:
+        //   for d = 0 to log2(n) - 1:
+        //     for all k = 0 to n - 1 by 2^(d+1) in parallel:
+        //       x[k + 2^(d+1) - 1] += x[k + 2^d - 1]
+        __global__ void kernUpSweep(int* data, int d, int maxSize) {
+            int index = (blockIdx.x * blockDim.x) + threadIdx.x;
+
+            // Each thread owns one node at stride 2^(d+1).
+            int powD = 1 << d;
+            int powDplusOne = 1 << (d + 1);
+            int selected = index * powDplusOne;
+            if (selected >= maxSize) {
+                return;
+            }
+
+            data[selected + powDplusOne - 1] += data[selected + powD - 1];
+        }
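+
+        // Worked example of the up-sweep on eight elements (illustrative values,
+        // not taken from this project's test data):
+        //   initial:      [3 1 7  0 4 1 6  3]
+        //   after d = 0:  [3 4 7  7 4 5 6  9]
+        //   after d = 1:  [3 4 7 11 4 5 6 14]
+        //   after d = 2:  [3 4 7 11 4 5 6 25]   <- last element now holds the total sum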
+
+        // Down-sweep phase of the work-efficient scan:
+        //   x[n - 1] = 0
+        //   for d = log2(n) - 1 down to 0:
+        //     for all k = 0 to n - 1 by 2^(d+1) in parallel:
+        //       t = x[k + 2^d - 1]                    // save left child
+        //       x[k + 2^d - 1] = x[k + 2^(d+1) - 1]   // set left child to this node's value
+        //       x[k + 2^(d+1) - 1] += t               // set right child to old left value + this node's value
+        __global__ void kernDownSweep(int* data, int d, int maxSize) {
+            int index = (blockIdx.x * blockDim.x) + threadIdx.x;
+
+            int powD = 1 << d;
+            int powDplusOne = 1 << (d + 1);
+            int selected = index * powDplusOne;
+            if (selected >= maxSize) {
+                return;
+            }
+
+            // Swap-and-accumulate: left child takes this node's value,
+            // right child takes old left value plus this node's value.
+            int temp = data[selected + powD - 1];
+            data[selected + powD - 1] = data[selected + powDplusOne - 1];
+            data[selected + powDplusOne - 1] += temp;
+        }
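+
+        // Continuing the worked example above: zero the last element, then down-sweep
+        // (illustrative values, not taken from this project's test data):
+        //   start:        [3 4 7 11  4  5  6  0]
+        //   after d = 2:  [3 4 7  0  4  5  6 11]
+        //   after d = 1:  [3 0 7  4  4 11  6 16]
+        //   after d = 0:  [0 3 4 11 11 15 16 22]   <- exclusive scan of the original input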
 
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            // Pad the working size up to the next power of two so the
+            // balanced-tree sweeps always operate on a complete tree.
+            int totalD = ilog2ceil(n);
+            int maxSize = 1 << totalD;
+            int blockSize = 128;
+            dim3 fullBlocksPerGrid((maxSize + blockSize - 1) / blockSize);
+
+            int* device_idata;
+            cudaMalloc((void**)&device_idata, maxSize * sizeof(int));
+
+            // Zero the padded tail, then copy the input to the device.
+            cudaMemset(device_idata, 0, maxSize * sizeof(int));
+            cudaMemcpy(device_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
             timer().startGpuTimer();
             // TODO
+            for (int d = 0; d <= totalD - 1; d++) {
+                kernUpSweep<<<fullBlocksPerGrid, blockSize>>>(device_idata, d, maxSize);
+            }
+
+            // Zero the root, then sweep back down to produce the exclusive scan.
+            cudaMemset(device_idata + maxSize - 1, 0, sizeof(int));
+            for (int d = totalD - 1; d >= 0; d--) {
+                kernDownSweep<<<fullBlocksPerGrid, blockSize>>>(device_idata, d, maxSize);
+            }
             timer().endGpuTimer();
+
+            cudaMemcpy(odata, device_idata, n * sizeof(int), cudaMemcpyDeviceToHost);
+            cudaFree(device_idata);
         }
 
         /**
@@ -31,10 +113,58 @@ namespace StreamCompaction {
          * @returns      The number of elements remaining after compaction.
          */
         int compact(int n, int *odata, const int *idata) {
+            int totalD = ilog2ceil(n);
+            int maxSize = 1 << totalD;
+            int blockSize = 128;
+
+            dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
+            dim3 scanBlocksPerGrid((maxSize + blockSize - 1) / blockSize);
+
+            int* device_idata;
+            int* device_mappedArr;
+            int* device_scannedArr;
+            int* device_odata;
+
+            cudaMalloc((void**)&device_idata, maxSize * sizeof(int));
+            cudaMalloc((void**)&device_mappedArr, maxSize * sizeof(int));
+            cudaMalloc((void**)&device_scannedArr, maxSize * sizeof(int));
+            cudaMalloc((void**)&device_odata, maxSize * sizeof(int));
+
+            // Zero the padded boolean array so elements past n never count as "keep".
+            cudaMemset(device_mappedArr, 0, maxSize * sizeof(int));
+            cudaMemcpy(device_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
             timer().startGpuTimer();
             // TODO
+            // Map each element to a 0/1 keep flag.
+            Common::kernMapToBoolean<<<fullBlocksPerGrid, blockSize>>>(n, device_mappedArr, device_idata);
+
+            // Exclusive scan of the flags (up-sweep + down-sweep) gives output indices.
+            cudaMemcpy(device_scannedArr, device_mappedArr, maxSize * sizeof(int), cudaMemcpyDeviceToDevice);
+            for (int d = 0; d <= totalD - 1; d++) {
+                kernUpSweep<<<scanBlocksPerGrid, blockSize>>>(device_scannedArr, d, maxSize);
+            }
+            cudaMemset(device_scannedArr + maxSize - 1, 0, sizeof(int));
+            for (int d = totalD - 1; d >= 0; d--) {
+                kernDownSweep<<<scanBlocksPerGrid, blockSize>>>(device_scannedArr, d, maxSize);
+            }
+
+            // Scatter the kept elements to their scanned positions.
+            Common::kernScatter<<<fullBlocksPerGrid, blockSize>>>(n, device_odata, device_idata, device_mappedArr, device_scannedArr);
             timer().endGpuTimer();
-            return -1;
+
+            // The compacted count is the last scanned index plus the last keep flag.
+            int lastScan = 0;
+            int lastBool = 0;
+            cudaMemcpy(&lastScan, device_scannedArr + maxSize - 1, sizeof(int), cudaMemcpyDeviceToHost);
+            cudaMemcpy(&lastBool, device_mappedArr + maxSize - 1, sizeof(int), cudaMemcpyDeviceToHost);
+            int count = lastScan + lastBool;
+
+            cudaMemcpy(odata, device_odata, count * sizeof(int), cudaMemcpyDeviceToHost);
+            cudaFree(device_idata);
+            cudaFree(device_mappedArr);
+            cudaFree(device_scannedArr);
+            cudaFree(device_odata);
+
+            return count;
         }
     }
 }
diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu
index 4308876..97a0919 100644
--- a/stream_compaction/naive.cu
+++ b/stream_compaction/naive.cu
@@ -11,15 +11,88 @@ namespace StreamCompaction {
             static PerformanceTimer timer;
             return timer;
         }
-        // TODO: __global__
+        // TODO:
+
+        // Naive scan (one kernel launch per depth d):
+        //   for d = 1 to log2(n):
+        //     for all k in parallel:
+        //       if k >= 2^(d-1): x[out][k] = x[in][k - 2^(d-1)] + x[in][k]
+        //       else:            x[out][k] = x[in][k]
+        __global__ void kernNaiveScan(const int n, const int d, int* odata, const int* idata) {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            int dPow = 1 << (d - 1);
+
+            if (index >= dPow) {
+                odata[index] = idata[index - dPow] + idata[index];
+            }
+            else {
+                odata[index] = idata[index];
+            }
+        }
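+
+        // Worked example of the naive inclusive scan (illustrative values,
+        // not taken from this project's test data):
+        //   input:        [3 1  7  0  4  1  6  3]
+        //   after d = 1:  [3 4  8  7  4  5  7  9]
+        //   after d = 2:  [3 4 11 11 12 12 11 14]
+        //   after d = 3:  [3 4 11 11 15 16 22 25]   <- inclusive scan; shifting it right
+        //                                              by one with a leading 0 yields the
+        //                                              exclusive scan.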
 
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            int blockSize = 128;
+            dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
+
+            int* device_idata;
+            int* device_odata;
+
+            cudaMalloc((void**)&device_idata, n * sizeof(int));
+            cudaMalloc((void**)&device_odata, n * sizeof(int));
+
+            cudaMemcpy(device_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+            cudaMemcpy(device_odata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
             timer().startGpuTimer();
             // TODO
+            // Ping-pong between the two device buffers, one launch per depth.
+            for (int d = 1; d <= ilog2ceil(n); d++) {
+                kernNaiveScan<<<fullBlocksPerGrid, blockSize>>>(n, d, device_odata, device_idata);
+                int* temp = device_idata;
+                device_idata = device_odata;
+                device_odata = temp;
+            }
             timer().endGpuTimer();
+
+            cudaDeviceSynchronize();
+
+            // The loop leaves an inclusive scan in device_idata (after the final swap);
+            // shift it right by one and prepend 0 to make it exclusive.
+            odata[0] = 0;
+            cudaMemcpy(odata + 1, device_idata, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
+
+            cudaFree(device_idata);
+            cudaFree(device_odata);
         }
     }
 }
+
+// Kept for reference (not used): shared-memory naive scan from GPU Gems 3, Chapter 39.
+//__global__ void scan(float* g_odata, float* g_idata, int n) {
+//    extern __shared__ float temp[]; // allocated on invocation
+//    int thid = threadIdx.x;
+//    int pout = 0, pin = 1;
+//    // Load input into shared memory.
+//    // This is exclusive scan, so shift right by one and set first element to 0.
+//    temp[pout*n + thid] = (thid > 0) ? g_idata[thid - 1] : 0;
+//    __syncthreads();
+//    for (int offset = 1; offset < n; offset *= 2)
+//    {
+//        pout = 1 - pout; // swap double buffer indices
+//        pin = 1 - pout;
+//        if (thid >= offset)
+//            temp[pout*n + thid] += temp[pin*n + thid - offset];
+//        else
+//            temp[pout*n + thid] = temp[pin*n + thid];
+//        __syncthreads();
+//    }
+//    g_odata[thid] = temp[pout*n + thid]; // write output
+//}
\ No newline at end of file
diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu
index 1def45e..a837958 100644
--- a/stream_compaction/thrust.cu
+++ b/stream_compaction/thrust.cu
@@ -18,11 +18,20 @@ namespace StreamCompaction {
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            // Copy the input to the device before timing so the measurement
+            // covers only the scan itself.
+            thrust::host_vector<int> hostIdata(idata, idata + n);
+            thrust::device_vector<int> devIdata = hostIdata;
+            thrust::device_vector<int> devOdata(n);
+
             timer().startGpuTimer();
             // TODO use `thrust::exclusive_scan`
             // example: for device_vectors dv_in and dv_out:
             // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+            thrust::exclusive_scan(devIdata.begin(), devIdata.end(), devOdata.begin());
             timer().endGpuTimer();
+
+            thrust::copy(devOdata.begin(), devOdata.end(), odata);
         }
     }
 }