Project2: Xiao Wei #24

Open: wants to merge 4 commits into main
85 changes: 79 additions & 6 deletions README.md
@@ -3,12 +3,85 @@ CUDA Stream Compaction

**University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**

* (TODO) YOUR NAME HERE
* (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
* Xiao Wei
* Tested on: Windows 10, i9-9900k @ 3.6GHz 16.0GB, RTX 2080 SUPER 16GB

### (TODO: Your README)

Include analysis, etc. (Remember, this is public, so don't put
anything here that you don't want to share with the world.)
Features
======================
* CPU SCAN and Stream Compaction
* Naive GPU Scan
* Work-Efficient GPU Scan and Stream Compaction
* Thrust scan
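
The Thrust variant simply hands the whole scan to the library. A minimal sketch of that idea (illustrative only; it assumes the same `(n, odata, idata)` interface as the other scans, and `thrustScanSketch` is a made-up name, not necessarily the code in this project's thrust implementation):

```
// Illustrative sketch only; not taken verbatim from this repository.
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>

void thrustScanSketch(int n, int* odata, const int* idata) {
    thrust::device_vector<int> dev_in(idata, idata + n);   // host -> device
    thrust::device_vector<int> dev_out(n);
    thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin());
    thrust::copy(dev_out.begin(), dev_out.end(), odata);   // device -> host
}
```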

Performance Analysis
======================
![PROJECT2](https://user-images.githubusercontent.com/66859615/135018239-b5681125-c5f1-414e-8d9b-87430e9eecd0.jpg)

From the data, we can see that runtime grows more slowly with array size for the GPU methods than for the CPU scan, so the advantage of the GPU implementations will likely only show up once the array size becomes very large.

Nsight profiling shows that memory I/O is the main bottleneck; the Thrust implementation handles it better than my own kernels.

![Nsight profiling screenshot](https://user-images.githubusercontent.com/66859615/135020062-e14f2ec4-ba5f-4e27-8364-695a14b27ab8.png)
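
For reference, the "(CUDA Measured)" numbers in the output below are presumably produced with CUDA event timing around the kernels. The following is only a sketch of that general pattern, not this project's timer code:

```
// Minimal sketch of CUDA event timing (the usual way GPU-side milliseconds
// are measured); not taken from this project's timer implementation.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
// ... launch the kernels being measured ...
cudaEventRecord(stop);
cudaEventSynchronize(stop);

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);   // elapsed time in milliseconds

cudaEventDestroy(start);
cudaEventDestroy(stop);
```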


Output example:
```
****************
** SCAN TESTS **
****************
[ 15 15 16 4 18 27 3 37 8 13 32 30 16 ... 46 0 ]
==== cpu scan, power-of-two ====
elapsed time: 0.0014ms (std::chrono Measured)
[ 0 15 30 46 50 68 95 98 135 143 156 188 218 ... 25300 25346 ]
==== cpu scan, non-power-of-two ====
elapsed time: 0.0016ms (std::chrono Measured)
[ 0 15 30 46 50 68 95 98 135 143 156 188 218 ... 25262 25280 ]
passed
==== naive scan, power-of-two ====
elapsed time: 0.020672ms (CUDA Measured)
passed
==== naive scan, non-power-of-two ====
elapsed time: 0.018944ms (CUDA Measured)
passed
==== work-efficient scan, power-of-two ====
elapsed time: 0.053248ms (CUDA Measured)
passed
==== work-efficient scan, non-power-of-two ====
elapsed time: 0.05184ms (CUDA Measured)
passed
==== thrust scan, power-of-two ====
elapsed time: 0.044032ms (CUDA Measured)
passed
==== thrust scan, non-power-of-two ====
elapsed time: 0.044896ms (CUDA Measured)
passed

*****************************
** STREAM COMPACTION TESTS **
*****************************
[ 3 1 0 0 0 3 1 3 0 3 2 2 0 ... 0 0 ]
==== cpu compact without scan, power-of-two ====
elapsed time: 0.0022ms (std::chrono Measured)
[ 3 1 3 1 3 3 2 2 1 1 2 1 1 ... 3 2 ]
passed
==== cpu compact without scan, non-power-of-two ====
elapsed time: 0.0017ms (std::chrono Measured)
[ 3 1 3 1 3 3 2 2 1 1 2 1 1 ... 3 2 ]
passed
==== cpu compact with scan ====
elapsed time: 0.0044ms (std::chrono Measured)
[ 3 1 3 1 3 3 2 2 1 1 2 1 1 ... 3 2 ]
passed
==== work-efficient compact, power-of-two ====
elapsed time: 0.058144ms (CUDA Measured)
passed
==== work-efficient compact, non-power-of-two ====
elapsed time: 0.057856ms (CUDA Measured)

```

2 changes: 1 addition & 1 deletion src/main.cpp
@@ -13,7 +13,7 @@
#include <stream_compaction/thrust.h>
#include "testing_helpers.hpp"

const int SIZE = 1 << 8; // feel free to change the size of array
const int SIZE = 1 << 10; // feel free to change the size of array
const int NPOT = SIZE - 3; // Non-Power-Of-Two
int *a = new int[SIZE];
int *b = new int[SIZE];
21 changes: 21 additions & 0 deletions stream_compaction/common.cu
@@ -24,6 +24,18 @@ namespace StreamCompaction {
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
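// one thread per element: write 1 to bools[index] if the element is kept (nonzero), 0 otherwise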

int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n) {
return;
}

if (idata[index] == 0) {
bools[index] = 0;
}
else {
bools[index] = 1;
}
}

/**
@@ -33,6 +45,15 @@ namespace StreamCompaction {
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
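// one thread per element: if the keep flag is set, write the element to its exclusive-scan index in odata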

int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}

if (bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}

}
50 changes: 48 additions & 2 deletions stream_compaction/cpu.cu
@@ -20,6 +20,14 @@ namespace StreamCompaction {
void scan(int n, int *odata, const int *idata) {
timer().startCpuTimer();
// TODO
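// exclusive prefix sum: odata[0] = 0, odata[i] = odata[i - 1] + idata[i - 1]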

if (n <= 0) {
return;
}
odata[0] = 0;
for (int i = 1; i < n; i++) {
odata[i] = idata[i - 1] + odata[i - 1];
}
timer().endCpuTimer();
}

@@ -31,8 +39,17 @@ namespace StreamCompaction {
int compactWithoutScan(int n, int *odata, const int *idata) {
timer().startCpuTimer();
// TODO
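// copy nonzero elements forward; writeIndex ends up as the compacted length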

int writeIndex = 0;
for (int i = 0; i < n; i++) {
if (idata[i] != 0) {
odata[writeIndex] = idata[i];
writeIndex++;

}
}
timer().endCpuTimer();
return -1;
return writeIndex;
}

/**
@@ -43,8 +60,37 @@ namespace StreamCompaction {
int compactWithScan(int n, int *odata, const int *idata) {
timer().startCpuTimer();
// TODO
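// map to 0/1 keep flags, exclusive-scan the flags, then scatter kept elements to their scanned indices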
int* tempArr = new int[n];

for (int i = 0; i < n; i++) {
if (idata[i] == 0) {
tempArr[i] = 0;
}
else {
tempArr[i] = 1;
}
}

int* scanned = new int[n];
scanned[0] = 0;
for (int i = 1; i < n; i++) {
scanned[i] = scanned[i - 1] + tempArr[i - 1];
}

int result = 0;
for (int i = 0; i < n; i++) {
if (tempArr[i] == 1) {
odata[scanned[i]] = idata[i];
result++;
}
}
delete[] tempArr;
delete[] scanned;

timer().endCpuTimer();
return -1;
return result;


}
}
}
132 changes: 131 additions & 1 deletion stream_compaction/efficient.cu
@@ -12,13 +12,95 @@ namespace StreamCompaction {
return timer;
}

/* Up-sweep (reduce) phase:
   for d = 0 to log2(n) - 1
     for all k = 0 to n - 1 by 2^(d + 1) in parallel
       x[k + 2^(d + 1) - 1] += x[k + 2^d - 1]; */
__global__ void kernUpSweep(int* data, int d, int maxSize) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= maxSize) {
return;
}
int powD = 1 << d;               // 2^d
int powDplusOne = 1 << (d + 1);  // 2^(d + 1)

// each thread owns one node at stride 2^(d + 1)
int selected = index * powDplusOne;

if (selected >= maxSize) {
return;
}

data[selected + powDplusOne - 1] += data[selected + powD - 1];



}


// Down-sweep phase:
// x[n - 1] = 0
// for d = log2(n) - 1 down to 0
//   for all k = 0 to n - 1 by 2^(d + 1) in parallel
//     t = x[k + 2^d - 1];                     // save left child
//     x[k + 2^d - 1] = x[k + 2^(d + 1) - 1];  // set left child to this node's value
//     x[k + 2^(d + 1) - 1] += t;              // set right child to old left value + this node's value

__global__ void kernDownSweep(int* data, int d, int maxSize) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= maxSize) {
return;
}

int powD = 1 << d;               // 2^d
int powDplusOne = 1 << (d + 1);  // 2^(d + 1)

// each thread owns one node at stride 2^(d + 1)
int selected = index * powDplusOne;

if (selected >= maxSize) {
return;
}

int temp = data[selected + powD - 1];
data[selected + powD - 1] = data[selected + powDplusOne - 1];
data[selected + powDplusOne - 1] = temp + data[selected + powDplusOne - 1];



}
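
// Worked example (added for illustration; n = 8, input [1 2 3 4 5 6 7 8]):
//   after up-sweep:   [1 3 3 10 5 11 7 36]   (last element holds the total sum)
//   zero the root:    [1 3 3 10 5 11 7  0]
//   after down-sweep: [0 1 3 6 10 15 21 28]  (the exclusive scan of the input)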

/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {


int totalD = ilog2ceil(n);
int maxSize = pow(2, totalD);
int blockSize = 128;
dim3 fullBlocksPerGrid((maxSize + blockSize - 1) / blockSize);

int* device_idata;

cudaMalloc((void**)&device_idata, maxSize * sizeof(int));

cudaMemset(device_idata, 0, maxSize * sizeof(int));
cudaMemcpy(device_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
timer().startGpuTimer();
// TODO
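// up-sweep (reduce): build partial sums in place over the padded array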
for (int d = 0; d <= totalD - 1; d++) {
kernUpSweep << < fullBlocksPerGrid, blockSize >> > (device_idata, d, maxSize);
}

// set the root (last element) to zero, then down-sweep to turn the sums into an exclusive scan
cudaMemset(device_idata + maxSize - 1, 0, sizeof(int));
for (int d = totalD - 1; d >= 0; d--) {
kernDownSweep << < fullBlocksPerGrid, blockSize >> > (device_idata, d, maxSize);
}
timer().endGpuTimer();

cudaMemcpy(odata, device_idata, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(device_idata);
}

/**
@@ -31,10 +113,58 @@ namespace StreamCompaction {
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {

int totalD = ilog2ceil(n);
int maxSize = pow(2, totalD);
int blockSize = 128;

dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
dim3 scanBlocksPerGrid((maxSize + blockSize - 1) / blockSize);


int* device_idata;
int* device_mappedArr;
int* device_scannedArr;
int* device_odata;

cudaMalloc((void**)&device_idata, maxSize * sizeof(int));
cudaMalloc((void**)&device_mappedArr, maxSize * sizeof(int));
cudaMalloc((void**)&device_scannedArr, maxSize * sizeof(int));
cudaMalloc((void**)&device_odata, maxSize * sizeof(int));

cudaMemcpy(device_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
// zero the keep flags so the padded tail (indices n .. maxSize - 1) never contributes garbage
cudaMemset(device_mappedArr, 0, maxSize * sizeof(int));


timer().startGpuTimer();
// TODO
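// step 1: map each element to a 0/1 keep flag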

Common::kernMapToBoolean << < fullBlocksPerGrid, blockSize >> > (n, device_mappedArr, device_idata);

// step 2: exclusive-scan the flags on a copy, so the original flags survive for the scatter
cudaMemcpy(device_scannedArr, device_mappedArr, maxSize * sizeof(int), cudaMemcpyDeviceToDevice);

for (int d = 0; d <= totalD - 1; d++) {
kernUpSweep << < scanBlocksPerGrid, blockSize >> > (device_scannedArr, d, maxSize);
}

cudaMemset(device_scannedArr + maxSize - 1, 0, sizeof(int));
for (int d = totalD - 1; d >= 0; d--) {
kernDownSweep << < scanBlocksPerGrid, blockSize >> > (device_scannedArr, d, maxSize);
}

// step 3: scatter kept elements to their scanned (compacted) indices
Common::kernScatter << < fullBlocksPerGrid, blockSize >> > (maxSize, device_odata, device_idata, device_mappedArr, device_scannedArr);

timer().endGpuTimer();
return -1;

// total kept = last exclusive-scan value + last keep flag (covers a kept final element)
int lastScan = 0;
int lastBool = 0;
cudaMemcpy(&lastScan, device_scannedArr + maxSize - 1, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&lastBool, device_mappedArr + maxSize - 1, sizeof(int), cudaMemcpyDeviceToHost);
int count = lastScan + lastBool;
cudaMemcpy(odata, device_odata, count * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(device_idata);
cudaFree(device_mappedArr);
cudaFree(device_scannedArr);
cudaFree(device_odata);

return count;
}
}
}