Dataset Viewer
problem (string, lengths 66 to 76k) | answer (string, lengths 29 to 75.8k)
---|---
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void vecProductKernel(float *d_z, const float *d_x, const float *d_y, unsigned int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
d_z[idx] = d_x[idx] * d_y[idx];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void vecProductKernel(float *d_z, const float *d_x, const float *d_y, unsigned int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
d_z[idx] = d_x[idx] * d_y[idx];
}
}
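For context, a minimal host-side driver for the converted kernel might look like the sketch below; the buffer setup, sizes, and block dimension are assumptions for illustration, not part of the original sample.
#include <hip/hip_runtime.h>
#include <vector>
int main() {
    const unsigned int N = 1u << 20;
    std::vector<float> x(N, 2.0f), y(N, 3.0f), z(N);
    float *d_x, *d_y, *d_z;
    hipMalloc(&d_x, N * sizeof(float));
    hipMalloc(&d_y, N * sizeof(float));
    hipMalloc(&d_z, N * sizeof(float));
    hipMemcpy(d_x, x.data(), N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_y, y.data(), N * sizeof(float), hipMemcpyHostToDevice);
    const int block = 256;
    const int grid = (N + block - 1) / block; // round up so every element is covered
    vecProductKernel<<<grid, block>>>(d_z, d_x, d_y, N);
    hipMemcpy(z.data(), d_z, N * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d_x); hipFree(d_y); hipFree(d_z);
    return 0;
}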
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len)
{
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < len) {
c[idx] = a[idx]+scalar*b[idx];
idx += blockDim.x * gridDim.x;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len)
{
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < len) {
c[idx] = a[idx]+scalar*b[idx];
idx += blockDim.x * gridDim.x;
}
}
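The while loop here is a grid-stride loop, so correctness does not depend on the grid covering len exactly. A hedged launch sketch (d_a, d_b, d_c are assumed device buffers; the sizes are arbitrary choices):
// Hypothetical launch; any grid size works because the kernel strides by blockDim.x * gridDim.x.
const int block = 256;
const int grid = 1024;
STREAM_Triad_double<<<grid, block>>>(d_a, d_b, d_c, 3.0, len);
hipDeviceSynchronize();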
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include "sys/time.h"
using namespace std;
double timeInSeconds (timeval& starttime, timeval& stopstime) {
return 1e-6*(1e6*(stopstime.tv_sec - starttime.tv_sec) + (stopstime.tv_usec - starttime.tv_usec));
}
__device__ double* dev_vector1 = 0;
__device__ double* dev_vector2 = 0;
__device__ double* dev_results = 0;
__global__ void device_vector_mult () {
// IMPLEMENT ME 6: Multiply the threadIdx.x element of dev_vector1 by the
// corresponding element of dev_vector2, and store in dev_results.
}
int main (int argc, char** argv) {
int sizeOfVector = 100;
if (argc > 1) sizeOfVector = atoi(argv[1]);
// Declare and fill host-side arrays of doubles.
double* vector1 = new double[sizeOfVector];
double* vector2 = new double[sizeOfVector];
double* results = new double[sizeOfVector];
srand(42);
for (int i = 0; i < sizeOfVector; ++i) {
vector1[i] = rand() % 100;
vector2[i] = rand() % 100;
results[i] = 0;
}
timeval startTime;
timeval interTime;
timeval stopsTime;
gettimeofday(&startTime, NULL);
// Use the CPU for this part.
// IMPLEMENT ME 1: Multiply each element of vector1 by the corresponding
// element in vector2 and store in results.
for (int i = 0; i < sizeOfVector; ++i) {
results[i] = vector1[i] * vector2[i];
}
gettimeofday(&interTime, NULL);
double total = 0;
// IMPLEMENT ME 2: Sum the results array and store the sum in total.
for (int i = 0; i < sizeOfVector; ++i) {
total += results[i];
}
gettimeofday(&stopsTime, NULL);
cout << "Dot product is : " << total << endl;
// IMPLEMENT ME 3: Time the above operations together and separately
// using 'gettimeofday'.
cout << "Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
// Now on to the GPU!
// IMPLEMENT ME 4: Use cudaMalloc to allocate space for the three device vectors.
// IMPLEMENT ME 5: Use cudaMemcpy to initialise dev_vector1 and dev_vector2 to have
// the same content as the host-side arrays.
// IMPLEMENT ME 6: Put in the function body for device_vector_mult, above.
// IMPLEMENT ME 7: Launch a kernel that runs device_vector_mult.
// IMPLEMENT ME 8: Use cudaMemcpy to copy back dev_results into results.
// IMPLEMENT ME 9: Calculate the dot product by summing over results, same
// as above.
// IMPLEMENT ME 10: Take the time for the kernel launch and the addition,
// and print out the results (including the dot product) as you did for the CPU.
// IMPLEMENT ME 11: Write a reduction kernel that sums over dev_results, and launch it.
// Time this operation and compare with the code that first moves the transformed data
// to the host, then sums over it.
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include "sys/time.h"
using namespace std;
double timeInSeconds (timeval& starttime, timeval& stopstime) {
return 1e-6*(1e6*(stopstime.tv_sec - starttime.tv_sec) + (stopstime.tv_usec - starttime.tv_usec));
}
__device__ double* dev_vector1 = 0;
__device__ double* dev_vector2 = 0;
__device__ double* dev_results = 0;
__global__ void device_vector_mult () {
// IMPLEMENT ME 6: Multiply the threadIdx.x element of dev_vector1 by the
// corresponding element of dev_vector2, and store in dev_results.
}
int main (int argc, char** argv) {
int sizeOfVector = 100;
if (argc > 1) sizeOfVector = atoi(argv[1]);
// Declare and fill host-side arrays of doubles.
double* vector1 = new double[sizeOfVector];
double* vector2 = new double[sizeOfVector];
double* results = new double[sizeOfVector];
srand(42);
for (int i = 0; i < sizeOfVector; ++i) {
vector1[i] = rand() % 100;
vector2[i] = rand() % 100;
results[i] = 0;
}
timeval startTime;
timeval interTime;
timeval stopsTime;
gettimeofday(&startTime, NULL);
// Use the CPU for this part.
// IMPLEMENT ME 1: Multiply each element of vector1 by the corresponding
// element in vector2 and store in results.
for (int i = 0; i < sizeOfVector; ++i) {
results[i] = vector1[i] * vector2[i];
}
gettimeofday(&interTime, NULL);
double total = 0;
// IMPLEMENT ME 2: Sum the results array and store the sum in total.
for (int i = 0; i < sizeOfVector; ++i) {
total += results[i];
}
gettimeofday(&stopsTime, NULL);
cout << "Dot product is : " << total << endl;
// IMPLEMENT ME 3: Time the above operations together and separately
// using 'gettimeofday'.
cout << "Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
// Now on to the GPU!
// IMPLEMENT ME 4: Use cudaMalloc to allocate space for the three device vectors.
// IMPLEMENT ME 5: Use cudaMemcpy to initialise dev_vector1 and dev_vector2 to have
// the same content as the host-side arrays.
// IMPLEMENT ME 6: Put in the function body for device_vector_mult, above.
// IMPLEMENT ME 7: Launch a kernel that runs device_vector_mult.
// IMPLEMENT ME 8: Use cudaMemcpy to copy back dev_results into results.
// IMPLEMENT ME 9: Calculate the dot product by summing over results, same
// as above.
// IMPLEMENT ME 10: Take the time for the kernel launch and the addition,
// and print out the results (including the dot product) as you did for the CPU.
// IMPLEMENT ME 11: Write a reduction kernel that sums over dev_results, and launch it.
// Time this operation and compare with the code that first moves the transformed data
// to the host, then sums over it.
return 0;
}
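A hedged sketch of the GPU steps the comments ask for (IMPLEMENT ME 4-8), passing pointers as kernel arguments instead of the __device__ symbols for brevity; it assumes sizeOfVector fits in a single block (true for the default of 100):
// Hypothetical completion of the skeleton above: one thread per element, one block.
__global__ void device_vector_mult_args(double* v1, double* v2, double* res) {
    int i = threadIdx.x;
    res[i] = v1[i] * v2[i];
}
void run_gpu_part(double* vector1, double* vector2, double* results, int sizeOfVector) {
    size_t bytes = sizeOfVector * sizeof(double);
    double *d_v1, *d_v2, *d_res;
    hipMalloc(&d_v1, bytes);
    hipMalloc(&d_v2, bytes);
    hipMalloc(&d_res, bytes);
    hipMemcpy(d_v1, vector1, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_v2, vector2, bytes, hipMemcpyHostToDevice);
    device_vector_mult_args<<<1, sizeOfVector>>>(d_v1, d_v2, d_res);
    hipMemcpy(results, d_res, bytes, hipMemcpyDeviceToHost);
    hipFree(d_v1); hipFree(d_v2); hipFree(d_res);
}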
|
Convert the following CUDA code to AMD GPU code:
cuda
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 10
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A x B
* where A is a (m x k) matrix
* where B is a (k x n) matrix
* where C is a (m x n) matrix
*
* Use shared memory for tiling
*
********************************************************************/
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int right_boundary = k*TILE_SIZE*by + k;
float Sum = 0;
for (int a=k*TILE_SIZE*by, b=bx*TILE_SIZE; a<right_boundary; a+=TILE_SIZE,b+=(TILE_SIZE*n))
{
__shared__ float Acache[TILE_SIZE][TILE_SIZE];
__shared__ float Bcache[TILE_SIZE][TILE_SIZE];
Acache[ty][tx] = A[a + k * ty + tx];
Bcache[ty][tx] = B[b + n * ty + tx];
__syncthreads();
for (int i=0; i<TILE_SIZE; i++) {
Sum += Acache[ty][i] * Bcache[i][tx];
}
__syncthreads();
}
// INSERT KERNEL CODE HERE
int c = n * TILE_SIZE * by + TILE_SIZE * bx;
C[c + n * ty + tx] = Sum;
}
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'N') && (transb != 'n')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = TILE_SIZE;
//INSERT CODE HERE
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(n / dimBlock.x, m / dimBlock.y);
mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);
// Invoke CUDA kernel -----------------------------------------------------
}
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#define TILE_SIZE 10
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A x B
* where A is a (m x k) matrix
* where B is a (k x n) matrix
* where C is a (m x n) matrix
*
* Use shared memory for tiling
*
********************************************************************/
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int right_boundary = k*TILE_SIZE*by + k;
float Sum = 0;
for (int a=k*TILE_SIZE*by, b=bx*TILE_SIZE; a<right_boundary; a+=TILE_SIZE,b+=(TILE_SIZE*n))
{
__shared__ float Acache[TILE_SIZE][TILE_SIZE];
__shared__ float Bcache[TILE_SIZE][TILE_SIZE];
Acache[ty][tx] = A[a + k * ty + tx];
Bcache[ty][tx] = B[b + n * ty + tx];
__syncthreads();
for (int i=0; i<TILE_SIZE; i++) {
Sum += Acache[ty][i] * Bcache[i][tx];
}
__syncthreads();
}
// INSERT KERNEL CODE HERE
int c = n * TILE_SIZE * by + TILE_SIZE * bx;
C[c + n * ty + tx] = Sum;
}
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'N') && (transb != 'n')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = TILE_SIZE;
//INSERT CODE HERE
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(n / dimBlock.x, m / dimBlock.y);
mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);
// Invoke CUDA kernel -----------------------------------------------------
}
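Note that the tiled kernel loads full TILE_SIZE x TILE_SIZE tiles with no boundary checks, so the integer divisions in the grid computation silently truncate unless m, n, and k are multiples of TILE_SIZE. A hedged host-side guard making that assumption explicit (an addition, not in the original):
if (m % TILE_SIZE != 0 || n % TILE_SIZE != 0 || k % TILE_SIZE != 0) {
    printf("mysgemm assumes m, n, k are multiples of TILE_SIZE (%d)\n", TILE_SIZE);
    return;
}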
|
Convert the following CUDA code to AMD GPU code:
cuda
///*
// * LinearSysSolver.cpp
// *
// * Created on: Jul 8, 2013
// * Author: adm85
// */
//
//#include <vector>
//#include <iostream>
//#include <time.h>
//#include "LinearSysSolver.h"
//#include "cublas_v2.h"
//#include "cula.h"
//
//
//LinearSysSolver::LinearSysSolver()
//{
// // TODO Auto-generated constructor stub
//
//}
//
//LinearSysSolver::~LinearSysSolver()
//{
// // TODO Auto-generated destructor stub
//}
//
///**
// * Solves A*x=B for x. The result is stored in the vector pointed to by B.
// */
//void LinearSysSolver::solveSystem(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) {
// //Get the LU Factorization
// cuComplex* LUMat = new cuComplex[M_A*N_A];
// int ipivLength = N_A;
// int* ipiv = new int[ipivLength];
// getLUDecomposition(A, M_A, N_A, LUMat, ipiv, ipivLength);
//
// //Calculate P*b
// swapPivotRows(B, M_B, N_B, ipiv, ipivLength);
//
// //Solve the system. The result will be stored in B
// cublasSolveLinearSystem(LUMat, M_A, N_A, B, M_B, N_B);
//
// // DEBUG CODE -------
// //cuComplex* test = multiplyMatrices(xTxInv, N, N, xTx, N, N);
// cuComplex* test = multiplyMatrices(A, M_A, N_A, B, M_B, N_B);
// cout << endl << "X * XInv" << endl;
// columnMajorPrintArray(test, M_A, N_B);
// delete [] test;
// // END DEBUG CODE ---
//
// delete [] LUMat;
// delete [] ipiv;
//}
//
//
///**
// * Uses the CULA library to get the LU decomposition of the matrix.
// */
//void LinearSysSolver::getLUDecomposition(cuComplex* x, int M, int N, cuComplex* LUMat, int* ipiv, int ipivLength) {
//
// culaDeviceFloatComplex* devxTx;
// culaDeviceInt* devIPIV;
//
// cudaMalloc(&devxTx, M*N*sizeof(culaDeviceFloatComplex));
// cudaMalloc(&devIPIV, ipivLength*sizeof(culaDeviceInt));
// cudaMemcpy(devxTx, x, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyHostToDevice);
//
// culaStatus culaStat;
// culaInitialize();
//
// culaStat = culaDeviceCgetrf(M, N, devxTx, M, devIPIV);
// if(culaStat != culaNoError) {
// cout << "Cula Cgetrf failure" << endl;
// }
//
// culaShutdown();
//
// //LUMat = new cuComplex[M*N];
// cudaMemcpy(LUMat, devxTx, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyDeviceToHost);
// cudaMemcpy(ipiv, devIPIV, ipivLength*sizeof(culaDeviceInt), cudaMemcpyDeviceToHost);
//
//// getL(L, LUMat, M, N);
////
// cout << "LUMat Inside:" << endl;
// columnMajorPrintArray(LUMat, M, N);
////
//// getU(U, LUMat, M, N);
//// cout << endl << "U" << endl;
//// columnMajorPrintArray(U, M, N);
//
// cudaFree(devxTx);
// cudaFree(devIPIV);
//}
//
///**
// * Using the information from the CULA generated IPIF array,
// * this function swaps rows as appropriate.
// */
//void LinearSysSolver::swapPivotRows(cuComplex* x, int M, int N, int* ipiv, int ipivLength) {
// //Temporary row vector
// cuComplex rowVec[N];
//
// //We use index 1 based ordering because this is what CULA returns
// for(int i=1; i <= ipivLength; i++) {
// //Check to see if the row swaps. This happens when element x of the ipif
// //array is not equal to x. When element x is different, it means that row x
// //and the row specified in element x swap places.
// if(ipiv[i-1] != i) {
// int startIndex = i-1;
// //Copy the current row into the temporary row vector
// for(int j = 0; j < N; j++) {
// rowVec[j].x = x[startIndex+j*M].x;
// rowVec[j].y = x[startIndex+j*M].y;
// }
//
// //Copy the specified row into the current row
// int specRowStart = ipiv[i-1]-1;
// for(int j=0; j < N; j++) {
// x[startIndex+j*M].x = x[specRowStart+j*M].x;
// x[startIndex+j*M].y = x[specRowStart+j*M].y;
// }
//
// //Copy the temp row into the specified row
// for(int j=0; j < N; j++) {
// x[specRowStart+j*M].x = rowVec[j].x;
// x[specRowStart+j*M].y = rowVec[j].y;
// }
// }
// }
//
//}
//
//void LinearSysSolver::cublasSolveLinearSystem(cuComplex* A, int M, int N, cuComplex* B, int M_B, int N_B) {
// cuComplex* xInv = new cuComplex[M*N_B];
//
// //Now put L, U, and the I matrix on the GPU
// cublasStatus_t stat;
// cublasHandle_t handle;
//
// cuComplex* devA;
// cuComplex* devB;
// cudaMalloc(&devA, M*N*sizeof(cuComplex));
// cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex));
//
// stat = cublasCreate(&handle);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = cublasSetMatrix(M, N, sizeof(cuComplex), A, M, devA, M);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
//
// //Set up Alpha
// cuComplex alpha;
// alpha.x = 1;
// alpha.y = 0;
//
// //First solve L*y = P*b
// stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error solving for y" << endl;
// }
//
// //Then solve U*x = y
// stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error solving for x" << endl;
// }
//
// //Get results, and store them in matrix B
// cudaMemcpy(B, devB, M*N_B*sizeof(cuComplex), cudaMemcpyDeviceToHost);
//
// //Free resources
// cublasDestroy(handle);
// cudaFree(devA);
// cudaFree(devB);
//}
//
///**
// * Multiplies two matrices together. Result is stored in B on exit.
// */
//cuComplex* LinearSysSolver::multiplyMatrices(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) {
// cudaError_t cudaStat;
// cublasStatus_t stat;
// cublasHandle_t handle;
//
// cuComplex* devA;
// cuComplex* devB;
// cuComplex* devC;
// cuComplex* alpha = new cuComplex;
// cuComplex* beta = new cuComplex;
// cuComplex* hostC = new cuComplex[M_A*N_B];
// alpha->x = 1;
// alpha->y = 0;
// beta->x = 0;
// beta->y = 0;
//
// cudaStat = cudaMalloc(&devA, M_A*N_A*sizeof(cuComplex));
// cudaStat = cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex));
// cudaStat = cudaMalloc(&devC, M_A*N_B*sizeof(cuComplex));
// if(cudaStat != cudaSuccess) {
// cout << "Horrible failure!" << endl;
// }
//
// stat = cublasCreate(&handle);
//
// stat = cublasSetMatrix(M_A, N_A, sizeof(cuComplex), A, M_A, devA, M_A);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Data download A failed" << endl;
// }
// stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Data download B failed" << endl;
// }
//
// //Perform the multiply.
// stat = cublasCgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M_A, N_B, N_A, alpha, devA, M_A, devB, M_B, beta, devC, M_A);
//
// stat = cublasGetMatrix(M_A, N_B, sizeof(cuComplex), devC, M_A, hostC, M_A);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Failed to get devC to hostC" << endl;
// cout << stat << endl;
// }
//
// cudaFree(devA);
// cudaFree(devB);
// cudaFree(devC);
// cublasDestroy(handle);
//
// delete alpha;
// delete beta;
// return hostC;
//
//}
//
///**
// * Prints out an array that is stored in column-major order in memory.
// */
//void LinearSysSolver::columnMajorPrintArray(cuComplex* x, int M, int N) {
// int realIndex;
// cout << "------------------------------------------------------" << endl;
// cout << " Printing Column Order Matrix " << endl;
// cout << "------------------------------------------------------" << endl;
// for(int i=0; i < M; i++) {
// cout << "Row: " << (i+1) << " ";
// for(int j=0; j < N; j++) {
// realIndex = (M*j)+i;
// cout << x[realIndex].x;
// if(x[realIndex].y >= 0) {
// cout << "+";
// }
// cout << x[realIndex].y << "i ";
// }
// cout << endl;
// }
//}
|
#include <hip/hip_runtime.h>
///*
// * LinearSysSolver.cpp
// *
// * Created on: Jul 8, 2013
// * Author: adm85
// */
//
//#include <vector>
//#include <iostream>
//#include <time.h>
//#include "LinearSysSolver.h"
//#include "cublas_v2.h"
//#include "cula.h"
//
//
//LinearSysSolver::LinearSysSolver()
//{
// // TODO Auto-generated constructor stub
//
//}
//
//LinearSysSolver::~LinearSysSolver()
//{
// // TODO Auto-generated destructor stub
//}
//
///**
// * Solves A*x=B for x. The result is stored in the vector pointed to by B.
// */
//void LinearSysSolver::solveSystem(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) {
// //Get the LU Factorization
// cuComplex* LUMat = new cuComplex[M_A*N_A];
// int ipivLength = N_A;
// int* ipiv = new int[ipivLength];
// getLUDecomposition(A, M_A, N_A, LUMat, ipiv, ipivLength);
//
// //Calculate P*b
// swapPivotRows(B, M_B, N_B, ipiv, ipivLength);
//
// //Solve the system. The result will be stored in B
// cublasSolveLinearSystem(LUMat, M_A, N_A, B, M_B, N_B);
//
// // DEBUG CODE -------
// //cuComplex* test = multiplyMatrices(xTxInv, N, N, xTx, N, N);
// cuComplex* test = multiplyMatrices(A, M_A, N_A, B, M_B, N_B);
// cout << endl << "X * XInv" << endl;
// columnMajorPrintArray(test, M_A, N_B);
// delete [] test;
// // END DEBUG CODE ---
//
// delete [] LUMat;
// delete [] ipiv;
//}
//
//
///**
// * Uses the CULA library to get the LU decomposition of the matrix.
// */
//void LinearSysSolver::getLUDecomposition(cuComplex* x, int M, int N, cuComplex* LUMat, int* ipiv, int ipivLength) {
//
// culaDeviceFloatComplex* devxTx;
// culaDeviceInt* devIPIV;
//
// cudaMalloc(&devxTx, M*N*sizeof(culaDeviceFloatComplex));
// cudaMalloc(&devIPIV, ipivLength*sizeof(culaDeviceInt));
// cudaMemcpy(devxTx, x, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyHostToDevice);
//
// culaStatus culaStat;
// culaInitialize();
//
// culaStat = culaDeviceCgetrf(M, N, devxTx, M, devIPIV);
// if(culaStat != culaNoError) {
// cout << "Cula Cgetrf failure" << endl;
// }
//
// culaShutdown();
//
// //LUMat = new cuComplex[M*N];
// cudaMemcpy(LUMat, devxTx, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyDeviceToHost);
// cudaMemcpy(ipiv, devIPIV, ipivLength*sizeof(culaDeviceInt), cudaMemcpyDeviceToHost);
//
//// getL(L, LUMat, M, N);
////
// cout << "LUMat Inside:" << endl;
// columnMajorPrintArray(LUMat, M, N);
////
//// getU(U, LUMat, M, N);
//// cout << endl << "U" << endl;
//// columnMajorPrintArray(U, M, N);
//
// cudaFree(devxTx);
// cudaFree(devIPIV);
//}
//
///**
// * Using the information from the CULA generated IPIF array,
// * this function swaps rows as appropriate.
// */
//void LinearSysSolver::swapPivotRows(cuComplex* x, int M, int N, int* ipiv, int ipivLength) {
// //Temporary row vector
// cuComplex rowVec[N];
//
// //We use index 1 based ordering because this is what CULA returns
// for(int i=1; i <= ipivLength; i++) {
// //Check to see if the row swaps. This happens when element x of the ipif
// //array is not equal to x. When element x is different, it means that row x
// //and the row specified in element x swap places.
// if(ipiv[i-1] != i) {
// int startIndex = i-1;
// //Copy the current row into the temporary row vector
// for(int j = 0; j < N; j++) {
// rowVec[j].x = x[startIndex+j*M].x;
// rowVec[j].y = x[startIndex+j*M].y;
// }
//
// //Copy the specified row into the current row
// int specRowStart = ipiv[i-1]-1;
// for(int j=0; j < N; j++) {
// x[startIndex+j*M].x = x[specRowStart+j*M].x;
// x[startIndex+j*M].y = x[specRowStart+j*M].y;
// }
//
// //Copy the temp row into the specified row
// for(int j=0; j < N; j++) {
// x[specRowStart+j*M].x = rowVec[j].x;
// x[specRowStart+j*M].y = rowVec[j].y;
// }
// }
// }
//
//}
//
//void LinearSysSolver::cublasSolveLinearSystem(cuComplex* A, int M, int N, cuComplex* B, int M_B, int N_B) {
// cuComplex* xInv = new cuComplex[M*N_B];
//
// //Now put L, U, and the I matrix on the GPU
// cublasStatus_t stat;
// cublasHandle_t handle;
//
// cuComplex* devA;
// cuComplex* devB;
// cudaMalloc(&devA, M*N*sizeof(cuComplex));
// cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex));
//
// stat = cublasCreate(&handle);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = cublasSetMatrix(M, N, sizeof(cuComplex), A, M, devA, M);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
//
// //Set up Alpha
// cuComplex alpha;
// alpha.x = 1;
// alpha.y = 0;
//
// //First solve L*y = P*b
// stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error solving for y" << endl;
// }
//
// //Then solve U*x = y
// stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error solving for x" << endl;
// }
//
// //Get results, and store them in matrix B
// cudaMemcpy(B, devB, M*N_B*sizeof(cuComplex), cudaMemcpyDeviceToHost);
//
// //Free resources
// cublasDestroy(handle);
// cudaFree(devA);
// cudaFree(devB);
//}
//
///**
// * Multiplies two matrices together. Result is stored in B on exit.
// */
//cuComplex* LinearSysSolver::multiplyMatrices(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) {
// cudaError_t cudaStat;
// cublasStatus_t stat;
// cublasHandle_t handle;
//
// cuComplex* devA;
// cuComplex* devB;
// cuComplex* devC;
// cuComplex* alpha = new cuComplex;
// cuComplex* beta = new cuComplex;
// cuComplex* hostC = new cuComplex[M_A*N_B];
// alpha->x = 1;
// alpha->y = 0;
// beta->x = 0;
// beta->y = 0;
//
// cudaStat = cudaMalloc(&devA, M_A*N_A*sizeof(cuComplex));
// cudaStat = cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex));
// cudaStat = cudaMalloc(&devC, M_A*N_B*sizeof(cuComplex));
// if(cudaStat != cudaSuccess) {
// cout << "Horrible failure!" << endl;
// }
//
// stat = cublasCreate(&handle);
//
// stat = cublasSetMatrix(M_A, N_A, sizeof(cuComplex), A, M_A, devA, M_A);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Data download A failed" << endl;
// }
// stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Data download B failed" << endl;
// }
//
// //Perform the multiply.
// stat = cublasCgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M_A, N_B, N_A, alpha, devA, M_A, devB, M_B, beta, devC, M_A);
//
// stat = cublasGetMatrix(M_A, N_B, sizeof(cuComplex), devC, M_A, hostC, M_A);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Failed to get devC to hostC" << endl;
// cout << stat << endl;
// }
//
// cudaFree(devA);
// cudaFree(devB);
// cudaFree(devC);
// cublasDestroy(handle);
//
// delete alpha;
// delete beta;
// return hostC;
//
//}
//
///**
// * Prints out an array that is stored in column-major order in memory.
// */
//void LinearSysSolver::columnMajorPrintArray(cuComplex* x, int M, int N) {
// int realIndex;
// cout << "------------------------------------------------------" << endl;
// cout << " Printing Column Order Matrix " << endl;
// cout << "------------------------------------------------------" << endl;
// for(int i=0; i < M; i++) {
// cout << "Row: " << (i+1) << " ";
// for(int j=0; j < N; j++) {
// realIndex = (M*j)+i;
// cout << x[realIndex].x;
// if(x[realIndex].y >= 0) {
// cout << "+";
// }
// cout << x[realIndex].y << "i ";
// }
// cout << endl;
// }
//}
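Since this whole file is commented out, the conversion only needed the include swap; if the solver were revived, the cuBLAS calls would map onto hipBLAS, whose interface closely mirrors cuBLAS (the CULA getrf would need a different library, e.g. rocSOLVER). A hedged sketch, in the same commented-out style, of how one triangular solve might translate, assuming hipblas.h and the same device buffers:
//#include <hipblas.h>
// hipblasHandle_t handle;
// hipblasCreate(&handle);
// hipblasComplex alpha(1.0f, 0.0f);
// //First solve L*y = P*b, mirroring the cublasCtrsm call above (B is M_B x N_B)
// hipblasCtrsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER,
//              HIPBLAS_OP_N, HIPBLAS_DIAG_UNIT,
//              M, N_B, &alpha, devA, M, devB, M_B);
// hipblasDestroy(handle);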
|
Convert the following CUDA code to AMD GPU code:
cuda
// Each thread calculates fitness for one individual
// Result: vector of fitness
extern "C"
__global__ void fitness_kernel(int populationCnt, int *population,
int pointsCnt, float *pointsX, float *pointsY, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < populationCnt)
{
int shift = 5*i;
float fitness = 0.0f;
for (int p = 0; p < pointsCnt; p++)
{
float fApprox = population[shift + 4];
for (int k = 3; k >= 0; k--)
{
fApprox = fApprox * (*pointsX) + population[shift + k];
}
fApprox /= 10.0f;
++pointsX;
fitness += pow(fApprox - *(pointsY++), 2);
}
result[i] = fitness / pointsCnt;
}
}
|
#include <hip/hip_runtime.h>
// Each thread calculates fitness for one individual
// Result: vector of fitness
extern "C"
__global__ void fitness_kernel(int populationCnt, int *population,
int pointsCnt, float *pointsX, float *pointsY, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < populationCnt)
{
int shift = 5*i;
float fitness = 0.0f;
for (int p = 0; p < pointsCnt; p++)
{
float fApprox = population[shift + 4];
for (int k = 3; k >= 0; k--)
{
fApprox = fApprox * (*pointsX) + population[shift + k];
}
fApprox /= 10.0f;
++pointsX;
fitness += pow(fApprox - *(pointsY++), 2);
}
result[i] = fitness / pointsCnt;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include <cstdio>
#include "time.h"
constexpr int segment_size = 1024;
constexpr int threads = 512;
__device__ char *pool;
void __global__ alloc(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// pointers[index] = (int *)malloc(segment_size);
pointers[index] = (int *)atomicAdd((unsigned long long *)&pool, segment_size);
}
void __global__ fill(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < segment_size / sizeof(int); i++) {
pointers[index][i] = i;
}
}
void __global__ free(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// free(pointers[index]);
}
int main() {
int **pointers;
cudaMalloc(&pointers, threads * sizeof(int *));
int bd = 32;
for (int i = 0; i < 10; i++) {
char *pool_;
cudaMallocManaged(&pool_, segment_size * threads);
cudaMemcpyToSymbol(pool, &pool_, sizeof(void *));
alloc<<<threads / bd, bd>>>(pointers);
fill<<<threads / bd, bd>>>(pointers);
free<<<threads / bd, bd>>>(pointers);
}
cudaDeviceSynchronize();
}
|
#include "hip/hip_runtime.h"
#include <cstdio>
#include "time.h"
constexpr int segment_size = 1024;
constexpr int threads = 512;
__device__ char *pool;
void __global__ alloc(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// pointers[index] = (int *)malloc(segment_size);
pointers[index] = (int *)atomicAdd((unsigned long long *)&pool, segment_size);
}
void __global__ fill(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < segment_size / sizeof(int); i++) {
pointers[index][i] = i;
}
}
void __global__ free(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// free(pointers[index]);
}
int main() {
int **pointers;
hipMalloc(&pointers, threads * sizeof(int *));
int bd = 32;
for (int i = 0; i < 10; i++) {
char *pool_;
hipMallocManaged(&pool_, segment_size * threads);
hipMemcpyToSymbol(HIP_SYMBOL(pool), &pool_, sizeof(void *));
alloc<<<threads / bd, bd>>>(pointers);
fill<<<threads / bd, bd>>>(pointers);
free<<<threads / bd, bd>>>(pointers);
}
hipDeviceSynchronize();
}
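Two details worth noting: alloc implements a bump allocator, each thread reserving segment_size bytes by atomically advancing the shared pool pointer with the 64-bit atomicAdd, and HIP_SYMBOL is the macro HIP expects around a __device__ symbol name in hipMemcpyToSymbol. A hedged error check around that copy (the check is an addition; the API names are standard HIP):
hipError_t err = hipMemcpyToSymbol(HIP_SYMBOL(pool), &pool_, sizeof(void *));
if (err != hipSuccess) {
    fprintf(stderr, "hipMemcpyToSymbol failed: %s\n", hipGetErrorString(err));
}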
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <algorithm>
#include <iostream>
#include <vector>
std::vector<double> add(std::vector<double> inarr1, std::vector<double> inarr2);
void test_integration()
{
constexpr size_t arr_size = 2 << 24;
std::cout << "Initializing test arrays...\n";
std::vector<double> arr1(arr_size);
std::vector<double> arr2(arr_size);
for (size_t i = 0; i < arr_size; i++)
{
arr1[i] = static_cast<double>(i);
arr2[i] = static_cast<double>(arr_size - i);
}
std::cout << "Calling the kernel wrapper...\n";
auto result = add(std::move(arr1), std::move(arr2));
std::cout << "Verifying results...\n";
if (std::all_of(result.begin(), result.end(),
[arr_size](double x) { return x == arr_size; }))
{
std::cout << "All results were valid.\n";
}
else
{
std::cout << "At least one result is invalid.\n";
}
}
int main()
{
std::cout << "Test CUDA integration\n";
test_integration();
std::cout << "Finished testing\n";
return 0;
}
|
#include <hip/hip_runtime.h>
#include <algorithm>
#include <iostream>
#include <vector>
std::vector<double> add(std::vector<double> inarr1, std::vector<double> inarr2);
void test_integration()
{
constexpr size_t arr_size = 2 << 24;
std::cout << "Initializing test arrays...\n";
std::vector<double> arr1(arr_size);
std::vector<double> arr2(arr_size);
for (size_t i = 0; i < arr_size; i++)
{
arr1[i] = static_cast<double>(i);
arr2[i] = static_cast<double>(arr_size - i);
}
std::cout << "Calling the kernel wrapper...\n";
auto result = add(std::move(arr1), std::move(arr2));
std::cout << "Verifying results...\n";
if (std::all_of(result.begin(), result.end(),
[arr_size](double x) { return x == arr_size; }))
{
std::cout << "All results were valid.\n";
}
else
{
std::cout << "At least one result is invalid.\n";
}
}
int main()
{
std::cout << "Test CUDA integration\n";
test_integration();
std::cout << "Finished testing\n";
return 0;
}
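The add wrapper is only declared in this translation unit; a hedged HIP implementation that would satisfy the declaration (the kernel name, block size, and omission of error checking are assumptions):
#include <hip/hip_runtime.h>
#include <vector>
// Hypothetical element-wise addition kernel backing the add() declaration above.
__global__ void add_kernel(const double* a, const double* b, double* out, size_t n)
{
    size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    if (i < n) out[i] = a[i] + b[i];
}
std::vector<double> add(std::vector<double> inarr1, std::vector<double> inarr2)
{
    const size_t n = inarr1.size();
    const size_t bytes = n * sizeof(double);
    double *d_a, *d_b, *d_out;
    hipMalloc(&d_a, bytes);
    hipMalloc(&d_b, bytes);
    hipMalloc(&d_out, bytes);
    hipMemcpy(d_a, inarr1.data(), bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, inarr2.data(), bytes, hipMemcpyHostToDevice);
    const int block = 256;
    const int grid = (int)((n + block - 1) / block); // round up to cover all elements
    add_kernel<<<grid, block>>>(d_a, d_b, d_out, n);
    std::vector<double> result(n);
    hipMemcpy(result.data(), d_out, bytes, hipMemcpyDeviceToHost);
    hipFree(d_a); hipFree(d_b); hipFree(d_out);
    return result;
}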
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "Output_Layer_GPU_Kernels.cuh"
__constant__ float anchors_416[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 };
__device__ float Sigmoid(float x)
{
float expValue = exp((double)-x);
float result = 1 / (1 + expValue);
return result;
}
__global__ void XY_BoundingBox_Coordinates_Transform_Kernel(float* input, int inputHeight, int inputWidth)
{
int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
int tensorXYSize = inputHeight * inputWidth;
int tensorSize = boundingBoxesPerGridCell * tensorXYSize;
if (threadIndex < tensorSize)
{
int threadDepthIndex = threadIndex % boundingBoxesPerGridCell;
//int threadDepthIndexY = (threadIndex % XYCoordinatesCount) + 1;
int threadXYIndex = threadIndex % tensorXYSize;
int cy = threadXYIndex / inputWidth;
int cx = threadXYIndex % inputWidth;
//tensor[threadDepthIndex * tensorXYSize + threadXYIndex] = threadDepthIndex;
input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex] = (cx + Sigmoid(input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex])) * downsampleFactor;
input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex] = (cy + Sigmoid(input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex])) * downsampleFactor;
//input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex] = 1;
//input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex] = 1;
}
}
__global__ void WH_BoundingBox_Transform_Kernel(float* input, int inputHeight, int inputWidth)
{
int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
int tensorXYSize = inputHeight * inputWidth;
int tensorSize = boundingBoxesPerGridCell * tensorXYSize;
if (threadIndex < tensorSize)
{
int threadDepthIndex = threadIndex % boundingBoxesPerGridCell;
//int threadDepthIndexY = (threadIndex % XYCoordinatesCount) + 1;
int threadXYIndex = threadIndex % tensorXYSize;
//tensor[threadDepthIndex * tensorXYSize + threadXYIndex] = threadDepthIndex;
input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex] = exp(input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex]) *
anchors_416[2 * threadDepthIndex] * downsampleFactor;
input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex] = exp(input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex]) *
anchors_416[2 * threadDepthIndex + 1] * downsampleFactor;
//input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex] = anchors_416[2 * threadDepthIndex] = 1;
//input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex] = anchors_416[2 * threadDepthIndex + 1] = 1;
input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex] = Sigmoid(input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex]);
//input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex] = 2;
}
}
__global__ void Softmax_Kernel(float* input, int classesCount, int inputHeight, int inputWidth)
{
int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
int tensorXYSize = inputHeight * inputWidth;
int tensorSize = boundingBoxesPerGridCell * tensorXYSize;
if (threadIndex < tensorSize)
{
int threadDepthIndex = threadIndex % boundingBoxesPerGridCell;
int threadXYIndex = threadIndex % tensorXYSize;
float maxClassProbability = FLOAT_MIN;
for (size_t i = 0; i < classesCount; i++)
{
float classProbability = input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex];
if (classProbability > maxClassProbability)
{
maxClassProbability = classProbability;
}
}
float classProbabilitiesSum = 0;
for (size_t i = 0; i < classesCount; i++)
{
float exponent = exp(input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] - maxClassProbability);
classProbabilitiesSum += exponent;
input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = exponent;
}
for (size_t i = 0; i < classesCount; i++)
{
input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] /= classProbabilitiesSum;
//input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = i;
//input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = 3;
}
}
}
void WH_BoundingBox_Transform(float* input, int inputHeight, int inputWidth)
{
int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth;
int gridXDim = ceil(tensorSize / 512.0);
WH_BoundingBox_Transform_Kernel << <gridXDim, 512 >> > (input, inputHeight, inputWidth);
}
void XY_BoundingBox_Coordinates_Transform(float* input, int inputHeight, int inputWidth)
{
int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth;
int gridXDim = ceil(tensorSize / 512.0);
XY_BoundingBox_Coordinates_Transform_Kernel << <gridXDim, 512 >> > (input, inputHeight, inputWidth);
}
void Softmax_GPU(float* input, int classesCount, int inputHeight, int inputWidth)
{
int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth;
int gridXDim = ceil(tensorSize / 512.0);
Softmax_Kernel << <gridXDim, 512 >> > (input, classesCount, inputHeight, inputWidth);
}
|
#pragma once
#include<hip/hip_runtime.h>
#include <math.h>
__device__ const int downsampleFactor = 32;
__device__ const int boundingBoxesPerGridCell = 5;
__device__ const float FLOAT_MIN = 1.17549435e-38;
//__global__ void BoundingBox_ConfidenceScores_Transform_Kernel(float* input, int inputHeight, int inputWidth);
__global__ void XY_BoundingBox_Coordinates_Transform_Kernel(float* input, int inputHeight, int inputWidth);
__global__ void WH_BoundingBox_Transform_Kernel(float* input, int inputHeight, int inputWidth);
__global__ void Softmax_Kernel(float* input, int classesCount, int inputHeight, int inputWidth);
void XY_BoundingBox_Coordinates_Transform(float* input, int inputHeight, int inputWidth);
void WH_BoundingBox_Transform(float* input, int inputHeight, int inputWidth);
void Softmax_GPU(float* input, int classesCount, int inputHeight, int inputWidth);
//void BoundingBox_ConfidenceScores_Transform(float* input, int inputHeight, int inputWidth);
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <cuda_runtime.h>
#include <assert.h>
int main(int argc, char **argv){
float *a_h, *b_h; // Host data
float *a_d, *b_d; // Device data
int N = 14, nBytes, i;
printf("Start allocating\n");
nBytes = N * sizeof(float);
printf("Allocating in Host\n");
a_h = (float*) malloc(nBytes);
b_h = (float*) malloc(nBytes);
printf("Allocating in Device\n");
cudaMalloc((void**)&a_d, nBytes);
cudaMalloc((void**)&b_d, nBytes);
printf("End allocating\n");
for(i=0; i<N; i++)
a_h[i] = 100.0 + i;
printf("Start memcpy\n");
cudaMemcpy(a_d, a_h, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, a_d, nBytes, cudaMemcpyDeviceToDevice);
cudaMemcpy(b_h, b_d, nBytes, cudaMemcpyDeviceToHost);
printf("End memcpy\n");
for(i=0; i<N; i++)
assert(a_h[i] == b_h[i]);
free(a_h);
free(b_h);
cudaFree(a_d);
cudaFree(b_d);
return 0;
}
|
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <assert.h>
int main(int argc, char **argv){
float *a_h, *b_h; // Host data
float *a_d, *b_d; // Device data
int N = 14, nBytes, i;
printf("Start allocating\n");
nBytes = N * sizeof(float);
printf("Allocating in Host\n");
a_h = (float*) malloc(nBytes);
b_h = (float*) malloc(nBytes);
printf("Allocating in Device\n");
hipMalloc((void**)&a_d, nBytes);
hipMalloc((void**)&b_d, nBytes);
printf("End allocating\n");
for(i=0; i<N; i++)
a_h[i] = 100.0 + i;
printf("Start memcpy\n");
hipMemcpy(a_d, a_h, nBytes, hipMemcpyHostToDevice);
hipMemcpy(b_d, a_d, nBytes, hipMemcpyDeviceToDevice);
hipMemcpy(b_h, b_d, nBytes, hipMemcpyDeviceToHost);
printf("End memcpy\n");
for(i=0; i<N; i++)
assert(a_h[i] == b_h[i]);
free(a_h);
free(b_h);
hipFree(a_d);
hipFree(b_d);
return 0;
}
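None of the allocation or copy calls above are checked; a common hedged idiom is a checking macro such as the sketch below (the macro name is a convention, not part of the HIP API; exit requires <stdlib.h>):
#define HIP_CHECK(call)                                              \
    do {                                                             \
        hipError_t err_ = (call);                                    \
        if (err_ != hipSuccess) {                                    \
            printf("HIP error %s at %s:%d\n",                        \
                   hipGetErrorString(err_), __FILE__, __LINE__);     \
            exit(1);                                                 \
        }                                                            \
    } while (0)
/* usage: HIP_CHECK(hipMemcpy(a_d, a_h, nBytes, hipMemcpyHostToDevice)); */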
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda.h>
#define KERNEL_SIZE 3
#define BLOCK_SIZE 512
typedef signed int pixel_channel;
typedef unsigned long resolution;
__constant__ pixel_channel kernel_cuda[KERNEL_SIZE * KERNEL_SIZE];
pixel_channel kernel_host[KERNEL_SIZE * KERNEL_SIZE] = { -1, -1, -1,
-1, 9, -1,
-1, -1, -1 };
__global__ void Pixel_Shared_Convolution(pixel_channel *channel_cuda, pixel_channel *rezult_cuda, resolution width, resolution lineQuantity)
{
__shared__ pixel_channel sharedMemory [3][BLOCK_SIZE + 2];
for(long line = 1; line < lineQuantity; line++)
{
long temp = blockIdx.x * BLOCK_SIZE + threadIdx.x;
sharedMemory [0][threadIdx.x + 1] = channel_cuda[temp + width * (line - 1)];
sharedMemory [1][threadIdx.x + 1] = channel_cuda[temp + width * line];
sharedMemory [2][threadIdx.x + 1] = channel_cuda[temp + width * (line + 1)];
if(threadIdx.x == 0)
{
if(blockIdx.x != 0)
temp--;
sharedMemory [0][0] = channel_cuda[temp + width * (line-1)];
sharedMemory [1][0] = channel_cuda[temp + width * line];
sharedMemory [2][0] = channel_cuda[temp + width * (line+1)];
}
if(threadIdx.x == (BLOCK_SIZE - 1))
{
temp++;
sharedMemory [0][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line - 1)];
sharedMemory [1][BLOCK_SIZE + 1] = channel_cuda[temp + width * line];
sharedMemory [2][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line + 1)];
}
__syncthreads();
long Sum = 0;
for (int i = 0; i < KERNEL_SIZE; i++)
for (int j = 0; j < KERNEL_SIZE; j++)
Sum += sharedMemory[j][threadIdx.x + i] * kernel_cuda[i * 3 + j];
if (Sum < 0)
Sum = 0;
if (Sum > 255)
Sum = 255;
__syncthreads();
if((blockIdx.x * BLOCK_SIZE + threadIdx.x) > width)
continue;
rezult_cuda[blockIdx.x * BLOCK_SIZE + threadIdx.x + width * line] = Sum;
}
__syncthreads();
return;
}
extern "C" __host__ pixel_channel** asyncConvolution(pixel_channel **image, resolution width, resolution height)
{
pixel_channel **channel_cuda;
channel_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*));
pixel_channel **rezult_cuda;
rezult_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*));
resolution size = width * height;
cudaHostRegister(image[0], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped);
cudaHostRegister(image[1], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped);
cudaHostRegister(image[2], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped);
cudaMalloc((void **)& rezult_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& rezult_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& rezult_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& channel_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& channel_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& channel_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMemcpyToSymbol(kernel_cuda, kernel_host, 9 * sizeof(pixel_channel), 0, cudaMemcpyHostToDevice);
resolution block_count = 0;
if(((width - 2)%BLOCK_SIZE) == 0)
block_count = (width - 2)/BLOCK_SIZE;
else
block_count = (width - 2)/BLOCK_SIZE + 1;
dim3 gridSize = dim3(block_count, 1, 1);
dim3 blockSize = dim3(BLOCK_SIZE, 1, 1);
cudaStream_t stream[3];
for(int i = 0; i < 3; i++)
{
cudaStreamCreate(&stream[i]);
cudaMemcpyAsync(channel_cuda[i], image[i], size*sizeof(pixel_channel), cudaMemcpyHostToDevice, stream[i]);
Pixel_Shared_Convolution<<<gridSize, blockSize, 0, stream[i]>>>(channel_cuda[i], rezult_cuda[i], width, height);
cudaMemcpyAsync(image[i], rezult_cuda[i], size*sizeof(pixel_channel), cudaMemcpyDeviceToHost,stream[i]);
cudaStreamDestroy(stream[i]);
}
for(int i=0;i<3;i++)
{
cudaFree(rezult_cuda[i]);
cudaFree(channel_cuda[i]);
}
cudaDeviceReset();
return image;
}
|
#include <hip/hip_runtime.h>
#define KERNEL_SIZE 3
#define BLOCK_SIZE 512
typedef signed int pixel_channel;
typedef unsigned long resolution;
__constant__ pixel_channel kernel_cuda[KERNEL_SIZE * KERNEL_SIZE];
pixel_channel kernel_host[KERNEL_SIZE * KERNEL_SIZE] = { -1, -1, -1,
-1, 9, -1,
-1, -1, -1 };
__global__ void Pixel_Shared_Convolution(pixel_channel *channel_cuda, pixel_channel *rezult_cuda, resolution width, resolution lineQuantity)
{
__shared__ pixel_channel sharedMemory [3][BLOCK_SIZE + 2];
for(long line = 1; line < lineQuantity; line++)
{
long temp = blockIdx.x * BLOCK_SIZE + threadIdx.x;
sharedMemory [0][threadIdx.x + 1] = channel_cuda[temp + width * (line - 1)];
sharedMemory [1][threadIdx.x + 1] = channel_cuda[temp + width * line];
sharedMemory [2][threadIdx.x + 1] = channel_cuda[temp + width * (line + 1)];
if(threadIdx.x == 0)
{
if(blockIdx.x != 0)
temp--;
sharedMemory [0][0] = channel_cuda[temp + width * (line-1)];
sharedMemory [1][0] = channel_cuda[temp + width * line];
sharedMemory [2][0] = channel_cuda[temp + width * (line+1)];
}
if(threadIdx.x == (BLOCK_SIZE - 1))
{
temp++;
sharedMemory [0][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line - 1)];
sharedMemory [1][BLOCK_SIZE + 1] = channel_cuda[temp + width * line];
sharedMemory [2][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line + 1)];
}
__syncthreads();
long Sum = 0;
for (int i = 0; i < KERNEL_SIZE; i++)
for (int j = 0; j < KERNEL_SIZE; j++)
Sum += sharedMemory[j][threadIdx.x + i] * kernel_cuda[i * 3 + j];
if (Sum < 0)
Sum = 0;
if (Sum > 255)
Sum = 255;
__syncthreads();
if((blockIdx.x * BLOCK_SIZE + threadIdx.x) > width)
continue;
rezult_cuda[blockIdx.x * BLOCK_SIZE + threadIdx.x + width * line] = Sum;
}
__syncthreads();
return;
}
extern "C" __host__ pixel_channel** asyncConvolution(pixel_channel **image, resolution width, resolution height)
{
pixel_channel **channel_cuda;
channel_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*));
pixel_channel **rezult_cuda;
rezult_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*));
resolution size = width * height;
hipHostRegister(image[0], (size + BLOCK_SIZE) * sizeof(pixel_channel), hipHostRegisterMapped);
hipHostRegister(image[1], (size + BLOCK_SIZE) * sizeof(pixel_channel), hipHostRegisterMapped);
hipHostRegister(image[2], (size + BLOCK_SIZE) * sizeof(pixel_channel), hipHostRegisterMapped);
hipMalloc((void **)& rezult_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel));
hipMalloc((void **)& rezult_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel));
hipMalloc((void **)& rezult_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel));
hipMalloc((void **)& channel_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel));
hipMalloc((void **)& channel_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel));
hipMalloc((void **)& channel_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel));
hipMemcpyToSymbol(HIP_SYMBOL(kernel_cuda), kernel_host, 9 * sizeof(pixel_channel), 0, hipMemcpyHostToDevice);
resolution block_count = 0;
if(((width - 2)%BLOCK_SIZE) == 0)
block_count = (width - 2)/BLOCK_SIZE;
else
block_count = (width - 2)/BLOCK_SIZE + 1;
dim3 gridSize = dim3(block_count, 1, 1);
dim3 blockSize = dim3(BLOCK_SIZE, 1, 1);
hipStream_t stream[3];
for(int i = 0; i < 3; i++)
{
hipStreamCreate(&stream[i]);
hipMemcpyAsync(channel_cuda[i], image[i], size*sizeof(pixel_channel), hipMemcpyHostToDevice, stream[i]);
Pixel_Shared_Convolution<<<gridSize, blockSize, 0, stream[i]>>>(channel_cuda[i], rezult_cuda[i], width, height);
hipMemcpyAsync(image[i], rezult_cuda[i], size*sizeof(pixel_channel), hipMemcpyDeviceToHost,stream[i]);
hipStreamDestroy(stream[i]);
}
for(int i=0;i<3;i++)
{
hipFree(rezult_cuda[i]);
hipFree(channel_cuda[i]);
}
hipDeviceReset();
return image;
}
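One hedged refinement to the stream loop: hipStreamDestroy is queued right after the asynchronous copies, which is legal but leaves the completion point implicit; synchronizing first makes it explicit (a sketch, not in the original):
hipStreamSynchronize(stream[i]); // wait for the H2D copy, the kernel, and the D2H copy
hipStreamDestroy(stream[i]);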
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
//Device Memory allocations
cudaError_t err = cudaMalloc((void**)&dev_c, sizeof(int));
if(err != cudaSuccess) {
printf("The error is %s\n", cudaGetErrorString(err));
}
add<<<1,1>>>(2, 7, dev_c);
if(cudaPeekAtLastError() != cudaSuccess) {
printf("The error is %s\n", cudaGetErrorString(cudaGetLastError()));
}
cudaError_t err2 = cudaMemcpy( &c, dev_c, sizeof(c), cudaMemcpyDeviceToHost);
if(err2 != cudaSuccess) {
printf("The error is %s\n", cudaGetErrorString(err2));
}
printf("2 + 7 = %d\n", c);
cudaFree(dev_c);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
//Device Memory allocations
hipError_t err = hipMalloc((void**)&dev_c, sizeof(int));
if(err != hipSuccess) {
printf("The error is %s\n", hipGetErrorString(err));
}
add<<<1,1>>>(2, 7, dev_c);
if(hipPeekAtLastError() != hipSuccess) {
printf("The error is %s\n", hipGetErrorString(hipGetLastError()));
}
hipError_t err2 = hipMemcpy( &c, dev_c, sizeof(c), hipMemcpyDeviceToHost);
if(err2 != hipSuccess) {
printf("The error is %s\n", hipGetErrorString(err2));
}
printf("2 + 7 = %d\n", c);
hipFree(dev_c);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <memory>
/* Enumerate the GPU devices available to CUDA */
int main(void) {
int device_count = 0;
cudaGetDeviceCount(&device_count);
// Number of CUDA-capable GPU devices detected
if (device_count ==0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", device_count);
}
// Print information about the current device
/*
The cudaDeviceProp structure exposes properties that identify the device and the
versions it supports. Its name field holds the device name as a string. The
cudaDriverGetVersion and cudaRuntimeGetVersion calls return the CUDA driver and
runtime versions in use. The number of multiprocessors on the device is read from
multiProcessorCount, and clockRate reports the GPU clock frequency in kHz.
*/
int device;
cudaDeviceProp device_Property;
cudaGetDevice(&device);
cudaGetDeviceProperties(&device_Property, device);
printf("\nDevice %d:\"%s\"\n", device, device_Property.name);
int driver_Version;
int runtime_Version;
cudaDriverGetVersion(&driver_Version);
cudaRuntimeGetVersion(&runtime_Version);
printf("CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driver_Version / 1000, (driver_Version % 100) / 10, runtime_Version / 1000, (runtime_Version % 100) / 10);
printf("Total amount of global memory:%.0f Mbytes (%1lu bytes)\n", (float)device_Property.totalGlobalMem / 1048576.0f, (unsigned long long)device_Property.totalGlobalMem);
printf("(%2d) Multiprocessors", device_Property.multiProcessorCount);
printf("GPU Max Clock rate:%.0f MHz (%0.2f GHz)\n", device_Property.clockRate * 1e-3f, device_Property.clockRate * 1e-6f);
/*
Thread blocks and grids are three-dimensional (the dim3 type), so it helps to know
the thread and block limits in each dimension. The per-multiprocessor and per-block
limits come from maxThreadsPerMultiProcessor and maxThreadsPerBlock. maxThreadsDim
bounds the thread count in each dimension of a block, and maxGridSize is a
three-value array giving the maximum grid extent in the x, y, and z dimensions.
*/
printf("Maximum number of threads per multiprocessor:%d\n", device_Property.maxThreadsPerMultiProcessor);
printf("Maximum number of threads per block:%d\n", device_Property.maxThreadsPerBlock);
printf("Max dimension size of a thread block (x,y,z):(%d,%d,%d)\n", device_Property.maxThreadsDim[0],
device_Property.maxThreadsDim[1],
device_Property.maxThreadsDim[2]);
printf("Max dimension size of a grid size (x,y,z):(%d,%d,%d)\n", device_Property.maxGridSize[0],
device_Property.maxGridSize[1],
device_Property.maxGridSize[2]);
}
|
#include <iostream>
#include <hip/hip_runtime.h>
#include <memory>
/* Enumerate the GPU devices available to the HIP runtime */
int main(void) {
int device_count = 0;
hipGetDeviceCount(&device_count);
// Number of GPU devices detected
if (device_count ==0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", device_count);
}
// Print information about the current device
/*
The hipDeviceProp_t structure exposes properties that identify the device and the
versions it supports. Its name field holds the device name as a string. The
hipDriverGetVersion and hipRuntimeGetVersion calls return the driver and runtime
versions in use. The number of multiprocessors on the device is read from
multiProcessorCount, and clockRate reports the GPU clock frequency in kHz.
*/
int device;
hipDeviceProp_t device_Property;
hipGetDevice(&device);
hipGetDeviceProperties(&device_Property, device);
printf("\nDevice %d:\"%s\"\n", device, device_Property.name);
int driver_Version;
int runtime_Version;
hipDriverGetVersion(&driver_Version);
hipRuntimeGetVersion(&runtime_Version);
printf("CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driver_Version / 1000, (driver_Version % 100) / 10, runtime_Version / 1000, (runtime_Version % 100) / 10);
printf("Total amount of global memory:%.0f Mbytes (%1lu bytes)\n", (float)device_Property.totalGlobalMem / 1048576.0f, (unsigned long long)device_Property.totalGlobalMem);
printf("(%2d) Multiprocessors", device_Property.multiProcessorCount);
printf("GPU Max Clock rate:%.0f MHz (%0.2f GHz)\n", device_Property.clockRate * 1e-3f, device_Property.clockRate * 1e-6f);
/*
Thread blocks and grids are three-dimensional (the dim3 type), so it helps to know
the thread and block limits in each dimension. The per-multiprocessor and per-block
limits come from maxThreadsPerMultiProcessor and maxThreadsPerBlock. maxThreadsDim
bounds the thread count in each dimension of a block, and maxGridSize is a
three-value array giving the maximum grid extent in the x, y, and z dimensions.
*/
printf("Maximum number of threads per multiprocessor:%d\n", device_Property.maxThreadsPerMultiProcessor);
printf("Maximum number of threads per block:%d\n", device_Property.maxThreadsPerBlock);
printf("Max dimension size of a thread block (x,y,z):(%d,%d,%d)\n", device_Property.maxThreadsDim[0],
device_Property.maxThreadsDim[1],
device_Property.maxThreadsDim[2]);
printf("Max dimension size of a grid size (x,y,z):(%d,%d,%d)\n", device_Property.maxGridSize[0],
device_Property.maxGridSize[1],
device_Property.maxGridSize[2]);
}
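The program reports device_count but then queries only the current device; a hedged variant iterating over every detected device (the loop is an addition, not in the original):
for (int dev = 0; dev < device_count; ++dev) {
    hipDeviceProp_t props;
    hipGetDeviceProperties(&props, dev);
    printf("Device %d: \"%s\"\n", dev, props.name);
}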
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#define AND 0
#define OR 1
#define NAND 2
#define NOR 3
#define XOR 4
#define XNOR 5
__global__ void computeLogicGates(char* d_input, char* d_output, int size) {
// calculate the index of the thread
int index = threadIdx.x + blockIdx.x * blockDim.x;
int input_index = index * 3;
// if the index is inside the range of the array
if (input_index < size) {
int output;
switch (d_input[input_index+2] - '0') {
case AND:
if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 1;
else output = 0;
break;
case OR:
if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 0;
else output = 1;
break;
case NAND:
if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 0;
else output = 1;
break;
case NOR:
if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 1;
else output = 0;
break;
case XOR:
if (d_input[input_index] == d_input[input_index+1]) output = 0;
else output = 1;
break;
case XNOR:
if (d_input[input_index] == d_input[input_index+1]) output = 1;
else output = 0;
break;
}
d_output[index] = output + '0';
}
}
int main(int argc, char* argv[]) {
// check if necessary arguments are provided
if (argc == 1) {
return printf("No arguments are provided! Please provide the input file path, input file length and the output file path!");
}
else if (argc == 2) {
return printf("Input file length and output file path are not provided!");
}
else if (argc == 3) {
return printf("Output file path is not provided!");
}
char* input_file = argv[1];
int input_size = atoi(argv[2]);
char* output_file = argv[3];
// read the input file
FILE* input_fptr;
input_fptr = fopen(input_file, "r");
if (!input_fptr) return printf("Error opening the input file!");
// read the file line by line and populate input_data array
char line[10];
// allocate CUDA variables
char* d_input;
char* d_output;
int input_array_size = input_size * 3 * sizeof(char);
int output_array_size = input_size * sizeof(char);
cudaMallocManaged(&d_input, input_array_size);
cudaMallocManaged(&d_output, output_array_size);
for (int i = 0; i < input_size; i++) {
fgets(line, 9, input_fptr);
d_input[i*3] = line[0];
d_input[i*3+1] = line[2];
d_input[i*3+2] = line[4];
}
// close file pointer
fclose(input_fptr);
clock_t start = clock();
// call device kernel
computeLogicGates<<<input_size, 1>>>(d_input, d_output, input_array_size);
// synchronize threads
cudaDeviceSynchronize();
clock_t end = clock();
// write the results into the output file
FILE* output_fptr;
output_fptr = fopen(output_file, "w");
if(!output_fptr) return printf("Error opening output file!");
for (int i = 0; i < input_size; i++) {
char data[3];
sprintf(data, "%c\n", d_output[i]);
fputs(data, output_fptr);
}
// close file pointer
fclose(output_fptr);
// free up device memory
cudaFree(d_input);
cudaFree(d_output);
// calculate execution time
double runtime = (double) (end-start) / CLOCKS_PER_SEC;
printf("Execution time: %f ms\n", runtime * 1000);
return 0;
}
|
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define AND 0
#define OR 1
#define NAND 2
#define NOR 3
#define XOR 4
#define XNOR 5
__global__ void computeLogicGates(char* d_input, char* d_output, int size) {
// calculate the index of the thread
int index = threadIdx.x + blockIdx.x * blockDim.x;
int input_index = index * 3;
// if the index is inside the range of the array
if (input_index < size) {
int output;
switch (d_input[input_index+2] - '0') {
case AND:
if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 1;
else output = 0;
break;
case OR:
if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 0;
else output = 1;
break;
case NAND:
if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 0;
else output = 1;
break;
case NOR:
if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 1;
else output = 0;
break;
case XOR:
if (d_input[input_index] == d_input[input_index+1]) output = 0;
else output = 1;
break;
case XNOR:
if (d_input[input_index] == d_input[input_index+1]) output = 1;
else output = 0;
break;
default:
output = 0; // defensive: unknown gate character
}
d_output[index] = output + '0';
}
}
int main(int argc, char* argv[]) {
// check if necessary arguments are provided
if (argc == 1) {
return printf("No arguments are provided! Please provide the input file path, input file length and the output file path!");
}
else if (argc == 2) {
return printf("Input file length and output file path are not provided!");
}
else if (argc == 3) {
return printf("Output file path is not provided!");
}
char* input_file = argv[1];
int input_size = atoi(argv[2]);
char* output_file = argv[3];
// read the input file
FILE* input_fptr;
input_fptr = fopen(input_file, "r");
if (!input_fptr) return printf("Error opening the input file!");
// read the file line by line and populate input_data array
char line[10];
// allocate CUDA variables
char* d_input;
char* d_output;
int input_array_size = input_size * 3 * sizeof(char);
int output_array_size = input_size * sizeof(char);
hipMallocManaged(&d_input, input_array_size);
hipMallocManaged(&d_output, output_array_size);
for (int i = 0; i < input_size; i++) {
fgets(line, 9, input_fptr);
d_input[i*3] = line[0];
d_input[i*3+1] = line[2];
d_input[i*3+2] = line[4];
}
// close file pointer
fclose(input_fptr);
clock_t start = clock();
// call device kernel
computeLogicGates<<<input_size, 1>>>(d_input, d_output, input_array_size);
// synchronize threads
hipDeviceSynchronize();
clock_t end = clock();
// write the results into the output file
FILE* output_fptr;
output_fptr = fopen(output_file, "w");
if(!output_fptr) return printf("Error opening output file!");
for (int i = 0; i < input_size; i++) {
char data[3];
sprintf(data, "%c\n", d_output[i]);
fputs(data, output_fptr);
}
// close file pointer
fclose(output_fptr);
// free up device memory
hipFree(d_input);
hipFree(d_output);
// calculate execution time
double runtime = (double) (end-start) / CLOCKS_PER_SEC;
printf("Execution time: %f ms\n", runtime * 1000);
return 0;
}
|
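Note on the pair above: the launch computeLogicGates<<<input_size, 1>>> starts one single-thread block per input line, which leaves most of every warp/wavefront idle. Below is a minimal sketch of the more common launch shape, assuming the kernel definition from the row above is in scope (its index math already supports multiple threads per block); the block size of 256 is an assumption, not a value taken from the row:

#include <hip/hip_runtime.h>
__global__ void computeLogicGates(char* d_input, char* d_output, int size);
void launchLogicGates(char* d_input, char* d_output, int input_size) {
    const int threads = 256;
    // round up so the last partial block is still covered
    int blocks = (input_size + threads - 1) / threads;
    // the kernel's `input_index < size` guard lets surplus threads exit early
    computeLogicGates<<<blocks, threads>>>(d_input, d_output, input_size * 3);
    hipDeviceSynchronize();
}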
Convert the following CUDA code to AMD GPU code:
cuda
#include "Matrix.cuh"
#include <cstring>
#include <fstream>
#include <ctime>
#include <device_functions.h>
#ifdef __CUDACC__
#define cuda_SYNCTHREADS() __syncthreads()
#else
#define cuda_SYNCTHREADS()
#endif
#define Zero ZeroCPU
#define PRINT_LOG false
//#define TARGET_RESIDUE ((double)1.0e-9);
const double TARGET_RESIDUE = 1.0e-6;
Matrix::Matrix(int cols, int rows) : cols(cols), rows(rows)
{
if (PRINT_LOG) printf("Matrix constructor\n");
cudaMallocManaged(&mat, cols * rows * sizeof(double));
}
unsigned Matrix::getRows() const
{
return rows;
}
unsigned Matrix::getCols() const
{
return cols;
}
Matrix::Matrix(int cols, int rows, double* mat) : cols(cols), rows(rows), mat(mat)
{
if (PRINT_LOG) printf("Matrix constructor\n");
//cudaMallocManaged(&mat, cols * rows * sizeof(double));
}
Matrix::Matrix(const Matrix& a)
{
if (PRINT_LOG) printf("Matrix copy constructor\n");
rows = a.rows;
cols = a.cols;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
std::memcpy(mat, a.mat, cols * rows * sizeof(double));
}
void Matrix::operator=(const Matrix& a)
{
if (PRINT_LOG) printf("Matrix assignment operator\n");
if (this == &a) return; // guard against self-assignment before freeing mat
rows = a.rows;
cols = a.cols;
cudaFree(mat);
cudaMallocManaged(&mat, cols * rows * sizeof(double));
std::memcpy(mat, a.mat, cols * rows * sizeof(double));
}
Matrix Matrix::Stub()
{
return Matrix(1, 1);
}
Matrix Matrix::ZeroCPU(int cols, int rows)
{
double* mat;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
cudaDeviceSynchronize();
for (long i = 0; i < cols * rows; i++)
{
mat[i] = 0.0f;
}
return Matrix(cols, rows, mat);
}
Matrix Matrix::OneCPU(int cols, int rows)
{
double* mat;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
for (long i = 0; i < cols * rows; i++)
{
mat[i] = 1.0f;
}
return Matrix(cols, rows, mat);
}
__global__ void ZeroGPUKernel(const int n, double* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
A[i] = 0.0f;
}
}
Matrix Matrix::ZeroGPU(int cols, int rows)
{
double* mat;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
int blockCount = (cols * rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
ZeroGPUKernel <<<blockCount, BLOCK_SIZE >>>(cols * rows, mat);
cudaDeviceSynchronize();
return Matrix(cols, rows, mat);
}
Matrix Matrix::IdentityCPU(int cols, int rows)
{
if (cols != rows) throw "Identity matrix must be square";
auto ret = Zero(cols, rows);
for (int i = 0; i < cols; ++i)
{
ret.mat[i * cols + i] = 1.0f;
}
return ret;
}
Matrix Matrix::FromFile(std::string path)
{
std::fstream reader;
int cols, rows;
reader.open(path, std::ios::in);
reader.seekg(0);
reader >> cols;
reader >> rows;
double* mat;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
for (int i = 0; i < cols * rows; ++i)
{
reader >> mat[i];
}
reader.close();
return Matrix(cols, rows, mat);
}
Matrix Matrix::Jacobi(const Matrix& A, const Matrix& b)
{
auto LU = A;
auto invD = (LU.separateDiagonal());
auto x = ZeroCPU(1, A.getRows());
invD.inverseDiagonalInPlaceCPU();
auto M = -invD * LU;
auto temp = invD * b;
double res = 1;
int counter = 0;
do
{
x = (M * x + temp);
//if (counter++ == 9)
//{
// counter = 0;
res = (A * x - b).vectorEuclideanNorm();
// printf("res: %f\n", res);
//}
counter++;
}
while (res > TARGET_RESIDUE);
printf("res: %d \n", counter);
return x;
}
Matrix Matrix::JacobiOptimal(const Matrix& A, const Matrix& b)
{
// 25% of the execution time (80000us) was most likely memory copies from device to host and back
//auto LU = A;
//->
auto LU = Matrix(A.cols, A.rows);
copyGPU(LU, A);
//32x performance improvement
//auto invD = (LU.separateDiagonal());
//invD.inverseDiagonalInPlaceCPU();
auto invD = Matrix(A.cols, A.rows);
separateDiagonalAndInverseGPU(invD, LU);
auto x = ZeroGPU(1, A.getRows());
//auto temp1 = invD * b;
auto temp1 = Matrix(1, A.rows);
refMul(temp1, invD, b);
//auto M = -invD * LU;
//auto M = Matrix(A.cols, A.rows);
auto M = Matrix(A.cols, A.rows);
additiveInverseInPlaceGPU(invD);
refMulDiag(M, invD, LU);
double res = 100;
int counter = 9;
auto memmul = Matrix(1, A.rows);
auto _Amulx = Matrix(1, A.rows);
auto resVector = Matrix(1, A.rows);
do
{
refMul(memmul, M, x);
refAdd(x, memmul, temp1);
//x = (M * x + temp);
if (counter++ == 9)
{
counter = 0;
refMul(_Amulx, A, x);
refSub(resVector, _Amulx, b);
res = resVector.vectorEuclideanNorm();
//printf("res: %f\n", res);
}
}
while (res > TARGET_RESIDUE);
return x;
}
Matrix Matrix::ForwardSubstitution(const Matrix& A, const Matrix& b)
{
if (!(A.cols == A.rows && A.rows == b.rows)) throw "Incorrect dimensions";
auto x = Matrix(1, A.getRows());
for (int i = 0; i < x.rows; ++i)
{
double sum = 0;
for (int j = 0; j < i; ++j)
{
sum += A.mat[i * A.cols + j] * x.mat[j];
}
x.mat[i] = (b.mat[i] - sum) / A.mat[i * A.cols + i];
}
return x;
}
Matrix Matrix::BackwardSubstitution(const Matrix& A, const Matrix& b)
{
if (!(A.cols == A.rows && A.rows == b.rows)) throw "Incorrect dimensions";
auto x = Matrix(1, A.getRows());
x.mat[0] = b.mat[0] / A.mat[0];
for (int i = x.rows - 1; i >= 0; --i)
{
double sum = 0;
for (int j = i + 1; j < A.cols; ++j)
{
sum += A.mat[i * A.cols + j] * x.mat[j];
}
x.mat[i] = (b.mat[i] - sum) / A.mat[i * A.cols + i];
}
return x;
}
Matrix Matrix::GaussSeidel(const Matrix& A, const Matrix& b)
{
auto DL = -(A.lowerCPU() + A.diagonalCPU());
auto U = A.upperCPU();
auto x = ZeroCPU(1, A.getRows());
auto temp = Matrix::ForwardSubstitution(DL, b);
double res = 1;
int counter = 0;
do
{
//x = -(Matrix::ForwardSubstitution(DL, U * x)) + temp;
x = (Matrix::ForwardSubstitution(DL, U * x)) + temp;
//if (counter++ == 9)
//{
counter++;
res = (A * (-x) - b).vectorEuclideanNorm();
//}
//printf("res: %f \n", res);
//(x).print();
}
while (res > TARGET_RESIDUE);
printf("res: %d \n", counter);
return -x;
}
Matrix Matrix::GaussSeidelOptimal(const Matrix& A, const Matrix& b)
{
//auto DL = (A.lowerCPU() + A.diagonalCPU());
//auto U = A.upperCPU();
auto DL = Matrix(A.cols, A.rows);
auto U = Matrix(A.cols, A.rows);
copyGPU(DL, A);
separateUpperGPU(U, DL);
//auto DL = (A.lowerCPU() + A.diagonalCPU());
//auto U = A.upperCPU();
auto x = ZeroCPU(1, A.getRows());
auto temp = Matrix::ForwardSubstitution(DL, b);
auto memmul = Matrix(1, A.rows);
auto memforwardsub = Matrix(1, A.rows);
auto memmulres = Matrix(1, A.rows);
auto resVector = Matrix(1, A.rows);
double res;
int counter = 9;
do
{
//x = -(Matrix::ForwardSubstitution(DL, U * x)) + temp;
refMul(memmul, U, x);
forwardSubstitutionGPU(memforwardsub, DL, memmul);
//memforwardsub = Matrix::ForwardSubstitution(DL, memmul);
//double xd = maxError(memforwardsub, memforwardsub2);
additiveInverseInPlaceGPU(memforwardsub);
refAdd(x, memforwardsub, temp);
//x = memforwardsub + temp;
if (counter++ == 9)
{
counter = 0;
refMul(memmulres, A, x);
refSub(resVector, memmulres, b);
res = resVector.vectorEuclideanNorm();
}
//printf("res: %f \n", res);
//(x).print();
}
while (res > TARGET_RESIDUE);
return x;
}
Matrix Matrix::LUMehtod(const Matrix& A, const Matrix& b)
{
Matrix L = Matrix::Stub();
Matrix U = Matrix::Stub();
Matrix::doolitle(L, U, A);
auto y = Matrix::ForwardSubstitution(L, b);
return Matrix::BackwardSubstitution(U, y);
}
Matrix Matrix::LUMehtodOptimal(const Matrix& A, const Matrix& b)
{
Matrix L = Matrix::Stub();
Matrix U = Matrix::Stub();
Matrix::doolitle(L, U, A);
auto y = Matrix::ForwardSubstitution(L, b);
return Matrix::BackwardSubstitution(U, y);
}
void Matrix::doolitle(Matrix& L, Matrix& U, const Matrix& A)
{
if (A.cols != A.rows) throw "Matrix is not square";
L = OneCPU(A.cols, A.rows).diagonalCPU();
U = ZeroCPU(A.cols, A.rows);
for (int j = 0; j < A.cols; ++j)
{
for (int i = 0; i <= j; ++i)
{
double sum = 0;
for (int k = 0; k < i; ++k)
{
sum += L.mat[i * L.cols + k] * U.mat[k * U.cols + j];
}
U.mat[i * U.cols + j] = A.mat[i * U.cols + j] - sum;
}
for (int i = j + 1; i < A.cols; ++i)
{
double sum = 0;
for (int k = 0; k < j; ++k)
{
sum += L.mat[i * L.cols + k] * U.mat[k * U.cols + j];
}
L.mat[i * U.cols + j] = 1 / U.mat[j * U.cols + j] * (A.mat[i * U.cols + j] - sum);
}
}
}
__global__ void doolitleKernel(const int n, double* A, double* B)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
A[j] = B[j];
}
}
void Matrix::doolitleGPU(Matrix& L, Matrix& U, const Matrix& A)
{
int blockCount = (A.rows * A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
//doolitleKernel <<< blockCount, BLOCK_SIZE >>> (A.rows * A.cols, A.mat);
cudaDeviceSynchronize();
}
void Matrix::createTest(Matrix& A, Matrix& b, Matrix& x, int size)
{
srand(time(NULL));
const int constrange = 100;
const auto r = [](int range)-> double { return (double)(rand() % 20000) / 100 - 100; };
x = Matrix(1, size);
A = Matrix(size, size);
b = Matrix(1, size);
for (int i = 0; i < size; ++i)
{
x.mat[i] = r(100);
}
for (int i = 0; i < size; ++i)
{
double sum = 0;
for (int j = 0; j < size; ++j)
{
if (i != j)
{
A.mat[i * size + j] = r(100);
sum += fabs(A.mat[i * size + j]);
}
double randomized = r(100);
if (randomized > 0)
{
A.mat[i * size + i] = sum + r(10);
}
else
{
A.mat[i * size + i] = -sum + r(10);
}
}
}
for (int i = 0; i < size; ++i)
{
double sum = 0;
for (int j = 0; j < size; ++j)
{
sum += A.mat[i * size + j] * x.mat[j];
}
b.mat[i] = sum;
}
}
void Matrix::createTask(Matrix& A, Matrix& b, const int size)
{
//const int size = 994;
const int a1 = 5 + 7;
const int a2 = -1;
const int a3 = a2;
const int inSin(1 + 1);
A = Matrix::ZeroCPU(size, size);
b = Matrix(1, size);
for (int i = 0; i < size; ++i)
{
A.mat[size * i + i] = a1;
if (size * i + i - 1 >= 0)
A.mat[size * i + i - 1] = a2;
if (size * i + i - 2 >= 0)
A.mat[size * i + i - 2] = a3;
if (size * i + i + 1 < size * size)
A.mat[size * i + i + 1] = a2;
if (size * i + i + 2 < size * size)
A.mat[size * i + i + 2] = a3;
}
for (int i = 0; i < size; ++i)
{
b.mat[i] = sin(i * inSin);
}
}
void Matrix::createTaskC(Matrix& A, Matrix& b)
{
const int size = 994;
const int a1 = 3;
const int a2 = -1;
const int a3 = a2;
const int inSin(1 + 1);
A = Matrix::ZeroCPU(size, size);
b = Matrix(1, size);
for (int i = 0; i < size; ++i)
{
A.mat[size * i + i] = a1;
if (size * i + i - 1 >= 0)
A.mat[size * i + i - 1] = a2;
if (size * i + i - 2 >= 0)
A.mat[size * i + i - 2] = a3;
if (size * i + i + 1 < size * size)
A.mat[size * i + i + 1] = a2;
if (size * i + i + 2 < size * size)
A.mat[size * i + i + 2] = a3;
}
for (int i = 0; i < size; ++i)
{
b.mat[i] = sin(i * inSin);
}
}
double Matrix::maxError(Matrix& x, Matrix& r)
{
if (x.rows * x.cols != r.rows * r.cols) throw "Matrices are not the same size";
double max = 0;
for (int i = 0; i < x.rows * x.cols; ++i)
{
if (fabs(x.mat[i] - r.mat[i]) > max)
max = fabs(x.mat[i] - r.mat[i]);
}
return max;
}
__global__ void copyKernel(const int n, double* A, double* B)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
A[j] = B[j];
}
}
void Matrix::copyGPU(Matrix& a, const Matrix& b)
{
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
copyKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat);
cudaDeviceSynchronize();
}
__global__ void separateDiagonalKernel(const int n, double* d, double* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
d[j * n + j] = 1 / A[j * n + j];
A[j * n + j] = 0;
}
}
void Matrix::separateDiagonalAndInverseGPU(Matrix& d, Matrix& A)
{
int blockCount = (A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
separateDiagonalKernel <<< blockCount, BLOCK_SIZE >>>(A.cols, d.mat, A.mat);
cudaDeviceSynchronize();
}
__global__ void separateUpperKernel(const int n, const int cols, double* U, double* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
int row = j / cols;
int col = j % cols;
if (col > row)
{
U[j] = A[j];
A[j] = 0;
}
}
}
void Matrix::separateUpperGPU(Matrix& U, Matrix& A)
{
int blockCount = (A.cols * A.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
separateUpperKernel <<< blockCount, BLOCK_SIZE >>>(A.cols * A.rows, A.cols, U.mat, A.mat);
cudaDeviceSynchronize();
}
__global__ void additiveInverseInPlaceKernel(const int n, double* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
A[j] = -A[j];
}
}
void Matrix::additiveInverseInPlaceGPU(Matrix& A)
{
int blockCount = (A.rows * A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
additiveInverseInPlaceKernel <<< blockCount, BLOCK_SIZE >>>(A.rows * A.cols, A.mat);
cudaDeviceSynchronize();
}
__global__ void forwardSubstitutionKernel(const int n, double* A, double* b, double* x)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
double sum = 0;
for (int i = 0; i < n; i++)
{
if (i == j)
{
x[j] = (b[j] - sum) / A[j * n + j];
}
cuda_SYNCTHREADS();
if (i < j)
{
sum += A[j * n + i] * x[i];
}
}
}
}
void Matrix::forwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b)
{
int blockCount = 1;
int blockSize = pow(2, ceil(log2f(A.cols)));
forwardSubstitutionKernel <<< blockCount, blockSize >>>(A.cols, A.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
void Matrix::backwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b)
{
}
void Matrix::toFile(std::string path)
{
std::fstream writer;
writer.open(path, std::ios::out);
writer.seekp(0);
writer << cols << ' ' << rows << '\n';
for (int i = 0; i < rows; ++i)
{
for (int j = 0; j < cols; ++j)
{
writer << mat[i * cols + j] << ' ';
}
writer << "\n";
}
writer.close();
}
Matrix Matrix::separateDiagonal()
{
if (cols != rows) throw "Matrix is not square";
auto ret = Zero(cols, rows);
for (int i = 0; i < cols; ++i)
{
ret.mat[i * cols + i] = mat[i * cols + i];
mat[i * cols + i] = 0.0f;
}
return ret;
}
Matrix Matrix::diagonalCPU() const
{
if (cols != rows) throw "Matrix is not square";
auto ret = Zero(cols, rows);
for (int i = 0; i < cols; ++i)
{
ret.mat[i * cols + i] = mat[i * cols + i];
}
return ret;
}
Matrix Matrix::lowerCPU() const
{
if (cols != rows) throw "Matrix is not square";
auto ret = Zero(cols, rows);
for (int j = 0; j < cols; ++j)
{
for (int i = 0; i < j; ++i)
{
ret.mat[j * cols + i] = mat[j * cols + i];
}
}
return ret;
}
Matrix Matrix::upperCPU() const
{
if (cols != rows) throw "Matrix is not square";
auto ret = Zero(cols, rows);
for (int j = 0; j < cols; ++j)
{
for (int i = j + 1; i < cols; ++i)
{
ret.mat[j * cols + i] = mat[j * cols + i];
}
}
return ret;
}
void Matrix::inverseDiagonalInPlaceCPU()
{
if (cols != rows) throw "Matrix is not square";
for (int i = 0; i < cols; ++i)
{
if (mat[i * cols + i] == 0) throw "0 on diagonal";
mat[i * cols + i] = 1 / mat[i * cols + i];
}
}
void Matrix::transposeVectorInPlace()
{
unsigned int tmp = cols;
cols = rows;
rows = tmp;
}
double Matrix::vectorEuclideanNorm()
{
if (cols != 1 && rows != 1) throw "Matrix is not a vector";
double sum = 0;
for (int i = 0; i < cols * rows; ++i)
{
sum += mat[i] * mat[i];
}
return sqrt(sum);
}
Matrix Matrix::lu()
{
throw "Not implemented";
}
void Matrix::print() const
{
for (int i = 0; i < rows; ++i)
{
for (int j = 0; j < cols; ++j)
{
printf("%f ", mat[i * cols + j]);
}
printf("\n");
}
printf("\n");
}
Matrix::~Matrix()
{
if (PRINT_LOG) printf("Matrix destructor\n");
cudaFree(mat);
//free(mat);
}
__global__ void mulKernel(const int commonDim, const int cols, const int n, double* A, double* B, double* C)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
int row = j / cols;
int col = j % cols;
C[j] = 0;
for (int i = 0; i < commonDim; i++)
{
C[j] += A[row * commonDim + i] * B[i * cols + col];
}
}
}
void Matrix::refMul(Matrix& result, const Matrix& a, const Matrix& b)
{
int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
mulKernel <<< blockCount, BLOCK_SIZE >>>(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
__global__ void mulDiagKernel(const int commonDim, const int cols, const int n, double* A, double* B, double* C)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
int row = j / cols;
int col = j % cols;
C[j] = A[row * commonDim + row] * B[row * commonDim + col];
}
}
void Matrix::refMulDiag(Matrix& result, const Matrix& a, const Matrix& b)
{
int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
mulDiagKernel <<< blockCount, BLOCK_SIZE >>>(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
Matrix operator*(const Matrix& a, const Matrix& b)
{
if (a.cols != b.rows) throw "wrong dimensions for multiplication";
double* mat;
cudaMallocManaged(&mat, b.cols * a.rows * sizeof(double));
int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (PRINT_LOG) printf("Matrix multiplication on %d blocks x %d threads\n", blockCount, BLOCK_SIZE);
mulKernel <<< blockCount, BLOCK_SIZE >>>(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, mat);
cudaDeviceSynchronize();
return Matrix(b.cols, a.rows, mat);
}
__global__ void addKernel(const int n, double* A, double* B, double* C)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
C[j] = A[j] + B[j];
}
}
void Matrix::refAdd(Matrix& result, const Matrix& a, const Matrix& b)
{
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
addKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
Matrix operator+(const Matrix& a, const Matrix& b)
{
if (a.cols != b.cols || a.rows != b.rows) throw "dimensions must equal for addition";
double* mat;
cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double));
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (PRINT_LOG) printf("Matrix addition on %d blocks x %d threads\n", blockCount, BLOCK_SIZE);
addKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, mat);
cudaDeviceSynchronize();
return Matrix(a.cols, a.rows, mat);
}
__global__ void subKernel(const int n, double* A, double* B, double* C)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
C[j] = A[j] - B[j];
}
}
void Matrix::refSub(Matrix& result, const Matrix& a, const Matrix& b)
{
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
subKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
Matrix operator-(const Matrix& a, const Matrix& b)
{
if (a.cols != b.cols || a.rows != b.rows) throw "dimensions must equal for subtraction";
double* mat;
cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double));
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (PRINT_LOG) printf("Matrix addition on %d blocks x %d threads\n", blockCount, BLOCK_SIZE);
subKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, mat);
cudaDeviceSynchronize();
return Matrix(a.cols, a.rows, mat);
}
__global__ void additiveInverseKernel(const int n, double* A, double* B)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
A[j] = -B[j];
}
}
Matrix operator-(const Matrix& a)
{
double* mat;
cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double));
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
additiveInverseKernel <<<blockCount, BLOCK_SIZE >>>(a.cols * a.rows, mat, a.mat);
cudaDeviceSynchronize();
return Matrix(a.cols, a.rows, mat);
}
|
#pragma once
#define BLOCK_SIZE 256
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string>
class Matrix
{
private:
unsigned int rows;
unsigned int cols;
double* mat;
public:
//getters
unsigned int getRows() const;
unsigned int getCols() const;
//constructors
Matrix(int cols, int rows, double* mat);
Matrix(int cols, int rows);
Matrix(const Matrix& a);
void operator=(const Matrix& a);
static Matrix Stub();
static Matrix ZeroCPU(int cols, int rows);
static Matrix OneCPU(int cols, int rows);
static Matrix ZeroGPU(int cols, int rows);
static Matrix IdentityCPU(int cols, int rows);
static Matrix FromFile(std::string path);
static Matrix Jacobi(const Matrix& A, const Matrix& b);
static Matrix JacobiOptimal(const Matrix& A, const Matrix& b);
static Matrix ForwardSubstitution(const Matrix& A, const Matrix& b);
static Matrix BackwardSubstitution(const Matrix& A, const Matrix& b);
static Matrix GaussSeidel(const Matrix& A, const Matrix& b);
static Matrix GaussSeidelOptimal(const Matrix& A, const Matrix& b);
static Matrix LUMehtod(const Matrix& A, const Matrix& b);
static Matrix LUMehtodOptimal(const Matrix& A, const Matrix& b);
//new approach
static void doolitle(Matrix& L, Matrix& U, const Matrix& A);
static void doolitleGPU(Matrix& L, Matrix& U, const Matrix& A);
static void createTest(Matrix& A, Matrix& b, Matrix& x, int size);
static void createTask(Matrix& A, Matrix& b, const int size);
static void createTaskC(Matrix& A, Matrix& b);
static double maxError(Matrix& x, Matrix& r);
static void copyGPU(Matrix& a, const Matrix& b);
static void separateDiagonalAndInverseGPU(Matrix& d, Matrix& A);
static void separateUpperGPU(Matrix& U, Matrix& A);
static void additiveInverseInPlaceGPU(Matrix& A);
static void forwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b);
static void backwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b);
void toFile(std::string path);
Matrix separateDiagonal();
Matrix diagonalCPU() const;
Matrix lowerCPU() const;
Matrix upperCPU() const;
void inverseDiagonalInPlaceCPU();
void transposeVectorInPlace();
double vectorEuclideanNorm();
Matrix lu();
void print() const;
~Matrix();
friend Matrix operator*(const Matrix& a, const Matrix& b);
friend Matrix operator+(const Matrix& a, const Matrix& b);
friend Matrix operator-(const Matrix& a, const Matrix& b);
friend Matrix operator-(const Matrix& a);
static void refMul(Matrix& result, const Matrix& a, const Matrix& b);
static void refMulDiag(Matrix& result, const Matrix& a, const Matrix& b);
static void refAdd(Matrix& result, const Matrix& a, const Matrix& b);
static void refSub(Matrix& result, const Matrix& a, const Matrix& b);
};
|
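A quick aside on the Matrix pair above: the implementation leans on cudaMallocManaged throughout, and the direct HIP analogue is hipMallocManaged (managed memory support depends on the ROCm version and device, so treat this as a sketch rather than a guaranteed pattern). A minimal round trip showing host writes, a kernel, and the synchronize that must happen before the host reads again:

#include <hip/hip_runtime.h>
#include <cstdio>
__global__ void scaleKernel(double* m, int n, double s) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) m[i] *= s;
}
int main() {
    const int n = 8;
    double* m = nullptr;
    hipMallocManaged(&m, n * sizeof(double)); // HIP analogue of cudaMallocManaged
    for (int i = 0; i < n; ++i) m[i] = i;     // host write, no explicit copy
    scaleKernel<<<1, n>>>(m, n, 2.0);
    hipDeviceSynchronize();                   // required before the host reads m
    for (int i = 0; i < n; ++i) printf("%f ", m[i]);
    printf("\n");
    hipFree(m);
    return 0;
}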
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void multiply_by_itself_training_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input_buf[elem_id];
val.x *= val.x;
val.y *= val.y;
val.z *= val.z;
val.w *= val.w;
output_buf[elem_id] = val;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void multiply_by_itself_training_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input_buf[elem_id];
val.x *= val.x;
val.y *= val.y;
val.z *= val.z;
val.w *= val.w;
output_buf[elem_id] = val;
}
}
|
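Worth noting for the pair above: besides adding the hip/hip_runtime.h include, HIP code can also use the explicit hipLaunchKernelGGL launch API instead of the triple-chevron syntax; both compile under hipcc. A sketch with a hypothetical wrapper and kernel name (not part of the row):

#include <hip/hip_runtime.h>
__global__ void square_elems(const float4* __restrict in, float4* __restrict out, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        float4 v = in[i];
        v.x *= v.x; v.y *= v.y; v.z *= v.z; v.w *= v.w;
        out[i] = v;
    }
}
void launch(const float4* in, float4* out, int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // args: kernel, grid, block, dynamic shared memory bytes, stream, kernel args...
    hipLaunchKernelGGL(square_elems, grid, block, 0, 0, in, out, n);
}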
Convert the following CUDA code to AMD GPU code:
cuda
#include <algorithm>
#include <iostream>
#include <vector>
typedef unsigned long long data_t;
static inline void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
// get the bit of each number selected by bit_shift
// example: number: 10001, bit_shift: 1, One: 1
// means: check whether the second bit is 1 or not.
__global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
data_t bit = 0;
data_t one=1;
data_t shift=one<<bit_shift;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
bit=d_in[i]&shift;
bit = (bit > 0) ? 1 : 0;
d_out[i] = (One ? bit : 1 - bit);
}
}
__global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n,
unsigned int total_pre) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for (unsigned int i=start; i<end && i<n; i++){
d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre;
if(d_index[i]>=n){
printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]);
}
// if(d_mask[i]==1){
// d_index[i]=total_pre+d_sum[i];
// }
}
}
__global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
d_out[d_index[i]]=d_in[i];
}
}
// The idea for the exclusive prefix sum is similar to my ppc course https://www.youtube.com/watch?v=HVhCtl96gUs
// I use y,z,s to specify which step I am in.
// In particular, I split the whole array into multiple smaller arrays; each small array has [len] numbers.
// Thread level y: each thread does its additions sequentially. Threads work independently, each dealing with [len] numbers.
// Thread level z: the threads within one block run sequentially. Threads work independently, each dealing with one block.
// Thread level s: each thread adds in the result from its previous thread. Threads work independently, each dealing with [len] numbers.
// Block level y: this yields the prefix sum at block level.
// Block level z: only one block with one thread is used here; it does the additions sequentially.
// Block level s: each thread adds in the result from its previous block.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int start=index*len+1;//exclusive
if (start>=n) return; //start==n would write one element past the end of output
int end=start+step;
output[start]=mask[start-1];
for(unsigned int i=start+1;i<end&&i<n;i++){
output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
}
}
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int offset=2*step;
unsigned int start=step*blockDim.x*index+offset;
unsigned int end=step*blockDim.x*(index+1)+1;
for(unsigned int i=start;i<end && i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
if (threadIdx.x==0) return;
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int start=index*step+1;//exclusive
unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
unsigned int base=sum[start-1];
for(unsigned int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
// void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
// int step=len*block_size;//each block has step number
// int start=2*step;
// for(unsigned int i=start; i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
__global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
//only one block and one thread
int step=len*block_size;//each block has step number
int start=2*step;
for(unsigned int i=start; i<n; i+=step){
sum[i]+=sum[i-step];
}
}
// __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// if (index==0) return; //the first block is not needed to merge
// int step=len*blockDim.x;
// int start=index*step+1; //exclusive
// int end=start+step-1;// -1 is important, this position has been added in serial sum
// int base=sum[start-1];//last element at last block
// for(int i=start; i<end && i<n; i++){
// sum[i]+=base;
// }
// }
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
if (blockIdx.x==0) return;//the first block is not needed to merge
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int base_index=blockIdx.x*step*blockDim.x;
unsigned int base=sum[base_index];
int start=index*step; //only the first thread in block should excluded the first element
int end=start+step;
start=(start==base_index)?start+1:start;
// int base=sum[start-1];//last element at last block
for(int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
void psort(int n, data_t *data) {
if(n<=0) return;
// FIXME: Implement a more efficient parallel sorting algorithm for the GPU.
const int block_size=256;//256 threads per block;
const int len=2000; //each thread handles 2000 elements of the prefix sum;
data_t *d_temp;
data_t *d_in=NULL;
CHECK(cudaMalloc((void**)&d_in,n*sizeof(data_t)));
data_t *d_out_long=NULL;
CHECK(cudaMalloc((void**)&d_out_long,n*sizeof(data_t)));
unsigned int *d_out=NULL;
CHECK(cudaMalloc((void**)&d_out,n*sizeof(unsigned int)));
unsigned int *d_sum=NULL;
CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int)));
unsigned int *d_index=NULL;
CHECK(cudaMalloc((void**)&d_index,n*sizeof(unsigned int)));
// std::vector<unsigned int> inter_sum(n);
// unsigned int inter_sum[n];
cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice);
data_t bits=sizeof(data_t)*8;
// unsigned int out[n];
// unsigned int sum[n];
unsigned int total_zeros, mask_last;
//one pass here
for(data_t i=0; i<bits; i++){
CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int)));
getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0);
CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// std::cout<<"out "<<std::endl;
// for(int j=0;j<n;j++){
// std::cout<<out[j]<<" ";
// }
// std::cout<<std::endl;
//inclusive prefix sum
prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n);
CHECK(cudaGetLastError());
serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size);
CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(inter_sum.data(), d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// serialsum_accrossblock(inter_sum.data(), len, n, block_size);
// CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice));
// CHECK(cudaGetLastError());
mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// std::cout<<"sum "<<std::endl;
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(cudaMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost));
total_zeros+=(mask_last==1)?1:0;
getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros);
// std::cout<<"index "<<std::endl;
// CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(cudaGetLastError());
scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n);
CHECK(cudaGetLastError());
//must swap pointers
d_temp = d_in;
d_in = d_out_long;
d_out_long = d_temp;
}
cuda_memcpy(data, d_in, n, cudaMemcpyDeviceToHost);
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out_long));
CHECK(cudaFree(d_out));
CHECK(cudaFree(d_sum));
CHECK(cudaFree(d_index));
// std::sort(data, data + n);
}
|
#include <hip/hip_runtime.h>
#include <algorithm>
#include <iostream>
#include <vector>
typedef unsigned long long data_t;
static inline void check(hipError_t err, const char* context) {
if (err != hipSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< hipGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, hipMemcpyKind direction) {
CHECK(hipMemcpy(target, source, num * sizeof(T), direction));
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
// get the bit of each number selected by bit_shift
// example: number: 10001, bit_shift: 1, One: 1
// means: check whether the second bit is 1 or not.
__global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
data_t bit = 0;
data_t one=1;
data_t shift=one<<bit_shift;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
bit=d_in[i]&shift;
bit = (bit > 0) ? 1 : 0;
d_out[i] = (One ? bit : 1 - bit);
}
}
__global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n,
unsigned int total_pre) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for (unsigned int i=start; i<end && i<n; i++){
d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre;
if(d_index[i]>=n){
printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]);
}
// if(d_mask[i]==1){
// d_index[i]=total_pre+d_sum[i];
// }
}
}
__global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
d_out[d_index[i]]=d_in[i];
}
}
// The idea for the exclusive prefix sum is similar to my ppc course https://www.youtube.com/watch?v=HVhCtl96gUs
// I use y,z,s to specify which step I am in.
// In particular, I split the whole array into multiple smaller arrays; each small array has [len] numbers.
// Thread level y: each thread does its additions sequentially. Threads work independently, each dealing with [len] numbers.
// Thread level z: the threads within one block run sequentially. Threads work independently, each dealing with one block.
// Thread level s: each thread adds in the result from its previous thread. Threads work independently, each dealing with [len] numbers.
// Block level y: this yields the prefix sum at block level.
// Block level z: only one block with one thread is used here; it does the additions sequentially.
// Block level s: each thread adds in the result from its previous block.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int start=index*len+1;//exclusive
if (start>=n) return; //start==n would write one element past the end of output
int end=start+step;
output[start]=mask[start-1];
for(unsigned int i=start+1;i<end&&i<n;i++){
output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
}
}
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int offset=2*step;
unsigned int start=step*blockDim.x*index+offset;
unsigned int end=step*blockDim.x*(index+1)+1;
for(unsigned int i=start;i<end && i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
if (threadIdx.x==0) return;
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int start=index*step+1;//exclusive
unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
unsigned int base=sum[start-1];
for(unsigned int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
// void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
// int step=len*block_size;//each block has step number
// int start=2*step;
// for(unsigned int i=start; i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
__global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
//only one block and one thread
int step=len*block_size;//each block has step number
int start=2*step;
for(unsigned int i=start; i<n; i+=step){
sum[i]+=sum[i-step];
}
}
// __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// if (index==0) return; //the first block is not needed to merge
// int step=len*blockDim.x;
// int start=index*step+1; //exclusive
// int end=start+step-1;// -1 is important, this position has been added in serial sum
// int base=sum[start-1];//last element at last block
// for(int i=start; i<end && i<n; i++){
// sum[i]+=base;
// }
// }
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
if (blockIdx.x==0) return;//the first block is not needed to merge
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int base_index=blockIdx.x*step*blockDim.x;
unsigned int base=sum[base_index];
int start=index*step; //only the first thread in block should excluded the first element
int end=start+step;
start=(start==base_index)?start+1:start;
// int base=sum[start-1];//last element at last block
for(int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
void psort(int n, data_t *data) {
if(n<=0) return;
// FIXME: Implement a more efficient parallel sorting algorithm for the GPU.
const int block_size=256;//256 threads per block;
const int len=2000; //each thread handles 2000 elements of the prefix sum;
data_t *d_temp;
data_t *d_in=NULL;
CHECK(hipMalloc((void**)&d_in,n*sizeof(data_t)));
data_t *d_out_long=NULL;
CHECK(hipMalloc((void**)&d_out_long,n*sizeof(data_t)));
unsigned int *d_out=NULL;
CHECK(hipMalloc((void**)&d_out,n*sizeof(unsigned int)));
unsigned int *d_sum=NULL;
CHECK(hipMalloc((void**)&d_sum,n*sizeof(unsigned int)));
unsigned int *d_index=NULL;
CHECK(hipMalloc((void**)&d_index,n*sizeof(unsigned int)));
// std::vector<unsigned int> inter_sum(n);
// unsigned int inter_sum[n];
cuda_memcpy(d_in,data,n,hipMemcpyHostToDevice);
data_t bits=sizeof(data_t)*8;
// unsigned int out[n];
// unsigned int sum[n];
unsigned int total_zeros, mask_last;
//one pass here
for(data_t i=0; i<bits; i++){
CHECK(hipMemset(d_sum,0,n*sizeof(unsigned int)));
getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0);
CHECK(hipGetLastError());
// CHECK(cudaMemcpy(out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// std::cout<<"out "<<std::endl;
// for(int j=0;j<n;j++){
// std::cout<<out[j]<<" ";
// }
// std::cout<<std::endl;
//inclusive prefix sum
prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n);
CHECK(hipGetLastError());
serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size);
CHECK(hipGetLastError());
// CHECK(cudaMemcpy(inter_sum.data(), d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// serialsum_accrossblock(inter_sum.data(), len, n, block_size);
// CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice));
// CHECK(cudaGetLastError());
mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
// CHECK(cudaMemcpy(sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// std::cout<<"sum "<<std::endl;
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(hipMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), hipMemcpyDeviceToHost));
total_zeros+=(mask_last==1)?1:0;
getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros);
// std::cout<<"index "<<std::endl;
// CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(hipGetLastError());
scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n);
CHECK(hipGetLastError());
//must swap pointers
d_temp = d_in;
d_in = d_out_long;
d_out_long = d_temp;
}
cuda_memcpy(data, d_in, n, hipMemcpyDeviceToHost);
CHECK(hipFree(d_in));
CHECK(hipFree(d_out_long));
CHECK(hipFree(d_out));
CHECK(hipFree(d_sum));
CHECK(hipFree(d_index));
// std::sort(data, data + n);
}
|
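Aside on the radix sort pair above: each pass relies on an exclusive prefix sum of the bit mask to compute scatter offsets. Below is a tiny host-side reference of that scan, useful for checking the multi-stage GPU version (a debugging sketch, not part of the row):

#include <vector>
#include <cassert>
std::vector<unsigned int> exclusiveScan(const std::vector<unsigned int>& mask) {
    std::vector<unsigned int> sum(mask.size(), 0);
    for (size_t i = 1; i < mask.size(); ++i)
        sum[i] = sum[i - 1] + mask[i - 1]; // sum[i] counts set mask bits before i
    return sum;
}
int main() {
    std::vector<unsigned int> mask = {1, 0, 1, 1, 0, 1};
    std::vector<unsigned int> sum = exclusiveScan(mask);
    // {0, 1, 1, 2, 3, 3}: exactly the per-element scatter offsets one pass needs
    assert(sum.back() == 3);
    return 0;
}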
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
using namespace std;
#define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void square(float *d_out, float *d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
}
int main(){
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++){
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
float *d_in;
float *d_out;
CUDA_CALL(cudaMalloc((void**) &d_in, ARRAY_BYTES));
CUDA_CALL(cudaMalloc((void**) &d_out, ARRAY_BYTES));
CUDA_CALL(cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice));
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
CUDA_CALL(cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost));
for(int i=0; i< ARRAY_SIZE; i++){
cout << h_out[i];
if(i%4!=3) cout << "\t";
else cout << endl;
}
}
|
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
#define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPU assert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void square(float *d_out, float *d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
}
int main(){
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++){
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
float *d_in;
float *d_out;
CUDA_CALL(hipMalloc((void**) &d_in, ARRAY_BYTES));
CUDA_CALL(hipMalloc((void**) &d_out, ARRAY_BYTES));
CUDA_CALL(hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice));
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
CUDA_CALL(hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost));
for(int i=0; i< ARRAY_SIZE; i++){
cout << h_out[i];
if(i%4!=3) cout << "\t";
else cout << endl;
}
}
|
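One caveat on the pair above: the gpuAssert macro checks API calls, but a kernel launch itself returns no status, so launch failures slip past it. A common follow-up pattern is sketched below; the HIP_CHECK name and kernel wrapper are assumptions, not part of the row:

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#define HIP_CHECK(x) do { hipError_t e = (x); if (e != hipSuccess) { \
    fprintf(stderr, "HIP error: %s\n", hipGetErrorString(e)); exit(1); } } while (0)
__global__ void square_k(float* out, const float* in) {
    int i = threadIdx.x;
    out[i] = in[i] * in[i];
}
void launchChecked(float* d_out, float* d_in, int n) {
    square_k<<<1, n>>>(d_out, d_in);
    HIP_CHECK(hipGetLastError());      // catches bad launch configurations
    HIP_CHECK(hipDeviceSynchronize()); // surfaces faults from inside the kernel
}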
Convert the following CUDA code to AMD GPU code:
cuda
extern "C"
__global__ void cuAdd(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] + b[i];
}
}
extern "C"
__global__ void cuMult(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] * b[i];
}
}
extern "C"
__global__ void cuDiv(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] / b[i];
}
}
extern "C"
__global__ void cuExp(int n, float *a, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = expf(a[i]);
}
}
|
#include <hip/hip_runtime.h>
extern "C"
__global__ void cuAdd(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] + b[i];
}
}
extern "C"
__global__ void cuMult(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] * b[i];
}
}
extern "C"
__global__ void cuDiv(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] / b[i];
}
}
extern "C"
__global__ void cuExp(int n, float *a, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = expf(a[i]);
}
}
|
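On the pair above: the extern "C" kernels need no body changes because HIP provides the same device math (expf and friends). A hypothetical host driver for one of them shows the full round trip; the kernel name and sizes here are assumptions for illustration:

#include <hip/hip_runtime.h>
#include <cstdio>
extern "C" __global__ void cuExpDemo(int n, float* a, float* result) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = expf(a[i]);
}
int main() {
    const int n = 4;
    float h[n] = {0.f, 1.f, 2.f, 3.f}, out[n];
    float *d_a, *d_r;
    hipMalloc(&d_a, n * sizeof(float));
    hipMalloc(&d_r, n * sizeof(float));
    hipMemcpy(d_a, h, n * sizeof(float), hipMemcpyHostToDevice);
    cuExpDemo<<<1, n>>>(n, d_a, d_r);
    hipMemcpy(out, d_r, n * sizeof(float), hipMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f ", out[i]); // expect 1, e, e^2, e^3
    printf("\n");
    hipFree(d_a); hipFree(d_r);
    return 0;
}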
Convert the following CUDA code to AMD GPU code:
cuda
#include<bits/stdc++.h>
using namespace std;
__global__ void vec_add(int N, int *A, int *B, int *C){
int i = threadIdx.x + blockIdx.x * blockDim.x;
// assert( i<N );
if(i < N) C[i] = A[i] + B[i];
}
int main(int argc, char *argv[]){
srand(0);
int N = 10000, block_size = 256;
if(argc>1) N = stoi(argv[1]);
if(argc>2) block_size = stoi(argv[2]);
int n_block = (N+block_size-1)/block_size;
int *A = new int [N], *B = new int [N], *C = new int [N];
for(int i=0;i<N;++i) A[i] = rand()%50;
for(int i=0;i<N;++i) B[i] = rand()%50;
clock_t start_time, mid_time1, mid_time2, end_time;
// Record the starting time
start_time = clock();
int *dA, *dB, *dC;
cudaMalloc((void **)&dA, N*sizeof(int));
cudaMalloc((void **)&dB, N*sizeof(int));
cudaMalloc((void **)&dC, N*sizeof(int));
// Copy data to device
cudaMemcpy(dA, A, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, N*sizeof(int), cudaMemcpyHostToDevice);
mid_time1 = clock();
// Running code on GPUs
vec_add<<<n_block, block_size>>>(N, dA, dB, dC);
mid_time2 = clock();
cudaMemcpy(C, dC, N*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
// Record the ending time
end_time = clock();
double dt = double(end_time - start_time)/CLOCKS_PER_SEC;
double dt_trans = double(mid_time1 + end_time - start_time - mid_time2)/CLOCKS_PER_SEC;
cout<<"Data Transfer Time Usage: "<<dt_trans<<"s"<<endl;
cout<<"Total Time Usage: "<<dt<<"s\nResults:\n";
int stride = N/10;
for(int i=0;i<N;i+=stride) cout<<C[i]<<' ';
cout<<endl;
delete [] A;
delete [] B;
delete [] C;
return 0;
}
|
#include <hip/hip_runtime.h>
#include<bits/stdc++.h>
using namespace std;
__global__ void vec_add(int N, int *A, int *B, int *C){
int i = threadIdx.x + blockIdx.x * blockDim.x;
// assert( i<N );
if(i < N) C[i] = A[i] + B[i];
}
int main(int argc, char *argv[]){
srand(0);
int N = 10000, block_size = 256;
if(argc>1) N = stoi(argv[1]);
if(argc>2) block_size = stoi(argv[2]);
int n_block = (N+block_size-1)/block_size;
int *A = new int [N], *B = new int [N], *C = new int [N];
for(int i=0;i<N;++i) A[i] = rand()%50;
for(int i=0;i<N;++i) B[i] = rand()%50;
clock_t start_time, mid_time1, mid_time2, end_time;
// Record the starting time
start_time = clock();
int *dA, *dB, *dC;
hipMalloc((void **)&dA, N*sizeof(int));
hipMalloc((void **)&dB, N*sizeof(int));
hipMalloc((void **)&dC, N*sizeof(int));
// Copy data to device
hipMemcpy(dA, A, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dB, B, N*sizeof(int), hipMemcpyHostToDevice);
mid_time1 = clock();
// Running code on GPUs
vec_add<<<n_block, block_size>>>(N, dA, dB, dC);
mid_time2 = clock();
hipMemcpy(C, dC, N*sizeof(int), hipMemcpyDeviceToHost);
hipFree(dA);
hipFree(dB);
hipFree(dC);
// Record the ending time
end_time = clock();
double dt = double(end_time - start_time)/CLOCKS_PER_SEC;
double dt_trans = double(mid_time1 + end_time - start_time - mid_time2)/CLOCKS_PER_SEC;
cout<<"Data Transfer Time Usage: "<<dt_trans<<"s"<<endl;
cout<<"Total Time Usage: "<<dt<<"s\nResults:\n";
int stride = N/10;
for(int i=0;i<N;i+=stride) cout<<C[i]<<' ';
cout<<endl;
delete [] A;
delete [] B;
delete [] C;
return 0;
}
|
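Timing note on the pair above: clock() around an asynchronous launch measures mostly launch overhead, since the kernel has not finished when mid_time2 is taken, so the transfer/total split is approximate. A sketch of event-based timing instead (hipEventElapsedTime reports milliseconds; the wrapper is hypothetical):

#include <hip/hip_runtime.h>
__global__ void vec_add_demo(int N, int* A, int* B, int* C) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < N) C[i] = A[i] + B[i];
}
float timeKernel(int N, int* dA, int* dB, int* dC, int block_size) {
    hipEvent_t t0, t1;
    hipEventCreate(&t0);
    hipEventCreate(&t1);
    hipEventRecord(t0, 0);
    vec_add_demo<<<(N + block_size - 1) / block_size, block_size>>>(N, dA, dB, dC);
    hipEventRecord(t1, 0);
    hipEventSynchronize(t1); // wait until the kernel has actually finished
    float ms = 0.f;
    hipEventElapsedTime(&ms, t0, t1);
    hipEventDestroy(t0);
    hipEventDestroy(t1);
    return ms;
}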
Convert the following CUDA code to AMD GPU code:
cuda
/* Multiply two matrices on the GPU */
#include<iostream>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#include"cuda_runtime.h"
using namespace std;
#define cols 1024
#define rows 1024
__global__ void multiply(float**Ad,float**Bd,float**Cd)
{
int x = blockDim.x*blockIdx.x+threadIdx.x;
int y = blockDim.y*blockIdx.y+threadIdx.y;
if(x<rows && y<cols)
{
float sum = 0;
for(int i=0;i<cols;i++)
{
sum += Ad[y][i]*Bd[i][x];
}
// accumulate in a register: cd is never zero-initialized on the device
Cd[y][x] = sum;
}
}
int main()
{
struct timeval start, end;
int n=cols*rows;
float **A,**B,**C,**Ad,**Bd,**Cd;
float *a,*b,*c,*ad,*bd,*cd;
A=new float* [cols];
B=new float* [cols];
C=new float* [cols];
a=new float [n];
b=new float [n];
c=new float [n];
cudaMalloc((void**)&Ad,sizeof(float*)*cols);
cudaMalloc((void**)&Bd,sizeof(float*)*cols);
cudaMalloc((void**)&Cd,sizeof(float*)*cols);
cudaMalloc((void**)&ad,sizeof(float)*n);
cudaMalloc((void**)&bd,sizeof(float)*n);
cudaMalloc((void**)&cd,sizeof(float)*n);
for(int i=0;i<n;i++)
{
a[i]=2;
b[i]=2;
}
for(int i=0;i<cols;i++)
{
A[i]=ad+i*rows;
B[i]=bd+i*rows;
C[i]=cd+i*rows;
}
gettimeofday( &start, NULL);// timing starts when data begins copying to the GPU
cudaMemcpy(Ad,A,sizeof(float*)*cols,cudaMemcpyHostToDevice);
cudaMemcpy(Bd,B,sizeof(float*)*cols,cudaMemcpyHostToDevice);
cudaMemcpy(Cd,C,sizeof(float*)*cols,cudaMemcpyHostToDevice);
cudaMemcpy(ad,a,sizeof(float)*n,cudaMemcpyHostToDevice);
cudaMemcpy(bd,b,sizeof(float)*n,cudaMemcpyHostToDevice);
dim3 dimBlock(16,16);
dim3 dimGrid(cols/16+1,rows/16+1);
multiply<<<dimGrid,dimBlock>>>(Ad,Bd,Cd);
cudaMemcpy(c,cd,sizeof(float)*n,cudaMemcpyDeviceToHost);
gettimeofday( &end, NULL );// timing ends once results have been copied back from the GPU
float target=4096;
float error=0.0;
for(int i=0;i<n;i++)
{
error+=abs(c[i]-target);
}
cout<<"error is "<<error<<endl;
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
cout << "total time is " << timeuse/1000 << "ms" <<endl;
delete [] a;
delete [] b;
delete [] c;
delete [] A;
delete [] B;
delete [] C;
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
cudaFree(ad);
cudaFree(bd);
cudaFree(cd);
return 0;
}
|
/* Multiply two matrices on the GPU */
#include<iostream>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#include"hip/hip_runtime.h"
using namespace std;
#define cols 1024
#define rows 1024
__global__ void multiply(float**Ad,float**Bd,float**Cd)
{
int x = blockDim.x*blockIdx.x+threadIdx.x;
int y = blockDim.y*blockIdx.y+threadIdx.y;
if(x<rows && y<cols)
{
float sum = 0;
for(int i=0;i<cols;i++)
{
sum += Ad[y][i]*Bd[i][x];
}
// accumulate in a register: cd is never zero-initialized on the device
Cd[y][x] = sum;
}
}
int main()
{
struct timeval start, end;
int n=cols*rows;
float **A,**B,**C,**Ad,**Bd,**Cd;
float *a,*b,*c,*ad,*bd,*cd;
A=new float* [cols];
B=new float* [cols];
C=new float* [cols];
a=new float [n];
b=new float [n];
c=new float [n];
hipMalloc((void**)&Ad,sizeof(float*)*cols);
hipMalloc((void**)&Bd,sizeof(float*)*cols);
hipMalloc((void**)&Cd,sizeof(float*)*cols);
hipMalloc((void**)&ad,sizeof(float)*n);
hipMalloc((void**)&bd,sizeof(float)*n);
hipMalloc((void**)&cd,sizeof(float)*n);
for(int i=0;i<n;i++)
{
a[i]=2;
b[i]=2;
}
for(int i=0;i<cols;i++)
{
A[i]=ad+i*rows;
B[i]=bd+i*rows;
C[i]=cd+i*rows;
}
gettimeofday( &start, NULL);// timing starts when data begins copying to the GPU
hipMemcpy(Ad,A,sizeof(float*)*cols,hipMemcpyHostToDevice);
hipMemcpy(Bd,B,sizeof(float*)*cols,hipMemcpyHostToDevice);
hipMemcpy(Cd,C,sizeof(float*)*cols,hipMemcpyHostToDevice);
hipMemcpy(ad,a,sizeof(float)*n,hipMemcpyHostToDevice);
hipMemcpy(bd,b,sizeof(float)*n,hipMemcpyHostToDevice);
dim3 dimBlock(16,16);
dim3 dimGrid(cols/16+1,rows/16+1);
multiply<<<dimGrid,dimBlock>>>(Ad,Bd,Cd);
hipMemcpy(c,cd,sizeof(float)*n,hipMemcpyDeviceToHost);
gettimeofday( &end, NULL );// timing ends once results have been copied back from the GPU
float target=4096;
float error=0.0;
for(int i=0;i<n;i++)
{
error+=abs(c[i]-target);
}
cout<<"error is "<<error<<endl;
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
cout << "total time is " << timeuse/1000 << "ms" <<endl;
delete [] a;
delete [] b;
delete [] c;
delete [] A;
delete [] B;
delete [] C;
hipFree(Ad);
hipFree(Bd);
hipFree(Cd);
hipFree(ad);
hipFree(bd);
hipFree(cd);
return 0;
}
|
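Aside on the pair above: the double-pointer layout needs a device-resident table of row pointers (Ad, Bd, Cd) on top of the flat buffers. A flat-index variant avoids that extra indirection entirely; this is a sketch, not the dataset's version:

#include <hip/hip_runtime.h>
__global__ void multiplyFlat(const float* A, const float* B, float* C, int rows, int cols) {
    int x = blockDim.x * blockIdx.x + threadIdx.x; // column of C
    int y = blockDim.y * blockIdx.y + threadIdx.y; // row of C
    if (x < cols && y < rows) {
        float acc = 0.f;
        for (int i = 0; i < cols; ++i)
            acc += A[y * cols + i] * B[i * cols + x];
        C[y * cols + x] = acc; // register accumulation, single store
    }
}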
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
__global__ void firstParallel()
{
printf("This is running in parallel.\n");
}
int main()
{
firstParallel<<<5, 5>>>();
cudaDeviceSynchronize();
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void firstParallel()
{
printf("This is running in parallel.\n");
}
int main()
{
firstParallel<<<5, 5>>>();
hipDeviceSynchronize();
}
|
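For the pair above: <<<5, 5>>> runs 25 threads in total, so the message prints 25 times, and device printf output is flushed when the host synchronizes. The same launch written with the explicit HIP API (kernel name changed only to keep this sketch self-contained):

#include <hip/hip_runtime.h>
#include <cstdio>
__global__ void firstParallelDemo() {
    printf("block %d thread %d\n", blockIdx.x, threadIdx.x);
}
int main() {
    hipLaunchKernelGGL(firstParallelDemo, dim3(5), dim3(5), 0, 0);
    hipDeviceSynchronize(); // flushes the 25 printf lines
    return 0;
}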
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void conv2(float *A, float *kernel,int inputSize, int depth, int kernelSize , int stride, int pad, float *B, int outputSize) {
// compute the value of output(i,j): one convolution
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if( !(i < outputSize) || !(j < outputSize) ) return;
int Ai = i*stride;
int Aj = j*stride;
// clip the loop bounds to skip the zero padding
int startk = (pad-Ai) < 0? 0 : pad-Ai;
int endk = kernelSize < (inputSize + pad - Ai) ? kernelSize : (inputSize + pad - Ai);
int startl = (pad-Aj) < 0? 0 : pad-Aj;
int endl = kernelSize < (inputSize + pad - Aj) ? kernelSize : (inputSize + pad - Aj);
float sum = 0;
for(int d = 0; d < depth; d++) {
for( int k = startk ; k < endk; k++) {
for( int l = startl; l < endl; l++) {
sum += A[d*inputSize*inputSize + (Ai+k-pad)*inputSize + Aj+l-pad]*kernel[d*kernelSize*kernelSize + k*kernelSize+l];
}
}
}
// accumulate over every depth slice, then write the single 2D output element
B[i*outputSize + j] = sum;
}
int main(int argc, char * argv[] ) {
// input: inputSize*inputSize*depth
// kernel: kernelSize*kernelSize*depth
// output: outputSize*outputSize
int inputSize = 7;
int depth = 3;
int kernelSize = 3;
int kernelNum = 3;
int stride[3] = {1 , 2 , 3 };
int pad[3] = {0,0,0};
int outputSize[3];
// compute the padding (pad) and output size (outputSize) needed for each stride
for(int i = 0; i < kernelNum; i++) {
if((inputSize - kernelSize)%stride[i] != 0) {
pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2;
}
outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1;
}
// ============================= resource allocation and initialization =========================
// ==== host memory allocation and initialization
// input:A kernel:kernel output:B
float *A, *kernel[3], *B[3];
A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth);
B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth);
}
// initialize input A
for(int d = 0; d < depth; d++) {
for(int i=0; i<inputSize*inputSize; i++) {
A[d*inputSize*inputSize + i] = i;
}
}
// initialize the kernels
for(int i = 0; i < 3; i++){
for(int j = 0; j < kernelSize*kernelSize*depth; j++) {
kernel[i][j] = 1;
}
}
// ==== device (GPU) allocation and initialization
float *d_A, *d_kernel[3], *d_B[3];
cudaMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
cudaMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth);
cudaMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]);
}
cudaMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,cudaMemcpyHostToDevice);
for(int i = 0; i < 3; i++) {
cudaMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,cudaMemcpyHostToDevice);
}
// ============================= kernel launches =========================
struct timeval start, end;
gettimeofday( &start, NULL );
for( int i = 0; i < 3; i++ ) {
int blockx = (int) (log2(outputSize[i])+ 1);
int blocky = blockx;
dim3 Block(blockx,blocky);
dim3 Grid((inputSize+Block.x-1) / Block.x,(inputSize+Block.y-1) / Block.y );
conv2 <<< Grid, Block >>> (d_A,d_kernel[i],inputSize,depth,kernelSize,stride[i],pad[i],d_B[i],outputSize[i]);
}
// copy the results back to the host
for( int i = 0; i < 3; i++ ) {
cudaMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i],cudaMemcpyDeviceToHost);
}
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
//printf("Block(%d,%d) Grid(%d,%d).\n", Block.x, Block.y, Grid.x, Grid.y);
printf("total time is %f ms\n", timeuse/(float)1000);
// write the results to files
FILE *b[3];
b[0] = fopen("matrixB11.m", "wb");
b[1] = fopen("matrixB12.m", "wb");
b[2] = fopen("matrixB13.m", "wb");
for(int k = 0; k < 3; k++ ) {
fprintf(b[k], "B = [ \n");
for (int i = 0; i < outputSize[k]; i++)
{
for (int j = 0; j < outputSize[k]; j++)
fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]);
fprintf(b[k], "\n");
}
fprintf(b[k], "];");
}
// ============================= resource cleanup =========================
free(A);
cudaFree(d_A);
for(int i = 0; i < 3; i++) {
free(kernel[i]);
free(B[i]);
cudaFree(d_B[i]);
cudaFree(d_kernel[i]);
fclose(b[i]);
}
return 0;
}
|
#include "hip/hip_runtime.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void conv2(float *A, float *kernel,int inputSize, int depth, int kernelSize , int stride, int pad, float *B, int outputSize) {
// compute output(i,j): one convolution at this output position
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if( !(i < outputSize) || !(j < outputSize) ) return;
int Ai = i*stride;
int Aj = j*stride;
// clip the kernel window to skip the zero padding
int startk = (pad-Ai) < 0? 0 : pad-Ai;
int endk = kernelSize < (inputSize + pad - Ai) ? kernelSize : (inputSize + pad - Ai);
int startl = (pad-Aj) < 0? 0 : pad-Aj;
int endl = kernelSize < (inputSize + pad - Aj) ? kernelSize : (inputSize + pad - Aj);
float sum = 0;
for(int d = 0; d < depth; d++) {
for( int k = startk ; k < endk; k++) {
for( int l = startl; l < endl; l++) {
sum += A[d*inputSize*inputSize + (Ai+k-pad)*inputSize + Aj+l-pad]*kernel[d*kernelSize*kernelSize + k*kernelSize+l];
}
}
}
B[i*outputSize + j] = sum;
}
int main(int argc, char * argv[] ) {
// input: inputSize*inputSize*depth
// kernel: kernelSize*kernelSize*depth
// output: outputSize*outputSize
int inputSize = 7;
int depth = 3;
int kernelSize = 3;
int kernelNum = 3;
int stride[3] = {1 , 2 , 3 };
int pad[3] = {0,0,0};
int outputSize[3];
// compute the padding (pad) and output size (outputSize) required for each stride
for(int i = 0; i < kernelNum; i++) {
if((inputSize - kernelSize)%stride[i] != 0) {
pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2;
}
outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1;
}
// ============================= resource allocation and initialization =========================
// ==== host (CPU) allocation and initialization
// input:A kernel:kernel output:B
float *A, *kernel[3], *B[3];
A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth);
B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth);
}
// initialize input A
for(int d = 0; d < depth; d++) {
for(int i=0; i<inputSize*inputSize; i++) {
A[d*inputSize*inputSize + i] = i;
}
}
// initialize the kernels
for(int i = 0; i < 3; i++){
for(int j = 0; j < kernelSize*kernelSize*depth; j++) {
kernel[i][j] = 1;
}
}
// ==== device (GPU) allocation and initialization
float *d_A, *d_kernel[3], *d_B[3];
hipMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
hipMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth);
hipMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]);
}
hipMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,hipMemcpyHostToDevice);
for(int i = 0; i < 3; i++) {
hipMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,hipMemcpyHostToDevice);
}
// ============================= kernel launches =========================
struct timeval start, end;
gettimeofday( &start, NULL );
for( int i = 0; i < 3; i++ ) {
int blockx = (int) (log2(outputSize[i])+ 1);
int blocky = blockx;
dim3 Block(blockx,blocky);
dim3 Grid((inputSize+Block.x-1) / Block.x,(inputSize+Block.y-1) / Block.y );
conv2 <<< Grid, Block >>> (d_A,d_kernel[i],inputSize,depth,kernelSize,stride[i],pad[i],d_B[i],outputSize[i]);
}
// copy the results back to the host
for( int i = 0; i < 3; i++ ) {
hipMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i],hipMemcpyDeviceToHost);
}
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
//printf("Block(%d,%d) Grid(%d,%d).\n", Block.x, Block.y, Grid.x, Grid.y);
printf("total time is %f ms\n", timeuse/(float)1000);
// write the results to files
FILE *b[3];
b[0] = fopen("matrixB11.m", "wb");
b[1] = fopen("matrixB12.m", "wb");
b[2] = fopen("matrixB13.m", "wb");
for(int k = 0; k < 3; k++ ) {
fprintf(b[k], "B = [ \n");
for (int i = 0; i < outputSize[k]; i++)
{
for (int j = 0; j < outputSize[k]; j++)
fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]);
fprintf(b[k], "\n");
}
fprintf(b[k], "];");
}
// ============================= resource cleanup =========================
free(A);
hipFree(d_A);
for(int i = 0; i < 3; i++) {
free(kernel[i]);
free(B[i]);
hipFree(d_B[i]);
hipFree(d_kernel[i]);
fclose(b[i]);
}
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ double ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
double v = a[k];
int j = 0;
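// unrolled binary search: each line below descends one level of the implicit tree of stratum boundaries stored in ss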
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ double ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
double v = a[k];
int j = 0;
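// unrolled binary search: each line below descends one level of the implicit tree of stratum boundaries stored in ss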
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
//#include <hayai/hayai.hpp>
//
//#include "btree.cuh"
//
//#include "concurrent-xfasttrie-fixture.cu"
//
//using BTREE = gpu::BTree<key_type, mapped_type>;
//using BTreeInsertionFixture = XTrieInsertionFixture<BTREE, Structure::BTREE>;
//using BTreeGetThreadFixture = XTrieGetThreadFixture<BTREE, Structure::BTREE>;
//using BTreeGetWarpFixture = XTrieGetWarpFixture<BTREE, Structure::BTREE>;
//using BTreePredecessorFixture = XTriePredecessorFixture<BTREE, Structure::BTREE, true>;
//using BTreeSuccessorFixture = XTrieSuccessorFixture<BTREE, Structure::BTREE, true>;
//
//BENCHMARK_F(BTreeInsertionFixture, InsertionBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// insert();
//}
///*
//BENCHMARK_F(BTreeGetThreadFixture, GetThreadBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get_thread();
//}
//
//BENCHMARK_F(BTreeGetWarpFixture, GetWarpBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get_warp();
//}
//
//BENCHMARK_F(BTreePredecessorFixture, PredecessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// predecessor();
//}*/
///*
//BENCHMARK_F(BTreeSuccessorFixture, SuccessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// successor();
//}*/
|
#include <hip/hip_runtime.h>
//#include <hayai/hayai.hpp>
//
//#include "btree.cuh"
//
//#include "concurrent-xfasttrie-fixture.cu"
//
//using BTREE = gpu::BTree<key_type, mapped_type>;
//using BTreeInsertionFixture = XTrieInsertionFixture<BTREE, Structure::BTREE>;
//using BTreeGetThreadFixture = XTrieGetThreadFixture<BTREE, Structure::BTREE>;
//using BTreeGetWarpFixture = XTrieGetWarpFixture<BTREE, Structure::BTREE>;
//using BTreePredecessorFixture = XTriePredecessorFixture<BTREE, Structure::BTREE, true>;
//using BTreeSuccessorFixture = XTrieSuccessorFixture<BTREE, Structure::BTREE, true>;
//
//BENCHMARK_F(BTreeInsertionFixture, InsertionBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// insert();
//}
///*
//BENCHMARK_F(BTreeGetThreadFixture, GetThreadBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get_thread();
//}
//
//BENCHMARK_F(BTreeGetWarpFixture, GetWarpBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get_warp();
//}
//
//BENCHMARK_F(BTreePredecessorFixture, PredecessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// predecessor();
//}*/
///*
//BENCHMARK_F(BTreeSuccessorFixture, SuccessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// successor();
//}*/
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define MAX 65535
#define imin(a,b) (a<b?a:b)
const int arr_size =8;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32,(arr_size +threadsPerBlock -1)/threadsPerBlock);
__global__ void kernel(float*arrA , float* arrB, float* arrC)
{
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < arr_size)
{
temp += arrA[tid] * arrB[tid];
tid += blockDim.x * gridDim.x; // grid-stride step
}
//set cache values
cache[cacheIndex] = temp;
__syncthreads();
//REDUCTION FUNCTION
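// tree reduction in shared memory: each pass folds the upper half of the active range into the lower half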
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
arrC[blockIdx.x] = cache[0];
}
}
int main(int argc, char **argv)
{
const int arr_bytes = arr_size * sizeof(float);
float arr_a[MAX];
float arr_b[MAX];
float partial_c[MAX];
float* dev_a;
float* dev_b;
float* partialdev_c;
int i;
float j = 1.0;
for (i = 0; i < arr_size; i++)
{
arr_a[i] = j;
arr_b[i] = j * j;
}
cudaMalloc((void**)&dev_a, arr_bytes);
cudaMalloc((void**)&dev_b, arr_bytes);
cudaMalloc((void**)&partialdev_c, blocksPerGrid * sizeof(float));
cudaMemcpy(dev_a, arr_a, arr_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, arr_b, arr_bytes, cudaMemcpyHostToDevice);
kernel <<<blocksPerGrid,threadsPerBlock >>>(dev_a,dev_b,partialdev_c);
cudaMemcpy(partial_c, partialdev_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);
//calculate final dot product on cpu side
float c = 0;
for (i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
printf("The value of Dot product is : %f\n", c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(partialdev_c);
}
|
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define MAX 65535
#define imin(a,b) (a<b?a:b)
const int arr_size =8;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32,(arr_size +threadsPerBlock -1)/threadsPerBlock);
__global__ void kernel(float*arrA , float* arrB, float* arrC)
{
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < arr_size)
{
temp += arrA[tid] * arrB[tid];
tid += blockDim.x * gridDim.x; // grid-stride step
}
//set cache values
cache[cacheIndex] = temp;
__syncthreads();
//REDUCTION FUNCTION
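// tree reduction in shared memory: each pass folds the upper half of the active range into the lower half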
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
arrC[blockIdx.x] = cache[0];
}
}
int main(int argc, char **argv)
{
const int arr_bytes = arr_size * sizeof(float);
float arr_a[MAX];
float arr_b[MAX];
float partial_c[MAX];
float* dev_a;
float* dev_b;
float* partialdev_c;
int i;
float j = 1.0;
for (i = 0; i < arr_size; i++)
{
arr_a[i] = j;
arr_b[i] = j * j;
}
hipMalloc((void**)&dev_a, arr_bytes);
hipMalloc((void**)&dev_b, arr_bytes);
hipMalloc((void**)&partialdev_c, blocksPerGrid * sizeof(float));
hipMemcpy(dev_a, arr_a, arr_bytes, hipMemcpyHostToDevice);
hipMemcpy(dev_b, arr_b, arr_bytes, hipMemcpyHostToDevice);
kernel <<<blocksPerGrid,threadsPerBlock >>>(dev_a,dev_b,partialdev_c);
hipMemcpy(partial_c, partialdev_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost);
//calculate final dot product on cpu side
float c = 0;
for (i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
printf("The value of Dot product is : %f\n", c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(partialdev_c);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#define N 5
#define BR() printf("\n")
#define BRS(str) printf("%s\n",str)
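// Note: despite the FIFO name, push/pop below implement LIFO (stack) semantics.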
typedef struct {
int top;
int* data;
int stack_size;
}FIFO;
void exec();
void initialize_array(int*);
void print_array(int*);
int main(int argc, char const *argv[]) {
exec();
return 0;
}
// __device__ int i,j,k;
__device__ int push(int new_data,FIFO* stack_t){
if(stack_t->top >= stack_t->stack_size){ // stack is full
return -1;
}
stack_t->data[stack_t->top] = new_data;
stack_t->top++;
return 1;
}
__device__ int pop(FIFO* stack_t){
if(stack_t->top == 0){
return -1;
}
stack_t->top--;
return 1;
}
__device__ int initialize_stack(FIFO* stack_t,int stack_size){
stack_t->top = 0;
stack_t->stack_size = stack_size;
stack_t->data = (int*) malloc(stack_size*sizeof(int));
if(stack_t->data == NULL){
return -1;
}
return 1;
}
__device__ int top(FIFO* stack_t){
if(stack_t->top == 0){
return -1;
}
return stack_t->data[stack_t->top-1];
}
__device__ int isEmpty(FIFO* stack_t){
if(stack_t->top == 0)
return 1;
else
return 0;
}
__device__ void swap(int *x, int *y)
{
int tmp;
tmp = *x;
*x = *y;
*y = tmp;
}
__device__ void print_d_array(int *array){
int i;
BRS(__func__);
printf("blockIdx.x %d , threadIdx.x %d\n", blockIdx.x, threadIdx.x);
for (i = 0; i < N; i++) {
printf("%d ",array[i]);
}//for
BR();
}
__global__ void kernel_test_stack(int *d_array){
int status;
int i, x = 3, y = 6;
FIFO stack1;
print_d_array(d_array);
// check the swap
printf("x: %d y: %d\n", x, y);
swap(&x,&y);
printf("x: %d y: %d\n", x, y);
// check the stack
if ((status = initialize_stack(&stack1, N)) == -1) {
printf("initialize_stack error LINE:%d \n", __LINE__);
}
printf("blockIdx.x %d , threadIdx.x %d stack address %p x %p y%p \n", blockIdx.x, threadIdx.x, &stack1, &x, &y);
if(isEmpty(&stack1)){
BRS("Empty");
}//if
else{
BRS("NOT Empty");
}//else
for(i = 1 ; i < N ; i++){
push(i, &stack1);
printf("push: %d\n",i);
if(isEmpty(&stack1)){
BRS("Empty");
// printf("top: %d \n",top(&stack1));
}//if
else{
BRS("NOT Empty");
// printf("top: %d \n",top(&stack1));
}//else
}//for
for(i = 1 ; i < N ; i++){
pop(&stack1);
BRS("pop");
if(isEmpty(&stack1)){
BRS("Empty");
printf("top: %d \n",top(&stack1));
}//if
else{
BRS("NOT Empty");
printf("top: %d \n",top(&stack1));
}//else
}//for
}//Kernel
void exec(){
int array[N];
int *d_array;
int iDev = 0;
dim3 grid, block;
cudaDeviceProp iProp;
cudaSetDevice(iDev);
cudaGetDeviceProperties(&iProp, iDev);
printf("Device %d: %s\n", iDev, iProp.name);
initialize_array(array);
print_array(array);
cudaMalloc((int**)&d_array, sizeof(array));
cudaMemcpy(d_array, array, sizeof(array), cudaMemcpyHostToDevice);
grid.x = 1;
block.x = 2;
kernel_test_stack<<<grid, block>>>(d_array);
cudaMemcpy(array, d_array, sizeof(array), cudaMemcpyDeviceToHost);
print_array(array);
cudaFree(d_array);
cudaDeviceReset();
}
void initialize_array(int* array){
int i;
for (i = 0; i < N; i++) {
array[i] = rand() % N * 2;
}//for
}//function
void print_array(int* array){
int i;
BRS(__func__);
for (i = 0; i < N; i++) {
printf("%d ",array[i]);
}//for
BR();
}//function
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 5
#define BR() printf("\n")
#define BRS(str) printf("%s\n",str)
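// Note: despite the FIFO name, push/pop below implement LIFO (stack) semantics.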
typedef struct {
int top;
int* data;
int stack_size;
}FIFO;
void exec();
void initialize_array(int*);
void print_array(int*);
int main(int argc, char const *argv[]) {
exec();
return 0;
}
// __device__ int i,j,k;
__device__ int push(int new_data,FIFO* stack_t){
if(stack_t->top >= stack_t->stack_size){ // stack is full
return -1;
}
stack_t->data[stack_t->top] = new_data;
stack_t->top++;
return 1;
}
__device__ int pop(FIFO* stack_t){
if(stack_t->top == 0){
return -1;
}
stack_t->top--;
return 1;
}
__device__ int initialize_stack(FIFO* stack_t,int stack_size){
stack_t->top = 0;
stack_t->stack_size = stack_size;
stack_t->data = (int*) malloc(stack_size*sizeof(int));
if(stack_t->data == NULL){
return -1;
}
return 1;
}
__device__ int top(FIFO* stack_t){
if(stack_t->top == 0){
return -1;
}
return stack_t->data[stack_t->top-1];
}
__device__ int isEmpty(FIFO* stack_t){
if(stack_t->top == 0)
return 1;
else
return 0;
}
__device__ void swap(int *x, int *y)
{
int tmp;
tmp = *x;
*x = *y;
*y = tmp;
}
__device__ void print_d_array(int *array){
int i;
BRS(__func__);
printf("blockIdx.x %d , threadIdx.x %d\n", blockIdx.x, threadIdx.x);
for (i = 0; i < N; i++) {
printf("%d ",array[i]);
}//for
BR();
}
__global__ void kernel_test_stack(int *d_array){
int status;
int i, x = 3, y = 6;
FIFO stack1;
print_d_array(d_array);
// check the swap
printf("x: %d y: %d\n", x, y);
swap(&x,&y);
printf("x: %d y: %d\n", x, y);
// check the stack
if ((status = initialize_stack(&stack1, N)) == -1) {
printf("initialize_stack error LINE:%d \n", __LINE__);
}
printf("blockIdx.x %d , threadIdx.x %d stack address %p x %p y%p \n", blockIdx.x, threadIdx.x, &stack1, &x, &y);
if(isEmpty(&stack1)){
BRS("Empty");
}//if
else{
BRS("NOT Empty");
}//else
for(i = 1 ; i < N ; i++){
push(i, &stack1);
printf("push: %d\n",i);
if(isEmpty(&stack1)){
BRS("Empty");
// printf("top: %d \n",top(&stack1));
}//if
else{
BRS("NOT Empty");
// printf("top: %d \n",top(&stack1));
}//else
}//for
for(i = 1 ; i < N ; i++){
pop(&stack1);
BRS("pop");
if(isEmpty(&stack1)){
BRS("Empty");
printf("top: %d \n",top(&stack1));
}//if
else{
BRS("NOT Empty");
printf("top: %d \n",top(&stack1));
}//else
}//for
}//Kernel
void exec(){
int array[N];
int *d_array;
int iDev = 0;
dim3 grid, block;
hipDeviceProp_t iProp;
hipSetDevice(iDev);
hipGetDeviceProperties(&iProp, iDev);
printf("Device %d: %s\n", iDev, iProp.name);
initialize_array(array);
print_array(array);
hipMalloc((int**)&d_array, sizeof(array));
hipMemcpy(d_array, array, sizeof(array), hipMemcpyHostToDevice);
grid.x = 1;
block.x = 2;
kernel_test_stack<<<grid, block>>>(d_array);
hipMemcpy(array, d_array, sizeof(array), hipMemcpyDeviceToHost);
print_array(array);
hipFree(d_array);
hipDeviceReset();
}
void initialize_array(int* array){
int i;
for (i = 0; i < N; i++) {
array[i] = rand() % N * 2;
}//for
}//function
void print_array(int* array){
int i;
BRS(__func__);
for (i = 0; i < N; i++) {
printf("%d ",array[i]);
}//for
BR();
}//function
|
Convert the following CUDA code to AMD GPU code:
cuda
// nvcc -arch sm_21 -o test -run --keep --ptxas-options="-v" test.cu
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
__global__ void transpose (int* Input, int* Output) {
}
|
// nvcc -arch sm_21 -o test -run --keep --ptxas-options="-v" test.cu
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
__global__ void transpose (int* Input, int* Output) {
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
//Note that any functions that want to be called from the kernel must be preceded by __device__
//Function we are integrating
__device__ float myFunction(float x){
return pow(x,4);
}
//Trapezoidal rule calculation
__device__ float trapezoidal(float a, float b){
return (b-a)*((myFunction(a)+myFunction(b))/2);
}
//Composite trap rule calculation
__device__ float composite_trapezoidal(float a, float b, int n){
float h=(b-a)/(n);
float total=0;
int i;
for (i=0;i<n;i++){
total=total+trapezoidal(a+i*h,a+(i+1)*h);
}
return total;
}
//This section runs on the GPUs
__global__ void kernel(float* arr, float A, float B, int P, int N){
//Who am I?
int id = blockIdx.x * blockDim.x + threadIdx.x;
//calculate number of intervals, where they start, and where they end, and what interval this processor will use
float intervalWidth = (B-A)/(P);
float intervalStart = A+(intervalWidth)*(id);
float intervalEnd = intervalStart+intervalWidth;
//calculate the partial sum of this interval
arr[id] = composite_trapezoidal(intervalStart,intervalEnd,N);
}
int main(int argc, char** argv){
//Process input from command line
if (argc<4){ // need the program name plus a, b and N
printf("Please enter a,b,N\n");
return 1;
}
float A=atof(argv[1]);
float B=atof(argv[2]);
int N=atoi(argv[3]);
printf("Integrating x^4 from %.3f to %.3f with %d points\n", A, B, N);
//How many threads will we use and how much data is in each thread?
int elements = 512;
int bytes = elements * sizeof(float);
//Create pointers to host and device arrays
float *hostArray = 0;
float *deviceArray = 0;
//Create the array on the host and on the GPU
hostArray = (float*) malloc(bytes);
cudaMalloc((void**)&deviceArray, bytes);
int blockSize = 128;
int gridSize = elements / blockSize;
//Instruct each GPU core to run its kernel section
kernel<<<gridSize,blockSize>>>(deviceArray, A, B, elements, N);
//Gather all the partial sums
cudaMemcpy(hostArray, deviceArray, bytes, cudaMemcpyDeviceToHost);
//Reduce the partial sums to a single integral
float sum = 0;
for(int i=0; i < elements; ++i){
sum += hostArray[i];
}
//Print result
printf("Integrating x^4 from %.3f to %.3f with %d points is: %.3f\n", A, B, N, sum);
//Deallocate the two arrays
free(hostArray);
cudaFree(deviceArray);
//Exit from the calling program
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
//Note that any functions that want to be called from the kernel must be preceded by __device__
//Function we are integrating
__device__ float myFunction(float x){
return pow(x,4);
}
//Trapezoidal rule calculation
__device__ float trapezoidal(float a, float b){
return (b-a)*((myFunction(a)+myFunction(b))/2);
}
//Composite trap rule calculation
__device__ float composite_trapezoidal(float a, float b, int n){
float h=(b-a)/(n);
float total=0;
int i;
for (i=0;i<n;i++){
total=total+trapezoidal(a+i*h,a+(i+1)*h);
}
return total;
}
//This section runs on the GPUs
__global__ void kernel(float* arr, float A, float B, int P, int N){
//Who am I?
int id = blockIdx.x * blockDim.x + threadIdx.x;
//calculate number of intervals, where they start, and where they end, and what interval this processor will use
float intervalWidth = (B-A)/(P);
float intervalStart = A+(intervalWidth)*(id);
float intervalEnd = intervalStart+intervalWidth;
//calculate the partial sum of this interval
arr[id] = composite_trapezoidal(intervalStart,intervalEnd,N);
}
int main(int argc, char** argv){
//Process input from command line
if (argc<4){ // need the program name plus a, b and N
printf("Please enter a,b,N\n");
return 1;
}
float A=atof(argv[1]);
float B=atof(argv[2]);
int N=atoi(argv[3]);
printf("Integrating x^4 from %.3f to %.3f with %d points\n", A, B, N);
//How many threads will we use and how much data is in each thread?
int elements = 512;
int bytes = elements * sizeof(float);
//Create pointers to host and device arrays
float *hostArray = 0;
float *deviceArray = 0;
//Create the array on the host and on the GPU
hostArray = (float*) malloc(bytes);
hipMalloc((void**)&deviceArray, bytes);
int blockSize = 128;
int gridSize = elements / blockSize;
//Instruct each GPU core to run its kernel section
kernel<<<gridSize,blockSize>>>(deviceArray, A, B, elements, N);
//Gather all the partial sums
hipMemcpy(hostArray, deviceArray, bytes, hipMemcpyDeviceToHost);
//Reduce the partial sums to a single integral
float sum = 0;
for(int i=0; i < elements; ++i){
sum += hostArray[i];
}
//Print result
printf("Integrating x^4 from %.3f to %.3f with %d points is: %.3f\n", A, B, N, sum);
//Deallocate the two arrays
free(hostArray);
hipFree(deviceArray);
//Exit from the calling program
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "time.h"
#define A_w 50
#define A_h 50
#define B_w 32
#define B_h 32
typedef struct{
int width;
int height;
float * elements;
}Matrix;
// #define
void rightKronecker1(Matrix A, Matrix B, Matrix C){
for(int c_row=0; c_row<C.height; c_row++){
for(int c_col=0; c_col<C.width; c_col++){
C.elements[c_col + c_row*C.width] =
A.elements[c_col/B.width + c_row/B.height * A.width]
* B.elements[c_col%B.width + c_row%B.height*B.width];
}
}
}
void rightKronecker2(Matrix A, Matrix B, Matrix C){
for(int a_row=0; a_row<A.height; a_row++){
for(int a_col=0; a_col<A.width; a_col++){
for(int b_row=0; b_row<B.height; b_row++){
for(int b_col=0; b_col<B.width; b_col++){
C.elements[(b_col+a_col*B.width)+(b_row+a_row*B.height)*A.width*B.width]
= A.elements[a_col+a_row*A.width] * B.elements[b_col+b_row*B.width];
}
}
}
}
}
void generatorNum(float* array, int num)
{
// srand((unsigned)time(NULL));
for(int i=0;i<num;i++)
{
array[i]=rand()%5;
}
}
void printUsage(void)
{
printf("\n");
printf("The program aims to calculate the product of matrix A and B\n");
printf("-h matrix A row num\n");
printf("-w matrix A col num\n");
printf("-H matrix B row num\n");
printf("-W matrix B col num\n");
}
int main(int argc,char** argv){
// int A_w,B_w,A_h,B_h;
// if(argc==1)
// {
// printf("Error: no enough parameters.Please input the col and row number of Matrix A and B,respectively\n");
// exit(0);
// }
// else if(argc==2)
// {
// if(strcmp("--help",argv[1])==0)
// {
// printUsage();
// exit(0);
// }
// }
// for(int id=1;id<argc;id+=2)
// {
// if(strcmp("-h",argv[id])==0)
// A_h=atoi(argv[id+1]);
// else if(strcmp("-w",argv[id])==0)
// A_w=atoi(argv[id+1]);
// else if(strcmp("-W",argv[id])==0)
// B_w=atoi(argv[id+1]);
// else if(strcmp("-H",argv[id])==0)
// B_h=atoi(argv[id+1]);
// }
// Matrix A,d_A,B,d_B,C,d_C;
Matrix A, B, C1, C2;
A.width=A_w;A.height=A_h;
B.width=B_w;B.height=B_h;
C1.width=A_w*B_w;C1.height=A_h*B_h;
C2.width=A_w*B_w;C2.height=A_h*B_h;
A.elements=(float *)malloc(A.width*A.height*sizeof(float));
B.elements=(float *)malloc(B.width*B.height*sizeof(float));
C1.elements=(float *)malloc(C1.width*C1.height*sizeof(float));
C2.elements=(float *)malloc(C2.width*C2.height*sizeof(float));
// A.elements=(float *)malloc(A.width*A.height*sizeof(float));
// B.elements=(float *)malloc(B.width*B.height*sizeof(float));
// C.elements=(float *)malloc(C.width*C.height*sizeof(float));
generatorNum(A.elements,A.width*A.height);
generatorNum(B.elements,B.width*B.height);
memset(C1.elements,0,C1.width*sizeof(float)*C1.height);
memset(C2.elements,0,C2.width*sizeof(float)*C2.height);
// printf("A.elements:\n");
// for(int i=0;i<A.height;i++){
// for(int j=0;j<A.width;j++){
// printf("%d ", int(A.elements[j+i*A.width]));
// }
// printf("\n");
// }
// printf("B.elements:\n");
// for(int i=0;i<B.height;i++){
// for(int j=0;j<B.width;j++){
// printf("%d ", int(B.elements[j+i*B.width]));
// }
// printf("\n");
// }
srand(time(0));
clock_t start,finish1, finish2;
start=clock();
rightKronecker1(A, B, C1);
finish1=clock();
rightKronecker2(A, B, C2);
finish2=clock();
// printf("C1.elements:\n");
// for(int i=0;i<C1.height;i++){
// for(int j=0;j<C1.width;j++){
// printf("%d ", C1.elements[j+i*C1.width]);
// }
// printf("\n");
// }
// printf("C2.elements:\n");
// for(int i=0;i<C2.height;i++){
// for(int j=0;j<C2.width;j++){
// printf("%d ", C2.elements[j+i*C2.width]);
// }
// printf("\n");
// }
printf("Difference between 2 method:\n");
float diff = 0;
for(int i=0;i<C2.height;i++){
for(int j=0;j<C2.width;j++){
diff += C2.elements[j+i*C2.width] - C1.elements[j+i*C2.width]; // accumulate the element-wise differences
}
}
printf("%f\n", diff);
printf("method1 cost time %f ms\n",(finish1-start)*1000.0/CLOCKS_PER_SEC);
printf("method2 cost time %f ms\n",(finish2-finish1)*1000.0/CLOCKS_PER_SEC);
// malloc matrix A B C on GPU
// cudaMalloc(&d_A.elements,sizeof(float)*A.width*A.height);
// cudaMalloc(&d_B.elements,sizeof(float)*B.width*B.height);
// cudaMalloc(&d_C.elements,sizeof(float)*C.width*C.height);
return 0;
}
|
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "time.h"
#define A_w 50
#define A_h 50
#define B_w 32
#define B_h 32
typedef struct{
int width;
int height;
float * elements;
}Matrix;
// #define
void rightKronecker1(Matrix A, Matrix B, Matrix C){
for(int c_row=0; c_row<C.height; c_row++){
for(int c_col=0; c_col<C.width; c_col++){
C.elements[c_col + c_row*C.width] =
A.elements[c_col/B.width + c_row/B.height * A.width]
* B.elements[c_col%B.width + c_row%B.height*B.width];
}
}
}
void rightKronecker2(Matrix A, Matrix B, Matrix C){
for(int a_row=0; a_row<A.height; a_row++){
for(int a_col=0; a_col<A.width; a_col++){
for(int b_row=0; b_row<B.height; b_row++){
for(int b_col=0; b_col<B.width; b_col++){
C.elements[(b_col+a_col*B.width)+(b_row+a_row*B.height)*A.width*B.width]
= A.elements[a_col+a_row*A.width] * B.elements[b_col+b_row*B.width];
}
}
}
}
}
void generatorNum(float* array, int num)
{
// srand((unsigned)time(NULL));
for(int i=0;i<num;i++)
{
array[i]=rand()%5;
}
}
void printUsage(void)
{
printf("\n");
printf("The program aims to calculate the product of matrix A and B\n");
printf("-h matrix A row num\n");
printf("-w matrix A col num\n");
printf("-H matrix B row num\n");
printf("-W matrix B col num\n");
}
int main(int argc,char** argv){
// int A_w,B_w,A_h,B_h;
// if(argc==1)
// {
// printf("Error: no enough parameters.Please input the col and row number of Matrix A and B,respectively\n");
// exit(0);
// }
// else if(argc==2)
// {
// if(strcmp("--help",argv[1])==0)
// {
// printUsage();
// exit(0);
// }
// }
// for(int id=1;id<argc;id+=2)
// {
// if(strcmp("-h",argv[id])==0)
// A_h=atoi(argv[id+1]);
// else if(strcmp("-w",argv[id])==0)
// A_w=atoi(argv[id+1]);
// else if(strcmp("-W",argv[id])==0)
// B_w=atoi(argv[id+1]);
// else if(strcmp("-H",argv[id])==0)
// B_h=atoi(argv[id+1]);
// }
// Matrix A,d_A,B,d_B,C,d_C;
Matrix A, B, C1, C2;
A.width=A_w;A.height=A_h;
B.width=B_w;B.height=B_h;
C1.width=A_w*B_w;C1.height=A_h*B_h;
C2.width=A_w*B_w;C2.height=A_h*B_h;
A.elements=(float *)malloc(A.width*A.height*sizeof(float));
B.elements=(float *)malloc(B.width*B.height*sizeof(float));
C1.elements=(float *)malloc(C1.width*C1.height*sizeof(float));
C2.elements=(float *)malloc(C2.width*C2.height*sizeof(float));
// A.elements=(float *)malloc(A.width*A.height*sizeof(float));
// B.elements=(float *)malloc(B.width*B.height*sizeof(float));
// C.elements=(float *)malloc(C.width*C.height*sizeof(float));
generatorNum(A.elements,A.width*A.height);
generatorNum(B.elements,B.width*B.height);
memset(C1.elements,0,C1.width*sizeof(float)*C1.height);
memset(C2.elements,0,C2.width*sizeof(float)*C2.height);
// printf("A.elements:\n");
// for(int i=0;i<A.height;i++){
// for(int j=0;j<A.width;j++){
// printf("%d ", int(A.elements[j+i*A.width]));
// }
// printf("\n");
// }
// printf("B.elements:\n");
// for(int i=0;i<B.height;i++){
// for(int j=0;j<B.width;j++){
// printf("%d ", int(B.elements[j+i*B.width]));
// }
// printf("\n");
// }
srand(time(0));
clock_t start,finish1, finish2;
start=clock();
rightKronecker1(A, B, C1);
finish1=clock();
rightKronecker2(A, B, C2);
finish2=clock();
// printf("C1.elements:\n");
// for(int i=0;i<C1.height;i++){
// for(int j=0;j<C1.width;j++){
// printf("%d ", C1.elements[j+i*C1.width]);
// }
// printf("\n");
// }
// printf("C2.elements:\n");
// for(int i=0;i<C2.height;i++){
// for(int j=0;j<C2.width;j++){
// printf("%d ", C2.elements[j+i*C2.width]);
// }
// printf("\n");
// }
printf("Difference between 2 method:\n");
float diff = 0;
for(int i=0;i<C2.height;i++){
for(int j=0;j<C2.width;j++){
diff += C2.elements[j+i*C2.width] - C1.elements[j+i*C2.width]; // accumulate the element-wise differences
}
}
printf("%f\n", diff);
printf("method1 cost time %f ms\n",(finish1-start)*1000.0/CLOCKS_PER_SEC);
printf("method2 cost time %f ms\n",(finish2-finish1)*1000.0/CLOCKS_PER_SEC);
// malloc matrix A B C on GPU
// cudaMalloc(&d_A.elements,sizeof(float)*A.width*A.height);
// cudaMalloc(&d_B.elements,sizeof(float)*B.width*B.height);
// cudaMalloc(&d_C.elements,sizeof(float)*C.width*C.height);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/* Block size X: 32 */
__global__ void fct_ale_b2(const int maxLevels, const double dt, const double fluxEpsilon, const int * __restrict__ nLevels, const double * __restrict__ area_inv, const double * __restrict__ fct_ttf_max, const double * __restrict__ fct_ttf_min, double * __restrict__ fct_plus, double * __restrict__ fct_minus)
{
int index = 0;
double area_item = 0;
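// the 32 threads of the block stride cooperatively over the vertical levels handled by this block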
for ( int level = threadIdx.x; level < nLevels[blockIdx.x] - 1; level += 32 )
{
index = (blockIdx.x * maxLevels) + level;
area_item = area_inv[index + blockIdx.x];
fct_plus[index] = fmin(1.0, fct_ttf_max[index] / (fct_plus[index] * dt * area_item + fluxEpsilon));
fct_minus[index] = fmin(1.0, fct_ttf_min[index] / (fct_minus[index] * dt * area_item - fluxEpsilon));
}
}
|
#include <hip/hip_runtime.h>
/* Block size X: 32 */
__global__ void fct_ale_b2(const int maxLevels, const double dt, const double fluxEpsilon, const int * __restrict__ nLevels, const double * __restrict__ area_inv, const double * __restrict__ fct_ttf_max, const double * __restrict__ fct_ttf_min, double * __restrict__ fct_plus, double * __restrict__ fct_minus)
{
int index = 0;
double area_item = 0;
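// the 32 threads of the block stride cooperatively over the vertical levels handled by this block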
for ( int level = threadIdx.x; level < nLevels[blockIdx.x] - 1; level += 32 )
{
index = (blockIdx.x * maxLevels) + level;
area_item = area_inv[index + blockIdx.x];
fct_plus[index] = fmin(1.0, fct_ttf_max[index] / (fct_plus[index] * dt * area_item + fluxEpsilon));
fct_minus[index] = fmin(1.0, fct_ttf_min[index] / (fct_minus[index] * dt * area_item - fluxEpsilon));
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
using namespace std;
__global__ void setValue(float *data, int idx, float value) {
if(threadIdx.x == 0) {
data[idx] = value;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
__global__ void setValue(float *data, int idx, float value) {
if(threadIdx.x == 0) {
data[idx] = value;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__device__ float sigmoid(float x) {
return 1.0f / (1 + __expf(-x));
}
__global__ void sigmoidActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
A[index] = sigmoid(Z[index]);
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__device__ float sigmoid(float x) {
return 1.0f / (1 + __expf(-x));
}
__global__ void sigmoidActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
A[index] = sigmoid(Z[index]);
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>
// #define NUM_PARTICLES 10000
// #define NUM_ITERATIONS 10000
// int TPB = 16;
#define SEED 10
#define EPSILON 1e-5
typedef struct {
float3 position;
float3 velocity;
} Particle;
// Deterministically generates a "random" float, provided a seed and 3 integers.
__host__ __device__ float gen_random(int seed, int a, int b, int c) {
return (float)((seed * a + b) % c) / c;
}
// Given an array of particles and an index, print that particle.
void printParticle(Particle* particles, int index){
printf("%f %f %f %f %f %f\n",
particles[index].position.x, particles[index].position.y, particles[index].position.z,
particles[index].velocity.x, particles[index].velocity.y, particles[index].velocity.z);
}
// Compare two arrays of Particles. If their position coordinates are all within EPSILON of each other,
// return true, else false.
__host__ bool arraysMatch(Particle* arr1, Particle* arr2, int num_particles)
{
for (int i = 0; i < num_particles; i++) {
if (fabs(arr1[i].position.x - arr2[i].position.x) > EPSILON ||
fabs(arr1[i].position.y - arr2[i].position.y) > EPSILON ||
fabs(arr1[i].position.z - arr2[i].position.z) > EPSILON)
return false;
}
return true;
}
// Get the current time
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
// Replaces the x, y and z values in a float3 to random values between 0 and 1.
void randomizeFloat3(float3* f3) {
f3->x = (float) rand() / RAND_MAX;
f3->y = (float) rand() / RAND_MAX;
f3->z = (float) rand() / RAND_MAX;
}
// Randomizes the position and velocity of all Particles in an array.
void randomizeParticles(Particle* particles, int num_particles) {
srand(0);
for (int i = 0; i < num_particles; i++) {
randomizeFloat3(&particles[i].position);
randomizeFloat3(&particles[i].velocity);
}
}
// Updates a particle's position by its velocity, then updates its velocity
__host__ __device__ void updateParticle(Particle* particle, int id, int iter, int num_particles) {
int dt = 1;
// update position
particle->position.x += dt * particle->velocity.x;
particle->position.y += dt * particle->velocity.y;
particle->position.z += dt * particle->velocity.z;
// update the velocity randomly
particle->velocity.x += gen_random(SEED, id, iter, num_particles);
particle->velocity.y += gen_random(SEED, id, iter, num_particles);
particle->velocity.z += gen_random(SEED, id, iter, num_particles);
}
// CPU function that updates a given particle.
void cpu_updatePositionAndVelocity(Particle* particle, int id, int iter, int num_particles) {
updateParticle(particle, id, iter, num_particles);
}
// Kernel that finds a given Particle's ID then updates it if within range.
__global__ void gpu_updatePositionAndVelocity(Particle* particles, int iter, int num_particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= num_particles) // If out of bounds, ignore the Particle.
return;
else
updateParticle(&particles[id], id, iter, num_particles);
}
// Perform the update step for all Particles in the array on CPU with a for loop.
void cpu_updateParticles(Particle* particles, int iter, int num_particles) {
// srand(time(NULL))
for (int i = 0; i < num_particles; i++) {
cpu_updatePositionAndVelocity(&particles[i], i, iter, num_particles);
}
}
// Perform the update step for all Particles in the array by launching GPU kernels.
void gpu_updateParticles(Particle* particles, int iter, int num_particles, int tpb) {
gpu_updatePositionAndVelocity<<<(num_particles + tpb - 1)/tpb, tpb>>>(particles, iter, num_particles);
}
int main(int argc, char** argv) {
printf("Running the simulations with the following params:\n");
if (argc < 5) {
printf("Usage: ./a NUM_PARTICLES NUM_ITERATIONS TPB INCLUDE_CPU\nExample usage: ./a 10000 10000 32 include_cpu\n");
return -1;
}
// reading the command line arguments, without any kind of error checking
const int num_particles = (int) strtol(argv[1], NULL, 10); // e.g. 10000 - NULL is the endpointer and 10 is the base
const int num_iterations = (int) strtol(argv[2], NULL, 10); // e.g. 10000
const int tpb = (int) strtol(argv[3], NULL, 10); // e.g. 32
const char* include_cpu = argv[4];
printf("======== %s: %d, %s: %d, %s: %d\n\n", "num_particles", num_particles, "num_iterations", num_iterations, "tpb", tpb);
// Declare variables
Particle *c_particles, *g_particles, *g_result;
double iStart, iElaps;
// Initialize array for CPU
c_particles = (Particle*) malloc(num_particles*sizeof(Particle));
randomizeParticles(c_particles, num_particles);
// Initialize array for GPU - particle positions/velocities in device memory are a copy of those in host memory
// g_result = (Particle*) malloc(num_particles*sizeof(Particle)); // Used to store the result of GPU simulation
// cudaMallocHost(&g_result, num_particles*sizeof(Particle));
// cudaMalloc(&g_particles, num_particles*sizeof(Particle));
cudaMallocManaged(&g_particles, num_particles*sizeof(Particle));
iStart = cpuSecond();
memcpy(g_particles, c_particles, num_particles*sizeof(Particle));
double copy_time = cpuSecond() - iStart;
// CPU Version
if (strcmp(include_cpu, "include_cpu") == 0) { // perform the CPU version if requested by the user
printf("CPU simulation started...\n"); fflush(stdout);
iStart = cpuSecond();
for (int i = 0; i < num_iterations; i++) {
cpu_updateParticles(c_particles, i, num_particles);
}
iElaps = cpuSecond() - iStart;
printf("Done in %f!\n\n", iElaps); fflush(stdout);
}
else
printf("Excluded the CPU experiment...\n\n");
// GPU Version
printf("GPU simulation started...\n"); fflush(stdout);
iStart = cpuSecond();
for (int i = 0; i < num_iterations; i++) {
// cudaMemcpy(g_particles, g_result, num_particles*sizeof(Particle), cudaMemcpyHostToDevice);
gpu_updateParticles(g_particles, i, num_particles, tpb);
cudaDeviceSynchronize();
// cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost);
}
iElaps = cpuSecond() - iStart;
printf("Done in %f!\n\n", iElaps + copy_time); fflush(stdout);
// copying the result back from GPU memory to CPU memory
// cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost);
// if the CPU version was performed, compare it with the GPU version
if (strcmp(include_cpu, "include_cpu") == 0)
printf(arraysMatch(g_particles, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n");
// printf(arraysMatch(g_result, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n");
printf("========================================================== \n\n\n");
// Free arrays
free(c_particles);
cudaFree(g_particles);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>
// #define NUM_PARTICLES 10000
// #define NUM_ITERATIONS 10000
// int TPB = 16;
#define SEED 10
#define EPSILON 1e-5
typedef struct {
float3 position;
float3 velocity;
} Particle;
// Deterministically generates a "random" float, provided a seed and 3 integers.
__host__ __device__ float gen_random(int seed, int a, int b, int c) {
return (float)((seed * a + b) % c) / c;
}
// Given an array of particles and an index, print that particle.
void printParticle(Particle* particles, int index){
printf("%f %f %f %f %f %f\n",
particles[index].position.x, particles[index].position.y, particles[index].position.z,
particles[index].velocity.x, particles[index].velocity.y, particles[index].velocity.z);
}
// Compare two arrays of Particles. If their position coordinates are all within EPSILON of each other,
// return true, else false.
__host__ bool arraysMatch(Particle* arr1, Particle* arr2, int num_particles)
{
for (int i = 0; i < num_particles; i++) {
if (fabs(arr1[i].position.x - arr2[i].position.x) > EPSILON ||
fabs(arr1[i].position.y - arr2[i].position.y) > EPSILON ||
fabs(arr1[i].position.z - arr2[i].position.z) > EPSILON)
return false;
}
return true;
}
// Get the current time
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
// Replaces the x, y and z values in a float3 to random values between 0 and 1.
void randomizeFloat3(float3* f3) {
f3->x = (float) rand() / RAND_MAX;
f3->y = (float) rand() / RAND_MAX;
f3->z = (float) rand() / RAND_MAX;
}
// Randomizes the position and velocity of all Particles in an array.
void randomizeParticles(Particle* particles, int num_particles) {
srand(0);
for (int i = 0; i < num_particles; i++) {
randomizeFloat3(&particles[i].position);
randomizeFloat3(&particles[i].velocity);
}
}
// Updates a particle's position by its velocity, then updates its velocity
__host__ __device__ void updateParticle(Particle* particle, int id, int iter, int num_particles) {
int dt = 1;
// update position
particle->position.x += dt * particle->velocity.x;
particle->position.y += dt * particle->velocity.y;
particle->position.z += dt * particle->velocity.z;
// update the velocity randomly
particle->velocity.x += gen_random(SEED, id, iter, num_particles);
particle->velocity.y += gen_random(SEED, id, iter, num_particles);
particle->velocity.z += gen_random(SEED, id, iter, num_particles);
}
// CPU function that updates a given particle.
void cpu_updatePositionAndVelocity(Particle* particle, int id, int iter, int num_particles) {
updateParticle(particle, id, iter, num_particles);
}
// Kernel that finds a given Particle's ID then updates it if within range.
__global__ void gpu_updatePositionAndVelocity(Particle* particles, int iter, int num_particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= num_particles) // If out of bounds, ignore the Particle.
return;
else
updateParticle(&particles[id], id, iter, num_particles);
}
// Perform the update step for all Particles in the array on CPU with a for loop.
void cpu_updateParticles(Particle* particles, int iter, int num_particles) {
// srand(time(NULL))
for (int i = 0; i < num_particles; i++) {
cpu_updatePositionAndVelocity(&particles[i], i, iter, num_particles);
}
}
// Perform the update step for all Particles in the array by launching GPU kernels.
void gpu_updateParticles(Particle* particles, int iter, int num_particles, int tpb) {
gpu_updatePositionAndVelocity<<<(num_particles + tpb - 1)/tpb, tpb>>>(particles, iter, num_particles);
}
int main(int argc, char** argv) {
printf("Running the simulations with the following params:\n");
if (argc < 5) {
printf("Usage: ./a NUM_PARTICLES NUM_ITERATIONS TPB INCLUDE_CPU\nExample usage: ./a 10000 10000 32 include_cpu\n");
return -1;
}
// reading the command line arguments, without any kind of error checking
const int num_particles = (int) strtol(argv[1], NULL, 10); // e.g. 10000 - NULL is the endpointer and 10 is the base
const int num_iterations = (int) strtol(argv[2], NULL, 10); // e.g. 10000
const int tpb = (int) strtol(argv[3], NULL, 10); // e.g. 32
const char* include_cpu = argv[4];
printf("======== %s: %d, %s: %d, %s: %d\n\n", "num_particles", num_particles, "num_iterations", num_iterations, "tpb", tpb);
// Declare variables
Particle *c_particles, *g_particles, *g_result;
double iStart, iElaps;
// Initialize array for CPU
c_particles = (Particle*) malloc(num_particles*sizeof(Particle));
randomizeParticles(c_particles, num_particles);
// Initialize array for GPU - particle positions/velocities in device memory are a copy of those in host memory
// g_result = (Particle*) malloc(num_particles*sizeof(Particle)); // Used to store the result of GPU simulation
// cudaMallocHost(&g_result, num_particles*sizeof(Particle));
// cudaMalloc(&g_particles, num_particles*sizeof(Particle));
hipMallocManaged(&g_particles, num_particles*sizeof(Particle));
iStart = cpuSecond();
memcpy(g_particles, c_particles, num_particles*sizeof(Particle));
double copy_time = cpuSecond() - iStart;
// CPU Version
if (strcmp(include_cpu, "include_cpu") == 0) { // perform the CPU version if requested by the user
printf("CPU simulation started...\n"); fflush(stdout);
iStart = cpuSecond();
for (int i = 0; i < num_iterations; i++) {
cpu_updateParticles(c_particles, i, num_particles);
}
iElaps = cpuSecond() - iStart;
printf("Done in %f!\n\n", iElaps); fflush(stdout);
}
else
printf("Excluded the CPU experiment...\n\n");
// GPU Version
printf("GPU simulation started...\n"); fflush(stdout);
iStart = cpuSecond();
for (int i = 0; i < num_iterations; i++) {
// cudaMemcpy(g_particles, g_result, num_particles*sizeof(Particle), cudaMemcpyHostToDevice);
gpu_updateParticles(g_particles, i, num_particles, tpb);
hipDeviceSynchronize();
// cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost);
}
iElaps = cpuSecond() - iStart;
printf("Done in %f!\n\n", iElaps + copy_time); fflush(stdout);
// copying the result back from GPU memory to CPU memory
// cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost);
// if the CPU version was performed, compare it with the GPU version
if (strcmp(include_cpu, "include_cpu") == 0)
printf(arraysMatch(g_particles, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n");
// printf(arraysMatch(g_result, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n");
printf("========================================================== \n\n\n");
// Free arrays
free(c_particles);
hipFree(g_particles);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
__device__ void rot_x(float3 *vec, float angle)
{
float tmp;
tmp = vec->y;
vec->y = tmp * cosf(angle) + vec->z * -sinf(angle);
vec->z = tmp * sinf(angle) + vec->z * cosf(angle);
}
__device__ void rot_y(float3 *vec, float angle)
{
float tmp;
tmp = vec->x;
vec->x = tmp * cosf(angle) + vec->z * sinf(angle);
vec->z = tmp * -sinf(angle) + vec->z * cosf(angle);
}
__device__ void rot_z(float3 *vec, float angle)
{
float tmp;
tmp = vec->x;
vec->x = tmp * cosf(angle) + vec->y * -sinf(angle);
vec->y = tmp * sinf(angle) + vec->y * cosf(angle);
}
__device__ void rot_vec(float3 *vec, float3 angle)
{
rot_x(vec, angle.x);
rot_y(vec, angle.y);
rot_z(vec, angle.z);
}
|
#include <hip/hip_runtime.h>
__device__ void rot_x(float3 *vec, float angle)
{
float tmp;
tmp = vec->y;
vec->y = tmp * cosf(angle) + vec->z * -sinf(angle);
vec->z = tmp * sinf(angle) + vec->z * cosf(angle);
}
__device__ void rot_y(float3 *vec, float angle)
{
float tmp;
tmp = vec->x;
vec->x = tmp * cosf(angle) + vec->z * sinf(angle);
vec->z = tmp * -sinf(angle) + vec->z * cosf(angle);
}
__device__ void rot_z(float3 *vec, float angle)
{
float tmp;
tmp = vec->x;
vec->x = tmp * cosf(angle) + vec->y * -sinf(angle);
vec->y = tmp * sinf(angle) + vec->y * cosf(angle);
}
__device__ void rot_vec(float3 *vec, float3 angle)
{
rot_x(vec, angle.x);
rot_y(vec, angle.y);
rot_z(vec, angle.z);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void cuSetupSincKernel_kernel(float *r_filter_, const int i_filtercoef_, const float r_soff_, const float r_wgthgt_, const int i_weight_, const float r_soff_inverse_, const float r_beta_, const float r_decfactor_inverse_, const float r_relfiltlen_inverse_)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
if(i > i_filtercoef_) return;
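// windowed sinc coefficient: r_fct is the sinc value, r_wgt the raised-cosine window weight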
float r_wa = i - r_soff_;
float r_wgt = (1.0f - r_wgthgt_) + r_wgthgt_*cos(PI*r_wa*r_soff_inverse_);
float r_s = r_wa*r_beta_*r_decfactor_inverse_*PI;
float r_fct;
if(r_s != 0.0f) {
r_fct = sin(r_s)/r_s;
}
else {
r_fct = 1.0f;
}
if(i_weight_ == 1) {
r_filter_[i] = r_fct*r_wgt;
}
else {
r_filter_[i] = r_fct;
}
//printf("kernel %d %f\n", i, r_filter_[i]);
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cuSetupSincKernel_kernel(float *r_filter_, const int i_filtercoef_, const float r_soff_, const float r_wgthgt_, const int i_weight_, const float r_soff_inverse_, const float r_beta_, const float r_decfactor_inverse_, const float r_relfiltlen_inverse_)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
if(i > i_filtercoef_) return;
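// windowed sinc coefficient: r_fct is the sinc value, r_wgt the raised-cosine window weight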
float r_wa = i - r_soff_;
float r_wgt = (1.0f - r_wgthgt_) + r_wgthgt_*cos(PI*r_wa*r_soff_inverse_);
float r_s = r_wa*r_beta_*r_decfactor_inverse_*PI;
float r_fct;
if(r_s != 0.0f) {
r_fct = sin(r_s)/r_s;
}
else {
r_fct = 1.0f;
}
if(i_weight_ == 1) {
r_filter_[i] = r_fct*r_wgt;
}
else {
r_filter_[i] = r_fct;
}
//printf("kernel %d %f\n", i, r_filter_[i]);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
using namespace std;
struct compressed_sparse_column {
int* data;
int* row;
int* column;
int* index_column;
int* index_row_start;
int* index_row_end;
};
struct graph {
compressed_sparse_column* dataset;
bool* roots;
bool* leaves;
bool* singletons;
int vertices;
int edges;
};
__global__ void pre_post_order(int* depth, int* zeta, int* zeta_tilde, graph* dataset_graph) {
int* pre = new int[dataset_graph->vertices];
int* post = new int[dataset_graph->vertices];
memset(pre, 0, dataset_graph->vertices * sizeof(int));
memset(post, 0, dataset_graph->vertices * sizeof(int));
bool* incoming_edges = new bool[dataset_graph->edges];
memset(incoming_edges, false, dataset_graph->edges * sizeof(bool));
bool* q = new bool[dataset_graph->vertices];
memcpy(q, dataset_graph->roots, sizeof(bool) * dataset_graph->vertices);
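// frontier sweep: q holds the current frontier, seeded with the roots (per-iteration buffers are never freed)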
while(true) {
bool* p = new bool[dataset_graph->vertices];
memset(p, false, dataset_graph->vertices * sizeof(bool));
bool global_check = false;
for(int i = 0; i < dataset_graph->vertices; i++) {
if( q[i] ) {
int pre_node = pre[i];
int post_node = post[i];
for(int j = dataset_graph->dataset->index_column[i]; dataset_graph->dataset->column[j] == i; j++) {
int neighbor_vertex = dataset_graph->dataset->row[j];
// zeta[i] = undefined!
pre[neighbor_vertex] = pre_node + zeta_tilde[neighbor_vertex];
post[neighbor_vertex] = post_node + zeta_tilde[neighbor_vertex];
incoming_edges[j] = true;
bool flag = true;
for(int k = 0; k < dataset_graph->edges; k++) {
if( dataset_graph->dataset->row[k] == neighbor_vertex && !incoming_edges[k] ) {
flag = false;
break;
}
}
if( flag ) {
global_check = true;
p[neighbor_vertex] = true;
}
}
pre[i] = pre_node + depth[i];
post[i] = post_node + (zeta[i] - 1);
}
}
q = p;
if( !global_check ) {
break;
}
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
struct compressed_sparse_column {
int* data;
int* row;
int* column;
int* index_column;
int* index_row_start;
int* index_row_end;
};
struct graph {
compressed_sparse_column* dataset;
bool* roots;
bool* leaves;
bool* singletons;
int vertices;
int edges;
};
__global__ void pre_post_order(int* depth, int* zeta, int* zeta_tilde, graph* dataset_graph) {
int* pre = new int[dataset_graph->vertices];
int* post = new int[dataset_graph->vertices];
memset(pre, 0, dataset_graph->vertices * sizeof(int));
memset(post, 0, dataset_graph->vertices * sizeof(int));
bool* incoming_edges = new bool[dataset_graph->edges];
memset(incoming_edges, false, dataset_graph->edges * sizeof(bool));
bool* q = new bool[dataset_graph->vertices];
memcpy(q, dataset_graph->roots, sizeof(bool) * dataset_graph->vertices);
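// frontier sweep: q holds the current frontier, seeded with the roots (per-iteration buffers are never freed)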
while(true) {
bool* p = new bool[dataset_graph->vertices];
memset(p, false, dataset_graph->vertices * sizeof(bool));
bool global_check = false;
for(int i = 0; i < dataset_graph->vertices; i++) {
if( q[i] ) {
int pre_node = pre[i];
int post_node = post[i];
for(int j = dataset_graph->dataset->index_column[i]; dataset_graph->dataset->column[j] == i; j++) {
int neighbor_vertex = dataset_graph->dataset->row[j];
// zeta[i] = undefined!
pre[neighbor_vertex] = pre_node + zeta_tilde[neighbor_vertex];
post[neighbor_vertex] = post_node + zeta_tilde[neighbor_vertex];
incoming_edges[j] = true;
bool flag = true;
for(int k = 0; k < dataset_graph->edges; k++) {
if( dataset_graph->dataset->row[k] == neighbor_vertex && !incoming_edges[k] ) {
flag = false;
break;
}
}
if( flag ) {
global_check = true;
p[neighbor_vertex] = true;
}
}
pre[i] = pre_node + depth[i];
post[i] = post_node + (zeta[i] - 1);
}
}
q = p;
if( !global_check ) {
break;
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if (i >= 0 && i < height && j >= 0 && j < width) {
Resultat[j*height + i] = Source[i*width + j];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if (i >= 0 && i < height && j >= 0 && j < width) {
Resultat[j*height + i] = Source[i*width + j];
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cstdio>
#include <cstdlib>
#include <time.h>
#include "cuda_timer.cuh"
#define SafeTimerCall(err) __safeTimerCall(err, __FILE__, __LINE__)
inline void __safeTimerCall(cudaError err, const char *file, const int line) {
#pragma warning(push)
#pragma warning(disable: 4127) // Prevent warning on do-while(0)
do {
if (cudaSuccess != err) {
fprintf(stderr, "CudaTimer failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
exit(-1);
}
} while (0);
#pragma warning(pop)
return;
}
CudaTimer::CudaTimer() {
SafeTimerCall(cudaEventCreate(&_begEvent));
SafeTimerCall(cudaEventCreate(&_endEvent));
return;
}
CudaTimer::~CudaTimer() {
SafeTimerCall(cudaEventDestroy(_begEvent));
SafeTimerCall(cudaEventDestroy(_endEvent));
return;
}
void CudaTimer::start() {
SafeTimerCall(cudaEventRecord(_begEvent, 0));
return;
}
void CudaTimer::stop() {
SafeTimerCall(cudaEventRecord(_endEvent, 0));
return;
}
float CudaTimer::value() {
SafeTimerCall(cudaEventSynchronize(_endEvent));
float timeVal;
SafeTimerCall(cudaEventElapsedTime(&timeVal, _begEvent, _endEvent));
return timeVal / CLOCKS_PER_SEC;
}
|
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <time.h>
#include "cuda_timer.cuh"
#define SafeTimerCall(err) __safeTimerCall(err, __FILE__, __LINE__)
inline void __safeTimerCall(hipError_t err, const char *file, const int line) {
#pragma warning(push)
#pragma warning(disable: 4127) // Prevent warning on do-while(0)
do {
if (hipSuccess != err) {
fprintf(stderr, "CudaTimer failed at %s:%i : %s\n", file, line, hipGetErrorString(err));
exit(-1);
}
} while (0);
#pragma warning(pop)
return;
}
CudaTimer::CudaTimer() {
SafeTimerCall(hipEventCreate(&_begEvent));
SafeTimerCall(hipEventCreate(&_endEvent));
return;
}
CudaTimer::~CudaTimer() {
SafeTimerCall(hipEventDestroy(_begEvent));
SafeTimerCall(hipEventDestroy(_endEvent));
return;
}
void CudaTimer::start() {
SafeTimerCall(hipEventRecord(_begEvent, 0));
return;
}
void CudaTimer::stop() {
SafeTimerCall(hipEventRecord(_endEvent, 0));
return;
}
float CudaTimer::value() {
SafeTimerCall(hipEventSynchronize(_endEvent));
float timeVal;
SafeTimerCall(hipEventElapsedTime(&timeVal, _begEvent, _endEvent));
return timeVal / CLOCKS_PER_SEC;
}
|