I am running simulations that need more data than will fit in a standard (stack-allocated) array. If I'm going to use CUDA to parallelize the simulations, am I better off holding my data in std::vector or in dynamically allocated memory (new/delete)?
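To be concrete, the two host-side options I'm weighing look roughly like this (just a sketch of the allocation, not a full program):

// Option 1: std::vector, heap-allocated, freed automatically
std::vector<int> field(XDIM * YDIM, 0);

// Option 2: raw dynamic allocation with new/delete
int *field = new int[XDIM * YDIM]();   // () zero-initialises
// ... use field ...
delete[] field;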
I’ve written this simple program as an example of what I’m trying to accomplish in serial:
#include <cmath>
#include <cstdlib>
#include <vector>
#include <cstdio>
#define XDIM 2000
#define YDIM XDIM
using namespace std;
// Mark every cell that lies on a ring of radius r centred in the field.
void circle(vector<int> &f)
{
    const int r = YDIM / 3;
    const int centre[2] = { XDIM / 2, XDIM / 2 };

    for (int j = 0; j < XDIM * YDIM; j++)
    {
        int x = j % XDIM;   // column
        int y = j / XDIM;   // row
        int dist2 = (x - centre[0]) * (x - centre[0])
                  + (y - centre[1]) * (y - centre[1]);

        // Keep cells whose squared distance from the centre is within one unit of r.
        if (dist2 < (r + 1) * (r + 1) && dist2 >= (r - 1) * (r - 1))
        {
            f.at(j) = 1;
        }
    }
}
int main()
{
    vector<int> field(XDIM * YDIM, 0);
    circle(field);

    FILE *f;
    if (fopen_s(&f, "circle.dat", "a") != 0)
    {
        printf("I could not open the file circle.dat\n");
        printf("Aborting now.\n");
        abort();
    }

    // Write out the (x, y) coordinates of every cell on the ring.
    for (int j = 0; j < XDIM * YDIM; j++)
    {
        if (field.at(j) == 1)
        {
            int x = j % XDIM;
            int y = j / XDIM;
            fprintf(f, "%d\t%d\n\n", x, y);
        }
    }
    fclose(f);
    return 0;
}
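In case it matters, the way I picture moving the data onto the GPU is something like the sketch below. This is only my guess at the CUDA side: I'm assuming the usual cudaMalloc/cudaMemcpy calls, that I can copy straight out of the vector via field.data(), and a placeholder kernel standing in for the real simulation step.

#include <cuda_runtime.h>
#include <vector>

// Placeholder kernel: one thread per cell (the real per-cell update would go here).
__global__ void stepKernel(int *f, int n)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < n)
        f[j] = f[j];   // stand-in for the actual simulation work
}

void runOnDevice(std::vector<int> &field)
{
    int n = static_cast<int>(field.size());
    int *d_field = nullptr;

    // Copy the host vector to device memory.
    cudaMalloc((void **)&d_field, n * sizeof(int));
    cudaMemcpy(d_field, field.data(), n * sizeof(int), cudaMemcpyHostToDevice);

    // Launch one thread per cell.
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    stepKernel<<<blocks, threads>>>(d_field, n);

    // Copy the result back into the vector and release the device buffer.
    cudaMemcpy(field.data(), d_field, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_field);
}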
Any help you could offer would be appreciated.