// bioem_cuda.cu
#define BIOEM_GPUCODE

#if defined(_WIN32)
#include <windows.h>
#endif

#include <cstdio>
#include <cstdlib>
#include <algorithm>
#include <iostream>
using namespace std;

#include "bioem_cuda_internal.h"
#include "bioem_algorithm.h"

//__constant__ bioem_map gConvMap;

//Check the return code of a CUDA runtime call; the call is evaluated only once,
//so a failing call is not re-executed while building the error message
#define checkCudaErrors(call) \
{ \
	const cudaError_t cuda_rc = (call); \
	if (cuda_rc != cudaSuccess) \
	{ \
		printf("CUDA Error %d / %s (%s: %d)\n", cuda_rc, cudaGetErrorString(cuda_rc), __FILE__, __LINE__); \
		exit(1); \
	} \
}

bioem_cuda::bioem_cuda()
{
	deviceInitialized = 0;
	GPUAlgo = getenv("GPUALGO") == NULL ? 2 : atoi(getenv("GPUALGO"));			//2 = loop over shifts (default), 1 = one kernel per shift, 0 = all shifts in one kernel
	GPUAsync = getenv("GPUASYNC") == NULL ? 1 : atoi(getenv("GPUASYNC"));			//Overlap GPU transfers/kernels with CPU work via streams and events (default: on)
	GPUWorkload = getenv("GPUWORKLOAD") == NULL ? 100 : atoi(getenv("GPUWORKLOAD"));	//Percentage of reference maps processed on the GPU (default: 100)
}
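
//Example configuration via the environment variables read above (the binary name is illustrative):
//	GPUALGO=2 GPUASYNC=1 GPUWORKLOAD=75 ./bioem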

bioem_cuda::~bioem_cuda()
{
	deviceExit();
}

__global__ void compareRefMap_kernel(const int iOrient, const int iConv, const bioem_map* pMap, bioem_Probability* pProb, const bioem_param_device param, const bioem_RefMap_Mod* RefMap, const int cent_x, const int cent_y, const int maxRef)
{
	//One thread per reference map, for a single fixed displacement (cent_x, cent_y)
	const int iRefMap = myBlockIdxX * myBlockDimX + myThreadIdxX;
	if (iRefMap < maxRef)
	{
		compareRefMap<0>(iRefMap, iOrient, iConv, *pMap, pProb, param, *RefMap, cent_x, cent_y);
	}
}

__global__ void compareRefMapShifted_kernel(const int iOrient, const int iConv, const bioem_map* pMap, bioem_Probability* pProb, const bioem_param_device param, const bioem_RefMap_Mod* RefMap, const int maxRef)
{
	//One thread per reference map; each thread loops over all displacements internally
	const int iRefMap = myBlockIdxX * myBlockDimX + myThreadIdxX;
	if (iRefMap < maxRef)
	{
		compareRefMapShifted<1>(iRefMap, iOrient, iConv, *pMap, pProb, param, *RefMap);
	}
}

__global__ void cudaZeroMem(void* ptr, size_t size)
{
	//Grid-stride loop that zeroes a device buffer in units of int
	int* myptr = (int*) ptr;
	int mysize = size / sizeof(int);
	int myid = myBlockDimX * myBlockIdxX + myThreadIdxX;
	int mygrid = myBlockDimX * myGridDimX;
	for (int i = myid; i < mysize; i += mygrid) myptr[i] = 0;
}
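
//Illustrative launch of the grid-stride kernel above (buffer name and size are hypothetical);
//any trailing bytes beyond a multiple of sizeof(int) are left untouched:
//	cudaZeroMem<<<CUDA_BLOCK_COUNT, CUDA_THREAD_COUNT, 0, cudaStream>>>(d_buffer, nBytes);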

__global__ void compareRefMapLoopShifts_kernel(const int iOrient, const int iConv, const bioem_map* pMap, bioem_Probability* pProb, const bioem_param_device param, const bioem_RefMap* RefMap, const int blockoffset, const int nShifts, const int nShiftBits, const int maxRef)
{
	//Each thread handles one (reference map, shift_x, shift_y) combination; nShifts must be
	//a power of two so the indices can be unpacked with shifts and masks
	const size_t myid = (myBlockIdxX + blockoffset) * myBlockDimX + myThreadIdxX;
	const int iRefMap = myid >> (nShiftBits << 1);
	const int myRef = myThreadIdxX >> (nShiftBits << 1);
	const int myShiftIdx = (myid >> nShiftBits) & (nShifts - 1);
	const int myShiftIdy = myid & (nShifts - 1);
	const int myShift = myid & (nShifts * nShifts - 1);
	const int cent_x = myShiftIdx * param.GridSpaceCenter - param.maxDisplaceCenter;
	const int cent_y = myShiftIdy * param.GridSpaceCenter - param.maxDisplaceCenter;

	const bool threadActive = myShiftIdx < nShifts && myShiftIdy < nShifts && iRefMap < maxRef;

	compareRefMap<2>(iRefMap, iOrient, iConv, *pMap, pProb, param, *RefMap, cent_x, cent_y, myShift, nShifts * nShifts, myRef, threadActive);
}
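
//Worked example of the index unpacking above, assuming nShifts = 4 (so nShiftBits = 2):
//the low 2 * nShiftBits bits of myid encode the shift, the higher bits the reference map:
//	iRefMap    = myid >> 4        (drop the four shift bits)
//	myShiftIdx = (myid >> 2) & 3  (next two bits: x displacement index)
//	myShiftIdy = myid & 3         (low two bits: y displacement index)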

__global__ void multComplexMap(const mycomplex_t* convmap, const mycomplex_t* refmap, mycuComplex_t* out, const int NumberPixelsTotal, const int MapSize, const int NumberMaps, const int Offset)
{
	if (myBlockIdxX >= NumberMaps) return;
	const mycomplex_t* myin = &refmap[(size_t) (myBlockIdxX + Offset) * MapSize];	//Offset is a map index, as passed by compareRefMaps
	mycuComplex_t* myout = &out[myBlockIdxX * MapSize];
	for (int i = myThreadIdxX; i < NumberPixelsTotal; i += myBlockDimX)
	{
		myout[i].x = convmap[i][0] * myin[i][0] + convmap[i][1] * myin[i][1];
		myout[i].y = convmap[i][1] * myin[i][0] - convmap[i][0] * myin[i][1];
	}
}
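
//The loop above computes convmap[i] * conj(refmap[i]): with c = a + ib and r = x + iy,
//c * conj(r) = (a*x + b*y) + i*(b*x - a*y), which matches the .x/.y expressions. By the
//cross-correlation theorem, the inverse FFT of this product (mycufftExecC2R in
//compareRefMaps) yields the cross-correlation of the convolved projection with the
//reference map.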

__global__ void cuDoRefMapsFFT(const int iOrient, const int iConv, const myfloat_t* lCC, const myfloat_t sumC, const myfloat_t sumsquareC, bioem_Probability* pProb, const bioem_param_device param, const bioem_RefMap* RefMap, const int maxRef, const int Offset)
{
	//lCC holds the real-space cross-correlation maps of the current batch, one map per thread
	const int iRefMap = myBlockIdxX * myBlockDimX + myThreadIdxX + Offset;
	const myfloat_t* mylCC = &lCC[(myBlockIdxX * myBlockDimX + myThreadIdxX) * param.NumberPixels * param.NumberPixels];
	if (iRefMap >= maxRef) return;
	doRefMapFFT(iRefMap, iOrient, iConv, mylCC, sumC, sumsquareC, pProb, param, *RefMap);
}

template <class T> static inline T divup(T num, T divider) {return((num + divider - 1) / divider);}
static inline bool IsPowerOf2(int x) {return ((x > 0) && ((x & (x - 1)) == 0));}
#if defined(_WIN32)
static inline int ilog2 (int value)
{
	DWORD index;
	_BitScanReverse (&index, value);
	return(index);
}
#else
#else
static inline int ilog2(int value) {return 31 - __builtin_clz(value);}
#endif
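
//Examples for the helpers above: divup(10, 4) == 3, IsPowerOf2(8) == true, ilog2(8) == 3.
//ilog2 is undefined for value <= 0 on both paths, since __builtin_clz(0) and
//_BitScanReverse with a zero mask are undefined.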

int bioem_cuda::compareRefMaps(int iProjectionOut, int iConv, const bioem_map& conv_map, mycomplex_t* localmultFFT, myfloat_t sumC, myfloat_t sumsquareC, const int startMap)
{
	if (startMap)
	{
		cout << "Error: startMap not implemented in GPU code\n";
		exit(1);
	}

	//cudaMemcpyToSymbolAsync(&gConvMap, &conv_map, sizeof(bioem_map), 0, cudaMemcpyHostToDevice, cudaStream);
	if (GPUAsync)
	{
		//Wait until the buffers of the same parity are no longer in use by the previous iteration
		checkCudaErrors(cudaEventSynchronize(cudaEvent[iConv & 1]));
	}

	if (FFTAlgo)
	{
		//Copy the FFT of the convolved projection to the GPU, then process the reference maps in batches of CUDA_FFTS_AT_ONCE
		checkCudaErrors(cudaMemcpyAsync(&pConvMapFFT[(iConv & 1) * param.RefMapSize], localmultFFT, param.RefMapSize * sizeof(mycomplex_t), cudaMemcpyHostToDevice, cudaStream));
		for (int i = 0; i < maxRef; i += CUDA_FFTS_AT_ONCE)
		{
			const int num = min(CUDA_FFTS_AT_ONCE, maxRef - i);
			multComplexMap<<<num, CUDA_THREAD_COUNT, 0, cudaStream>>>(&pConvMapFFT[(iConv & 1) * param.RefMapSize], pRefMapsFFT, pFFTtmp2, param.param_device.NumberPixels * param.param_device.NumberFFTPixels1D, param.RefMapSize, num, i);
			//plan[0] transforms a full batch, plan[1] the final partial batch (see deviceStartRun)
			if (mycufftExecC2R(i + CUDA_FFTS_AT_ONCE > maxRef ? plan[1] : plan[0], pFFTtmp2, pFFTtmp) != CUFFT_SUCCESS)
			{
				cout << "Error running CUFFT\n";
				exit(1);
			}
			cuDoRefMapsFFT<<<divup(num, CUDA_THREAD_COUNT), CUDA_THREAD_COUNT, 0, cudaStream>>>(iProjectionOut, iConv, pFFTtmp, sumC, sumsquareC, pProb_device, param.param_device, pRefMap_device, maxRef, i);
		}
		checkCudaErrors(cudaGetLastError());
	}
	else
	{
		checkCudaErrors(cudaMemcpyAsync(pConvMap_device[iConv & 1], &conv_map, sizeof(bioem_map), cudaMemcpyHostToDevice, cudaStream));

		if (GPUAlgo == 2) //Loop over shifts
		{
			const int nShifts = 2 * param.param_device.maxDisplaceCenter / param.param_device.GridSpaceCenter + 1;
			if (!IsPowerOf2(nShifts))
			{
				cout << "Invalid number of displacements: must be a power of two\n";
				exit(1);
			}
			if (CUDA_THREAD_COUNT % (nShifts * nShifts))
			{
				cout << "CUDA thread count (" << CUDA_THREAD_COUNT << ") is not a multiple of the number of shifts (" << (nShifts * nShifts) << ")\n";
				exit(1);
			}
			if (nShifts > CUDA_MAX_SHIFT_REDUCE)
			{
				cout << "Too many displacements for CUDA reduction\n";
				exit(1);
			}
			const int nShiftBits = ilog2(nShifts);
			size_t totalBlocks = divup((size_t) maxRef * (size_t) nShifts * (size_t) nShifts, (size_t) CUDA_THREAD_COUNT);
			size_t nBlocks = CUDA_BLOCK_COUNT;
			for (size_t i = 0; i < totalBlocks; i += nBlocks)
			{
				compareRefMapLoopShifts_kernel <<<min(nBlocks, totalBlocks - i), CUDA_THREAD_COUNT, (CUDA_THREAD_COUNT * 2 + CUDA_THREAD_COUNT / (nShifts * nShifts) * 4) * sizeof(myfloat_t), cudaStream>>> (iProjectionOut, iConv, pConvMap_device[iConv & 1], pProb_device, param.param_device, pRefMap_device, i, nShifts, nShiftBits, maxRef);
			}
		}
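			//Worked example for the checks above: maxDisplaceCenter = 14 and GridSpaceCenter = 4
			//give nShifts = 2 * 14 / 4 + 1 = 8 (a power of two), so nShiftBits = 3 and each
			//reference map occupies 8 * 8 = 64 consecutive threads of the kernel.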
		else if (GPUAlgo == 1) //Split shifts into multiple kernels
		{
			for (int cent_x = -param.param_device.maxDisplaceCenter; cent_x <= param.param_device.maxDisplaceCenter; cent_x += param.param_device.GridSpaceCenter)
			{
				for (int cent_y = -param.param_device.maxDisplaceCenter; cent_y <= param.param_device.maxDisplaceCenter; cent_y += param.param_device.GridSpaceCenter)
				{
					compareRefMap_kernel <<<divup(maxRef, CUDA_THREAD_COUNT), CUDA_THREAD_COUNT, 0, cudaStream>>> (iProjectionOut, iConv, pConvMap_device[iConv & 1], pProb_device, param.param_device, pRefMap_device_Mod, cent_x, cent_y, maxRef);
				}
			}
		}
		else if (GPUAlgo == 0) //All shifts in one kernel
		{
			compareRefMapShifted_kernel <<<divup(maxRef, CUDA_THREAD_COUNT), CUDA_THREAD_COUNT, 0, cudaStream>>> (iProjectionOut, iConv, pConvMap_device[iConv & 1], pProb_device, param.param_device, pRefMap_device_Mod, maxRef);
		}
		else
		{
			cout << "Invalid GPU algorithm selected\n";
			exit(1);
		}
	}

	if (GPUWorkload < 100)
	{
		//The CPU processes the reference maps from index maxRef onward (startMap = maxRef)
		bioem::compareRefMaps(iProjectionOut, iConv, conv_map, localmultFFT, sumC, sumsquareC, maxRef);
	}

	if (GPUAsync)
	{
		checkCudaErrors(cudaEventRecord(cudaEvent[iConv & 1], cudaStream));
	}
	else
	{
		checkCudaErrors(cudaStreamSynchronize(cudaStream));
	}
	return(0);
}
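
//Note on the double buffering in compareRefMaps: the convolution buffers, the pConvMapFFT
//slots, and the CUDA events are all indexed by (iConv & 1), so iteration iConv can overlap
//with iteration iConv - 1 when GPUAsync is set; the cudaEventSynchronize at the top of the
//function prevents overwriting a buffer that the previous iteration still reads.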

int bioem_cuda::deviceInit()
{
	deviceExit();

	if (FFTAlgo) GPUAlgo = 2;	//The FFT algorithm implies GPUAlgo 2 (loop over shifts)

	checkCudaErrors(cudaStreamCreate(&cudaStream));
	cout << "\tSize RefMap\t\t" << sizeof(bioem_RefMap) << "\n";
	checkCudaErrors(cudaMalloc(&pRefMap_device, sizeof(bioem_RefMap)));
	cout << "\tSize Probability\t" << sizeof(bioem_Probability) * RefMap.ntotRefMap << "\n";
	checkCudaErrors(cudaMalloc(&pProb_device, sizeof(bioem_Probability) * RefMap.ntotRefMap));
	for (int i = 0; i < 2; i++)
	{
		checkCudaErrors(cudaEventCreate(&cudaEvent[i]));
		checkCudaErrors(cudaMalloc(&pConvMap_device[i], sizeof(bioem_map)));
	}
	pRefMap_device_Mod = (bioem_RefMap_Mod*) pRefMap_device;

	if (FFTAlgo)
	{
		cout << "\tSize RefMapFFT\t\t" << RefMap.ntotRefMap * param.RefMapSize * sizeof(mycomplex_t) << "\n";
		checkCudaErrors(cudaMalloc(&pRefMapsFFT, RefMap.ntotRefMap * param.RefMapSize * sizeof(mycomplex_t)));
		cout << "\tSize RefMapFFT Copy\t" << CUDA_FFTS_AT_ONCE * param.RefMapSize * sizeof(mycomplex_t) << "\n";
		checkCudaErrors(cudaMalloc(&pFFTtmp2, CUDA_FFTS_AT_ONCE * param.RefMapSize * sizeof(mycomplex_t)));
		cout << "\tSize RefMapFFT Read\t" << CUDA_FFTS_AT_ONCE * param.param_device.NumberPixels * param.param_device.NumberPixels * sizeof(myfloat_t) << "\n";
		checkCudaErrors(cudaMalloc(&pFFTtmp, CUDA_FFTS_AT_ONCE * param.param_device.NumberPixels * param.param_device.NumberPixels * sizeof(myfloat_t)));
		checkCudaErrors(cudaMalloc(&pConvMapFFT, param.RefMapSize * sizeof(mycomplex_t) * 2));
		checkCudaErrors(cudaMemcpy(pRefMapsFFT, RefMap.RefMapsFFT, RefMap.ntotRefMap * param.RefMapSize * sizeof(mycomplex_t), cudaMemcpyHostToDevice));
	}

	if (GPUAlgo == 0 || GPUAlgo == 1)
	{
		//GPUAlgo 0 and 1 use the transposed reference map layout bioem_RefMap_Mod
		bioem_RefMap_Mod* RefMapGPU = new bioem_RefMap_Mod(RefMap);
		checkCudaErrors(cudaMemcpy(pRefMap_device_Mod, RefMapGPU, sizeof(bioem_RefMap), cudaMemcpyHostToDevice));
		delete RefMapGPU;
	}
	else
	{
		checkCudaErrors(cudaMemcpy(pRefMap_device, &RefMap, sizeof(bioem_RefMap), cudaMemcpyHostToDevice));
	}

	deviceInitialized = 1;
	return(0);
}

int bioem_cuda::deviceExit()
{
	if (deviceInitialized == 0) return(0);

	cudaStreamDestroy(cudaStream);
	cudaFree(pRefMap_device);
	cudaFree(pProb_device);
	for (int i = 0; i < 2; i++)
	{
		cudaEventDestroy(cudaEvent[i]);
		cudaFree(pConvMap_device[i]);
	}
	if (FFTAlgo)
	{
		cudaFree(pRefMapsFFT);
		cudaFree(pConvMapFFT);
		cudaFree(pFFTtmp);
		cudaFree(pFFTtmp2);
	}
	cudaDeviceReset();	//Supersedes the deprecated cudaThreadExit()

	deviceInitialized = 0;
	return(0);
}

int bioem_cuda::deviceStartRun()
{
	//The GPU handles the first maxRef reference maps, the CPU the rest (GPUWORKLOAD percent)
	maxRef = GPUWorkload >= 100 ? RefMap.ntotRefMap : ((size_t) RefMap.ntotRefMap * (size_t) GPUWorkload / 100);

	checkCudaErrors(cudaMemcpy(pProb_device, pProb, sizeof(bioem_Probability) * maxRef, cudaMemcpyHostToDevice));

	if (FFTAlgo)
	{
		//plan[0] transforms full batches of CUDA_FFTS_AT_ONCE maps, plan[1] the final partial
		//batch; if maxRef divides evenly, plan[1] falls back to a full batch so it is always valid
		for (int i = 0; i < 2; i++)
		{
			int n[2] = {param.param_device.NumberPixels, param.param_device.NumberPixels};
			const int batch = (i && maxRef % CUDA_FFTS_AT_ONCE) ? (maxRef % CUDA_FFTS_AT_ONCE) : CUDA_FFTS_AT_ONCE;
			if (cufftPlanMany(&plan[i], 2, n, NULL, 1, 0, NULL, 1, 0, CUFFT_C2R, batch) != CUFFT_SUCCESS)
			{
				cout << "Error planning CUFFT\n";
				exit(1);
			}
			if (cufftSetCompatibilityMode(plan[i], CUFFT_COMPATIBILITY_NATIVE) != CUFFT_SUCCESS)
			{
				cout << "Error setting CUFFT compatibility mode\n";
				exit(1);
			}
			if (cufftSetStream(plan[i], cudaStream) != CUFFT_SUCCESS)
			{
				cout << "Error setting CUFFT stream\n";
				exit(1);
			}
		}
	}
	return(0);
}

int bioem_cuda::deviceFinishRun()
{
	if (GPUAsync) cudaStreamSynchronize(cudaStream);
	checkCudaErrors(cudaMemcpy(pProb, pProb_device, sizeof(bioem_Probability) * maxRef, cudaMemcpyDeviceToHost));

	if (FFTAlgo)
	{
		for (int i = 0; i < 2; i++) cufftDestroy(plan[i]);
	}

	return(0);
}

bioem* bioem_cuda_create()
{
	return new bioem_cuda;
}
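
//Illustrative usage of the factory above (hypothetical driver code, not part of this file):
//	bioem* instance = bioem_cuda_create();
//	//... run the BioEM pipeline through the generic bioem interface ...
//	delete instance;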