#define BIOEM_GPUCODE

#if defined(_WIN32)
#include <windows.h>
#endif

#include <iostream>
using namespace std;

#include "bioem_cuda_internal.h"
#include "bioem_algorithm.h"

//__constant__ bioem_map gConvMap;

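// Evaluates a CUDA API return code and aborts with file/line information on error.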
#define checkCudaErrors(error) \
{ \
	if ((error) != cudaSuccess) \
	{ \
		printf("CUDA Error %d / %s (%s: %d)\n", error, cudaGetErrorString(error), __FILE__, __LINE__); \
		exit(1); \
	} \
}

bioem_cuda::bioem_cuda()
{
	deviceInitialized = 0;
	// Runtime behavior is tuned via environment variables:
	//   GPUALGO:     2 = loop over shifts in one kernel (default),
	//                1 = one kernel launch per shift, 0 = all shifts inside one kernel
	//   GPUASYNC:    use asynchronous copies and events (default 1)
	//   GPUWORKLOAD: percentage of reference maps processed on the GPU (default 100)
	// Example invocation (hypothetical binary name): GPUALGO=2 GPUWORKLOAD=50 ./bioem
	GPUAlgo = getenv("GPUALGO") == NULL ? 2 : atoi(getenv("GPUALGO"));
	GPUAsync = getenv("GPUASYNC") == NULL ? 1 : atoi(getenv("GPUASYNC"));
	GPUWorkload = getenv("GPUWORKLOAD") == NULL ? 100 : atoi(getenv("GPUWORKLOAD"));
}

bioem_cuda::~bioem_cuda()
{
	deviceExit();
}

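// One thread per reference map: compares map iRefMap to the convolved
// projection for a single displacement (cent_x, cent_y).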
__global__ void compareRefMap_kernel(const int iOrient, const int iConv, const bioem_map* pMap, bioem_Probability* pProb, const bioem_param_device param, const bioem_RefMap_Mod* RefMap, const int cent_x, const int cent_y, const int maxRef)
{
	const int iRefMap = myBlockIdxX * myBlockDimX + myThreadIdxX;
	if (iRefMap < maxRef)
	{
		compareRefMap<0>(iRefMap,iOrient,iConv,*pMap, pProb, param, *RefMap, cent_x, cent_y);
	}
}

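// One thread per reference map: compareRefMapShifted iterates over all
// displacements internally.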
__global__ void compareRefMapShifted_kernel(const int iOrient, const int iConv, const bioem_map* pMap, bioem_Probability* pProb, const bioem_param_device param, const bioem_RefMap_Mod* RefMap, const int maxRef)
{
	const int iRefMap = myBlockIdxX * myBlockDimX + myThreadIdxX;
	if (iRefMap < maxRef)
	{
		compareRefMapShifted<1>(iRefMap,iOrient,iConv,*pMap, pProb, param, *RefMap);
	}
}

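// Grid-stride zeroing of a device buffer; assumes size is a multiple of
// sizeof(int), since the buffer is cleared int-wise.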
__global__ void cudaZeroMem(void* ptr, size_t size)
{
	int* myptr = (int*) ptr;
	size_t mysize = size / sizeof(int);
	size_t myid = myBlockDimX * myBlockIdxX + myThreadIdxX;
	size_t mygrid = myBlockDimX * myGridDimX;
	for (size_t i = myid;i < mysize;i += mygrid) myptr[i] = 0;
}

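// Handles nShifts * nShifts displacements per reference map. The flat thread
// index packs (iRefMap, shift_y, shift_x), so nShifts must be a power of two
// for the mask and shift arithmetic below to be valid.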
__global__ void compareRefMapLoopShifts_kernel(const int iOrient, const int iConv, const bioem_map* pMap, bioem_Probability* pProb, const bioem_param_device param, const bioem_RefMap* RefMap, const int blockoffset, const int nShifts, const int nShiftBits, const int maxRef)
{
	const size_t myid = (myBlockIdxX + blockoffset) * myBlockDimX + myThreadIdxX;
	const int iRefMap = myid >> (nShiftBits << 1);
	const int myRef = myThreadIdxX >> (nShiftBits << 1);
	const int myShiftIdx = (myid >> nShiftBits) & (nShifts - 1);
	const int myShiftIdy = myid & (nShifts - 1);
	const int myShift = myid & (nShifts * nShifts - 1);
	const int cent_x = myShiftIdx * param.GridSpaceCenter - param.maxDisplaceCenter;
	const int cent_y = myShiftIdy * param.GridSpaceCenter - param.maxDisplaceCenter;

	const bool threadActive = myShiftIdx < nShifts && myShiftIdy < nShifts && iRefMap < maxRef;

	compareRefMap<2>(iRefMap,iOrient,iConv,*pMap, pProb, param, *RefMap, cent_x, cent_y, myShift, nShifts * nShifts, myRef, threadActive);
}

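// One block per reference map: multiplies the convolved-map FFT with the
// complex conjugate of the reference-map FFT; the inverse FFT of this product
// is the cross-correlation of the two maps.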
__global__ void multComplexMap(const mycomplex_t* convmap, const mycomplex_t* refmap, mycuComplex_t* out, const int NumberPixelsTotal, const int MapSize, const int NumberMaps)
{
	if (myBlockIdxX >= NumberMaps) return;
	const mycomplex_t* myin = &refmap[myBlockIdxX * MapSize];
	mycuComplex_t* myout = &out[myBlockIdxX * MapSize];
	for(int i = myThreadIdxX;i < NumberPixelsTotal;i += myBlockDimX)
	{
		myout[i].x = convmap[i][0] * myin[i][0] + convmap[i][1] * myin[i][1];
		myout[i].y = convmap[i][1] * myin[i][0] - convmap[i][0] * myin[i][1];
	}
}

__global__ void cuDoRefMapsFFT(const int iOrient, const int iConv, const myfloat_t* lCC, const myfloat_t sumC, const myfloat_t sumsquareC, bioem_Probability* pProb, const bioem_param_device param, const bioem_RefMap* RefMap, const int maxRef)
{
	const int iRefMap = myBlockIdxX * myBlockDimX + myThreadIdxX;
	const myfloat_t* mylCC = &lCC[iRefMap * param.NumberPixels * param.NumberPixels];
	if (iRefMap >= maxRef) return;
	doRefMapFFT(iRefMap, iOrient, iConv, mylCC, sumC, sumsquareC, pProb, param, *RefMap);
}

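// Host-side helpers: round-up integer division, power-of-two test, and integer
// log2 (bit scan on Windows, __builtin_clz elsewhere).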
template <class T> static inline T divup(T num, T divider) {return((num + divider - 1) / divider);}
static inline bool IsPowerOf2(int x) {return ((x > 0) && ((x & (x - 1)) == 0));}
#if defined(_WIN32)
static inline int ilog2 (int value)
{
	DWORD index;
	_BitScanReverse (&index, value);
	return((int) index);
}
#else
static inline int ilog2(int value) {return 31 - __builtin_clz(value);}
#endif

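// Compares all reference maps against one convolved projection (iProjectionOut,
// iConv). Dispatches either to the FFT-based correlation path or to one of the
// three direct GPUAlgo kernel variants.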
int bioem_cuda::compareRefMaps(int iProjectionOut, int iConv, const bioem_map& conv_map, mycomplex_t* localmultFFT, myfloat_t sumC, myfloat_t sumsquareC, const int startMap)
{
	if (startMap)
	{
		cout << "Error startMap not implemented for GPU Code\n";
		exit(1);
	}
	//cudaMemcpyToSymbolAsync(&gConvMap, &conv_map, sizeof(bioem_map), 0, cudaMemcpyHostToDevice, cudaStream);
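	// The two convolution-map buffers are used alternately; in async mode, wait
	// for the event marking the last use of this buffer before overwriting it.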
	if (GPUAsync)
	{
		checkCudaErrors(cudaEventSynchronize(cudaEvent[iConv & 1]));
	}

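	// FFT path: upload the Fourier transform of the convolved map, multiply it
	// with all reference-map FFTs, run one batched inverse FFT, and accumulate
	// probabilities from the resulting cross-correlations.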
	if (FFTAlgo)
	{
		checkCudaErrors(cudaMemcpyAsync(&pConvMapFFT[(iConv & 1) * param.RefMapSize], localmultFFT, param.RefMapSize * sizeof(mycomplex_t), cudaMemcpyHostToDevice, cudaStream));
		multComplexMap<<<maxRef, CUDA_THREAD_COUNT, 0, cudaStream>>>(&pConvMapFFT[(iConv & 1) * param.RefMapSize], pRefMapsFFT, pFFTtmp2, param.param_device.NumberPixels * param.param_device.NumberFFTPixels1D, param.RefMapSize, maxRef);
		cudaZeroMem<<<32, 256>>>(pFFTtmp, maxRef * sizeof(myfloat_t) * param.param_device.NumberPixels * param.param_device.NumberPixels);
		if (mycufftExecC2R(plan, pFFTtmp2, pFFTtmp) != CUFFT_SUCCESS)
		{
			cout << "Error running CUFFT\n";
			exit(1);
		}
		cuDoRefMapsFFT<<<divup(maxRef, CUDA_THREAD_COUNT), CUDA_THREAD_COUNT, 0, cudaStream>>>(iProjectionOut, iConv, pFFTtmp, sumC, sumsquareC, pProb_device, param.param_device, pRefMap_device, maxRef);
		checkCudaErrors(cudaGetLastError());
	}
	else
	{
		checkCudaErrors(cudaMemcpyAsync(pConvMap_device[iConv & 1], &conv_map, sizeof(bioem_map), cudaMemcpyHostToDevice, cudaStream));

		if (GPUAlgo == 2) //Loop over shifts
		{
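			// The thread index packs (reference map, shift_y, shift_x); this requires
			// nShifts to be a power of two and CUDA_THREAD_COUNT to be a multiple of
			// nShifts * nShifts, both checked below.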
			const int nShifts = 2 * param.param_device.maxDisplaceCenter / param.param_device.GridSpaceCenter + 1;
			if (!IsPowerOf2(nShifts))
			{
				cout << "Invalid number of displacements, no power of two\n";
				exit(1);
			}
			if (CUDA_THREAD_COUNT % (nShifts * nShifts))
			{
				cout << "CUDA Thread count (" << CUDA_THREAD_COUNT << ") is no multiple of number of shifts (" << (nShifts * nShifts) << ")\n";
				exit(1);
			}
			if (nShifts > CUDA_MAX_SHIFT_REDUCE)
			{
				cout << "Too many displacements for CUDA reduction\n";
				exit(1);
			}
			const int nShiftBits = ilog2(nShifts);
			size_t totalBlocks = divup((size_t) maxRef * (size_t) nShifts * (size_t) nShifts, (size_t) CUDA_THREAD_COUNT);
			size_t nBlocks = CUDA_BLOCK_COUNT;
			for (size_t i = 0;i < totalBlocks;i += nBlocks)
			{
				compareRefMapLoopShifts_kernel <<<min(nBlocks, totalBlocks - i), CUDA_THREAD_COUNT, (CUDA_THREAD_COUNT * 2 + CUDA_THREAD_COUNT / (nShifts * nShifts) * 4) * sizeof(myfloat_t), cudaStream>>> (iProjectionOut, iConv, pConvMap_device[iConv & 1], pProb_device, param.param_device, pRefMap_device, i, nShifts, nShiftBits, maxRef);
			}
		}
		else if (GPUAlgo == 1) //Split shifts in multiple kernels
		{
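			// One kernel launch per displacement (cent_x, cent_y).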
			for (int cent_x = -param.param_device.maxDisplaceCenter; cent_x <= param.param_device.maxDisplaceCenter; cent_x += param.param_device.GridSpaceCenter)
			{
				for (int cent_y = -param.param_device.maxDisplaceCenter; cent_y <= param.param_device.maxDisplaceCenter; cent_y += param.param_device.GridSpaceCenter)
				{
					compareRefMap_kernel <<<divup(maxRef, CUDA_THREAD_COUNT), CUDA_THREAD_COUNT, 0, cudaStream>>> (iProjectionOut, iConv, pConvMap_device[iConv & 1], pProb_device, param.param_device, pRefMap_device_Mod, cent_x, cent_y, maxRef);
				}
			}
		}
		else if (GPUAlgo == 0) //All shifts in one kernel
		{
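			// A single launch; every thread loops over all displacements itself.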
			compareRefMapShifted_kernel <<<divup(maxRef, CUDA_THREAD_COUNT), CUDA_THREAD_COUNT, 0, cudaStream>>> (iProjectionOut, iConv, pConvMap_device[iConv & 1], pProb_device, param.param_device, pRefMap_device_Mod, maxRef);
		}
		else
		{
			cout << "Invalid GPU Algorithm selected\n";
			exit(1);
		}
	}
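	// If the GPU handles only part of the workload, the CPU implementation of
	// the base class processes the remaining maps, starting at index maxRef.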
	if (GPUWorkload < 100)
	{
		bioem::compareRefMaps(iProjectionOut, iConv, conv_map, localmultFFT, sumC, sumsquareC, maxRef);
	}
	if (GPUAsync)
	{
		checkCudaErrors(cudaEventRecord(cudaEvent[iConv & 1], cudaStream));
	}
	else
	{
		checkCudaErrors(cudaStreamSynchronize(cudaStream));
	}
	return(0);
}

int bioem_cuda::deviceInit()
{
	deviceExit();

	if (FFTAlgo) GPUAlgo = 2;

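	// Create the stream, allocate persistent device buffers, and set up one
	// event per convolution-map buffer for double buffering.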
	checkCudaErrors(cudaStreamCreate(&cudaStream));
	checkCudaErrors(cudaMalloc(&pRefMap_device, sizeof(bioem_RefMap)));
	checkCudaErrors(cudaMalloc(&pProb_device, sizeof(bioem_Probability) * RefMap.ntotRefMap));
	for (int i = 0;i < 2;i++)
	{
		checkCudaErrors(cudaEventCreate(&cudaEvent[i]));
		checkCudaErrors(cudaMalloc(&pConvMap_device[i], sizeof(bioem_map)));
	}
	pRefMap_device_Mod = (bioem_RefMap_Mod*) pRefMap_device;

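	// FFT algorithm: allocate buffers for the reference-map FFTs, the per-map
	// Fourier products, and the inverse-FFT output; upload the precomputed
	// reference FFTs; and create a batched complex-to-real cuFFT plan.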
	if (FFTAlgo)
	{
		checkCudaErrors(cudaMalloc(&pRefMapsFFT, RefMap.ntotRefMap * param.RefMapSize * sizeof(mycomplex_t)));
		checkCudaErrors(cudaMalloc(&pFFTtmp2, RefMap.ntotRefMap * param.RefMapSize * sizeof(mycomplex_t)));
		checkCudaErrors(cudaMalloc(&pFFTtmp, RefMap.ntotRefMap * param.param_device.NumberPixels * param.param_device.NumberPixels * sizeof(myfloat_t)));
		checkCudaErrors(cudaMalloc(&pConvMapFFT, param.RefMapSize * sizeof(mycomplex_t) * 2));
		cudaMemcpy(pRefMapsFFT, RefMap.RefMapsFFT, RefMap.ntotRefMap * param.RefMapSize * sizeof(mycomplex_t), cudaMemcpyHostToDevice);

		int n[2] = {param.param_device.NumberPixels, param.param_device.NumberPixels};
		if (cufftPlanMany(&plan, 2, n, NULL, 1, 0, NULL, 1, 0, CUFFT_C2R, RefMap.ntotRefMap) != CUFFT_SUCCESS)
		{
			cout << "Error planning CUFFT\n";
			exit(1);
		}
		if (cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE) != CUFFT_SUCCESS)
		{
			cout << "Error planning CUFFT compatibility\n";
			exit(1);
		}
		if (cufftSetStream(plan, cudaStream) != CUFFT_SUCCESS)
		{
			cout << "Error setting CUFFT stream\n";
			exit(1);
		}
	}

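	// GPUAlgo 0 and 1 expect the transposed bioem_RefMap_Mod layout; the copy
	// reuses the bioem_RefMap-sized device allocation made above.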
	if (GPUAlgo == 0 || GPUAlgo == 1)
	{
		bioem_RefMap_Mod* RefMapGPU = new bioem_RefMap_Mod(RefMap);
		cudaMemcpy(pRefMap_device_Mod, RefMapGPU, sizeof(bioem_RefMap), cudaMemcpyHostToDevice);
		delete RefMapGPU;
	}
	else
	{
		cudaMemcpy(pRefMap_device, &RefMap, sizeof(bioem_RefMap), cudaMemcpyHostToDevice);
	}

	deviceInitialized = 1;
	return(0);
}

int bioem_cuda::deviceExit()
{
	if (deviceInitialized == 0) return(0);

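	// Release the stream, the events, and all device buffers.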
	cudaStreamDestroy(cudaStream);
	cudaFree(pRefMap_device);
	cudaFree(pProb_device);
	for (int i = 0;i < 2;i++)
	{
		cudaEventDestroy(cudaEvent[i]);
		cudaFree(pConvMap_device[i]);
	}
	if (FFTAlgo)
	{
		cudaFree(pRefMapsFFT);
		cudaFree(pConvMapFFT);
		cudaFree(pFFTtmp);
		cudaFree(pFFTtmp2);
		cufftDestroy(plan);
	}
	cudaThreadExit();

	deviceInitialized = 0;
	return(0);
}

int bioem_cuda::deviceStartRun()
{
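	// GPUWORKLOAD selects the fraction of reference maps handled by the GPU;
	// upload the current probabilities for those maps before the run.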
	maxRef = GPUWorkload >= 100 ? RefMap.ntotRefMap : ((size_t) RefMap.ntotRefMap * (size_t) GPUWorkload / 100);

	cudaMemcpy(pProb_device, pProb, sizeof(bioem_Probability) * maxRef, cudaMemcpyHostToDevice);
	return(0);
}

int bioem_cuda::deviceFinishRun()
{
	if (GPUAsync) cudaStreamSynchronize(cudaStream);
	cudaMemcpy(pProb, pProb_device, sizeof(bioem_Probability) * maxRef, cudaMemcpyDeviceToHost);

	return(0);
}

bioem* bioem_cuda_create()
{
	return new bioem_cuda;
}