Actual source code: sfcuda.cu
petsc-3.15.0 2021-03-30
1: #include <../src/vec/is/sf/impls/basic/sfpack.h>
2: #include <cuda_runtime.h>
3: #include <petsccublas.h>
5: /* Map a thread id to an index in root/leaf space through a series of 3D subdomains. See PetscSFPackOpt. */
6: __device__ static inline PetscInt MapTidToIndex(const PetscInt *opt,PetscInt tid)
7: {
8: PetscInt i,j,k,m,n,r;
9: const PetscInt *offset,*start,*dx,*dy,*X,*Y;
11: n = opt[0];
12: offset = opt + 1;
13: start = opt + n + 2;
14: dx = opt + 2*n + 2;
15: dy = opt + 3*n + 2;
16: X = opt + 5*n + 2;
17: Y = opt + 6*n + 2;
18: for (r=0; r<n; r++) {if (tid < offset[r+1]) break;}
19: m = (tid - offset[r]);
20: k = m/(dx[r]*dy[r]);
21: j = (m - k*dx[r]*dy[r])/dx[r];
22: i = m - k*dx[r]*dy[r] - j*dx[r];
24: return (start[r] + k*X[r]*Y[r] + j*X[r] + i);
25: }
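/* For illustration (layout inferred from the code above): with n=1 subdomain, opt[] = {1, 0,24, 5, 2,3, 4, 10,10}
   stores n=1, offset[]={0,24}, start[]={5}, dx[]={2}, dy[]={3} (the slot at opt+4*n+2, presumably dz, is not read
   here), X[]={10}, Y[]={10}. Then for tid=7: r=0, m=7, k=7/(2*3)=1, j=(7-6)/2=0, i=7-6-0=1, and the returned
   index is 5 + 1*10*10 + 0*10 + 1 = 106. */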
27: /*====================================================================================*/
28: /* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
29: /*====================================================================================*/
31: /* Suppose the user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI datatype made of 16 PetscReals; then
32: <Type> is PetscReal, the primitive type we operate on;
33: <bs> is 16, i.e., <unit> contains 16 primitive types;
34: <BS> is 8, the maximal SIMD width with which we try to vectorize operations on <unit>;
35: <EQ> is 0, which is (bs == BS ? 1 : 0)
37: If instead <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, making MBS below a compile-time constant.
38: For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, and the inner for-loops below are fully unrolled.
39: */
40: template<class Type,PetscInt BS,PetscInt EQ>
41: __global__ static void d_Pack(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,const Type *data,Type *buf)
42: {
43: PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
44: const PetscInt grid_size = gridDim.x * blockDim.x;
45: const PetscInt M = (EQ) ? 1 : bs/BS; /* If EQ, then M=1 enables compiler's const-propagation */
46: const PetscInt MBS = M*BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
48: for (; tid<count; tid += grid_size) {
49: /* opt != NULL ==> idx == NULL, i.e., the indices have a pattern but are not contiguous;
50: opt == NULL && idx == NULL ==> the indices are contiguous; otherwise (idx != NULL) the irregular indices are listed in idx[]
51: */
52: t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
53: s = tid*MBS;
54: for (i=0; i<MBS; i++) buf[s+i] = data[t+i];
55: }
56: }
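/* An illustrative sketch (not part of the file; the buffer and launch names are hypothetical) of how the
   template parameters discussed above translate into instantiations of d_Pack: */
#if 0
d_Pack<PetscReal,8,0><<<nblocks,256,0,stream>>>(16,count,start,opt,idx,rootdata,rootbuf); /* <unit> = 16 PetscReals: bs=16, BS=8, EQ=0, so MBS = (bs/BS)*BS = 16 at run time */
d_Pack<PetscReal,1,1><<<nblocks,256,0,stream>>>(1,count,start,opt,idx,rootdata,rootbuf);  /* common VecScatter case: bs=1, BS=1, EQ=1, so MBS is the compile-time constant 1 */
#endif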
58: template<class Type,class Op,PetscInt BS,PetscInt EQ>
59: __global__ static void d_UnpackAndOp(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,Type *data,const Type *buf)
60: {
61: PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
62: const PetscInt grid_size = gridDim.x * blockDim.x;
63: const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
64: Op op;
66: for (; tid<count; tid += grid_size) {
67: t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
68: s = tid*MBS;
69: for (i=0; i<MBS; i++) op(data[t+i],buf[s+i]);
70: }
71: }
73: template<class Type,class Op,PetscInt BS,PetscInt EQ>
74: __global__ static void d_FetchAndOp(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,Type *leafbuf)
75: {
76: PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
77: const PetscInt grid_size = gridDim.x * blockDim.x;
78: const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
79: Op op;
81: for (; tid<count; tid += grid_size) {
82: r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
83: l = tid*MBS;
84: for (i=0; i<MBS; i++) leafbuf[l+i] = op(rootdata[r+i],leafbuf[l+i]);
85: }
86: }
88: template<class Type,class Op,PetscInt BS,PetscInt EQ>
89: __global__ static void d_ScatterAndOp(PetscInt bs,PetscInt count,PetscInt srcx,PetscInt srcy,PetscInt srcX,PetscInt srcY,PetscInt srcStart,const PetscInt* srcIdx,const Type *src,PetscInt dstx,PetscInt dsty,PetscInt dstX,PetscInt dstY,PetscInt dstStart,const PetscInt *dstIdx,Type *dst)
90: {
91: PetscInt i,j,k,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
92: const PetscInt grid_size = gridDim.x * blockDim.x;
93: const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
94: Op op;
96: for (; tid<count; tid += grid_size) {
97: if (!srcIdx) { /* src is either contiguous or 3D */
98: k = tid/(srcx*srcy);
99: j = (tid - k*srcx*srcy)/srcx;
100: i = tid - k*srcx*srcy - j*srcx;
101: s = srcStart + k*srcX*srcY + j*srcX + i;
102: } else {
103: s = srcIdx[tid];
104: }
106: if (!dstIdx) { /* dst is either contiguous or 3D */
107: k = tid/(dstx*dsty);
108: j = (tid - k*dstx*dsty)/dstx;
109: i = tid - k*dstx*dsty - j*dstx;
110: t = dstStart + k*dstX*dstY + j*dstX + i;
111: } else {
112: t = dstIdx[tid];
113: }
115: s *= MBS;
116: t *= MBS;
117: for (i=0; i<MBS; i++) op(dst[t+i],src[s+i]);
118: }
119: }
121: template<class Type,class Op,PetscInt BS,PetscInt EQ>
122: __global__ static void d_FetchAndOpLocal(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,PetscInt leafstart,const PetscInt *leafopt,const PetscInt *leafidx,const Type *leafdata,Type *leafupdate)
123: {
124: PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
125: const PetscInt grid_size = gridDim.x * blockDim.x;
126: const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
127: Op op;
129: for (; tid<count; tid += grid_size) {
130: r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
131: l = (leafopt? MapTidToIndex(leafopt,tid) : (leafidx? leafidx[tid] : leafstart+tid))*MBS;
132: for (i=0; i<MBS; i++) leafupdate[l+i] = op(rootdata[r+i],leafdata[l+i]);
133: }
134: }
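/* Note: judging by its name and the wrapper below, this kernel backs the local (self-communication) part of
   PetscSFFetchAndOpBegin/End: each thread stores the pre-op root value into leafupdate and updates the root
   with op(root,leaf) in one pass. */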
136: /*====================================================================================*/
137: /* Regular operations on device */
138: /*====================================================================================*/
139: template<typename Type> struct Insert {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = y; return old;}};
140: template<typename Type> struct Add {__device__ Type operator() (Type& x,Type y) const {Type old = x; x += y; return old;}};
141: template<typename Type> struct Mult {__device__ Type operator() (Type& x,Type y) const {Type old = x; x *= y; return old;}};
142: template<typename Type> struct Min {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMin(x,y); return old;}};
143: template<typename Type> struct Max {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMax(x,y); return old;}};
144: template<typename Type> struct LAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x && y; return old;}};
145: template<typename Type> struct LOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x || y; return old;}};
146: template<typename Type> struct LXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = !x != !y; return old;}};
147: template<typename Type> struct BAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x & y; return old;}};
148: template<typename Type> struct BOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x | y; return old;}};
149: template<typename Type> struct BXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x ^ y; return old;}};
150: template<typename Type> struct Minloc {
151: __device__ Type operator() (Type& x,Type y) const {
152: Type old = x;
153: if (y.a < x.a) x = y;
154: else if (y.a == x.a) x.b = min(x.b,y.b);
155: return old;
156: }
157: };
158: template<typename Type> struct Maxloc {
159: __device__ Type operator() (Type& x,Type y) const {
160: Type old = x;
161: if (y.a > x.a) x = y;
162: else if (y.a == x.a) x.b = min(x.b,y.b); /* See MPI MAXLOC */
163: return old;
164: }
165: };
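/* These functors update x in place and return the old value, which is what d_FetchAndOp/d_FetchAndOpLocal rely
   on. A minimal illustration (not part of the file): */
#if 0
Add<int> add;
int x = 1;
int old = add(x,2); /* now x == 3 and old == 1 */
#endif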
167: /*====================================================================================*/
168: /* Atomic operations on device */
169: /*====================================================================================*/
171: /*
172: Atomic Insert (exchange) operations
174: CUDA C Programming Guide V10.1 Chapter B.12.1.3:
176: int atomicExch(int* address, int val);
177: unsigned int atomicExch(unsigned int* address, unsigned int val);
178: unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
179: float atomicExch(float* address, float val);
181: reads the 32-bit or 64-bit word old located at the address address in global or shared
182: memory and stores val back to memory at the same address. These two operations are
183: performed in one atomic transaction. The function returns old.
185: PETSc notes:
187: It may be useful in PetscSFFetchAndOp with op = MPI_REPLACE.
189: VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
190: atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
191: be atomic itself.
193: With bs>1 and a unit > 64 bits, the current element-wise atomic approach cannot guarantee the whole
194: insertion is atomic. We hope no user code relies on that.
195: */
196: __device__ static double atomicExch(double* address,double val) {return __longlong_as_double(atomicExch((ullint*)address,__double_as_longlong(val)));}
198: __device__ static llint atomicExch(llint* address,llint val) {return (llint)(atomicExch((ullint*)address,(ullint)val));}
200: template<typename Type> struct AtomicInsert {__device__ Type operator() (Type& x,Type y) const {return atomicExch(&x,y);}};
202: #if defined(PETSC_HAVE_COMPLEX)
203: #if defined(PETSC_USE_REAL_DOUBLE)
204: /* CUDA does not support 128-bit atomics. Users should not insert different 128-bit PetscComplex values to the same location */
205: template<> struct AtomicInsert<PetscComplex> {
206: __device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
207: PetscComplex old, *z = &old;
208: double *xp = (double*)&x,*yp = (double*)&y;
209: AtomicInsert<double> op;
210: z[0] = op(xp[0],yp[0]);
211: z[1] = op(xp[1],yp[1]);
212: return old; /* The returned value may not be atomic; it can mix results of two concurrent ops. The caller should discard it. */
213: }
214: };
215: #elif defined(PETSC_USE_REAL_SINGLE)
216: template<> struct AtomicInsert<PetscComplex> {
217: __device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
218: double *xp = (double*)&x,*yp = (double*)&y;
219: AtomicInsert<double> op;
220: return op(xp[0],yp[0]);
221: }
222: };
223: #endif
224: #endif
226: /*
227: Atomic add operations
229: CUDA C Programming Guide V10.1 Chapter B.12.1.1:
231: int atomicAdd(int* address, int val);
232: unsigned int atomicAdd(unsigned int* address,unsigned int val);
233: unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
234: float atomicAdd(float* address, float val);
235: double atomicAdd(double* address, double val);
236: __half2 atomicAdd(__half2 *address, __half2 val);
237: __half atomicAdd(__half *address, __half val);
239: reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
240: and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
241: function returns old.
243: The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
244: The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
245: The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
246: higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
247: the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
248: The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
249: */
250: __device__ static llint atomicAdd(llint* address,llint val) {return (llint)atomicAdd((ullint*)address,(ullint)val);}
252: template<typename Type> struct AtomicAdd {__device__ Type operator() (Type& x,Type y) const {return atomicAdd(&x,y);}};
254: template<> struct AtomicAdd<double> {
255: __device__ double operator() (double& x,double y) const {
256: #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
257: return atomicAdd(&x,y);
258: #else
259: double *address = &x, val = y;
260: ullint *address_as_ull = (ullint*)address;
261: ullint old = *address_as_ull, assumed;
262: do {
263: assumed = old;
264: old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
265: /* Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) */
266: } while (assumed != old);
267: return __longlong_as_double(old);
268: #endif
269: }
270: };
272: template<> struct AtomicAdd<float> {
273: __device__ float operator() (float& x,float y) const {
274: #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
275: return atomicAdd(&x,y);
276: #else
277: float *address = &x, val = y;
278: int *address_as_int = (int*)address;
279: int old = *address_as_int, assumed;
280: do {
281: assumed = old;
282: old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
283: /* Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) */
284: } while (assumed != old);
285: return __int_as_float(old);
286: #endif
287: }
288: };
290: #if defined(PETSC_HAVE_COMPLEX)
291: template<> struct AtomicAdd<PetscComplex> {
292: __device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
293: PetscComplex old, *z = &old;
294: PetscReal *xp = (PetscReal*)&x,*yp = (PetscReal*)&y;
295: AtomicAdd<PetscReal> op;
296: z[0] = op(xp[0],yp[0]);
297: z[1] = op(xp[1],yp[1]);
298: return old; /* The returned value may not be atomic; it can mix results of two concurrent ops. The caller should discard it. */
299: }
300: };
301: #endif
303: /*
304: Atomic Mult operations:
306: CUDA has no atomicMult at all, so we build our own with atomicCAS
307: */
307: */
308: #if defined(PETSC_USE_REAL_DOUBLE)
309: __device__ static double atomicMult(double* address, double val)
310: {
311: ullint *address_as_ull = (ullint*)(address);
312: ullint old = *address_as_ull, assumed;
313: do {
314: assumed = old;
315: /* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
316: old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val*__longlong_as_double(assumed)));
317: } while (assumed != old);
318: return __longlong_as_double(old);
319: }
320: #elif defined(PETSC_USE_REAL_SINGLE)
321: __device__ static float atomicMult(float* address,float val)
322: {
323: int *address_as_int = (int*)(address);
324: int old = *address_as_int, assumed;
325: do {
326: assumed = old;
327: old = atomicCAS(address_as_int, assumed, __float_as_int(val*__int_as_float(assumed)));
328: } while (assumed != old);
329: return __int_as_float(old);
330: }
331: #endif
333: __device__ static int atomicMult(int* address,int val)
334: {
335: int *address_as_int = (int*)(address);
336: int old = *address_as_int, assumed;
337: do {
338: assumed = old;
339: old = atomicCAS(address_as_int, assumed, val*assumed);
340: } while (assumed != old);
341: return (int)old;
342: }
344: __device__ static llint atomicMult(llint* address,llint val)
345: {
346: ullint *address_as_ull = (ullint*)(address);
347: ullint old = *address_as_ull, assumed;
348: do {
349: assumed = old;
350: old = atomicCAS(address_as_ull, assumed, (ullint)(val*(llint)assumed));
351: } while (assumed != old);
352: return (llint)old;
353: }
355: template<typename Type> struct AtomicMult {__device__ Type operator() (Type& x,Type y) const {return atomicMult(&x,y);}};
357: /*
358: Atomic Min/Max operations
360: CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
362: int atomicMin(int* address, int val);
363: unsigned int atomicMin(unsigned int* address,unsigned int val);
364: unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
366: reads the 32-bit or 64-bit word old located at the address address in global or shared
367: memory, computes the minimum of old and val, and stores the result back to memory
368: at the same address. These three operations are performed in one atomic transaction.
369: The function returns old.
370: The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
372: atomicMax() is similar.
373: */
375: #if defined(PETSC_USE_REAL_DOUBLE)
376: __device__ static double atomicMin(double* address, double val)
377: {
378: ullint *address_as_ull = (ullint*)(address);
379: ullint old = *address_as_ull, assumed;
380: do {
381: assumed = old;
382: old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val,__longlong_as_double(assumed))));
383: } while (assumed != old);
384: return __longlong_as_double(old);
385: }
387: __device__ static double atomicMax(double* address, double val)
388: {
389: ullint *address_as_ull = (ullint*)(address);
390: ullint old = *address_as_ull, assumed;
391: do {
392: assumed = old;
393: old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val,__longlong_as_double(assumed))));
394: } while (assumed != old);
395: return __longlong_as_double(old);
396: }
397: #elif defined(PETSC_USE_REAL_SINGLE)
398: __device__ static float atomicMin(float* address,float val)
399: {
400: int *address_as_int = (int*)(address);
401: int old = *address_as_int, assumed;
402: do {
403: assumed = old;
404: old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val,__int_as_float(assumed))));
405: } while (assumed != old);
406: return __int_as_float(old);
407: }
409: __device__ static float atomicMax(float* address,float val)
410: {
411: int *address_as_int = (int*)(address);
412: int old = *address_as_int, assumed;
413: do {
414: assumed = old;
415: old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val,__int_as_float(assumed))));
416: } while (assumed != old);
417: return __int_as_float(old);
418: }
419: #endif
421: /*
422: atomicMin/Max(long long *, long long) are not in NVIDIA's documentation. But on OLCF Summit we found
423: atomicMin/Max/And/Or/Xor(long long *, long long) in /sw/summit/cuda/10.1.243/include/sm_32_atomic_functions.h.
424: This causes compilation errors with PGI compilers and 64-bit indices:
425: error: function "atomicMin(long long *, long long)" has already been defined
427: So we add the extra guard defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320).
428: */
429: #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
430: __device__ static llint atomicMin(llint* address,llint val)
431: {
432: ullint *address_as_ull = (ullint*)(address);
433: ullint old = *address_as_ull, assumed;
434: do {
435: assumed = old;
436: old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMin(val,(llint)assumed)));
437: } while (assumed != old);
438: return (llint)old;
439: }
441: __device__ static llint atomicMax(llint* address,llint val)
442: {
443: ullint *address_as_ull = (ullint*)(address);
444: ullint old = *address_as_ull, assumed;
445: do {
446: assumed = old;
447: old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMax(val,(llint)assumed)));
448: } while (assumed != old);
449: return (llint)old;
450: }
451: #endif
453: template<typename Type> struct AtomicMin {__device__ Type operator() (Type& x,Type y) const {return atomicMin(&x,y);}};
454: template<typename Type> struct AtomicMax {__device__ Type operator() (Type& x,Type y) const {return atomicMax(&x,y);}};
456: /*
457: Atomic bitwise operations
459: CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
461: int atomicAnd(int* address, int val);
462: unsigned int atomicAnd(unsigned int* address,unsigned int val);
463: unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
465: reads the 32-bit or 64-bit word old located at the address address in global or shared
466: memory, computes (old & val), and stores the result back to memory at the same
467: address. These three operations are performed in one atomic transaction.
468: The function returns old.
470: The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
472: atomicOr() and atomicXor are similar.
473: */
475: #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320) /* Why 320? see comments at atomicMin() above */
476: __device__ static llint atomicAnd(llint* address,llint val)
477: {
478: ullint *address_as_ull = (ullint*)(address);
479: ullint old = *address_as_ull, assumed;
480: do {
481: assumed = old;
482: old = atomicCAS(address_as_ull, assumed, (ullint)(val & (llint)assumed));
483: } while (assumed != old);
484: return (llint)old;
485: }
486: __device__ static llint atomicOr(llint* address,llint val)
487: {
488: ullint *address_as_ull = (ullint*)(address);
489: ullint old = *address_as_ull, assumed;
490: do {
491: assumed = old;
492: old = atomicCAS(address_as_ull, assumed, (ullint)(val | (llint)assumed));
493: } while (assumed != old);
494: return (llint)old;
495: }
497: __device__ static llint atomicXor(llint* address,llint val)
498: {
499: ullint *address_as_ull = (ullint*)(address);
500: ullint old = *address_as_ull, assumed;
501: do {
502: assumed = old;
503: old = atomicCAS(address_as_ull, assumed, (ullint)(val ^ (llint)assumed));
504: } while (assumed != old);
505: return (llint)old;
506: }
507: #endif
509: template<typename Type> struct AtomicBAND {__device__ Type operator() (Type& x,Type y) const {return atomicAnd(&x,y);}};
510: template<typename Type> struct AtomicBOR {__device__ Type operator() (Type& x,Type y) const {return atomicOr (&x,y);}};
511: template<typename Type> struct AtomicBXOR {__device__ Type operator() (Type& x,Type y) const {return atomicXor(&x,y);}};
513: /*
514: Atomic logical operations:
516: CUDA has no atomic logical operations at all. We support them on integer types.
517: */
519: /* A template declared but not defined makes any instantiation that does not match one of the given specializations
520: a compile-time error, which is what we want since we only support 32-bit and 64-bit integers.
521: */
522: template<typename Type,class Op,int size/* sizeof(Type) */> struct AtomicLogical;
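/* For example, AtomicLogical<int,land<int>,4> and AtomicLogical<llint,land<llint>,8> match the specializations
   below, while something like AtomicLogical<short,land<short>,2> has no definition and fails to compile, as intended. */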
524: template<typename Type,class Op>
525: struct AtomicLogical<Type,Op,4> {
526: __device__ Type operator()(Type& x,Type y) const {
527: int *address_as_int = (int*)(&x);
528: int old = *address_as_int, assumed;
529: Op op;
530: do {
531: assumed = old;
532: old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed,y)));
533: } while (assumed != old);
534: return (Type)old;
535: }
536: };
538: template<typename Type,class Op>
539: struct AtomicLogical<Type,Op,8> {
540: __device__ Type operator()(Type& x,Type y) const {
541: ullint *address_as_ull = (ullint*)(&x);
542: ullint old = *address_as_ull, assumed;
543: Op op;
544: do {
545: assumed = old;
546: old = atomicCAS(address_as_ull, assumed, (ullint)(op((Type)assumed,y)));
547: } while (assumed != old);
548: return (Type)old;
549: }
550: };
552: /* Note land/lor/lxor below are different from LAND etc. above: here we pass arguments by value and return the result of the op (not the old value) */
553: template<typename Type> struct land {__device__ Type operator()(Type x, Type y) {return x && y;}};
554: template<typename Type> struct lor {__device__ Type operator()(Type x, Type y) {return x || y;}};
555: template<typename Type> struct lxor {__device__ Type operator()(Type x, Type y) {return (!x != !y);}};
557: template<typename Type> struct AtomicLAND {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,land<Type>,sizeof(Type)> op; return op(x,y);}};
558: template<typename Type> struct AtomicLOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lor<Type> ,sizeof(Type)> op; return op(x,y);}};
559: template<typename Type> struct AtomicLXOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lxor<Type>,sizeof(Type)> op; return op(x,y);}};
561: /*====================================================================================*/
562: /* Wrapper functions of cuda kernels. Function pointers are stored in 'link' */
563: /*====================================================================================*/
564: template<typename Type,PetscInt BS,PetscInt EQ>
565: static PetscErrorCode Pack(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,const void *data,void *buf)
566: {
567: cudaError_t cerr;
568: PetscInt nthreads=256;
569: PetscInt nblocks=(count+nthreads-1)/nthreads;
570: const PetscInt *iarray=opt ? opt->array : NULL;
573: if (!count) return(0);
574: if (!opt && !idx) { /* It is a 'CUDA data to nvshmem buf' memory copy */
575: cerr = cudaMemcpyAsync(buf,(char*)data+start*link->unitbytes,count*link->unitbytes,cudaMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
576: } else {
577: nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
578: d_Pack<Type,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(const Type*)data,(Type*)buf);
579: cerr = cudaGetLastError();CHKERRCUDA(cerr);
580: }
581: return(0);
582: }
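/* A worked example of the block-count cap above, with hypothetical hardware numbers: on a GPU with 80 SMs and
   2048 resident threads per SM, maxResidentThreadsPerGPU = 163840, so nblocks is capped at 163840/256 = 640;
   the grid-stride loop inside the kernels then covers any leftover elements. */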
584: /* This specializes UnpackAndOp to use the cudaMemcpyAsync() below. Usually, if the unpack is a contiguous memcpy, we use root/leafdirect and do
585: not need UnpackAndOp; only with nvshmem do we need this 'nvshmem buf to CUDA data' memory copy
586: */
587: template<typename Type,PetscInt BS,PetscInt EQ>
588: static PetscErrorCode Unpack(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,const void *buf)
589: {
590: cudaError_t cerr;
591: PetscInt nthreads=256;
592: PetscInt nblocks=(count+nthreads-1)/nthreads;
593: const PetscInt *iarray=opt ? opt->array : NULL;
596: if (!count) return(0);
597: if (!opt && !idx) { /* It is a 'nvshmem buf to CUDA data' memory copy */
598: cerr = cudaMemcpyAsync((char*)data+start*link->unitbytes,buf,count*link->unitbytes,cudaMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
599: } else {
600: nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
601: d_UnpackAndOp<Type,Insert<Type>,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(Type*)data,(const Type*)buf);
602: cerr = cudaGetLastError();CHKERRCUDA(cerr);
603: }
604: return(0);
605: }
607: template<typename Type,class Op,PetscInt BS,PetscInt EQ>
608: static PetscErrorCode UnpackAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,const void *buf)
609: {
610: cudaError_t cerr;
611: PetscInt nthreads=256;
612: PetscInt nblocks=(count+nthreads-1)/nthreads;
613: const PetscInt *iarray=opt ? opt->array : NULL;
616: if (!count) return(0);
617: nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
618: d_UnpackAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(Type*)data,(const Type*)buf);
619: cerr = cudaGetLastError();CHKERRCUDA(cerr);
620: return(0);
621: }
623: template<typename Type,class Op,PetscInt BS,PetscInt EQ>
624: static PetscErrorCode FetchAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,void *buf)
625: {
626: cudaError_t cerr;
627: PetscInt nthreads=256;
628: PetscInt nblocks=(count+nthreads-1)/nthreads;
629: const PetscInt *iarray=opt ? opt->array : NULL;
632: if (!count) return(0);
633: nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
634: d_FetchAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(Type*)data,(Type*)buf);
635: cerr = cudaGetLastError();CHKERRCUDA(cerr);
636: return(0);
637: }
639: template<typename Type,class Op,PetscInt BS,PetscInt EQ>
640: static PetscErrorCode ScatterAndOp(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
641: {
642: cudaError_t cerr;
643: PetscInt nthreads=256;
644: PetscInt nblocks=(count+nthreads-1)/nthreads;
645: PetscInt srcx=0,srcy=0,srcX=0,srcY=0,dstx=0,dsty=0,dstX=0,dstY=0;
648: if (!count) return(0);
649: nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
651: /* The 3D shape of the source subdomain may differ from that of the destination, which makes it difficult to use a CUDA 3D grid and blocks */
652: if (srcOpt) {srcx = srcOpt->dx[0]; srcy = srcOpt->dy[0]; srcX = srcOpt->X[0]; srcY = srcOpt->Y[0]; srcStart = srcOpt->start[0]; srcIdx = NULL;}
653: else if (!srcIdx) {srcx = srcX = count; srcy = srcY = 1;}
655: if (dstOpt) {dstx = dstOpt->dx[0]; dsty = dstOpt->dy[0]; dstX = dstOpt->X[0]; dstY = dstOpt->Y[0]; dstStart = dstOpt->start[0]; dstIdx = NULL;}
656: else if (!dstIdx) {dstx = dstX = count; dsty = dstY = 1;}
658: d_ScatterAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,srcx,srcy,srcX,srcY,srcStart,srcIdx,(const Type*)src,dstx,dsty,dstX,dstY,dstStart,dstIdx,(Type*)dst);
659: cerr = cudaGetLastError();CHKERRCUDA(cerr);
660: return(0);
661: }
663: /* Specialization for Insert since we may use cudaMemcpyAsync */
664: template<typename Type,PetscInt BS,PetscInt EQ>
665: static PetscErrorCode ScatterAndInsert(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
666: {
667: PetscErrorCode ierr;
668: cudaError_t cerr;
671: if (!count) return(0);
672: /*src and dst are contiguous */
673: if ((!srcOpt && !srcIdx) && (!dstOpt && !dstIdx) && src != dst) {
674: cerr = cudaMemcpyAsync((Type*)dst+dstStart*link->bs,(const Type*)src+srcStart*link->bs,count*link->unitbytes,cudaMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
675: } else {
676: ierr = ScatterAndOp<Type,Insert<Type>,BS,EQ>(link,count,srcStart,srcOpt,srcIdx,src,dstStart,dstOpt,dstIdx,dst);CHKERRQ(ierr);
677: }
678: return(0);
679: }
681: template<typename Type,class Op,PetscInt BS,PetscInt EQ>
682: static PetscErrorCode FetchAndOpLocal(PetscSFLink link,PetscInt count,PetscInt rootstart,PetscSFPackOpt rootopt,const PetscInt *rootidx,void *rootdata,PetscInt leafstart,PetscSFPackOpt leafopt,const PetscInt *leafidx,const void *leafdata,void *leafupdate)
683: {
684: cudaError_t cerr;
685: PetscInt nthreads=256;
686: PetscInt nblocks=(count+nthreads-1)/nthreads;
687: const PetscInt *rarray = rootopt ? rootopt->array : NULL;
688: const PetscInt *larray = leafopt ? leafopt->array : NULL;
691: if (!count) return(0);
692: nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
693: d_FetchAndOpLocal<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,rootstart,rarray,rootidx,(Type*)rootdata,leafstart,larray,leafidx,(const Type*)leafdata,(Type*)leafupdate);
694: cerr = cudaGetLastError();CHKERRCUDA(cerr);
695: return(0);
696: }
698: /*====================================================================================*/
699: /* Init various types and instantiate pack/unpack function pointers */
700: /*====================================================================================*/
701: template<typename Type,PetscInt BS,PetscInt EQ>
702: static void PackInit_RealType(PetscSFLink link)
703: {
704: /* Pack/unpack for remote communication */
705: link->d_Pack = Pack<Type,BS,EQ>;
706: link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
707: link->d_UnpackAndAdd = UnpackAndOp <Type,Add<Type> ,BS,EQ>;
708: link->d_UnpackAndMult = UnpackAndOp <Type,Mult<Type> ,BS,EQ>;
709: link->d_UnpackAndMin = UnpackAndOp <Type,Min<Type> ,BS,EQ>;
710: link->d_UnpackAndMax = UnpackAndOp <Type,Max<Type> ,BS,EQ>;
711: link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
713: /* Scatter for local communication */
714: link->d_ScatterAndInsert = ScatterAndInsert<Type ,BS,EQ>; /* Has special optimizations */
715: link->d_ScatterAndAdd = ScatterAndOp <Type,Add<Type> ,BS,EQ>;
716: link->d_ScatterAndMult = ScatterAndOp <Type,Mult<Type> ,BS,EQ>;
717: link->d_ScatterAndMin = ScatterAndOp <Type,Min<Type> ,BS,EQ>;
718: link->d_ScatterAndMax = ScatterAndOp <Type,Max<Type> ,BS,EQ>;
719: link->d_FetchAndAddLocal = FetchAndOpLocal <Type,Add <Type> ,BS,EQ>;
721: /* Atomic versions when there are data-race possibilities */
722: link->da_UnpackAndInsert = UnpackAndOp <Type,AtomicInsert<Type>,BS,EQ>;
723: link->da_UnpackAndAdd = UnpackAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
724: link->da_UnpackAndMult = UnpackAndOp <Type,AtomicMult<Type> ,BS,EQ>;
725: link->da_UnpackAndMin = UnpackAndOp <Type,AtomicMin<Type> ,BS,EQ>;
726: link->da_UnpackAndMax = UnpackAndOp <Type,AtomicMax<Type> ,BS,EQ>;
727: link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
729: link->da_ScatterAndInsert = ScatterAndOp <Type,AtomicInsert<Type>,BS,EQ>;
730: link->da_ScatterAndAdd = ScatterAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
731: link->da_ScatterAndMult = ScatterAndOp <Type,AtomicMult<Type> ,BS,EQ>;
732: link->da_ScatterAndMin = ScatterAndOp <Type,AtomicMin<Type> ,BS,EQ>;
733: link->da_ScatterAndMax = ScatterAndOp <Type,AtomicMax<Type> ,BS,EQ>;
734: link->da_FetchAndAddLocal = FetchAndOpLocal <Type,AtomicAdd<Type> ,BS,EQ>;
735: }
737: /* We use a templated class (rather than a function) so that it can be specialized for char-sized integers */
738: template<typename Type,PetscInt BS,PetscInt EQ,PetscInt size/*sizeof(Type)*/>
739: struct PackInit_IntegerType_Atomic {
740: static void Init(PetscSFLink link) {
741: link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
742: link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
743: link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
744: link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
745: link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
746: link->da_UnpackAndLAND = UnpackAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
747: link->da_UnpackAndLOR = UnpackAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
748: link->da_UnpackAndLXOR = UnpackAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
749: link->da_UnpackAndBAND = UnpackAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
750: link->da_UnpackAndBOR = UnpackAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
751: link->da_UnpackAndBXOR = UnpackAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
752: link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
754: link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
755: link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
756: link->da_ScatterAndMult = ScatterAndOp<Type,AtomicMult<Type> ,BS,EQ>;
757: link->da_ScatterAndMin = ScatterAndOp<Type,AtomicMin<Type> ,BS,EQ>;
758: link->da_ScatterAndMax = ScatterAndOp<Type,AtomicMax<Type> ,BS,EQ>;
759: link->da_ScatterAndLAND = ScatterAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
760: link->da_ScatterAndLOR = ScatterAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
761: link->da_ScatterAndLXOR = ScatterAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
762: link->da_ScatterAndBAND = ScatterAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
763: link->da_ScatterAndBOR = ScatterAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
764: link->da_ScatterAndBXOR = ScatterAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
765: link->da_FetchAndAddLocal = FetchAndOpLocal<Type,AtomicAdd<Type>,BS,EQ>;
766: }
767: };
769: /* CUDA does not support atomics on chars; supporting them in PETSc is TBD. */
770: template<typename Type,PetscInt BS,PetscInt EQ>
771: struct PackInit_IntegerType_Atomic<Type,BS,EQ,1> {
772: static void Init(PetscSFLink link) {/* Intentionally empty, leaving the function pointers NULL */}
773: };
775: template<typename Type,PetscInt BS,PetscInt EQ>
776: static void PackInit_IntegerType(PetscSFLink link)
777: {
778: link->d_Pack = Pack<Type,BS,EQ>;
779: link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
780: link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
781: link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
782: link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
783: link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
784: link->d_UnpackAndLAND = UnpackAndOp<Type,LAND<Type> ,BS,EQ>;
785: link->d_UnpackAndLOR = UnpackAndOp<Type,LOR<Type> ,BS,EQ>;
786: link->d_UnpackAndLXOR = UnpackAndOp<Type,LXOR<Type> ,BS,EQ>;
787: link->d_UnpackAndBAND = UnpackAndOp<Type,BAND<Type> ,BS,EQ>;
788: link->d_UnpackAndBOR = UnpackAndOp<Type,BOR<Type> ,BS,EQ>;
789: link->d_UnpackAndBXOR = UnpackAndOp<Type,BXOR<Type> ,BS,EQ>;
790: link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
792: link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
793: link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
794: link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
795: link->d_ScatterAndMin = ScatterAndOp<Type,Min<Type> ,BS,EQ>;
796: link->d_ScatterAndMax = ScatterAndOp<Type,Max<Type> ,BS,EQ>;
797: link->d_ScatterAndLAND = ScatterAndOp<Type,LAND<Type> ,BS,EQ>;
798: link->d_ScatterAndLOR = ScatterAndOp<Type,LOR<Type> ,BS,EQ>;
799: link->d_ScatterAndLXOR = ScatterAndOp<Type,LXOR<Type> ,BS,EQ>;
800: link->d_ScatterAndBAND = ScatterAndOp<Type,BAND<Type> ,BS,EQ>;
801: link->d_ScatterAndBOR = ScatterAndOp<Type,BOR<Type> ,BS,EQ>;
802: link->d_ScatterAndBXOR = ScatterAndOp<Type,BXOR<Type> ,BS,EQ>;
803: link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
804: PackInit_IntegerType_Atomic<Type,BS,EQ,sizeof(Type)>::Init(link);
805: }
807: #if defined(PETSC_HAVE_COMPLEX)
808: template<typename Type,PetscInt BS,PetscInt EQ>
809: static void PackInit_ComplexType(PetscSFLink link)
810: {
811: link->d_Pack = Pack<Type,BS,EQ>;
812: link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
813: link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
814: link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
815: link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
817: link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
818: link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
819: link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
820: link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
822: link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
823: link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type>,BS,EQ>;
824: link->da_UnpackAndMult = NULL; /* Not implemented yet */
825: link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
827: link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
828: link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type>,BS,EQ>;
829: }
830: #endif
832: typedef signed char SignedChar;
833: typedef unsigned char UnsignedChar;
834: typedef struct {int a; int b; } PairInt;
835: typedef struct {PetscInt a; PetscInt b;} PairPetscInt;
837: template<typename Type>
838: static void PackInit_PairType(PetscSFLink link)
839: {
840: link->d_Pack = Pack<Type,1,1>;
841: link->d_UnpackAndInsert = Unpack<Type,1,1>;
842: link->d_UnpackAndMaxloc = UnpackAndOp<Type,Maxloc<Type>,1,1>;
843: link->d_UnpackAndMinloc = UnpackAndOp<Type,Minloc<Type>,1,1>;
845: link->d_ScatterAndInsert = ScatterAndOp<Type,Insert<Type>,1,1>;
846: link->d_ScatterAndMaxloc = ScatterAndOp<Type,Maxloc<Type>,1,1>;
847: link->d_ScatterAndMinloc = ScatterAndOp<Type,Minloc<Type>,1,1>;
848: /* Atomics for pair types are not implemented yet */
849: }
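/* An illustrative use that would exercise these pair-type kernels (sf and the arrays are hypothetical): */
#if 0
PairInt *leafdata,*rootdata; /* each entry is a (value,index) pair, matching MPI_2INT */
PetscSFReduceBegin(sf,MPI_2INT,leafdata,rootdata,MPI_MAXLOC); /* unpacking on the GPU goes through d_UnpackAndMaxloc */
PetscSFReduceEnd(sf,MPI_2INT,leafdata,rootdata,MPI_MAXLOC);
#endif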
851: template<typename Type,PetscInt BS,PetscInt EQ>
852: static void PackInit_DumbType(PetscSFLink link)
853: {
854: link->d_Pack = Pack<Type,BS,EQ>;
855: link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
856: link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
857: /* Atomics for dumb types are not implemented yet */
858: }
860: /* Some device-specific utilities */
861: static PetscErrorCode PetscSFLinkSyncDevice_CUDA(PetscSFLink link)
862: {
863: cudaError_t cerr;
865: cerr = cudaDeviceSynchronize();CHKERRCUDA(cerr);
866: return(0);
867: }
869: static PetscErrorCode PetscSFLinkSyncStream_CUDA(PetscSFLink link)
870: {
871: cudaError_t cerr;
873: cerr = cudaStreamSynchronize(link->stream);CHKERRCUDA(cerr);
874: return(0);
875: }
877: static PetscErrorCode PetscSFLinkMemcpy_CUDA(PetscSFLink link,PetscMemType dstmtype,void* dst,PetscMemType srcmtype,const void*src,size_t n)
878: {
880: enum cudaMemcpyKind kinds[2][2] = {{cudaMemcpyHostToHost,cudaMemcpyHostToDevice},{cudaMemcpyDeviceToHost,cudaMemcpyDeviceToDevice}};
882: if (n) {
883: if (PetscMemTypeHost(dstmtype) && PetscMemTypeHost(srcmtype)) { /* Separate HostToHost so that pure-cpu code won't call cuda runtime */
884: PetscErrorCode ierr = PetscMemcpy(dst,src,n);CHKERRQ(ierr);
885: } else {
886: int stype = PetscMemTypeDevice(srcmtype) ? 1 : 0;
887: int dtype = PetscMemTypeDevice(dstmtype) ? 1 : 0;
888: cudaError_t cerr = cudaMemcpyAsync(dst,src,n,kinds[stype][dtype],link->stream);CHKERRCUDA(cerr);
889: }
890: }
891: return(0);
892: }
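/* Note: kinds[stype][dtype] above maps the (src,dst) memtypes to a cudaMemcpyKind, e.g., stype=1 (device source)
   and dtype=0 (host destination) selects cudaMemcpyDeviceToHost. */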
894: PetscErrorCode PetscSFMalloc_CUDA(PetscMemType mtype,size_t size,void** ptr)
895: {
897: if (PetscMemTypeHost(mtype)) {PetscErrorCode ierr = PetscMalloc(size,ptr);CHKERRQ(ierr);}
898: else if (PetscMemTypeDevice(mtype)) {
899: if (!PetscCUDAInitialized) {PetscErrorCode ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);}
900: cudaError_t err = cudaMalloc(ptr,size);CHKERRCUDA(err);
901: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d", (int)mtype);
902: return(0);
903: }
905: PetscErrorCode PetscSFFree_CUDA(PetscMemType mtype,void* ptr)
906: {
908: if (PetscMemTypeHost(mtype)) {PetscErrorCode ierr = PetscFree(ptr);CHKERRQ(ierr);}
909: else if (PetscMemTypeDevice(mtype)) {cudaError_t err = cudaFree(ptr);CHKERRCUDA(err);}
910: else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d",(int)mtype);
911: return(0);
912: }
914: /* Destructor when the link uses MPI for communication on CUDA device */
915: static PetscErrorCode PetscSFLinkDestroy_MPI_CUDA(PetscSF sf,PetscSFLink link)
916: {
917: cudaError_t cerr;
920: for (int i=PETSCSF_LOCAL; i<=PETSCSF_REMOTE; i++) {
921: cerr = cudaFree(link->rootbuf_alloc[i][PETSC_MEMTYPE_DEVICE]);CHKERRCUDA(cerr);
922: cerr = cudaFree(link->leafbuf_alloc[i][PETSC_MEMTYPE_DEVICE]);CHKERRCUDA(cerr);
923: }
924: return(0);
925: }
927: /* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what is needed on the device */
928: PetscErrorCode PetscSFLinkSetUp_CUDA(PetscSF sf,PetscSFLink link,MPI_Datatype unit)
929: {
931: cudaError_t cerr;
932: PetscInt nSignedChar=0,nUnsignedChar=0,nInt=0,nPetscInt=0,nPetscReal=0;
933: PetscBool is2Int,is2PetscInt;
934: #if defined(PETSC_HAVE_COMPLEX)
935: PetscInt nPetscComplex=0;
936: #endif
939: if (link->deviceinited) return(0);
940: MPIPetsc_Type_compare_contig(unit,MPI_SIGNED_CHAR, &nSignedChar);
941: MPIPetsc_Type_compare_contig(unit,MPI_UNSIGNED_CHAR,&nUnsignedChar);
942: /* MPI_CHAR is treated below as a dumb type that does not support reduction according to the MPI standard */
943: MPIPetsc_Type_compare_contig(unit,MPI_INT, &nInt);
944: MPIPetsc_Type_compare_contig(unit,MPIU_INT, &nPetscInt);
945: MPIPetsc_Type_compare_contig(unit,MPIU_REAL,&nPetscReal);
946: #if defined(PETSC_HAVE_COMPLEX)
947: MPIPetsc_Type_compare_contig(unit,MPIU_COMPLEX,&nPetscComplex);
948: #endif
949: MPIPetsc_Type_compare(unit,MPI_2INT,&is2Int);
950: MPIPetsc_Type_compare(unit,MPIU_2INT,&is2PetscInt);
952: if (is2Int) {
953: PackInit_PairType<PairInt>(link);
954: } else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
955: PackInit_PairType<PairPetscInt>(link);
956: } else if (nPetscReal) {
957: if (nPetscReal == 8) PackInit_RealType<PetscReal,8,1>(link); else if (nPetscReal%8 == 0) PackInit_RealType<PetscReal,8,0>(link);
958: else if (nPetscReal == 4) PackInit_RealType<PetscReal,4,1>(link); else if (nPetscReal%4 == 0) PackInit_RealType<PetscReal,4,0>(link);
959: else if (nPetscReal == 2) PackInit_RealType<PetscReal,2,1>(link); else if (nPetscReal%2 == 0) PackInit_RealType<PetscReal,2,0>(link);
960: else if (nPetscReal == 1) PackInit_RealType<PetscReal,1,1>(link); else if (nPetscReal%1 == 0) PackInit_RealType<PetscReal,1,0>(link);
961: } else if (nPetscInt && sizeof(PetscInt) == sizeof(llint)) {
962: if (nPetscInt == 8) PackInit_IntegerType<llint,8,1>(link); else if (nPetscInt%8 == 0) PackInit_IntegerType<llint,8,0>(link);
963: else if (nPetscInt == 4) PackInit_IntegerType<llint,4,1>(link); else if (nPetscInt%4 == 0) PackInit_IntegerType<llint,4,0>(link);
964: else if (nPetscInt == 2) PackInit_IntegerType<llint,2,1>(link); else if (nPetscInt%2 == 0) PackInit_IntegerType<llint,2,0>(link);
965: else if (nPetscInt == 1) PackInit_IntegerType<llint,1,1>(link); else if (nPetscInt%1 == 0) PackInit_IntegerType<llint,1,0>(link);
966: } else if (nInt) {
967: if (nInt == 8) PackInit_IntegerType<int,8,1>(link); else if (nInt%8 == 0) PackInit_IntegerType<int,8,0>(link);
968: else if (nInt == 4) PackInit_IntegerType<int,4,1>(link); else if (nInt%4 == 0) PackInit_IntegerType<int,4,0>(link);
969: else if (nInt == 2) PackInit_IntegerType<int,2,1>(link); else if (nInt%2 == 0) PackInit_IntegerType<int,2,0>(link);
970: else if (nInt == 1) PackInit_IntegerType<int,1,1>(link); else if (nInt%1 == 0) PackInit_IntegerType<int,1,0>(link);
971: } else if (nSignedChar) {
972: if (nSignedChar == 8) PackInit_IntegerType<SignedChar,8,1>(link); else if (nSignedChar%8 == 0) PackInit_IntegerType<SignedChar,8,0>(link);
973: else if (nSignedChar == 4) PackInit_IntegerType<SignedChar,4,1>(link); else if (nSignedChar%4 == 0) PackInit_IntegerType<SignedChar,4,0>(link);
974: else if (nSignedChar == 2) PackInit_IntegerType<SignedChar,2,1>(link); else if (nSignedChar%2 == 0) PackInit_IntegerType<SignedChar,2,0>(link);
975: else if (nSignedChar == 1) PackInit_IntegerType<SignedChar,1,1>(link); else if (nSignedChar%1 == 0) PackInit_IntegerType<SignedChar,1,0>(link);
976: } else if (nUnsignedChar) {
977: if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar,8,1>(link); else if (nUnsignedChar%8 == 0) PackInit_IntegerType<UnsignedChar,8,0>(link);
978: else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar,4,1>(link); else if (nUnsignedChar%4 == 0) PackInit_IntegerType<UnsignedChar,4,0>(link);
979: else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar,2,1>(link); else if (nUnsignedChar%2 == 0) PackInit_IntegerType<UnsignedChar,2,0>(link);
980: else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar,1,1>(link); else if (nUnsignedChar%1 == 0) PackInit_IntegerType<UnsignedChar,1,0>(link);
981: #if defined(PETSC_HAVE_COMPLEX)
982: } else if (nPetscComplex) {
983: if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex,8,1>(link); else if (nPetscComplex%8 == 0) PackInit_ComplexType<PetscComplex,8,0>(link);
984: else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex,4,1>(link); else if (nPetscComplex%4 == 0) PackInit_ComplexType<PetscComplex,4,0>(link);
985: else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex,2,1>(link); else if (nPetscComplex%2 == 0) PackInit_ComplexType<PetscComplex,2,0>(link);
986: else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex,1,1>(link); else if (nPetscComplex%1 == 0) PackInit_ComplexType<PetscComplex,1,0>(link);
987: #endif
988: } else {
989: MPI_Aint lb,nbyte;
990: MPI_Type_get_extent(unit,&lb,&nbyte);
991: if (lb != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
992: if (nbyte % sizeof(int)) { /* If the type size is not a multiple of sizeof(int) */
993: if (nbyte == 4) PackInit_DumbType<char,4,1>(link); else if (nbyte%4 == 0) PackInit_DumbType<char,4,0>(link);
994: else if (nbyte == 2) PackInit_DumbType<char,2,1>(link); else if (nbyte%2 == 0) PackInit_DumbType<char,2,0>(link);
995: else if (nbyte == 1) PackInit_DumbType<char,1,1>(link); else if (nbyte%1 == 0) PackInit_DumbType<char,1,0>(link);
996: } else {
997: nInt = nbyte / sizeof(int);
998: if (nInt == 8) PackInit_DumbType<int,8,1>(link); else if (nInt%8 == 0) PackInit_DumbType<int,8,0>(link);
999: else if (nInt == 4) PackInit_DumbType<int,4,1>(link); else if (nInt%4 == 0) PackInit_DumbType<int,4,0>(link);
1000: else if (nInt == 2) PackInit_DumbType<int,2,1>(link); else if (nInt%2 == 0) PackInit_DumbType<int,2,0>(link);
1001: else if (nInt == 1) PackInit_DumbType<int,1,1>(link); else if (nInt%1 == 0) PackInit_DumbType<int,1,0>(link);
1002: }
1003: }
1005: if (!sf->maxResidentThreadsPerGPU) { /* Not initialized */
1006: int device;
1007: struct cudaDeviceProp props;
1008: cerr = cudaGetDevice(&device);CHKERRCUDA(cerr);
1009: cerr = cudaGetDeviceProperties(&props,device);CHKERRCUDA(cerr);
1010: sf->maxResidentThreadsPerGPU = props.maxThreadsPerMultiProcessor*props.multiProcessorCount;
1011: }
1012: link->maxResidentThreadsPerGPU = sf->maxResidentThreadsPerGPU;
1014: link->stream = PetscDefaultCudaStream;
1015: link->Destroy = PetscSFLinkDestroy_MPI_CUDA;
1016: link->SyncDevice = PetscSFLinkSyncDevice_CUDA;
1017: link->SyncStream = PetscSFLinkSyncStream_CUDA;
1018: link->Memcpy = PetscSFLinkMemcpy_CUDA;
1019: link->deviceinited = PETSC_TRUE;
1020: return(0);
1021: }
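/* A worked example of the dispatch above: for a <unit> of 16 PetscReals, nPetscReal=16, so the nPetscReal%8==0
   branch selects PackInit_RealType<PetscReal,8,0>, i.e., bs=16, BS=8, EQ=0, matching the discussion at the top
   of this file. */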