!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".
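!
! Overview: this template implements the ELPA two-stage eigensolver. The
! preprocessor substitutes MATH_DATATYPE and PRECISION to generate the
! real/complex, single/double precision variants. The solver proceeds in
! five steps: reduction of the full matrix to band form (bandred), reduction
! of the band matrix to tridiagonal form (tridiag_band), solution of the
! tridiagonal eigenproblem (solve_tridi), back-transformation of the
! eigenvectors to the band matrix (trans_ev_tridi_to_band), and
! back-transformation to the original full matrix (trans_ev_band_to_full).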
 function elpa_solve_evp_&
  &MATH_DATATYPE&
  &_&
  &2stage_&
  &PRECISION&
  &_impl (obj, na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all,   &
         time_evp_fwd, time_evp_solve, time_evp_back, summary_timings, useGPU, kernel &
#if REALCASE == 1
   , useQR   &
#endif
   &) result(success)

#ifdef HAVE_DETAILED_TIMINGS
   use timings
#else
   use timings_dummy
#endif
   use elpa_api
   use elpa_utilities
   use elpa1_utilities, only : gpu_usage_via_environment_variable
   use elpa1_compute
   use elpa2_compute
   use elpa_mpi
   use cuda_functions
   use mod_check_for_gpu
   use iso_c_binding
   implicit none
   class(elpa_t), intent(in)                 :: obj
   logical, intent(in)                       :: useGPU
#if REALCASE == 1
   logical, intent(in), optional             :: useQR
#endif
   logical                                   :: useQRActual

   integer(kind=c_int)                       :: bandwidth

   integer(kind=c_int)                       :: kernel

   integer(kind=c_int), intent(in)           :: na, nev, lda, ldq, matrixCols, mpi_comm_rows, &
                                                mpi_comm_cols, mpi_comm_all
   integer(kind=c_int), intent(in)           :: nblk

#ifdef USE_ASSUMED_SIZE
   MATH_DATATYPE(kind=C_DATATYPE_KIND), intent(inout) :: a(lda,*), q(ldq,*)
#else
   MATH_DATATYPE(kind=C_DATATYPE_KIND), intent(inout) :: a(lda,matrixCols), q(ldq,matrixCols)
#endif
   real(kind=C_DATATYPE_KIND), intent(inout) :: ev(na)
   MATH_DATATYPE(kind=C_DATATYPE_KIND), allocatable   :: hh_trans(:,:)

   integer(kind=c_int)                       :: my_pe, n_pes, my_prow, my_pcol, np_rows, np_cols, mpierr
   integer(kind=c_int)                       :: l_cols, l_rows, l_cols_nev, nbw, num_blocks
   MATH_DATATYPE(kind=C_DATATYPE_KIND), allocatable :: tmat(:,:,:)
   real(kind=C_DATATYPE_KIND), allocatable   :: e(:)
#if COMPLEXCASE == 1
   real(kind=C_DATATYPE_KIND), allocatable   :: q_real(:,:)
#endif
   real(kind=c_double)                       :: time_evp_fwd, time_evp_solve, time_evp_back
   logical, intent(in)                       :: summary_timings
   integer(kind=c_intptr_t)                  :: tmat_dev, q_dev, a_dev
   real(kind=c_double)                        :: ttt0, ttt1, ttts  ! MPI_WTIME always needs double

   integer(kind=c_int)                       :: i
   logical                                   :: success, successCUDA
   logical                                   :: wantDebug
   integer(kind=c_int)                       :: istat
   character(200)                            :: errorMessage
   logical                                   :: do_useGPU, do_useGPU_trans_ev_tridi
   integer(kind=c_int)                       :: numberOfGPUDevices
   integer(kind=c_intptr_t), parameter       :: size_of_datatype = size_of_&
                                                                   &PRECISION&
                                                                   &_&
                                                                   &MATH_DATATYPE

    call timer%start("solve_evp_&
    &MATH_DATATYPE&
    &_2stage" // &
    &PRECISION_SUFFIX &
    )

    call timer%start("mpi_communication")
    call mpi_comm_rank(mpi_comm_all,my_pe,mpierr)
    call mpi_comm_size(mpi_comm_all,n_pes,mpierr)

    call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
    call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
    call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
    call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)
    call timer%stop("mpi_communication")

    wantDebug = obj%get("debug") == 1
    success = .true.

    do_useGPU      = .false.
    do_useGPU_trans_ev_tridi =.false.


#if REALCASE == 1
    useQRActual = .false.
    ! set usage of qr decomposition via API call
    if (present(useQR)) then
      if (useQR) useQRActual = .true.
      if (.not.(useQR)) useQRActual = .false.
    endif

    if (useQRActual) then
      if (mod(na,2) .ne. 0) then
        if (wantDebug) then
          write(error_unit,*) "solve_evp_real_2stage: QR-decomposition requires an even matrix size na"
        endif
        print *, "Do not use QR-decomposition for this matrix size."
        success = .false.
        return
      endif
    endif
#endif /* REALCASE */

    if (useGPU) then
      if (check_for_gpu(my_pe,numberOfGPUDevices, wantDebug=wantDebug)) then
         do_useGPU = .true.

         ! set the necessary parameters
         cudaMemcpyHostToDevice   = cuda_memcpyHostToDevice()
         cudaMemcpyDeviceToHost   = cuda_memcpyDeviceToHost()
         cudaMemcpyDeviceToDevice = cuda_memcpyDeviceToDevice()
         cudaHostRegisterPortable = cuda_hostRegisterPortable()
         cudaHostRegisterMapped   = cuda_hostRegisterMapped()
      else
        print *,"GPUs are requested but not detected! Aborting..."
        success = .false.
        return
      endif
    else
      ! check whether set by environment variable
      do_useGPU = gpu_usage_via_environment_variable()
      if (do_useGPU) then
        if (check_for_gpu(my_pe,numberOfGPUDevices, wantDebug=wantDebug)) then

           ! set the necessary parameters
           cudaMemcpyHostToDevice   = cuda_memcpyHostToDevice()
           cudaMemcpyDeviceToHost   = cuda_memcpyDeviceToHost()
           cudaMemcpyDeviceToDevice = cuda_memcpyDeviceToDevice()
           cudaHostRegisterPortable = cuda_hostRegisterPortable()
           cudaHostRegisterMapped   = cuda_hostRegisterMapped()
        else
          print *,"GPUs are requested but not detected! Aborting..."
          success = .false.
          return
        endif
      endif
    endif

    ! check consistency between request for GPUs and defined kernel
    if (do_useGPU) then
      if (nblk .ne. 128) then
        ! cannot run on GPU with this blocksize
        ! disable GPU usage for trans_ev_tridi
        do_useGPU_trans_ev_tridi = .false.
      else
        do_useGPU_trans_ev_tridi = .true.
      endif
    endif

    bandwidth = -1
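    ! bandwidth == -1 indicates that the input matrix is not banded, so the
    ! full -> band reduction below is performed; a banded input would skip
    ! that step and use the given bandwidth as nbw.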
    if (bandwidth .ne. -1) then
      nbw = bandwidth

      if ((nbw == 0) .or. (mod(nbw, nblk) .ne. 0)) then
        if (wantDebug) then
          write(error_unit,*) "Specified bandwidth has to be a multiple of blocksize"
        endif
        print *, "Specified bandwidth has to be a multiple of blocksize"
        success = .false.
        return
      endif

      ttts = MPI_Wtime()
    else

      ! Choose the bandwidth nbw. It must be a multiple of nblk; below it is set to the
      ! smallest multiple of nblk that is at least 64 (real case) or 32 (complex case).
      ! On older systems (IBM Bluegene/P, Intel Nehalem) a value of 32 was optimal.
      ! For Intel(R) Xeon(R) E5 v2 and v3, 64 is the better choice.
      ! For IBM Bluegene/Q this is not clear at the moment. We have to keep an eye
      ! on this and maybe allow a run-time optimization here.
      if (do_useGPU) then
        nbw = nblk
      else
#if REALCASE == 1
        nbw = (63/nblk+1)*nblk
#elif COMPLEXCASE == 1
        nbw = (31/nblk+1)*nblk
#endif
      endif

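      ! num_blocks = ceiling(na/nbw): number of block columns of width nbw needed to cover the matrix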
      num_blocks = (na-1)/nbw + 1

      allocate(tmat(nbw,nbw,num_blocks), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_evp_&
	&MATH_DATATYPE&
	&_2stage_&
	&PRECISION&
	&" // ": error when allocating tmat "//errorMessage
        stop 1
      endif

      ! Reduction full -> band
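      ! Step 1: reduce the full matrix a to a band matrix of bandwidth nbw.
      ! The blocked Householder factors are kept in tmat (tmat_dev on the GPU)
      ! for the back-transformation in trans_ev_band_to_full.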

      ttt0 = MPI_Wtime()
      ttts = ttt0
      call bandred_&
      &MATH_DATATYPE&
      &_&
      &PRECISION &
      (na, a, &
       a_dev, lda, nblk, nbw, matrixCols, num_blocks, mpi_comm_rows, mpi_comm_cols, tmat, &
       tmat_dev,  wantDebug, do_useGPU, success &
#if REALCASE == 1
       , useQRActual &
#endif
       )
      if (.not.(success)) return
      ttt1 = MPI_Wtime()
      if (my_prow==0 .and. my_pcol==0 .and. summary_timings) &
        write(error_unit,*) "Time " // "bandred_&
	&MATH_DATATYPE&
	&_&
	&PRECISION " // "               :",ttt1-ttt0

    end if  ! matrix not already banded on input

     ! Reduction band -> tridiagonal
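     ! Step 2: reduce the band matrix to tridiagonal form. The diagonal and
     ! subdiagonal are returned in ev and e; the Householder vectors of this
     ! step are returned in hh_trans for the back-transformation in
     ! trans_ev_tridi_to_band.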

     allocate(e(na), stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_&
       &MATH_DATATYPE&
       &_2stage_&
       &PRECISION " // ": error when allocating e "//errorMessage
       stop 1
     endif

     ttt0 = MPI_Wtime()

     call tridiag_band_&
     &MATH_DATATYPE&
     &_&
     &PRECISION&
     (na, nbw, nblk, a, a_dev, lda, ev, e, matrixCols, hh_trans, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, do_useGPU)

     ttt1 = MPI_Wtime()
     if (my_prow==0 .and. my_pcol==0 .and. summary_timings) &
       write(error_unit,*) "Time " // "tridiag_band_&
       &MATH_DATATYPE&
       &_&
       &PRECISION " // "          :",ttt1-ttt0

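     ! Distribute the diagonal (ev) and subdiagonal (e) of the tridiagonal
     ! matrix to all ranks before the tridiagonal solve.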
#ifdef WITH_MPI
     call timer%start("mpi_communication")
     call mpi_bcast(ev, na, MPI_REAL_PRECISION, 0, mpi_comm_all, mpierr)
     call mpi_bcast(e, na, MPI_REAL_PRECISION, 0, mpi_comm_all, mpierr)
     call timer%stop("mpi_communication")
#endif /* WITH_MPI */
     ttt1 = MPI_Wtime()
     time_evp_fwd = ttt1-ttts

#if COMPLEXCASE == 1
     l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
     l_cols = local_index(na, my_pcol, np_cols, nblk, -1) ! Local columns of q
     l_cols_nev = local_index(nev, my_pcol, np_cols, nblk, -1) ! Local columns corresponding to nev

     allocate(q_real(l_rows,l_cols), stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_&
       &MATH_DATATYPE&
       &_2stage: error when allocating q_real"//errorMessage
       stop 1
     endif
#endif

     ! Solve tridiagonal system
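     ! Step 3: solve the tridiagonal eigenproblem. Eigenvalues are returned in
     ! ev; the eigenvectors of the tridiagonal matrix are returned in q (in the
     ! complex case in the real array q_real, since the tridiagonal matrix is real).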

     ttt0 = MPI_Wtime()
     call solve_tridi_&
     &PRECISION &
     (na, nev, ev, e, &
#if REALCASE == 1
     q, ldq,   &
#endif
#if COMPLEXCASE == 1
     q_real, ubound(q_real,dim=1), &
#endif
     nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug, success)

     if (.not.(success)) return

     ttt1 = MPI_Wtime()
     if (my_prow==0 .and. my_pcol==0 .and. summary_timings) &
     write(error_unit,*) 'Time solve_tridi                :',ttt1-ttt0
     time_evp_solve = ttt1-ttt0
     ttts = ttt1

     deallocate(e, stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_&
       &MATH_DATATYPE&
       &_2stage: error when deallocating e "//errorMessage
       stop 1
     endif

#if COMPLEXCASE == 1
     q(1:l_rows,1:l_cols_nev) = q_real(1:l_rows,1:l_cols_nev)

     deallocate(q_real, stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_&
       &MATH_DATATYPE&
       &_2stage: error when deallocating q_real"//errorMessage
       stop 1
     endif
#endif
     ! Backtransform stage 1
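     ! Step 4: transform the eigenvectors of the tridiagonal matrix back to
     ! eigenvectors of the band matrix, using the Householder vectors in hh_trans.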

     ttt0 = MPI_Wtime()

     call trans_ev_tridi_to_band_&
     &MATH_DATATYPE&
     &_&
     &PRECISION &
     (na, nev, nblk, nbw, q, &
     q_dev, &
     ldq, matrixCols, hh_trans, mpi_comm_rows, mpi_comm_cols, wantDebug, do_useGPU_trans_ev_tridi, &
     summary_timings, success, kernel)

     if (.not.(success)) return
     ttt1 = MPI_Wtime()
     if (my_prow==0 .and. my_pcol==0 .and. summary_timings) &
       write(error_unit,*) "Time " // "trans_ev_tridi_to_band_&
       &MATH_DATATYPE&
       &_&
       &PRECISION " // " :",ttt1-ttt0

     ! We can now deallocate the stored householder vectors
     deallocate(hh_trans, stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *, "solve_evp_&
       &MATH_DATATYPE&
       &_2stage_&
       &PRECISION " // ": error when deallocating hh_trans "//errorMessage
       stop 1
     endif

     if (bandwidth .ne. -1) then
       time_evp_back = ttt1-ttts
     else

       if ( (do_useGPU) .and. .not.(do_useGPU_trans_ev_tridi) ) then
         ! copy to device if we want to continue on GPU
         successCUDA = cuda_malloc(q_dev, ldq*matrixCols*size_of_datatype)

         successCUDA = cuda_memcpy(q_dev, loc(q), ldq*matrixCols* size_of_datatype, cudaMemcpyHostToDevice)
       endif

       ! Backtransform stage 2
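       ! Step 5: transform the eigenvectors of the band matrix back to
       ! eigenvectors of the original full matrix, using the Householder
       ! factors (tmat) from the full -> band reduction.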
       ttt0 = MPI_Wtime()
       call trans_ev_band_to_full_&
       &MATH_DATATYPE&
       &_&
       &PRECISION &
       (na, nev, nblk, nbw, a, &
        a_dev, lda, tmat, tmat_dev,  q,  &
        q_dev, &
        ldq, matrixCols, num_blocks, mpi_comm_rows, mpi_comm_cols, do_useGPU &
#if REALCASE == 1
        , useQRActual  &
#endif
        )

       ttt1 = MPI_Wtime()
       if (my_prow==0 .and. my_pcol==0 .and. summary_timings) &
         write(error_unit,*) "Time " // "trans_ev_band_to_full_&
	 &MATH_DATATYPE&
	 &_&
	 &PRECISION " // " :",ttt1-ttt0

       time_evp_back = ttt1-ttts

       deallocate(tmat, stat=istat, errmsg=errorMessage)
       if (istat .ne. 0) then
         print *,"solve_evp_&
	 &MATH_DATATYPE&
         &_2stage_&
	 &PRECISION " // ": error when deallocating tmat"//errorMessage
         stop 1
       endif
     endif

     call timer%stop("solve_evp_&
     &MATH_DATATYPE&
     &_2stage" // &
     &PRECISION_SUFFIX &
     )
1    format(a,f10.3)

   end function elpa_solve_evp_&
   &MATH_DATATYPE&
   &_2stage_&
   &PRECISION&
   &_impl

! vim: syntax=fortran