!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen ,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".
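!
! elpa_solve_evp_MATH_DATATYPE_2stage_PRECISION_impl
!
! Driver for the ELPA 2-stage eigensolver (brief summary of the interface declared below):
!   obj - ELPA object holding the matrix description (obj%na, obj%nev, obj%nblk, ...) and run-time options
!   a   - locally owned part of the distributed matrix, intent(inout)
!   ev  - on exit, the computed eigenvalues (size obj%na)
!   q   - optional; on exit, the locally owned part of the eigenvector matrix
! The function result "success" is .true. if the solve completed, .false. otherwise.
!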
 function elpa_solve_evp_&
  &MATH_DATATYPE&
  &_&
  &2stage_&
  &PRECISION&
  &_impl (obj, a, ev, q) result(success)

   use elpa_abstract_impl
   use elpa_utilities
   use elpa1_compute
   use elpa2_compute
   use elpa_mpi
   use cuda_functions
   use mod_check_for_gpu
   use iso_c_binding
   implicit none
   class(elpa_abstract_impl_t), intent(inout) :: obj
   logical                                   :: useGPU
#if REALCASE == 1
   logical                                   :: useQR
#endif
   logical                                   :: useQRActual

   integer(kind=c_int)                       :: bandwidth

   integer(kind=c_int)                       :: kernel

#ifdef USE_ASSUMED_SIZE
   MATH_DATATYPE(kind=C_DATATYPE_KIND), intent(inout) :: a(obj%local_nrows,*)
   MATH_DATATYPE(kind=C_DATATYPE_KIND), optional, target, intent(out) :: q(obj%local_nrows,*)
#else
   MATH_DATATYPE(kind=C_DATATYPE_KIND), intent(inout) :: a(obj%local_nrows,obj%local_ncols)
   MATH_DATATYPE(kind=C_DATATYPE_KIND), optional, target, intent(out) :: q(obj%local_nrows,obj%local_ncols)
#endif
   real(kind=C_DATATYPE_KIND), intent(inout) :: ev(obj%na)
   MATH_DATATYPE(kind=C_DATATYPE_KIND), allocatable   :: hh_trans(:,:)

   integer(kind=c_int)                       :: my_pe, n_pes, my_prow, my_pcol, np_rows, np_cols, mpierr
   integer(kind=c_int)                       :: l_cols, l_rows, l_cols_nev, nbw, num_blocks
   MATH_DATATYPE(kind=C_DATATYPE_KIND), allocatable :: tmat(:,:,:)
   real(kind=C_DATATYPE_KIND), allocatable   :: e(:)
#if COMPLEXCASE == 1
   real(kind=C_DATATYPE_KIND), allocatable   :: q_real(:,:)
#endif
   MATH_DATATYPE(kind=C_DATATYPE_KIND), allocatable, target  :: q_dummy(:,:)
   MATH_DATATYPE(kind=C_DATATYPE_KIND), pointer              :: q_actual(:,:)


   integer(kind=c_intptr_t)                  :: tmat_dev, q_dev, a_dev

   integer(kind=c_int)                       :: i
   logical                                   :: success, successCUDA
   logical                                   :: wantDebug
   integer(kind=c_int)                       :: istat, gpu, debug, qr
   character(200)                            :: errorMessage
   logical                                   :: do_useGPU, do_useGPU_trans_ev_tridi
   integer(kind=c_int)                       :: numberOfGPUDevices
   integer(kind=c_intptr_t), parameter       :: size_of_datatype = size_of_&
                                                                   &PRECISION&
                                                                   &_&
                                                                   &MATH_DATATYPE
    integer(kind=ik)                         :: na, nev, lda, ldq, nblk, matrixCols, &
                                                mpi_comm_rows, mpi_comm_cols, mpi_comm_all

    logical                                  :: do_bandred, do_tridiag, do_solve_tridi,  &
                                                do_trans_to_band, do_trans_to_full

    call obj%timer%start("elpa_solve_evp_&
    &MATH_DATATYPE&
    &_2stage_&
    &PRECISION&
    &")

    if (present(q)) then
      obj%eigenvalues_only = .false.
    else
      obj%eigenvalues_only = .true.
    endif

    na         = obj%na
    nev        = obj%nev
    lda        = obj%local_nrows
    ldq        = obj%local_nrows
    nblk       = obj%nblk
    matrixCols = obj%local_ncols

#if REALCASE == 1
    call obj%get("real_kernel",kernel)
    ! check consistency between request for GPUs and defined kernel
    call obj%get("gpu", gpu)
    if (gpu == 1) then
      if (kernel .ne. ELPA_2STAGE_REAL_GPU) then
        write(error_unit,*) "ELPA: Warning, GPU usage has been requested but compute kernel is defined as non-GPU!"
      else if (nblk .ne. 128) then
        kernel = ELPA_2STAGE_REAL_GENERIC
      endif
    endif
#endif

#if COMPLEXCASE == 1
    call obj%get("complex_kernel",kernel)
    ! check consistency between request for GPUs and defined kernel
    call obj%get("gpu", gpu)
    if (gpu == 1) then
      if (kernel .ne. ELPA_2STAGE_COMPLEX_GPU) then
        write(error_unit,*) "ELPA: Warning, GPU usage has been requested but compute kernel is defined as non-GPU!"
      else if (nblk .ne. 128) then
        kernel = ELPA_2STAGE_COMPLEX_GENERIC
      endif
    endif
#endif
    call obj%get("mpi_comm_rows",mpi_comm_rows)
    call obj%get("mpi_comm_cols",mpi_comm_cols)
    call obj%get("mpi_comm_parent",mpi_comm_all)

    call obj%get("gpu",gpu)
    if (gpu .eq. 1) then
      useGPU = .true.
    else
      useGPU = .false.
    endif

#if REALCASE == 1
    call obj%get("qr",qr)
    if (qr .eq. 1) then
      useQR = .true.
    else
      useQR = .false.
    endif

#endif
    call obj%timer%start("mpi_communication")
    call mpi_comm_rank(mpi_comm_all,my_pe,mpierr)
    call mpi_comm_size(mpi_comm_all,n_pes,mpierr)

    call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
    call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
    call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
    call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)
    call obj%timer%stop("mpi_communication")

    call obj%get("debug",debug)
    wantDebug = debug == 1
    success = .true.

    do_useGPU      = .false.
    do_useGPU_trans_ev_tridi = .false.


#if REALCASE == 1
    useQRActual = .false.
    ! set usage of qr decomposition via API call
    if (useQR) useQRActual = .true.
    if (.not.(useQR)) useQRActual = .false.

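    ! QR decomposition in the band reduction is only used if explicitly requested via the
    ! "qr" option; the check below additionally requires an even matrix size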
    if (useQRActual) then
      if (mod(na,2) .ne. 0) then
        if (wantDebug) then
          write(error_unit,*) "solve_evp_real_2stage: QR-decomposition: blocksize does not fit with matrixsize"
        endif
        print *, "Do not use QR-decomposition for this matrix and blocksize."
        success = .false.
        return
      endif
    endif
#endif /* REALCASE */

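    ! If GPU usage was requested, verify that devices are actually available before
    ! committing to the GPU code path; otherwise abort with success = .false.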
    if (useGPU) then
      if (check_for_gpu(my_pe,numberOfGPUDevices, wantDebug=wantDebug)) then

         do_useGPU = .true.

         ! set the necessary parameters
         cudaMemcpyHostToDevice   = cuda_memcpyHostToDevice()
         cudaMemcpyDeviceToHost   = cuda_memcpyDeviceToHost()
         cudaMemcpyDeviceToDevice = cuda_memcpyDeviceToDevice()
         cudaHostRegisterPortable = cuda_hostRegisterPortable()
         cudaHostRegisterMapped   = cuda_hostRegisterMapped()
      else
        print *,"GPUs are requested but not detected! Aborting..."
        success = .false.
        return
      endif
    else
      ! check whether set by environment variable
      call obj%get("gpu",gpu)
      do_useGPU = gpu == 1
      if (do_useGPU) then
        if (check_for_gpu(my_pe,numberOfGPUDevices, wantDebug=wantDebug)) then

           ! set the necessary parameters
           cudaMemcpyHostToDevice   = cuda_memcpyHostToDevice()
           cudaMemcpyDeviceToHost   = cuda_memcpyDeviceToHost()
           cudaMemcpyDeviceToDevice = cuda_memcpyDeviceToDevice()
           cudaHostRegisterPortable = cuda_hostRegisterPortable()
           cudaHostRegisterMapped   = cuda_hostRegisterMapped()
        else
          print *,"GPUs are requested but not detected! Aborting..."
          success = .false.
          return
        endif
      endif
    endif

    ! check consistency between request for GPUs and defined kernel
    if (do_useGPU) then
      if (nblk .ne. 128) then
        ! cannot run on GPU with this blocksize
        ! disable GPU usage for trans_ev_tridi
        do_useGPU_trans_ev_tridi = .false.
      else
        do_useGPU_trans_ev_tridi = .true.
      endif
    endif

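    ! q is an optional argument: when only eigenvalues are requested, point q_actual
    ! at a locally allocated scratch array (q_dummy) instead of the missing q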
    if (.not. obj%eigenvalues_only) then
      q_actual => q(1:obj%local_nrows,1:obj%local_ncols)
    else
      allocate(q_dummy(1:obj%local_nrows,1:obj%local_ncols))
      q_actual => q_dummy(1:obj%local_nrows,1:obj%local_ncols)
    endif


    ! set the default values for each of the 5 compute steps
    do_bandred        = .true.
    do_tridiag        = .true.
    do_solve_tridi    = .true.
    do_trans_to_band  = .true.
    do_trans_to_full  = .true.

    if (obj%eigenvalues_only) then
      do_trans_to_band  = .false.
      do_trans_to_full  = .false.
    endif

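    ! If the user has set the "bandwidth" option, the input matrix is assumed to be
    ! banded already and the full -> band reduction step is skipped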
    if (obj%is_set("bandwidth") == 1) then
      call obj%get("bandwidth",nbw)
      if (nbw == 0) then
        if (wantDebug) then
          write(error_unit,*) "Specified bandwidth = 0; ELPA refuses to solve the eigenvalue problem ", &
                              "for a diagonal matrix! This is too simple"
        endif
        print *, "Specified bandwidth = 0; ELPA refuses to solve the eigenvalue problem ", &
                 "for a diagonal matrix! This is too simple"
        success = .false.
        return
      endif
      if (mod(nbw, nblk) .ne. 0) then
        ! treat matrix with an effective bandwidth slightly bigger than specified bandwidth
        ! such that effective bandwidth is a multiple of nblk, which is a prerequisite for ELPA
        nbw = nblk * ceiling(real(nbw,kind=c_double)/real(nblk,kind=c_double))
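        ! round nbw up to the next multiple of nblk, e.g. a requested bandwidth of 100
        ! with nblk = 32 becomes an effective bandwidth nbw = 128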

        ! just check that effective bandwidth is NOT larger than matrix size
        if (nbw .gt. na) then
          if (wantDebug) then
            write(error_unit,*) "Specified bandwidth ",nbw," leads internally to a computed bandwidth ", &
                                "which is larger than the matrix size ",na," ! ELPA will abort! Try to ", &
                                "solve your problem by not specifying a bandwidth"
          endif
          print *, "Specified bandwidth ",nbw," leads internally to a computed bandwidth ", &
                   "which is larger than the matrix size ",na," ! ELPA will abort! Try to ", &
                   "solve your problem by not specifying a bandwidth"
          success = .false.
          return
        endif
      endif
      do_bandred       = .false. ! we already have a banded matrix
      do_solve_tridi   = .true.  ! we also have to solve something :-)
      do_trans_to_band = .true.  ! and we still have to back-transform to banded form
      do_trans_to_full = .false. ! but not to full since we have a banded matrix
    else ! bandwidth is not set

      ! Choose bandwidth, must be a multiple of nblk, set to a value >= 32
      ! On older systems (IBM Bluegene/P, Intel Nehalem) a value of 32 was optimal.
      ! For Intel(R) Xeon(R) E5 v2 and v3, better use 64 instead of 32!
      ! For IBM Bluegene/Q this is not clear at the moment. We have to keep an eye
      ! on this and maybe allow a run-time optimization here
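      ! Note: with integer division, (63/nblk+1)*nblk is the smallest multiple of nblk
      ! that is >= 64 (e.g. nblk = 16 gives nbw = 64, nblk = 48 gives nbw = 96);
      ! the complex case analogously uses the smallest multiple of nblk >= 32.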
      if (do_useGPU) then
        nbw = nblk
      else
#if REALCASE == 1
        nbw = (63/nblk+1)*nblk
#elif COMPLEXCASE == 1
        nbw = (31/nblk+1)*nblk
#endif
      endif

      num_blocks = (na-1)/nbw + 1

      allocate(tmat(nbw,nbw,num_blocks), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_evp_&
        &MATH_DATATYPE&
        &_2stage_&
        &PRECISION&
        &" // ": error when allocating tmat "//errorMessage
        stop 1
      endif

      do_bandred       = .true.
      do_solve_tridi   = .true.
      do_trans_to_band = .true.
      do_trans_to_full = .true.
    end if  ! matrix not already banded on input

    ! start the computations in 5 steps
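    ! The five steps correspond to the do_* flags set above:
    !   1. bandred:                 reduce the full matrix to band form
    !   2. tridiag_band:            reduce the band matrix to tridiagonal form
    !   3. solve_tridi:             solve the tridiagonal eigenproblem
    !   4. trans_ev_tridi_to_band:  back-transform the eigenvectors to band form
    !   5. trans_ev_band_to_full:   back-transform the eigenvectors to the full matrix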

    if (do_bandred) then
      call obj%timer%start("bandred")
      ! Reduction full -> band
      call bandred_&
      &MATH_DATATYPE&
      &_&
      &PRECISION &
      (obj, na, a, &
      a_dev, lda, nblk, nbw, matrixCols, num_blocks, mpi_comm_rows, mpi_comm_cols, tmat, &
      tmat_dev,  wantDebug, do_useGPU, success &
#if REALCASE == 1
      , useQRActual &
#endif
      )
      call obj%timer%stop("bandred")
      if (.not.(success)) return
    endif


     ! Reduction band -> tridiagonal
     if (do_tridiag) then
       allocate(e(na), stat=istat, errmsg=errorMessage)
       if (istat .ne. 0) then
         print *,"solve_evp_&
         &MATH_DATATYPE&
         &_2stage_&
         &PRECISION " // ": error when allocating e "//errorMessage
         stop 1
       endif

       call obj%timer%start("tridiag")
       call tridiag_band_&
       &MATH_DATATYPE&
       &_&
       &PRECISION&
       (obj, na, nbw, nblk, a, a_dev, lda, ev, e, matrixCols, hh_trans, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, do_useGPU)

#ifdef WITH_MPI
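       ! make the tridiagonal factors consistent on all ranks: broadcast the diagonal (ev)
       ! and the off-diagonal (e) from rank 0 of mpi_comm_all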
       call obj%timer%start("mpi_communication")
       call mpi_bcast(ev, na, MPI_REAL_PRECISION, 0, mpi_comm_all, mpierr)
       call mpi_bcast(e, na, MPI_REAL_PRECISION, 0, mpi_comm_all, mpierr)
       call obj%timer%stop("mpi_communication")
#endif /* WITH_MPI */
       call obj%timer%stop("tridiag")
     endif ! do_tridiag

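     ! In the complex case the tridiagonal eigenproblem is real, so the eigenvectors are
     ! first computed in the real work array q_real and copied back into q after the solve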
#if COMPLEXCASE == 1
     l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
     l_cols = local_index(na, my_pcol, np_cols, nblk, -1) ! Local columns of q
     l_cols_nev = local_index(nev, my_pcol, np_cols, nblk, -1) ! Local columns corresponding to nev

     allocate(q_real(l_rows,l_cols), stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_&
       &MATH_DATATYPE&
       &_2stage: error when allocating q_real"//errorMessage
       stop 1
     endif
#endif

     ! Solve tridiagonal system
     if (do_solve_tridi) then
       call obj%timer%start("solve")
       call solve_tridi_&
       &PRECISION &
       (obj, na, nev, ev, e, &
#if REALCASE == 1
       q_actual, ldq,   &
#endif
#if COMPLEXCASE == 1
       q_real, ubound(q_real,dim=1), &
#endif
       nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug, success)
       call obj%timer%stop("solve")
       if (.not.(success)) return
     endif ! do_solve_tridi

     deallocate(e, stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_&
       &MATH_DATATYPE&
       &_2stage: error when deallocating e "//errorMessage
       stop 1
     endif

     if (obj%eigenvalues_only) then
       return
     endif

     if (do_trans_to_band) then
#if COMPLEXCASE == 1
       ! q must be given, that's why from here on we can use q and not q_actual

       q(1:l_rows,1:l_cols_nev) = q_real(1:l_rows,1:l_cols_nev)

       deallocate(q_real, stat=istat, errmsg=errorMessage)
       if (istat .ne. 0) then
         print *,"solve_evp_&
         &MATH_DATATYPE&
         &_2stage: error when deallocating q_real"//errorMessage
         stop 1
       endif
#endif
       ! Backtransform stage 1
       call obj%timer%start("trans_ev_to_band")

       call trans_ev_tridi_to_band_&
       &MATH_DATATYPE&
       &_&
       &PRECISION &
       (obj, na, nev, nblk, nbw, q, &
       q_dev, &
       ldq, matrixCols, hh_trans, mpi_comm_rows, mpi_comm_cols, wantDebug, do_useGPU_trans_ev_tridi, &
       success=success, kernel=kernel)
       call obj%timer%stop("trans_ev_to_band")

       if (.not.(success)) return

       ! We can now deallocate the stored householder vectors
       deallocate(hh_trans, stat=istat, errmsg=errorMessage)
       if (istat .ne. 0) then
         print *, "solve_evp_&
         &MATH_DATATYPE&
         &_2stage_&
         &PRECISION " // ": error when deallocating hh_trans "//errorMessage
         stop 1
       endif
     endif ! do_trans_to_band

     if (do_trans_to_full) then
       call obj%timer%start("trans_ev_to_full")
       if ( (do_useGPU) .and. .not.(do_useGPU_trans_ev_tridi) ) then
         ! copy to device if we want to continue on GPU
         successCUDA = cuda_malloc(q_dev, ldq*matrixCols*size_of_datatype)

         successCUDA = cuda_memcpy(q_dev, loc(q), ldq*matrixCols* size_of_datatype, cudaMemcpyHostToDevice)
       endif

       ! Backtransform stage 2

       call trans_ev_band_to_full_&
       &MATH_DATATYPE&
       &_&
       &PRECISION &
       (obj, na, nev, nblk, nbw, a, &
       a_dev, lda, tmat, tmat_dev,  q,  &
       q_dev, &
       ldq, matrixCols, num_blocks, mpi_comm_rows, mpi_comm_cols, do_useGPU &
#if REALCASE == 1
       , useQRActual  &
#endif
       )


       deallocate(tmat, stat=istat, errmsg=errorMessage)
       if (istat .ne. 0) then
         print *,"solve_evp_&
         &MATH_DATATYPE&
         &_2stage_&
         &PRECISION " // ": error when deallocating tmat"//errorMessage
         stop 1
       endif
       call obj%timer%stop("trans_ev_to_full")
     endif ! do_trans_to_full

     if (obj%eigenvalues_only) then
       deallocate(q_dummy, stat=istat, errmsg=errorMessage)
       if (istat .ne. 0) then
         print *,"solve_evp_&
         &MATH_DATATYPE&
         &_2stage_&
         &PRECISION&
         &" // ": error when deallocating q_dummy "//errorMessage
         stop 1
       endif
     endif

     call obj%timer%stop("elpa_solve_evp_&
     &MATH_DATATYPE&
     &_2stage_&
     &PRECISION&
     &")
1    format(a,f10.3)

   end function elpa_solve_evp_&
   &MATH_DATATYPE&
   &_2stage_&
   &PRECISION&
   &_impl

! vim: syntax=fortran