!   This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!
!    More information can be found here:
!    http://elpa.rzg.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of version 3 of the GNU Lesser General
!    Public License as published by the Free Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".



! ELPA2 -- 2-stage solver for ELPA
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".


#include "config-f90.h"

module ELPA2

! Version 1.1.2, 2011-02-21

  use elpa_utilities
  use elpa1
  use elpa1_compute
  use elpa2_compute
  use elpa2_utilities
  use elpa_pdgeqrf

  use iso_c_binding

#ifdef WITH_GPU_VERSION
!  use cuda_routines
!  use cuda_c_kernel
!  use iso_c_binding
#endif
  implicit none

  PRIVATE ! By default, all routines contained are private

  ! The following routines are public:

  public :: solve_evp_real_2stage
  public :: solve_evp_complex_2stage

!-------------------------------------------------------------------------------

  ! The following array contains the Householder vectors of the
  ! transformation band -> tridiagonal.
  ! It is allocated and set in tridiag_band_real and used in
  ! trans_ev_tridi_to_band_real.
  ! It must be deallocated by the user after trans_ev_tridi_to_band_real!

!  real*8, allocatable :: hh_trans_real(:,:)
!  complex*16, allocatable :: hh_trans_complex(:,:)

!-------------------------------------------------------------------------------

  include 'mpif.h'


!******
contains

  function solve_evp_real_2stage(na, nev, a, lda, ev, q, ldq, nblk,        &
                                 matrixCols,                               &
                                 mpi_comm_rows, mpi_comm_cols,             &
                                 mpi_comm_all, THIS_REAL_ELPA_KERNEL_API,  &
                                 useQR) result(success)

  !-------------------------------------------------------------------------------
  !  solve_evp_real_2stage: Solves the real eigenvalue problem with a 2 stage approach
  !
  !  Parameters
  !
  !  na          Order of matrix a
  !
  !  nev         Number of eigenvalues needed
  !
  !  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
  !              Distribution is like in ScaLAPACK.
  !              The full matrix must be set (not only one half like in ScaLAPACK).
  !              Destroyed on exit (upper and lower half).
  !
  !  lda         Leading dimension of a
  !  matrixCols  local columns of matrix a and q
  !
  !  ev(na)      On output: eigenvalues of a, every processor gets the complete set
  !
  !  q(ldq,matrixCols)    On output: Eigenvectors of a
  !              Distribution is like in ScaLAPACK.
  !              Must always be dimensioned to the full size (corresponding to (na,na)),
  !              even if only a part of the eigenvalues is needed.
  !
  !  ldq         Leading dimension of q
  !
  !  nblk        blocksize of cyclic distribution, must be the same in both directions!
  !
  !  mpi_comm_rows
  !  mpi_comm_cols
  !              MPI-Communicators for rows/columns
  !  mpi_comm_all
  !              MPI-Communicator for the total processor set
  !
  !-------------------------------------------------------------------------------
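  !
  ! A minimal usage sketch (comments only; it assumes the row/column communicators
  ! have already been created, e.g. with get_elpa_row_col_comms from ELPA1, and that
  ! a, ev and q use the block-cyclic layout described above). The optional arguments
  ! THIS_REAL_ELPA_KERNEL_API (compute kernel for the backtransformation) and useQR
  ! (QR decomposition during the reduction to band form) may simply be omitted:
  !
  !   success = solve_evp_real_2stage(na, nev, a, lda, ev, q, ldq, nblk, &
  !                                   matrixCols, mpi_comm_rows,         &
  !                                   mpi_comm_cols, mpi_comm_all)
  !   if (.not. success) stop 'solve_evp_real_2stage failed'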
#ifdef HAVE_DETAILED_TIMINGS
    use timings
#endif
    use cuda_functions
    use mod_check_for_gpu

    implicit none
    logical, intent(in), optional :: useQR
    logical                       :: useQRActual, useQREnvironment
    integer, intent(in), optional :: THIS_REAL_ELPA_KERNEL_API
    integer                       :: THIS_REAL_ELPA_KERNEL

    integer, intent(in)           :: na, nev, lda, ldq, matrixCols, mpi_comm_rows, &
                                     mpi_comm_cols, mpi_comm_all
    integer, intent(in)           :: nblk
    real*8, intent(inout)         :: a(lda,matrixCols), ev(na), q(ldq,matrixCols)

    integer                       :: my_pe, n_pes, my_prow, my_pcol, np_rows, np_cols, mpierr
    integer                       :: nbw, num_blocks
    real*8, allocatable           :: tmat(:,:,:), e(:)
    real*8                        :: ttt0, ttt1, ttts
    integer                       :: i
    logical                       :: success
    logical, save                 :: firstCall = .true.
    logical                       :: wantDebug
    integer                       :: istat
    character(200)                :: errorMessage
    logical                       :: useGPU
    integer                       :: numberOfGPUDevices

#ifdef HAVE_DETAILED_TIMINGS
    call timer%start("solve_evp_real_2stage")
#endif
    call mpi_comm_rank(mpi_comm_all,my_pe,mpierr)
    call mpi_comm_size(mpi_comm_all,n_pes,mpierr)

    call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
    call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
    call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
    call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)

    wantDebug = .false.
    if (firstCall) then
      ! are debug messages desired?
      wantDebug = debug_messages_via_environment_variable()
      firstCall = .false.
    endif

    success = .true.

    useQRActual = .false.
    useGPU      = .false.

    ! set usage of QR decomposition via API call
    if (present(useQR)) then
      if (useQR)         useQRActual = .true.
      if (.not.(useQR))  useQRActual = .false.
    endif

    ! overwrite this with environment variable settings
    if (qr_decomposition_via_environment_variable(useQREnvironment)) then
      useQRActual = useQREnvironment
    endif

    if (useQRActual) then
      if (mod(na,nblk) .ne. 0) then
        if (wantDebug) then
          write(error_unit,*) "solve_evp_real_2stage: QR-decomposition: matrix size is not a multiple of the block size"
        endif
        print *, "Do not use QR-decomposition for this matrix and blocksize."
        success = .false.
        return
      endif
    endif

    if (present(THIS_REAL_ELPA_KERNEL_API)) then
      ! user defined kernel via the optional argument in the API call
      THIS_REAL_ELPA_KERNEL = THIS_REAL_ELPA_KERNEL_API
    else

      ! if the kernel is not chosen via the API call,
      ! check whether it is set by an environment variable
      THIS_REAL_ELPA_KERNEL = get_actual_real_kernel()
    endif
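    ! (get_actual_real_kernel, presumably provided by elpa2_utilities, checks an
    !  environment variable, typically REAL_ELPA_KERNEL, and otherwise falls back
    !  to the compiled-in default kernel.)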

    ! check whether the chosen kernel is allowed
    if (check_allowed_real_kernels(THIS_REAL_ELPA_KERNEL)) then

      if (my_pe == 0) then
        write(error_unit,*) " "
        write(error_unit,*) "The chosen kernel ",REAL_ELPA_KERNEL_NAMES(THIS_REAL_ELPA_KERNEL)
        write(error_unit,*) "is not in the list of the allowed kernels!"
        write(error_unit,*) " "
        write(error_unit,*) "Allowed kernels are:"
        do i=1,size(REAL_ELPA_KERNEL_NAMES(:))
          if (AVAILABLE_REAL_ELPA_KERNELS(i) .ne. 0) then
            write(error_unit,*) REAL_ELPA_KERNEL_NAMES(i)
          endif
        enddo
        write(error_unit,*) " "
        write(error_unit,*) "The default kernel REAL_ELPA_KERNEL_GENERIC will be used!"
      endif
      THIS_REAL_ELPA_KERNEL = REAL_ELPA_KERNEL_GENERIC
    endif

    if (THIS_REAL_ELPA_KERNEL .eq. REAL_ELPA_KERNEL_GPU) then
      if (check_for_gpu(my_pe,numberOfGPUDevices)) then
        useGPU = .true.
      endif
      if (nblk .ne. 128) then
        print *,"At the moment the GPU version requires blocksize 128"
        stop
      endif

      ! set the necessary parameters
      cudaMemcpyHostToDevice   = cuda_memcpyHostToDevice()
      cudaMemcpyDeviceToHost   = cuda_memcpyDeviceToHost()
      cudaMemcpyDeviceToDevice = cuda_memcpyDeviceToDevice()
      cudaHostRegisterPortable = cuda_hostRegisterPortable()
      cudaHostRegisterMapped   = cuda_hostRegisterMapped()
    endif

    ! Choose bandwidth, must be a multiple of nblk, set to a value >= 32
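    ! (In the non-GPU case the formula below uses integer division, e.g.
    !  nblk = 8 gives nbw = (31/8+1)*8 = 32 and nblk = 64 gives nbw = 64.)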
    if (useGPU) then
      nbw = nblk
    else
      nbw = (31/nblk+1)*nblk
    endif

    num_blocks = (na-1)/nbw + 1
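    ! num_blocks = ceiling(na/nbw): number of block columns of width nbw; it
    ! dimensions tmat, which is filled by bandred_real below and reused in
    ! trans_ev_band_to_full_real.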

    allocate(tmat(nbw,nbw,num_blocks), stat=istat, errmsg=errorMessage)
    if (istat .ne. 0) then
      print *,"solve_evp_real_2stage: error when allocating tmat "//errorMessage
      stop
    endif

    ! Reduction full -> band

    ttt0 = MPI_Wtime()
    ttts = ttt0
    call bandred_real(na, a, lda, nblk, nbw, matrixCols, num_blocks, mpi_comm_rows, mpi_comm_cols, &
                      tmat, wantDebug, useGPU, success, useQRActual)
    if (.not.(success)) return
    ttt1 = MPI_Wtime()
    if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
       write(error_unit,*) 'Time bandred_real               :',ttt1-ttt0

     ! Reduction band -> tridiagonal

     allocate(e(na), stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_real_2stage: error when allocating e "//errorMessage
       stop
     endif

     ttt0 = MPI_Wtime()
     call tridiag_band_real(na, nbw, nblk, a, lda, ev, e, matrixCols, mpi_comm_rows, &
                            mpi_comm_cols, mpi_comm_all)
     ttt1 = MPI_Wtime()
     if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
       write(error_unit,*) 'Time tridiag_band_real          :',ttt1-ttt0

     call mpi_bcast(ev,na,MPI_REAL8,0,mpi_comm_all,mpierr)
     call mpi_bcast(e,na,MPI_REAL8,0,mpi_comm_all,mpierr)

     ttt1 = MPI_Wtime()
     time_evp_fwd = ttt1-ttts

     ! Solve tridiagonal system

     ttt0 = MPI_Wtime()
     call solve_tridi(na, nev, ev, e, q, ldq, nblk, matrixCols, mpi_comm_rows,  &
                      mpi_comm_cols, wantDebug, success)
     if (.not.(success)) return

     ttt1 = MPI_Wtime()
     if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
     write(error_unit,*) 'Time solve_tridi                :',ttt1-ttt0
     time_evp_solve = ttt1-ttt0
     ttts = ttt1

     deallocate(e, stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_real_2stage: error when deallocating e "//errorMessage
       stop
     endif
     ! Backtransform stage 1

     ttt0 = MPI_Wtime()

     call trans_ev_tridi_to_band_real(na, nev, nblk, nbw, q, ldq, matrixCols, mpi_comm_rows, &
                                      mpi_comm_cols, wantDebug, useGPU, success, THIS_REAL_ELPA_KERNEL)
     if (.not.(success)) return
     ttt1 = MPI_Wtime()
     if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
       write(error_unit,*) 'Time trans_ev_tridi_to_band_real:',ttt1-ttt0

     ! We can now deallocate the stored householder vectors
     deallocate(hh_trans_real, stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_real_2stage: error when deallocating hh_trans_real "//errorMessage
       stop
     endif

     ! Backtransform stage 2
     print *,"useGPU== ",useGPU
     ttt0 = MPI_Wtime()
     call trans_ev_band_to_full_real(na, nev, nblk, nbw, a, lda, tmat, q, ldq, matrixCols, num_blocks, mpi_comm_rows, &
                                     mpi_comm_cols, useGPU, useQRActual)
     ttt1 = MPI_Wtime()
     if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
       write(error_unit,*) 'Time trans_ev_band_to_full_real :',ttt1-ttt0
     time_evp_back = ttt1-ttts

     deallocate(tmat, stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_evp_real_2stage: error when deallocating tmat"//errorMessage
       stop
     endif

#ifdef HAVE_DETAILED_TIMINGS
     call timer%stop("solve_evp_real_2stage")
#endif
1    format(a,f10.3)

   end function solve_evp_real_2stage

!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------

  function solve_evp_complex_2stage(na, nev, a, lda, ev, q, ldq, nblk,         &
                                    matrixCols, mpi_comm_rows, mpi_comm_cols,  &
                                    mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API) result(success)

  !-------------------------------------------------------------------------------
  !  solve_evp_complex_2stage: Solves the complex eigenvalue problem with a 2 stage approach
  !
  !  Parameters
  !
  !  na          Order of matrix a
  !
  !  nev         Number of eigenvalues needed
  !
  !  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
  !              Distribution is like in ScaLAPACK.
  !              The full matrix must be set (not only one half like in ScaLAPACK).
  !              Destroyed on exit (upper and lower half).
  !
  !  lda         Leading dimension of a
  !  matrixCols  local columns of matrix a and q
  !
  !  ev(na)      On output: eigenvalues of a, every processor gets the complete set
  !
  !  q(ldq,matrixCols)    On output: Eigenvectors of a
  !              Distribution is like in ScaLAPACK.
  !              Must always be dimensioned to the full size (corresponding to (na,na)),
  !              even if only a part of the eigenvalues is needed.
  !
  !  ldq         Leading dimension of q
  !
  !  nblk        blocksize of cyclic distribution, must be the same in both directions!
  !
  !  mpi_comm_rows
  !  mpi_comm_cols
  !              MPI-Communicators for rows/columns
  !  mpi_comm_all
  !              MPI-Communicator for the total processor set
  !
  !-------------------------------------------------------------------------------
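  !
  ! Usage mirrors solve_evp_real_2stage above, with complex*16 matrices a and q;
  ! the eigenvalues ev are still returned as real*8, and there is no useQR option.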
#ifdef HAVE_DETAILED_TIMINGS
    use timings
#endif
    use cuda_functions
    use mod_check_for_gpu
    implicit none
    integer, intent(in), optional :: THIS_COMPLEX_ELPA_KERNEL_API
    integer                       :: THIS_COMPLEX_ELPA_KERNEL
    integer, intent(in)           :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all
    complex*16, intent(inout)     :: a(lda,matrixCols), q(ldq,matrixCols)
    real*8, intent(inout)         :: ev(na)

    integer                       :: my_prow, my_pcol, np_rows, np_cols, mpierr, my_pe, n_pes
    integer                       :: l_cols, l_rows, l_cols_nev, nbw, num_blocks
    complex*16, allocatable       :: tmat(:,:,:)
    real*8, allocatable           :: q_real(:,:), e(:)
    real*8                        :: ttt0, ttt1, ttts
    integer                       :: i

    logical                       :: success, wantDebug
    logical, save                 :: firstCall = .true.
    integer                       :: istat
    character(200)                :: errorMessage
    logical                       :: useGPU
    integer                       :: numberOfGPUDevices

#ifdef HAVE_DETAILED_TIMINGS
    call timer%start("solve_evp_complex_2stage")
#endif
    call mpi_comm_rank(mpi_comm_all,my_pe,mpierr)
    call mpi_comm_size(mpi_comm_all,n_pes,mpierr)

    call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
    call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
    call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
    call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)

    useGPU = .false.
    wantDebug = .false.
    if (firstCall) then
      ! are debug messages desired?
      wantDebug = debug_messages_via_environment_variable()
      firstCall = .false.
    endif

    success = .true.

    if (present(THIS_COMPLEX_ELPA_KERNEL_API)) then
      ! user defined kernel via the optional argument in the API call
      THIS_COMPLEX_ELPA_KERNEL = THIS_COMPLEX_ELPA_KERNEL_API
    else
      ! if the kernel is not chosen via the API call,
      ! check whether it is set by an environment variable
      THIS_COMPLEX_ELPA_KERNEL = get_actual_complex_kernel()
    endif

    ! check whether the chosen kernel is allowed
    if (check_allowed_complex_kernels(THIS_COMPLEX_ELPA_KERNEL)) then

      if (my_pe == 0) then
        write(error_unit,*) " "
        write(error_unit,*) "The chosen kernel ",COMPLEX_ELPA_KERNEL_NAMES(THIS_COMPLEX_ELPA_KERNEL)
        write(error_unit,*) "is not in the list of the allowed kernels!"
        write(error_unit,*) " "
        write(error_unit,*) "Allowed kernels are:"
        do i=1,size(COMPLEX_ELPA_KERNEL_NAMES(:))
          if (AVAILABLE_COMPLEX_ELPA_KERNELS(i) .ne. 0) then
            write(error_unit,*) COMPLEX_ELPA_KERNEL_NAMES(i)
          endif
        enddo

        write(error_unit,*) " "
        write(error_unit,*) "The default kernel COMPLEX_ELPA_KERNEL_GENERIC will be used!"
      endif
      THIS_COMPLEX_ELPA_KERNEL = COMPLEX_ELPA_KERNEL_GENERIC
    endif

    if (THIS_COMPLEX_ELPA_KERNEL .eq. COMPLEX_ELPA_KERNEL_GPU) then
      if (check_for_gpu(my_pe, numberOfGPUDevices)) then
        useGPU=.true.
      endif
      if (nblk .ne. 128) then
        print *,"At the moment the GPU version requires blocksize 128"
        stop
      endif

      ! set the necessary parameters
      cudaMemcpyHostToDevice   = cuda_memcpyHostToDevice()
      cudaMemcpyDeviceToHost   = cuda_memcpyDeviceToHost()
      cudaMemcpyDeviceToDevice = cuda_memcpyDeviceToDevice()
      cudaHostRegisterPortable = cuda_hostRegisterPortable()
      cudaHostRegisterMapped   = cuda_hostRegisterMapped()
    endif

    ! Choose bandwidth, must be a multiple of nblk, set to a value >= 32

    nbw = (31/nblk+1)*nblk

    num_blocks = (na-1)/nbw + 1

    allocate(tmat(nbw,nbw,num_blocks), stat=istat, errmsg=errorMessage)
    if (istat .ne. 0) then
      print *,"solve_evp_complex_2stage: error when allocating tmat"//errorMessage
      stop
    endif
    ! Reduction full -> band

    ttt0 = MPI_Wtime()
    ttts = ttt0
    call bandred_complex(na, a, lda, nblk, nbw, matrixCols, num_blocks, mpi_comm_rows, mpi_comm_cols, &
                         tmat, wantDebug, useGPU, success)
    if (.not.(success)) then
#ifdef HAVE_DETAILED_TIMINGS
      call timer%stop()
#endif
      return
    endif
    ttt1 = MPI_Wtime()
    if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
       write(error_unit,*) 'Time bandred_complex               :',ttt1-ttt0

    ! Reduction band -> tridiagonal

    allocate(e(na), stat=istat, errmsg=errorMessage)
    if (istat .ne. 0) then
      print *,"solve_evp_complex_2stage: error when allocating e"//errorMessage
      stop
    endif

    ttt0 = MPI_Wtime()
    call tridiag_band_complex(na, nbw, nblk, a, lda, ev, e, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all)
    ttt1 = MPI_Wtime()
    if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
       write(error_unit,*) 'Time tridiag_band_complex          :',ttt1-ttt0

    call mpi_bcast(ev,na,MPI_REAL8,0,mpi_comm_all,mpierr)
    call mpi_bcast(e,na,MPI_REAL8,0,mpi_comm_all,mpierr)

    ttt1 = MPI_Wtime()
    time_evp_fwd = ttt1-ttts

    l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
    l_cols = local_index(na, my_pcol, np_cols, nblk, -1) ! Local columns of q
    l_cols_nev = local_index(nev, my_pcol, np_cols, nblk, -1) ! Local columns corresponding to nev

    allocate(q_real(l_rows,l_cols), stat=istat, errmsg=errorMessage)
    if (istat .ne. 0) then
      print *,"solve_evp_complex_2stage: error when allocating q_real"//errorMessage
      stop
    endif

    ! Solve tridiagonal system

    ttt0 = MPI_Wtime()
    call solve_tridi(na, nev, ev, e, q_real, ubound(q_real,dim=1), nblk, matrixCols, &
                     mpi_comm_rows, mpi_comm_cols, wantDebug, success)
    if (.not.(success)) return

    ttt1 = MPI_Wtime()
    if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times)  &
       write(error_unit,*) 'Time solve_tridi                   :',ttt1-ttt0
    time_evp_solve = ttt1-ttt0
    ttts = ttt1

    q(1:l_rows,1:l_cols_nev) = q_real(1:l_rows,1:l_cols_nev)

    deallocate(e, q_real, stat=istat, errmsg=errorMessage)
    if (istat .ne. 0) then
      print *,"solve_evp_complex_2stage: error when deallocating e, q_real"//errorMessage
      stop
    endif

    ! Backtransform stage 1

    ttt0 = MPI_Wtime()
    call trans_ev_tridi_to_band_complex(na, nev, nblk, nbw, q, ldq,  &
                                        matrixCols, mpi_comm_rows, mpi_comm_cols,&
                                        wantDebug, useGPU, success,THIS_COMPLEX_ELPA_KERNEL)
    if (.not.(success)) return
    ttt1 = MPI_Wtime()
    if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
       write(error_unit,*) 'Time trans_ev_tridi_to_band_complex:',ttt1-ttt0

    ! We can now deallocate the stored householder vectors
    deallocate(hh_trans_complex, stat=istat, errmsg=errorMessage)
    if (istat .ne. 0) then
      print *,"solve_evp_complex_2stage: error when deallocating hh_trans_complex"//errorMessage
      stop
    endif

    ! Backtransform stage 2

    ttt0 = MPI_Wtime()
    call trans_ev_band_to_full_complex(na, nev, nblk, nbw, a, lda, tmat, q, ldq, matrixCols, num_blocks, mpi_comm_rows, &
                                       mpi_comm_cols, useGPU)
    ttt1 = MPI_Wtime()
    if (my_prow==0 .and. my_pcol==0 .and. elpa_print_times) &
       write(error_unit,*) 'Time trans_ev_band_to_full_complex :',ttt1-ttt0
    time_evp_back = ttt1-ttts

    deallocate(tmat, stat=istat, errmsg=errorMessage)
    if (istat .ne. 0) then
      print *,"solve_evp_complex_2stage: error when deallocating tmat "//errorMessage
      stop
    endif

#ifdef HAVE_DETAILED_TIMINGS
    call timer%stop("solve_evp_complex_2stage")
#endif

1   format(a,f10.3)

  end function solve_evp_complex_2stage

!-------------------------------------------------------------------------------

end module ELPA2