#if 0
!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen ,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".



! ELPA2 -- 2-stage solver for ELPA
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".
#endif


    subroutine bandred_complex_PRECISION(na, a, lda, nblk, nbw, matrixCols, numBlocks,  &
                           mpi_comm_rows, mpi_comm_cols, tmat, wantDebug, useGPU, success)
      !-------------------------------------------------------------------------------
      !  bandred_complex: Reduces a distributed Hermitian matrix to band form
      !
      !  Parameters
      !
      !  na          Order of matrix
      !
      !  a(lda,matrixCols)    Distributed matrix which should be reduced.
      !              Distribution is like in ScaLAPACK.
      !              Unlike ScaLAPACK, a(:,:) must be set completely (upper and lower half);
      !              a(:,:) is overwritten on exit with the band and the Householder vectors
      !              in the upper half.
      !
      !  lda         Leading dimension of a
      !  matrixCols  local columns of matrix a
      !
      !  nblk        blocksize of cyclic distribution, must be the same in both directions!
      !
      !  nbw         semi bandwidth of the output matrix
      !
      !  mpi_comm_rows
      !  mpi_comm_cols
      !              MPI-Communicators for rows/columns
      !
      !  tmat(nbw,nbw,numBlocks)    where numBlocks = (na-1)/nbw + 1
      !              Factors for the Householder vectors (returned), needed for back transformation
      !
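      !  Illustrative call (a minimal sketch; the caller-side error handling shown is an
      !  assumption, only the argument list is taken from this routine):
      !
      !    call bandred_complex_PRECISION(na, a, lda, nblk, nbw, matrixCols, numBlocks, &
      !                                   mpi_comm_rows, mpi_comm_cols, tmat, wantDebug, &
      !                                   useGPU, success)
      !    if (.not. success) then
      !      ! e.g. nbw was not a multiple of nblk
      !    endif
      !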
      !-------------------------------------------------------------------------------
#ifdef HAVE_DETAILED_TIMINGS
      use timings
#else
      use timings_dummy
#endif
      use precision
      use cuda_functions
      use iso_c_binding

      implicit none

      logical, intent(in)                         :: useGPU

      integer(kind=ik)                            :: na, lda, nblk, nbw, matrixCols, numBlocks, mpi_comm_rows, mpi_comm_cols
#ifdef USE_ASSUMED_SIZE
      complex(kind=COMPLEX_DATATYPE)              :: a(lda,*), tmat(nbw,nbw,*)
#else
      complex(kind=COMPLEX_DATATYPE)              :: a(lda,matrixCols), tmat(nbw,nbw,numBlocks)
#endif

#ifdef DOUBLE_PRECISION_COMPLEX
      complex(kind=COMPLEX_DATATYPE), parameter   :: CZERO = (0.0_rk8, 0.0_rk8), CONE = (1.0_rk8, 0.0_rk8)
#else
      complex(kind=COMPLEX_DATATYPE), parameter   :: CZERO = (0.0_rk4, 0.0_rk4), CONE = (1.0_rk4, 0.0_rk4)
#endif

      integer(kind=ik)                            :: my_prow, my_pcol, np_rows, np_cols, mpierr
      integer(kind=ik)                            :: l_cols, l_rows
      integer(kind=ik)                            :: i, j, lcs, lce, lre, lc, lr, cur_pcol, n_cols, nrow
      integer(kind=ik)                            :: istep, ncol, lch, lcx, nlc
      integer(kind=ik)                            :: tile_size, l_rows_tile, l_cols_tile

      real(kind=REAL_DATATYPE)                    :: vnorm2
      complex(kind=COMPLEX_DATATYPE)              :: xf, aux1(nbw), aux2(nbw), vrl, tau, vav(nbw,nbw)

      complex(kind=COMPLEX_DATATYPE), allocatable :: tmp(:,:), vr(:), vmr(:,:), umc(:,:)
      integer(kind=c_intptr_t)                    :: umc_dev, tmat_dev,vav_dev,vmr_dev,a_dev
      integer(kind=ik)                            :: cur_l_rows, cur_l_cols,vmr_size ,umc_size
      integer(kind=c_size_t)                      :: lc_start, lc_end, lr_end, lce_1, lcs_1,lre_1
      integer(kind=ik)                            :: na_rows, na_cols
#ifdef WITH_MPI
      integer(kind=ik), external                  :: numroc
#endif

      logical, intent(in)                         :: wantDebug
      logical, intent(out)                        :: success
      character(200)                              :: errorMessage
      integer(kind=ik)                            :: istat
      logical                                     :: successCUDA

      call timer%start("bandred_complex" // PRECISION_SUFFIX)
      call timer%start("mpi_communication")
      call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
      call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
      call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
      call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)
      call timer%stop("mpi_communication")

      success = .true.

      ! Semibandwidth nbw must be a multiple of blocksize nblk

      if (mod(nbw,nblk)/=0) then
        if (my_prow==0 .and. my_pcol==0) then
          if (wantDebug) then
            write(error_unit,*) 'ELPA2_bandred_complex: ERROR: nbw=',nbw,', nblk=',nblk
            write(error_unit,*) 'ELPA2_bandred_complex: ELPA2 works only for nbw==n*nblk'
          endif
          success = .false.
          return
        endif
      endif
      if (useGPU) then
#ifdef WITH_MPI
        na_rows = numroc(na, nblk, my_prow, 0, np_rows)
        na_cols = numroc(na, nblk, my_pcol, 0, np_cols)
#else
        na_rows = na
        na_cols = na
#endif

        successCUDA = cuda_malloc(tmat_dev, nbw*nbw*size_of_PRECISION_complex)
        if (.not.(successCUDA)) then
          print *, " bandred_complex: cuda malloc failed tmat_dev ", istat
          stop
        endif

        successCUDA = cuda_malloc(vav_dev, nbw*nbw*size_of_PRECISION_complex)
        if (.not.(successCUDA)) then
          print *, "bandred_complex:  cuda malloc failed vav_dev ", istat
          stop
        endif

        successCUDA = cuda_malloc(a_dev, lda*na_cols*size_of_PRECISION_complex)
        if (.not.(successCUDA)) then
          print *, "bandred_complex:  cuda malloc failed a_dev ", istat
          stop
        endif
      endif ! useGPU

      ! Matrix is split into tiles; work is done only for tiles on the diagonal or above

      tile_size = nblk*least_common_multiple(np_rows,np_cols) ! minimum global tile size
      tile_size = ((128*max(np_rows,np_cols)-1)/tile_size+1)*tile_size ! make local tiles at least 128 wide

      l_rows_tile = tile_size/np_rows ! local rows of a tile
      l_cols_tile = tile_size/np_cols ! local cols of a tile
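
      ! Worked example of the tile sizing above (illustrative numbers only): with np_rows=4,
      ! np_cols=3 and nblk=32, least_common_multiple(4,3)=12 gives tile_size = 32*12 = 384,
      ! which is then rounded up to the next multiple of 384 covering 128*max(4,3) = 512 rows,
      ! i.e. tile_size = 768; hence l_rows_tile = 768/4 = 192 and l_cols_tile = 768/3 = 256.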

      if (useGPU) then
#if !defined(USE_ASSUMED_SIZE)
        if (size(a,dim=1) .ne. lda .or. size(a,dim=2) .ne. na_cols) then
          print *,"bandred_complex: sizes of a wrong ? ",lda,size(a,dim=1),na_cols,size(a,dim=2)
        endif
#endif

        successCUDA = cuda_memcpy(a_dev, loc(a(1,1)),(lda)*(na_cols)*size_of_PRECISION_complex,cudaMemcpyHostToDevice)
        if (.not.(successCUDA)) then
          print *, "bandred_complex:  cuda memcpy faild a_dev ", istat
          stop
        endif
      endif

      do istep = (na-1)/nbw, 1, -1

        n_cols = MIN(na,(istep+1)*nbw) - istep*nbw ! Number of columns in current step

        ! Number of local columns/rows of remaining matrix
        l_cols = local_index(istep*nbw, my_pcol, np_cols, nblk, -1)
        l_rows = local_index(istep*nbw, my_prow, np_rows, nblk, -1)

        ! Allocate vmr and umc to their exact sizes so that they can be used in bcasts and reduces

        if (useGPU) then
          cur_l_rows = max(l_rows, 1)
          cur_l_cols = max(l_cols, 1)

          vmr_size = cur_l_rows * 2 * n_cols
          umc_size = cur_l_cols * 2 * n_cols

          if ((.not. allocated(umc)) .or. (umc_size .gt. ubound(umc, dim=1))) then
            if (allocated(umc)) then
              deallocate(umc, stat=istat, errmsg=errorMessage)
              if (istat .ne. 0) then
                print *,"bandred_complex: error when allocating umc "//errorMessage
                stop
              endif
              successCUDA = cuda_free(umc_dev)
              if (.not.(successCUDA))then
                print *,"bandred_complex: error in cudaFree"
                stop
              endif
            endif

            allocate(umc(max(l_cols,1),2*n_cols), stat=istat, errmsg=errorMessage)
            if (istat .ne. 0) then
              print *,"bandred_complex: error when allocating umc "//errorMessage
              stop
            endif

            if (max(l_cols,1) * 2*n_cols .gt. umc_size) then
              print *,"bandred_complex: umc_size ",max(l_cols,1) * 2*n_cols,umc_size
            endif
            successCUDA = cuda_malloc(umc_dev, umc_size*size_of_PRECISION_complex)
            if (.not.(successCUDA)) then
              print *, "bandred_complex:  cuda malloc failed umc_dev ", istat
              stop
            endif
          endif

          if ((.not. allocated(vmr)) .or. (vmr_size .gt. ubound(vmr, dim=1))) then
            if (allocated(vmr)) then
              deallocate(vmr, stat=istat, errmsg=errorMessage)
              if (istat .ne. 0) then
                print *,"bandred_complex: error when deallocating vmr "//errorMessage
                stop
              endif
              successCUDA = cuda_free(vmr_dev)
              if (.not.(successCUDA))then
                print *,"bandred_complex: error in cudaFree"
                stop
              endif
            endif

            allocate(vmr(max(l_rows,1),2*n_cols), stat=istat, errmsg=errorMessage)
            if (istat .ne. 0) then
              print *,"bandred_complex: error when allocating vmr "//errorMessage
              stop
            endif

            if (max(l_rows,1) * 2*n_cols .gt. vmr_size) then
              print *,"bandred_complex: vmc_size ",max(l_rows,1) * 2*n_cols,vmr_size
            endif

            successCUDA = cuda_malloc(vmr_dev, vmr_size*size_of_PRECISION_complex)
            if (.not.(successCUDA)) then
              print *, "bandred_complex:  cuda malloc failed vmr_dev ", istat
              stop
            endif

          endif

          if ((.not. allocated(vr)) .or. (l_rows + 1 .gt. ubound(vr, dim=1))) then
            if (allocated(vr)) then
              deallocate(vr, stat=istat, errmsg=errorMessage)
              if (istat .ne. 0) then
                print *,"bandred_complex: error when deallocating vr "//errorMessage
                stop
              endif
            endif

            allocate(vr(l_rows + 1), stat=istat, errmsg=errorMessage)
            if (istat .ne. 0) then
              print *,"bandred_complex: error when allocating vr "//errorMessage
              stop
            endif
          endif

        else ! GPU not used
          allocate(vmr(max(l_rows,1),2*n_cols), stat=istat, errmsg=errorMessage)
          if (istat .ne. 0) then
            print *,"bandred_complex: error when allocating vmr "//errorMessage
            stop
          endif

          allocate(umc(max(l_cols,1),2*n_cols), stat=istat, errmsg=errorMessage)
          if (istat .ne. 0) then
            print *,"bandred_complex: error when allocating umc "//errorMessage
            stop
          endif

          allocate(vr(l_rows+1), stat=istat, errmsg=errorMessage)
          if (istat .ne. 0) then
            print *,"bandred_complex: error when allocating vr "//errorMessage
            stop
          endif
        endif ! useGPU

        vmr(1:l_rows,1:n_cols) = CONST_COMPLEX_0_0
        vr(:) = CONST_COMPLEX_0_0
        tmat(:,:,istep) = CONST_COMPLEX_0_0

        if (useGPU) then
          lc_start = local_index(istep*nbw+1, my_pcol, np_cols, nblk, -1)
          lc_end   = local_index(istep*nbw+n_cols, my_pcol, np_cols, nblk, -1)
          lr_end   = local_index((istep-1)*nbw + n_cols, my_prow, np_rows, nblk, -1)

          if (lc_start .le. 0) lc_start = 1
          cur_pcol = pcol(istep*nbw+1, nblk, np_cols)
          if (my_pcol == cur_pcol) then
            successCUDA = cuda_memcpy2d(loc(a(1, lc_start)), int(lda*size_of_PRECISION_complex,kind=c_size_t),            &
                                        (a_dev + int( ( (lc_start-1) * lda*size_of_PRECISION_complex),kind=c_size_t )),      &
                                        int(lda*size_of_PRECISION_complex,kind=c_size_t),              &
                                    int(lr_end*size_of_PRECISION_complex,kind=c_size_t),               &
                                      int((lc_end - lc_start+1),kind=c_size_t),int(cudaMemcpyDeviceToHost,kind=c_int))
            if (.not.(successCUDA)) then
              print *, "bandred_complex: error in cudaMemcpy2"
              stop
            endif
          endif
        endif

        ! Reduce current block to lower triangular form

        do lc = n_cols, 1, -1

          ncol = istep*nbw + lc ! absolute column number of householder vector
          nrow = ncol - nbw ! Absolute number of pivot row

          lr  = local_index(nrow, my_prow, np_rows, nblk, -1) ! current row length
          lch = local_index(ncol, my_pcol, np_cols, nblk, -1) ! HV local column number

          tau = 0

          if(nrow == 1) exit ! Nothing to do

          cur_pcol = pcol(ncol, nblk, np_cols) ! Processor column owning current block

          if (my_pcol==cur_pcol) then

            ! Get vector to be transformed; distribute last element and norm of
            ! remaining elements to all procs in current column

            vr(1:lr) = a(1:lr,lch) ! vector to be transformed

            if (my_prow==prow(nrow, nblk, np_rows)) then
              aux1(1) = dot_product(vr(1:lr-1),vr(1:lr-1))
              aux1(2) = vr(lr)
            else
              aux1(1) = dot_product(vr(1:lr),vr(1:lr))
              aux1(2) = CONST_COMPLEX_0_0
            endif
#ifdef WITH_MPI
            call timer%start("mpi_communication")
            call mpi_allreduce(aux1, aux2, 2, MPI_COMPLEX_PRECISION, MPI_SUM, mpi_comm_rows, mpierr)
            call timer%stop("mpi_communication")

#else /* WITH_MPI */
            aux2 = aux1
#endif /* WITH_MPI */
            vnorm2 = aux2(1)
            vrl    = aux2(2)

            ! Householder transformation
            call hh_transform_complex_PRECISION(vrl, vnorm2, xf, tau)
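            ! On return vrl has been overwritten with the resulting band element,
            ! xf is the scaling factor for the remaining vector entries and tau is
            ! the Householder factor; all three are used directly below.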
            ! Scale vr and store Householder vector for back transformation

            vr(1:lr) = vr(1:lr) * xf
            if (my_prow==prow(nrow, nblk, np_rows)) then
              a(1:lr-1,lch) = vr(1:lr-1)
              a(lr,lch) = vrl
              vr(lr) = CONST_COMPLEX_1_0
            else
              a(1:lr,lch) = vr(1:lr)
            endif

          endif

          ! Broadcast Householder vector and tau along columns

          vr(lr+1) = tau
#ifdef WITH_MPI
          call timer%start("mpi_communication")
          call MPI_Bcast(vr, lr+1, MPI_COMPLEX_PRECISION, cur_pcol, mpi_comm_cols, mpierr)
          call timer%stop("mpi_communication")

#endif /* WITH_MPI */
          vmr(1:lr,lc) = vr(1:lr)
          tau = vr(lr+1)
          tmat(lc,lc,istep) = conjg(tau) ! Store conjg(tau) in diagonal of tmat

          ! Transform remaining columns in current block with Householder vector

          ! Local dot product
          aux1 = CONST_COMPLEX_0_0

          nlc = 0 ! number of local columns
          do j=1,lc-1
            lcx = local_index(istep*nbw+j, my_pcol, np_cols, nblk, 0)
            if (lcx>0) then
              nlc = nlc+1
              aux1(nlc) = dot_product(vr(1:lr),a(1:lr,lcx))
            endif
          enddo

          ! Get global dot products
#ifdef WITH_MPI
          call timer%start("mpi_communication")
          if (nlc>0) call mpi_allreduce(aux1, aux2, nlc, MPI_COMPLEX_PRECISION, MPI_SUM, mpi_comm_rows, mpierr)

          call timer%stop("mpi_communication")

          ! Transform

          nlc = 0
          do j=1,lc-1
            lcx = local_index(istep*nbw+j, my_pcol, np_cols, nblk, 0)
            if (lcx>0) then
              nlc = nlc+1
              a(1:lr,lcx) = a(1:lr,lcx) - conjg(tau)*aux2(nlc)*vr(1:lr)
            endif
          enddo

#else /* WITH_MPI */
!          if (nlc>0) aux2=aux1

          ! Transform

          nlc = 0
          do j=1,lc-1
            lcx = local_index(istep*nbw+j, my_pcol, np_cols, nblk, 0)
            if (lcx>0) then
              nlc = nlc+1
              a(1:lr,lcx) = a(1:lr,lcx) - conjg(tau)*aux1(nlc)*vr(1:lr)
            endif
          enddo


#endif /* WITH_MPI */
!
!          ! Transform
!
!          nlc = 0
!          do j=1,lc-1
!            lcx = local_index(istep*nbw+j, my_pcol, np_cols, nblk, 0)
!            if (lcx>0) then
!              nlc = nlc+1
!              a(1:lr,lcx) = a(1:lr,lcx) - conjg(tau)*aux2(nlc)*vr(1:lr)
!            endif
!          enddo

        enddo

        ! Calculate scalar products of stored Householder vectors.
        ! This can be done in different ways; here the Hermitian rank-k update (cherk/zherk) is used

        if (useGPU) then
          cur_pcol = pcol(istep*nbw+1, nblk, np_cols)
          if (my_pcol == cur_pcol) then
            successCUDA = cuda_memcpy2d((a_dev+int(((lc_start-1)*lda*size_of_PRECISION_complex),kind=c_size_t)),    &
                                        int(lda*size_of_PRECISION_complex,kind=c_size_t), loc(a(1,lc_start)),       &
                                        int(lda*size_of_PRECISION_complex,kind=c_size_t),                           &
                                        int(lr_end*size_of_PRECISION_complex,kind=c_size_t),                        &
                                        int((lc_end - lc_start+1),kind=c_size_t) &
                                        ,int(cudaMemcpyHostToDevice,kind=c_int))
            if (.not.(successCUDA)) then
              print *, "bandred_complex: cuda memcpy a_dev  failed ", istat
              stop
            endif
          endif
        endif

        vav = 0
        if (l_rows>0) then
          call timer%start("blas")
          call PRECISION_HERK('U', 'C', n_cols, l_rows, CONE, vmr, ubound(vmr,dim=1), CZERO, vav, ubound(vav,dim=1))
          call timer%stop("blas")
        endif
        call herm_matrix_allreduce_PRECISION(n_cols,vav, nbw,nbw,mpi_comm_rows)

        ! Calculate triangular matrix T for block Householder Transformation

        do lc=n_cols,1,-1
          tau = tmat(lc,lc,istep)
          if (lc<n_cols) then
            call timer%start("blas")
            call PRECISION_TRMV('U', 'C', 'N', n_cols-lc, tmat(lc+1,lc+1,istep), ubound(tmat,dim=1), vav(lc+1,lc), 1)
            call timer%stop("blas")
            tmat(lc,lc+1:n_cols,istep) = -tau * conjg(vav(lc+1:n_cols,lc))
          endif
        enddo
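        ! (Standard recurrence for the T factor of the blocked Householder / compact WY
        ! representation: the diagonal of tmat holds conjg(tau), and each row lc is built
        ! from the scalar products stored in vav combined, via the trmv above, with the
        ! already computed trailing block of tmat.)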

        ! Transpose vmr -> vmc (stored in umc, second half)
        call elpa_transpose_vectors_complex_PRECISION  (vmr, ubound(vmr,dim=1), mpi_comm_rows, &
                                      umc(1,n_cols+1), ubound(umc,dim=1), mpi_comm_cols, &
                                      1, istep*nbw, n_cols, nblk)

        ! Calculate umc = A**H * vmr
        ! Note that the distributed A has to be transposed
        ! Unlike direct tridiagonalization there is no need to exploit the cache locality
        ! of the tiles, so we can use strips of the matrix
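        !
        ! Layout used below: vmr(:,1:n_cols) holds the Householder vectors V (row distributed),
        ! umc(:,n_cols+1:2*n_cols) holds V transposed to column distribution (from the transpose
        ! above); umc(:,1:n_cols) accumulates A**H * V tile by tile, while vmr(:,n_cols+1:2*n_cols)
        ! collects the complementary contribution from the tiles above the diagonal.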
        umc(1:l_cols,1:n_cols) = CONST_COMPLEX_0_0
        vmr(1:l_rows,n_cols+1:2*n_cols) = CONST_COMPLEX_0_0
        if (l_cols>0 .and. l_rows>0) then
          if (useGPU) then
            if (size(vmr,dim=1)*size(vmr,dim=2) .gt. vmr_size) then
              print *,"bandred_complex: vmr size 2 :",size(vmr,dim=1)*size(vmr,dim=2),vmr_size
              stop
            endif
            successCUDA = cuda_memcpy(vmr_dev, loc(vmr(1,1)),vmr_size*size_of_PRECISION_complex,cudaMemcpyHostToDevice)

            if (.not.(successCUDA)) then
              print *, "bandred_complex:  cuda memcpy vmr_dev failed ", istat
              stop
            endif
            if (size(umc,dim=1)*size(umc,dim=2) .gt. umc_size) then
              print *,"bandred_complex: umc size 2 :",size(umc,dim=1)*size(umc,dim=2),umc_size
              stop
            endif
            successCUDA = cuda_memcpy(umc_dev, loc(umc(1,1)),umc_size*size_of_PRECISION_complex,cudaMemcpyHostToDevice)
            if (.not.(successCUDA)) then
              print *, "bandred_complex:  cuda memcpy umc_dev failed  ", istat
              stop
            endif
          endif
          do i=0,(istep*nbw-1)/tile_size

            lcs = i*l_cols_tile+1
            lce = min(l_cols,(i+1)*l_cols_tile)
            if (lce<lcs) cycle

            lre = min(l_rows,(i+1)*l_rows_tile)

            if (useGPU) then
              call timer%start("cublas")
              call cublas_PRECISION_GEMM('C', 'N', lce-lcs+1, n_cols, lre, CONE, (a_dev + ((lcs-1)*lda* &
                        size_of_PRECISION_complex)), lda, &
                        vmr_dev, cur_l_rows, CONE, (umc_dev +(lcs-1)*size_of_PRECISION_complex), cur_l_cols)
              call timer%stop("cublas")
            else
              call timer%start("blas")
              call PRECISION_GEMM('C', 'N', lce-lcs+1, n_cols, lre, CONE, a(1,lcs), ubound(a,dim=1), &
                         vmr, ubound(vmr,dim=1), CONE, umc(lcs,1), ubound(umc,dim=1))
              call timer%stop("blas")
            endif

            if (i==0) cycle
            lre = min(l_rows,i*l_rows_tile)
            if (useGPU) then
              call timer%start("cublas")
              call cublas_PRECISION_GEMM('N', 'N', lre, n_cols, lce-lcs+1, CONE, (a_dev+((lcs-1)*lda* &
                        size_of_PRECISION_complex)),lda,  &
                        (umc_dev+(cur_l_cols * n_cols+lcs-1)*size_of_PRECISION_complex), cur_l_cols,CONE,  &
                        (vmr_dev+(cur_l_rows * n_cols)*size_of_PRECISION_complex), cur_l_rows)
              call timer%stop("cublas")
            else
              call timer%start("blas")
              call PRECISION_GEMM('N', 'N', lre, n_cols, lce-lcs+1, CONE, a(1,lcs), lda, &
                         umc(lcs,n_cols+1), ubound(umc,dim=1), CONE, vmr(1,n_cols+1), ubound(vmr,dim=1))
              call timer%stop("blas")
            endif
          enddo

          if (useGPU) then
            if (size(vmr,dim=1)*size(vmr,dim=2) .gt. vmr_size) then
              print *,"bandred_complex: vmr size 3 :",size(vmr,dim=1)*size(vmr,dim=2),vmr_size
              stop
            endif
            successCUDA = cuda_memcpy(loc(vmr(1,1)),vmr_dev,vmr_size*size_of_PRECISION_complex,cudaMemcpyDeviceToHost)
            if (.not.(successCUDA)) then
              print *, "bandred_complex:  cuad memcpy failed vmr ", istat
              stop
            endif
            if (size(umc,dim=1)*size(umc,dim=2) .gt. umc_size) then
              print *,"bandred_complex: umc size 3 :",size(umc,dim=1)*size(umc,dim=2),umc_size
              stop
            endif
            successCUDA = cuda_memcpy(loc(umc(1,1)), umc_dev,umc_size*size_of_PRECISION_complex,cudaMemcpyDeviceToHost)
            if (.not.(successCUDA)) then
              print *, "bandred_complex:  cuad memcpy failed umc ", istat
              stop
            endif
          endif ! useGPU
        endif

        ! Sum up all ur(:) parts along rows and add them to the uc(:) parts
        ! on the processors containing the diagonal
        ! This is only necessary if ur has been calculated, i.e. if the
        ! global tile size is smaller than the global remaining matrix
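        ! (i.e. the strip loop above had iterations with i > 0, so vmr(:,n_cols+1:) holds
        ! partial results that still have to be summed up and folded into umc)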

        if (tile_size < istep*nbw) then
          call elpa_reduce_add_vectors_complex_PRECISION  (vmr(1,n_cols+1),ubound(vmr,dim=1),mpi_comm_rows, &
                                          umc, ubound(umc,dim=1), mpi_comm_cols, &
                                          istep*nbw, n_cols, nblk)
        endif

#ifdef WITH_MPI
        if (l_cols>0) then
          allocate(tmp(l_cols,n_cols), stat=istat, errmsg=errorMessage)
          if (istat .ne. 0) then
            print *,"bandred_complex: error when allocating tmp "//errorMessage
            stop
          endif
          call timer%start("mpi_communication")
          call mpi_allreduce(umc, tmp, l_cols*n_cols, MPI_COMPLEX_PRECISION, MPI_SUM, mpi_comm_rows, mpierr)
          call timer%stop("mpi_communication")

          umc(1:l_cols,1:n_cols) = tmp(1:l_cols,1:n_cols)
          deallocate(tmp, stat=istat, errmsg=errorMessage)
          if (istat .ne. 0) then
            print *,"bandred_complex: error when deallocating tmp "//errorMessage
            stop
          endif
        endif

#else /* WITH_MPI */

!        if (l_cols>0) then
!          allocate(tmp(l_cols,n_cols), stat=istat, errmsg=errorMessage)
!          if (istat .ne. 0) then
!            print *,"bandred_complex: error when allocating tmp "//errorMessage
!            stop
!          endif
!          tmp(1:l_cols,1:n_cols) = umc(1:l_cols,1:n_cols)
!
!          umc(1:l_cols,1:n_cols) = tmp(1:l_cols,1:n_cols)
!          deallocate(tmp, stat=istat, errmsg=errorMessage)
!          if (istat .ne. 0) then
!            print *,"bandred_complex: error when deallocating tmp "//errorMessage
!            stop
!          endif
!        endif

#endif /* WITH_MPI */

        ! U = U * Tmat**H
        if (useGPU) then
          if (size(umc,dim=1)*size(umc,dim=2) .gt. umc_size) then
            print *,"bandred_complex: umc size 4 :",size(umc,dim=1)*size(umc,dim=2),umc_size
            stop
          endif
          successCUDA = cuda_memcpy(umc_dev, loc(umc(1,1)),umc_size*size_of_PRECISION_complex,cudaMemcpyHostToDevice)
          if (.not.(successCUDA)) then
            print *, "bandred_complex:  cuad memcpy failed umc_dev ", istat
            stop
          endif
          successCUDA = cuda_memcpy(tmat_dev,loc(tmat(1,1,istep)),nbw*nbw*size_of_PRECISION_complex,cudaMemcpyHostToDevice)
          if (.not.(successCUDA)) then
            print *, "bandred_complex:  cuad memcpy failed tmat_dev ", istat
            stop
          endif
          call timer%start("cublas")
          call  cublas_PRECISION_TRMM('Right', 'Upper', 'C', 'Nonunit', l_cols, n_cols, CONE, tmat_dev, nbw, umc_dev, cur_l_cols)
          call timer%stop("cublas")
        else ! not useGPU
          call timer%start("blas")
          call PRECISION_TRMM('Right', 'Upper', 'C', 'Nonunit', l_cols, n_cols, CONE, tmat(1,1,istep), ubound(tmat,dim=1), &
                     umc, ubound(umc,dim=1))
          call timer%stop("blas")
        endif

        ! VAV = Tmat * V**H * A * V * Tmat**H = (U*Tmat**H)**H * V * Tmat**H
        if (useGPU) then
          successCUDA = cuda_memcpy(vav_dev,loc(vav(1,1)), nbw*nbw*size_of_PRECISION_complex,cudaMemcpyHostToDevice)
          if (.not.(successCUDA)) then
            print *, "bandred_complex:  cuad memcpy failed vav_dev ", istat
            stop
          endif
          call timer%start("cublas")
          call cublas_PRECISION_GEMM('C', 'N', n_cols, n_cols, l_cols, CONE, umc_dev, cur_l_cols, (umc_dev +( cur_l_cols *n_cols) &
                            *size_of_PRECISION_complex ), cur_l_cols, CZERO, vav_dev, nbw)

          call cublas_PRECISION_TRMM('Right', 'Upper', 'C', 'Nonunit', n_cols, n_cols, CONE, tmat_dev, nbw, vav_dev, nbw)
          call timer%stop("cublas")
          successCUDA = cuda_memcpy(loc(vav(1,1)), vav_dev,nbw*nbw*size_of_PRECISION_complex,cudaMemcpyDeviceToHost)
          if (.not.(successCUDA)) then
            print *, "bandred_complex:  cuad memcpy failed vav ", istat
            stop
          endif

          call herm_matrix_allreduce_PRECISION(n_cols,vav, nbw, nbw,mpi_comm_cols)

          successCUDA = cuda_memcpy(vav_dev,loc(vav(1,1)),nbw*nbw*size_of_PRECISION_complex,cudaMemcpyHostToDevice)
          if (.not.(successCUDA)) then
            print *, "bandred_complex:  cuad memcpy failed vav_dev ", istat
            stop
          endif
        else ! useGPU
          call timer%start("blas")
          call PRECISION_GEMM('C', 'N', n_cols, n_cols, l_cols, CONE, umc, ubound(umc,dim=1), umc(1,n_cols+1), &
                     ubound(umc,dim=1), CZERO, vav, ubound(vav,dim=1))
          call PRECISION_TRMM('Right', 'Upper', 'C', 'Nonunit', n_cols, n_cols, CONE, tmat(1,1,istep), &
                     ubound(tmat,dim=1), vav, ubound(vav,dim=1))
          call timer%stop("blas")
          call herm_matrix_allreduce_PRECISION(n_cols,vav,nbw,nbw,mpi_comm_cols)
        endif

        ! U = U - 0.5 * V * VAV
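        ! (With U = A*V*Tmat**H and VAV as computed above, this is the usual correction that
        ! makes the rank-2k update below, A - V*U**H - U*V**H, reproduce the full two-sided
        ! update of A by the blocked Householder transformation.)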

        if (useGPU) then
          call timer%start("cublas")
          call cublas_PRECISION_GEMM('N', 'N', l_cols, n_cols, n_cols, CONST_COMPLEX_PAIR_NEGATIVE_0_5, (umc_dev +  &
                            (cur_l_cols * n_cols )*size_of_PRECISION_complex), &
                            cur_l_cols, vav_dev, nbw, CONE, umc_dev, cur_l_cols)
          call timer%stop("cublas")
          ! Transpose umc -> umr (stored in vmr, second half)

          if (size(umc,dim=1)*size(umc,dim=2) .gt. umc_size) then
            print *,"bandred_complex: umc size 5 :",size(umc,dim=1)*size(umc,dim=2),umc_size
            stop
          endif
          successCUDA = cuda_memcpy(loc(umc(1,1)),umc_dev,umc_size*size_of_PRECISION_complex,cudaMemcpyDeviceToHost)
          if (.not.(successCUDA)) then
            print *, "bandred_complex:  cuad memcpy failed umc ", istat
            stop
          endif
          call elpa_transpose_vectors_complex_PRECISION  (umc, ubound(umc,dim=1), mpi_comm_cols, &
                                                vmr(1,n_cols+1), ubound(vmr,dim=1), mpi_comm_rows, &
                                                1, istep*nbw, n_cols, nblk)
          if (size(vmr,dim=1)*size(vmr,dim=2) .gt. vmr_size) then
            print *,"bandred_complex: vmr size 4 :",size(vmr,dim=1)*size(vmr,dim=2),vmr_size
            stop
          endif
          successCUDA = cuda_memcpy(vmr_dev,loc(vmr(1,1)),vmr_size*size_of_PRECISION_complex,cudaMemcpyHostToDevice)
          if (.not.(successCUDA)) then
            print *, "bandred_complex:  cuda memcpy failed vav_dev", istat
            stop
          endif

          if (size(umc,dim=1)*size(umc,dim=2) .gt. umc_size) then
            print *,"bandred_complex: umc size 6 :",size(umc,dim=1)*size(umc,dim=2),umc_size
            stop
          endif
          successCUDA = cuda_memcpy(umc_dev,loc(umc(1,1)),umc_size*size_of_PRECISION_complex,cudaMemcpyHostToDevice)
          if (.not.(successCUDA)) then
            print *, "bandred_complex:  cuda memcpy failed umc_dev ", istat
            stop
          endif
        else ! not useGPU
          call timer%start("blas")
          call PRECISION_GEMM('N', 'N', l_cols, n_cols, n_cols, CONST_COMPLEX_PAIR_NEGATIVE_0_5, umc(1,n_cols+1), ubound(umc,dim=1), &
                     vav, ubound(vav,dim=1), CONE, umc, ubound(umc,dim=1))
          call timer%stop("blas")
          ! Transpose umc -> umr (stored in vmr, second half)

          call elpa_transpose_vectors_complex_PRECISION  (umc, ubound(umc,dim=1), mpi_comm_cols, &
                                                vmr(1,n_cols+1), ubound(vmr,dim=1), mpi_comm_rows, &
                                                1, istep*nbw, n_cols, nblk)
        endif
        ! A = A - V*U**H - U*V**H
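        ! (Done tile by tile below as a single GEMM with inner dimension 2*n_cols, using the
        ! concatenated blocks [V, U] stored in vmr and [U, V] stored in umc.)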

        do i=0,(istep*nbw-1)/tile_size
          lcs = i*l_cols_tile+1
          lce = min(l_cols,(i+1)*l_cols_tile)
          lre = min(l_rows,(i+1)*l_rows_tile)
          if (lce<lcs .or. lre<1) cycle
            if (useGPU) then
              call timer%start("cublas")
              call cublas_PRECISION_GEMM('N', 'C', lre, lce-lcs+1, 2*n_cols, -CONE, &
                                vmr_dev ,cur_l_rows, (umc_dev +(lcs-1)*size_of_PRECISION_complex),cur_l_cols, &
                                CONE, (a_dev + (lcs-1)*lda*size_of_PRECISION_complex),lda)
              call timer%stop("cublas")
            else
              call timer%start("blas")
              call PRECISION_GEMM('N', 'C', lre,lce-lcs+1, 2*n_cols, -CONE, &
                         vmr, ubound(vmr,dim=1), umc(lcs,1), ubound(umc,dim=1), &
                         CONE, a(1,lcs), lda)
              call timer%stop("blas")
            endif
          enddo

         if (.not.(useGPU)) then

           if (allocated(vr)) then
             deallocate(vr, stat=istat, errmsg=errorMessage)
             if (istat .ne. 0) then
               print *,"bandred_complex: error when deallocating vr "//errorMessage
               stop
             endif
           endif
           if (allocated(vmr)) then
             deallocate(vmr, stat=istat, errmsg=errorMessage)
             if (istat .ne. 0) then
               print *,"bandred_complex: error when deallocating vmr "//errorMessage
               stop
             endif
           endif

           if (allocated(umc)) then
             deallocate(umc, stat=istat, errmsg=errorMessage)
             if (istat .ne. 0) then
               print *,"bandred_complex: error when deallocating umc "//errorMessage
               stop
             endif
           endif


         endif ! not useGPU

       enddo ! istep

       if (useGPU) then
#if !(defined(USE_ASSUMED_SIZE))
         if (size(a,dim=1)*size(a,dim=2) .ne. lda*na_cols) then
           print *,"bandred_complex: size a ",size(a,dim=1)*size(a,dim=2) , lda*na_cols
         endif
#endif

         successCUDA = cuda_memcpy ( loc(a(1,1)), a_dev, lda*na_cols*size_of_PRECISION_complex,cudaMemcpyDeviceToHost)
         if (.not.(successCUDA)) then
           print *, "bandred_complex:  cuad memcpy failed a ", istat
           stop
         endif

         successCUDA = cuda_free(a_dev)
         if (.not.(successCUDA)) then
           print *,"bandred_complex: error in cudaFree"
           stop
         endif

         successCUDA = cuda_free(tmat_dev)
         if (.not.(successCUDA)) then
           print *,"bandred_complex: error in cudaFree"
           stop
         endif

         successCUDA = cuda_free(vav_dev)
         if (.not.(successCUDA)) then
           print *,"bandred_complex: error in cudaFree"
           stop
         endif

         if (allocated(vr)) then
           deallocate(vr, stat=istat, errmsg=errorMessage)
           if (istat .ne. 0) then
             print *,"bandred_complex: error when deallocating vr "//errorMessage
             stop
           endif
         endif
         if (allocated(vmr)) then
           deallocate(vmr, stat=istat, errmsg=errorMessage)
           if (istat .ne. 0) then
             print *,"bandred_complex: error when deallocating vmr "//errorMessage
             stop
           endif

           successCUDA = cuda_free(vmr_dev)
           if (.not.(successCUDA)) then
             print *,"bandred_complex: error in cudaFree"
             stop
           endif
         endif

         if (allocated(umc)) then
           deallocate(umc, stat=istat, errmsg=errorMessage)
           if (istat .ne. 0) then
             print *,"bandred_complex: error when deallocating umc "//errorMessage
             stop
           endif

           successCUDA = cuda_free(umc_dev)
           if (.not.(successCUDA)) then
             print *,"bandred_complex: error in cudaFree"
             stop
           endif
         endif
       endif ! use GPU

       call timer%stop("bandred_complex" // PRECISION_SUFFIX)

     end subroutine bandred_complex_PRECISION