elpa1_solve_tridi_real_template.X90 24.1 KB
Newer Older
Pavel Kus's avatar
Pavel Kus committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
#if 0
!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen ,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Plack-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukutren in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".
#endif

55
#include "../general/sanity.X90"
56

57
!> Solve the real symmetric tridiagonal eigenvalue problem (d = diagonal,
!> e = subdiagonal) on the 2D process grid with a divide-and-conquer scheme:
!> the matrix is split over processor columns, each column solves its piece
!> via solve_tridi_col, and the pieces are merged pairwise by the contained
!> recursive routine merge_recursive.
!> On successful exit d holds the eigenvalues and q the requested eigenvectors.
subroutine solve_tridi_&
&PRECISION&
&_impl ( na, nev, d, e, q, ldq, nblk, matrixCols, mpi_comm_rows, &
                                           mpi_comm_cols, wantDebug, success )

#ifdef HAVE_DETAILED_TIMINGS
      use timings
#else
      use timings_dummy
#endif
      use precision
      implicit none

      ! na         : global matrix size
      ! nev        : number of eigenvectors to be computed
      ! ldq        : leading dimension of q
      ! nblk       : block size of the block-cyclic distribution
      ! matrixCols : number of local columns of q
      integer(kind=ik), intent(in)           :: na, nev, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
      real(kind=REAL_DATATYPE), intent(inout)   :: d(na), e(na)
#ifdef USE_ASSUMED_SIZE
      real(kind=REAL_DATATYPE), intent(inout)   :: q(ldq,*)
#else
      real(kind=REAL_DATATYPE), intent(inout)   :: q(ldq,matrixCols)
#endif
      logical, intent(in)           :: wantDebug
      logical, intent(out)          :: success

      integer(kind=ik)              :: i, j, n, np, nc, nev1, l_cols, l_rows
      integer(kind=ik)              :: my_prow, my_pcol, np_rows, np_cols, mpierr

      ! limits(np) : global column index after which processor column np starts
      ! l_col/p_col       : dense distribution index arrays (local col / owning proc)
      ! l_col_bc/p_col_bc : block-cyclic distribution index arrays (only nev cols set)
      integer(kind=ik), allocatable :: limits(:), l_col(:), p_col(:), l_col_bc(:), p_col_bc(:)

      integer(kind=ik)              :: istat
      character(200)                :: errorMessage

      call timer%start("solve_tridi" // PRECISION_SUFFIX)

      call timer%start("mpi_communication")
      call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
      call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
      call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
      call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)
      call timer%stop("mpi_communication")

      success = .true.

      l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
      l_cols = local_index(na, my_pcol, np_cols, nblk, -1) ! Local columns of q

      ! Set Q to 0
      q(1:l_rows, 1:l_cols) = CONST_0_0

      ! Get the limits of the subdivisons, each subdivison has as many cols
      ! as fit on the respective processor column

      allocate(limits(0:np_cols), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi: error when allocating limits "//errorMessage
        stop 1
      endif

      limits(0) = 0
      do np=0,np_cols-1
        nc = local_index(na, np, np_cols, nblk, -1) ! number of columns on proc column np

        ! Check for the case that a column has have zero width.
        ! This is not supported!
        ! Scalapack supports it but delivers no results for these columns,
        ! which is rather annoying
        if (nc==0) then
          call timer%stop("solve_tridi" // PRECISION_SUFFIX)
          if (wantDebug) write(error_unit,*) 'ELPA1_solve_tridi: ERROR: Problem contains processor column with zero width'
          success = .false.
          return
        endif
        limits(np+1) = limits(np) + nc
      enddo

      ! Subdivide matrix by subtracting rank 1 modifications

      do i=1,np_cols-1
        n = limits(i)
        d(n) = d(n)-abs(e(n))
        d(n+1) = d(n+1)-abs(e(n))
      enddo

      ! Solve sub problems on processsor columns

      nc = limits(my_pcol) ! column after which my problem starts

      if (np_cols>1) then
        nev1 = l_cols ! all eigenvectors are needed
      else
        nev1 = MIN(nev,l_cols)
      endif
      call solve_tridi_col_&
           &PRECISION&
           &_impl (l_cols, nev1, nc, d(nc+1), e(nc+1), q, ldq, nblk,  &
                        matrixCols, mpi_comm_rows, wantDebug, success)
      if (.not.(success)) then
        call timer%stop("solve_tridi" // PRECISION_SUFFIX)
        return
      endif
      ! If there is only 1 processor column, we are done

      if (np_cols==1) then
        deallocate(limits, stat=istat, errmsg=errorMessage)
        if (istat .ne. 0) then
          print *,"solve_tridi: error when deallocating limits "//errorMessage
          stop 1
        endif

        call timer%stop("solve_tridi" // PRECISION_SUFFIX)
        return
      endif

      ! Set index arrays for Q columns

      ! Dense distribution scheme:

      allocate(l_col(na), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi: error when allocating l_col "//errorMessage
        stop 1
      endif

      allocate(p_col(na), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi: error when allocating p_col "//errorMessage
        stop 1
      endif

      n = 0
      do np=0,np_cols-1
        nc = local_index(na, np, np_cols, nblk, -1)
        do i=1,nc
          n = n+1
          l_col(n) = i
          p_col(n) = np
        enddo
      enddo

      ! Block cyclic distribution scheme, only nev columns are set:

      allocate(l_col_bc(na), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi: error when allocating l_col_bc "//errorMessage
        stop 1
      endif

      allocate(p_col_bc(na), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi: error when allocating p_col_bc "//errorMessage
        stop 1
      endif

      ! -1 marks columns that are not needed in the output distribution
      p_col_bc(:) = -1
      l_col_bc(:) = -1

      do i = 0, na-1, nblk*np_cols
        do j = 0, np_cols-1
          do n = 1, nblk
            if (i+j*nblk+n <= MIN(nev,na)) then
              p_col_bc(i+j*nblk+n) = j
              l_col_bc(i+j*nblk+n) = i/np_cols + n
             endif
           enddo
         enddo
      enddo

      ! Recursively merge sub problems
      call merge_recursive_&
           &PRECISION &
           (0, np_cols, wantDebug, success)
      if (.not.(success)) then
        call timer%stop("solve_tridi" // PRECISION_SUFFIX)
        return
      endif

      deallocate(limits,l_col,p_col,l_col_bc,p_col_bc, stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi: error when deallocating l_col "//errorMessage
        stop 1
      endif

      call timer%stop("solve_tridi" // PRECISION_SUFFIX)
      return

      contains

        !> Recursively merge the eigen-solutions of nprocs processor columns
        !> starting at column np_off. Halves the processor range, recurses on
        !> each half, exchanges the eigenvalue segments between the halves via
        !> MPI, then calls merge_systems. The final merge (nprocs == np_cols)
        !> switches the output to the block-cyclic distribution via
        !> l_col_bc/p_col_bc so that only nev eigenvectors are computed.
        !> Accesses host variables: limits, d, e, q, my_pcol, np_cols, mpierr, ...
        recursive subroutine merge_recursive_&
                  &PRECISION &
           (np_off, nprocs, wantDebug, success)
           use precision
#ifdef HAVE_DETAILED_TIMINGS
           use timings
#else
           use timings_dummy
#endif
           implicit none

           ! noff is always a multiple of nblk_ev
           ! nlen-noff is always > nblk_ev

           integer(kind=ik)     :: np_off, nprocs
           integer(kind=ik)     :: np1, np2, noff, nlen, nmid, n
#ifdef WITH_MPI
!           integer(kind=ik)     :: my_mpi_status(mpi_status_size)
#endif
           logical, intent(in)  :: wantDebug
           logical, intent(out) :: success

           success = .true.

           if (nprocs<=1) then
             ! Safety check only
             if (wantDebug) write(error_unit,*) "ELPA1_merge_recursive: INTERNAL error merge_recursive: nprocs=",nprocs
             success = .false.
             return
           endif
           ! Split problem into 2 subproblems of size np1 / np2

           np1 = nprocs/2
           np2 = nprocs-np1

           if (np1 > 1) call merge_recursive_&
                        &PRECISION &
           (np_off, np1, wantDebug, success)
           if (.not.(success)) return
           if (np2 > 1) call merge_recursive_&
                        &PRECISION &
           (np_off+np1, np2, wantDebug, success)
           if (.not.(success)) return

           noff = limits(np_off)
           nmid = limits(np_off+np1) - noff
           nlen = limits(np_off+nprocs) - noff

           ! First half's leader sends its eigenvalue segment to every column
           ! of the second half ...
#ifdef WITH_MPI
           call timer%start("mpi_communication")
           if (my_pcol==np_off) then
             do n=np_off+np1,np_off+nprocs-1
               call mpi_send(d(noff+1), nmid, MPI_REAL_PRECISION, n, 1, mpi_comm_cols, mpierr)
             enddo
           endif
           call timer%stop("mpi_communication")
#endif /* WITH_MPI */

           ! ... which the second half receives
           if (my_pcol>=np_off+np1 .and. my_pcol<np_off+nprocs) then
#ifdef WITH_MPI
             call timer%start("mpi_communication")
             call mpi_recv(d(noff+1), nmid, MPI_REAL_PRECISION, np_off, 1, mpi_comm_cols, MPI_STATUS_IGNORE, mpierr)
             call timer%stop("mpi_communication")
#else /* WITH_MPI */
!             d(noff+1:noff+1+nmid-1) = d(noff+1:noff+1+nmid-1)
#endif /* WITH_MPI */
           endif

           ! Second half's leader sends its segment to every column of the
           ! first half ...
           if (my_pcol==np_off+np1) then
             do n=np_off,np_off+np1-1
#ifdef WITH_MPI
               call timer%start("mpi_communication")
               call mpi_send(d(noff+nmid+1), nlen-nmid, MPI_REAL_PRECISION, n, 1, mpi_comm_cols, mpierr)
               call timer%stop("mpi_communication")
#endif /* WITH_MPI */

             enddo
           endif
           ! ... which the first half receives
           if (my_pcol>=np_off .and. my_pcol<np_off+np1) then
#ifdef WITH_MPI
             call timer%start("mpi_communication")
             call mpi_recv(d(noff+nmid+1), nlen-nmid, MPI_REAL_PRECISION, np_off+np1, 1,mpi_comm_cols, MPI_STATUS_IGNORE, mpierr)
             call timer%stop("mpi_communication")
#else /* WITH_MPI */
!             d(noff+nmid+1:noff+nmid+1+nlen-nmid-1) = d(noff+nmid+1:noff+nmid+1+nlen-nmid-1)
#endif /* WITH_MPI */
           endif
           if (nprocs == np_cols) then

             ! Last merge, result distribution must be block cyclic, noff==0,
             ! p_col_bc is set so that only nev eigenvalues are calculated
             call merge_systems_&
                  &PRECISION &
                                 (nlen, nmid, d(noff+1), e(noff+nmid), q, ldq, noff, &
                                 nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, l_col, p_col, &
                                 l_col_bc, p_col_bc, np_off, nprocs, wantDebug, success )
             if (.not.(success)) return
           else
             ! Not last merge, leave dense column distribution
             call merge_systems_&
                  &PRECISION &
                                (nlen, nmid, d(noff+1), e(noff+nmid), q, ldq, noff, &
                                 nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, l_col(noff+1), p_col(noff+1), &
                                 l_col(noff+1), p_col(noff+1), np_off, nprocs, wantDebug, success )
             if (.not.(success)) return
           endif
       end subroutine merge_recursive_&
           &PRECISION

    end subroutine solve_tridi_&
        &PRECISION&
        &_impl
Pavel Kus's avatar
Pavel Kus committed
354

355
    !> Solves the symmetric, tridiagonal eigenvalue problem on one processor
    !> column with the divide and conquer method: the problem is split into
    !> ndiv even-sized pieces (ndiv a power of 2), each piece is solved by
    !> solve_tridi_single_problem (in parallel over processor rows when
    !> np_rows > 1), and the pieces are merged pairwise with merge_systems.
    !> nqoff is the row/column offset of this subproblem inside the global q.
    !> Works best if the number of processor rows is a power of 2!
    subroutine solve_tridi_col_&
    &PRECISION&
    &_impl ( na, nev, nqoff, d, e, q, ldq, nblk, matrixCols, mpi_comm_rows, wantDebug, success )

#ifdef HAVE_DETAILED_TIMINGS
      use timings
#else
      use timings_dummy
#endif
      use precision
      implicit none

      integer(kind=ik)              :: na, nev, nqoff, ldq, nblk, matrixCols, mpi_comm_rows
      real(kind=REAL_DATATYPE)                 :: d(na), e(na)
#ifdef USE_ASSUMED_SIZE
      real(kind=REAL_DATATYPE)                 :: q(ldq,*)
#else
      real(kind=REAL_DATATYPE)                 :: q(ldq,matrixCols)
#endif

      integer(kind=ik), parameter   :: min_submatrix_size = 16 ! Minimum size of the submatrices to be used

      ! qmat1 : eigenvectors of this row's own subproblem
      ! qmat2 : broadcast buffer for the currently distributed subproblem
      real(kind=REAL_DATATYPE), allocatable    :: qmat1(:,:), qmat2(:,:)
      integer(kind=ik)              :: i, n, np
      integer(kind=ik)              :: ndiv, noff, nmid, nlen, max_size
      integer(kind=ik)              :: my_prow, np_rows, mpierr

      integer(kind=ik), allocatable :: limits(:), l_col(:), p_col_i(:), p_col_o(:)
      logical, intent(in)           :: wantDebug
      logical, intent(out)          :: success
      integer(kind=ik)              :: istat
      character(200)                :: errorMessage

      call timer%start("solve_tridi_col" // PRECISION_SUFFIX)
      call timer%start("mpi_communication")
      call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
      call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
      call timer%stop("mpi_communication")
      success = .true.
      ! Calculate the number of subdivisions needed.

      n = na
      ndiv = 1
      do while(2*ndiv<=np_rows .and. n>2*min_submatrix_size)
        n = ((n+3)/4)*2 ! the bigger one of the two halves, we want EVEN boundaries
        ndiv = ndiv*2
      enddo

      ! If there is only 1 processor row and not all eigenvectors are needed
      ! and the matrix size is big enough, then use 2 subdivisions
      ! so that merge_systems is called once and only the needed
      ! eigenvectors are calculated for the final problem.

      if (np_rows==1 .and. nev<na .and. na>2*min_submatrix_size) ndiv = 2

      allocate(limits(0:ndiv), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi_col: error when allocating limits "//errorMessage
        stop 1
      endif

      limits(0) = 0
      limits(ndiv) = na

      ! Bisect recursively: fill limits(i+n) between already-set boundaries
      n = ndiv
      do while(n>1)
        n = n/2 ! n is always a power of 2
        do i=0,ndiv-1,2*n
          ! We want to have even boundaries (for cache line alignments)
          limits(i+n) = limits(i) + ((limits(i+2*n)-limits(i)+3)/4)*2
        enddo
      enddo

      ! Calculate the maximum size of a subproblem

      max_size = 0
      do i=1,ndiv
        max_size = MAX(max_size,limits(i)-limits(i-1))
      enddo

      ! Subdivide matrix by subtracting rank 1 modifications

      do i=1,ndiv-1
        n = limits(i)
        d(n) = d(n)-abs(e(n))
        d(n+1) = d(n+1)-abs(e(n))
      enddo

      if (np_rows==1)    then

        ! For 1 processor row there may be 1 or 2 subdivisions
        do n=0,ndiv-1
          noff = limits(n)        ! Start of subproblem
          nlen = limits(n+1)-noff ! Size of subproblem

          call solve_tridi_single_problem_&
          &PRECISION&
          &_impl &
                                   (nlen,d(noff+1),e(noff+1), &
                                    q(nqoff+noff+1,noff+1),ubound(q,dim=1), wantDebug, success)

          if (.not.(success)) return
        enddo

      else

        ! Solve sub problems in parallel with solve_tridi_single
        ! There is at maximum 1 subproblem per processor

        allocate(qmat1(max_size,max_size), stat=istat, errmsg=errorMessage)
        if (istat .ne. 0) then
          print *,"solve_tridi_col: error when allocating qmat1 "//errorMessage
          stop 1
        endif

        allocate(qmat2(max_size,max_size), stat=istat, errmsg=errorMessage)
        if (istat .ne. 0) then
          print *,"solve_tridi_col: error when allocating qmat2 "//errorMessage
          stop 1
        endif

        qmat1 = 0 ! Make sure that all elements are defined

        if (my_prow < ndiv) then

          noff = limits(my_prow)        ! Start of subproblem
          nlen = limits(my_prow+1)-noff ! Size of subproblem
          call solve_tridi_single_problem_&
          &PRECISION&
          &_impl &
                                   (nlen,d(noff+1),e(noff+1),qmat1, &
                                    ubound(qmat1,dim=1), wantDebug, success)

          if (.not.(success)) return
        endif

        ! Fill eigenvectors in qmat1 into global matrix q

        do np = 0, ndiv-1

          noff = limits(np)
          nlen = limits(np+1)-noff
#ifdef WITH_MPI
          call timer%start("mpi_communication")
          call MPI_Bcast(d(noff+1), nlen, MPI_REAL_PRECISION, np, mpi_comm_rows, mpierr)
          qmat2 = qmat1
          call MPI_Bcast(qmat2, max_size*max_size, MPI_REAL_PRECISION, np, mpi_comm_rows, mpierr)
          call timer%stop("mpi_communication")
#else /* WITH_MPI */
!          qmat2 = qmat1 ! is this correct
#endif /* WITH_MPI */
          do i=1,nlen

#ifdef WITH_MPI
            call distribute_global_column_&
            &PRECISION &
                     (qmat2(1,i), q(1,noff+i), nqoff+noff, nlen, my_prow, np_rows, nblk)
#else /* WITH_MPI */
            call distribute_global_column_&
            &PRECISION &
                     (qmat1(1,i), q(1,noff+i), nqoff+noff, nlen, my_prow, np_rows, nblk)
#endif /* WITH_MPI */
          enddo

        enddo

        deallocate(qmat1, qmat2, stat=istat, errmsg=errorMessage)
        if (istat .ne. 0) then
          print *,"solve_tridi_col: error when deallocating qmat2 "//errorMessage
          stop 1
        endif

      endif

      ! Allocate and set index arrays l_col and p_col

      allocate(l_col(na), p_col_i(na),  p_col_o(na), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi_col: error when allocating l_col "//errorMessage
        stop 1
      endif

      ! Everything lives on this (single) processor column: local col = global
      ! col, owning processor 0, for both input and output distributions.
      do i=1,na
        l_col(i) = i
        p_col_i(i) = 0
        p_col_o(i) = 0
      enddo

      ! Merge subproblems

      n = 1
      do while(n<ndiv) ! if ndiv==1, the problem was solved by single call to solve_tridi_single

        do i=0,ndiv-1,2*n

          noff = limits(i)
          nmid = limits(i+n) - noff
          nlen = limits(i+2*n) - noff

          if (nlen == na) then
            ! Last merge, set p_col_o=-1 for unneeded (output) eigenvectors
            p_col_o(nev+1:na) = -1
          endif
          call merge_systems_&
          &PRECISION &
                              (nlen, nmid, d(noff+1), e(noff+nmid), q, ldq, nqoff+noff, nblk, &
                               matrixCols, mpi_comm_rows, mpi_comm_self, l_col(noff+1), p_col_i(noff+1), &
                               l_col(noff+1), p_col_o(noff+1), 0, 1, wantDebug, success)
          if (.not.(success)) return

        enddo

        n = 2*n

      enddo

      deallocate(limits, l_col, p_col_i, p_col_o, stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"solve_tridi_col: error when deallocating l_col "//errorMessage
        stop 1
      endif

      call timer%stop("solve_tridi_col" // PRECISION_SUFFIX)

    end subroutine solve_tridi_col_&
    &PRECISION&
    &_impl
Pavel Kus's avatar
Pavel Kus committed
585

586
    !> Solves the symmetric, tridiagonal eigenvalue problem on a single processor.
    !> Tries LAPACK ?stedc first; on failure restores d/e from saved copies and
    !> falls back to ?steqr. Afterwards repairs any tiny monotony violations in
    !> the returned eigenvalues by insertion-sorting the offending entries
    !> (together with their eigenvectors).
    recursive subroutine solve_tridi_single_problem_&
    &PRECISION&
    &_impl (nlen, d, e, q, ldq, wantDebug, success)

#ifdef HAVE_DETAILED_TIMINGS
     use timings
#else
     use timings_dummy
#endif
     use precision
     implicit none

     integer(kind=ik)              :: nlen, ldq
     real(kind=REAL_DATATYPE)                 :: d(nlen), e(nlen), q(ldq,nlen)

     ! ds/es : backup of d/e so the steqr fallback gets unmodified input
     real(kind=REAL_DATATYPE), allocatable    :: work(:), qtmp(:), ds(:), es(:)
     real(kind=REAL_DATATYPE)                 :: dtmp

     integer(kind=ik)              :: i, j, lwork, liwork, info, mpierr
     integer(kind=ik), allocatable :: iwork(:)

     logical, intent(in)           :: wantDebug
     logical, intent(out)          :: success
      integer(kind=ik)             :: istat
      character(200)               :: errorMessage

     call timer%start("solve_tridi_single" // PRECISION_SUFFIX)

     success = .true.
     allocate(ds(nlen), es(nlen), stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_tridi_single: error when allocating ds "//errorMessage
       stop 1
     endif

     ! Save d and e for the case that dstedc fails

     ds(:) = d(:)
     es(:) = e(:)

     ! First try dstedc, this is normally faster but it may fail sometimes (why???)

     ! Workspace sizes per the LAPACK ?stedc documentation for COMPZ='I'
     lwork = 1 + 4*nlen + nlen**2
     liwork =  3 + 5*nlen
     allocate(work(lwork), iwork(liwork), stat=istat, errmsg=errorMessage)
     if (istat .ne. 0) then
       print *,"solve_tridi_single: error when allocating work "//errorMessage
       stop 1
     endif
     call timer%start("blas")
     call PRECISION_STEDC('I', nlen, d, e, q, ldq, work, lwork, iwork, liwork, info)
     call timer%stop("blas")

     if (info /= 0) then

       ! DSTEDC failed, try DSTEQR. The workspace is enough for DSTEQR.

       write(error_unit,'(a,i8,a)') 'Warning: Lapack routine DSTEDC failed, info= ',info,', Trying DSTEQR!'

       d(:) = ds(:)
       e(:) = es(:)
       call timer%start("blas")
       call PRECISION_STEQR('I', nlen, d, e, q, ldq, work, info)
       call timer%stop("blas")

       ! If DSTEQR fails also, we don't know what to do further ...

       if (info /= 0) then
         if (wantDebug) &
           write(error_unit,'(a,i8,a)') 'ELPA1_solve_tridi_single: ERROR: Lapack routine DSTEQR failed, info= ',info,', Aborting!'
           success = .false.
           return
         endif
       end if

       deallocate(work,iwork,ds,es, stat=istat, errmsg=errorMessage)
       if (istat .ne. 0) then
         print *,"solve_tridi_single: error when deallocating ds "//errorMessage
         stop 1
       endif

      ! Check if eigenvalues are monotonically increasing
      ! This seems to be not always the case  (in the IBM implementation of dstedc ???)

      do i=1,nlen-1
        if (d(i+1)<d(i)) then
          ! A large relative violation is a real problem; a tiny one is just
          ! a rounding artifact of the LAPACK sort and is harmless.
#ifdef DOUBLE_PRECISION_REAL
          if (abs(d(i+1) - d(i)) / abs(d(i+1) + d(i)) > 1e-14_rk8) then
#else
          if (abs(d(i+1) - d(i)) / abs(d(i+1) + d(i)) > 1e-14_rk4) then
#endif
            write(error_unit,'(a,i8,2g25.16)') '***WARNING: Monotony error dste**:',i+1,d(i),d(i+1)
          else
            write(error_unit,'(a,i8,2g25.16)') 'Info: Monotony error dste{dc,qr}:',i+1,d(i),d(i+1)
            write(error_unit,'(a)') 'The eigenvalues from a lapack call are not sorted to machine precision.'
            write(error_unit,'(a)') 'In this extent, this is completely harmless.'
            write(error_unit,'(a)') 'Still, we keep this info message just in case.'
          end if
          allocate(qtmp(nlen), stat=istat, errmsg=errorMessage)
          if (istat .ne. 0) then
            print *,"solve_tridi_single: error when allocating qtmp "//errorMessage
            stop 1
          endif

          ! Insertion step: shift larger eigenvalues (and their vectors) up
          ! until the out-of-order entry fits.
          dtmp = d(i+1)
          qtmp(1:nlen) = q(1:nlen,i+1)
          do j=i,1,-1
            if (dtmp<d(j)) then
              d(j+1)        = d(j)
              q(1:nlen,j+1) = q(1:nlen,j)
            else
              exit ! Loop
            endif
          enddo
          d(j+1)        = dtmp
          q(1:nlen,j+1) = qtmp(1:nlen)
          deallocate(qtmp, stat=istat, errmsg=errorMessage)
          if (istat .ne. 0) then
            print *,"solve_tridi_single: error when deallocating qtmp "//errorMessage
            stop 1
          endif

       endif
     enddo
     call timer%stop("solve_tridi_single" // PRECISION_SUFFIX)

    end subroutine solve_tridi_single_problem_&
    &PRECISION&
    &_impl
Pavel Kus's avatar
Pavel Kus committed
717