!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".

!> \mainpage
!> Eigenvalue SoLvers for Petaflop-Applications (ELPA)
!> \par
!> http://elpa.mpcdf.mpg.de
!>
!> \par
!>    The ELPA library was originally created by the ELPA consortium,
!>    consisting of the following organizations:
!>
!>    - Max Planck Computing and Data Facility (MPCDF) formerly known as
!>      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!>    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!>      Informatik,
!>    - Technische Universität München, Lehrstuhl für Informatik mit
!>      Schwerpunkt Wissenschaftliches Rechnen,
!>    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!>    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!>      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!>      and
!>    - IBM Deutschland GmbH
!>
!>   Some parts and enhancements of ELPA have been contributed and authored
!>   by the Intel Corporation which is not part of the ELPA consortium.
!>
!>   Contributions to the ELPA source have been authored by (in alphabetical order):
!>
!> \author T. Auckenthaler, Volker Blum, A. Heinecke, L. Huedepohl, R. Johanni, Werner Jürgens, and A. Marek


#include "config-f90.h"
!> \brief Fortran module which provides the routines to use the one-stage ELPA solver
module ELPA1
  use, intrinsic :: iso_c_binding, only : c_double
  use elpa_utilities
  use elpa1_auxiliary

  implicit none

  ! The following routines are public:
  private

  public :: get_elpa_row_col_comms     !< old, deprecated interface: Sets MPI row/col communicators
  public :: get_elpa_communicators     !< Sets MPI row/col communicators
  public :: elpa_get_communicators     !< Sets MPI row/col communicators

  public :: solve_evp_real             !< old, deprecated interface: Driver routine for real eigenvalue problem
  public :: solve_evp_real_1stage      !< Driver routine for real eigenvalue problem
  public :: solve_evp_complex          !< old, deprecated interface:  Driver routine for complex eigenvalue problem
  public :: solve_evp_complex_1stage   !< Driver routine for complex eigenvalue problem

  ! imported from elpa1_auxiliary

  public :: elpa_mult_at_b_real        !< Multiply real matrices A**T * B
  public :: mult_at_b_real             !< old, deprecated interface to multiply real matrices A**T * B

  public :: elpa_mult_ah_b_complex     !< Multiply complex matrices A**H * B
  public :: mult_ah_b_complex          !< old, deprecated interface to multiply complex matrices A**H * B

  public :: elpa_invert_trm_real       !< Invert real triangular matrix
  public :: invert_trm_real            !< old, deprecated interface to invert real triangular matrix

  public :: elpa_invert_trm_complex    !< Invert complex triangular matrix
  public :: invert_trm_complex         !< old, deprecated interface to invert complex triangular matrix

  public :: elpa_cholesky_real         !< Cholesky factorization of a real matrix
  public :: cholesky_real              !< old, deprecated interface to do Cholesky factorization of a real matrix

  public :: elpa_cholesky_complex      !< Cholesky factorization of a complex matrix
  public :: cholesky_complex           !< old, deprecated interface to do Cholesky factorization of a complex matrix

  public :: elpa_solve_tridi           !< Solve tridiagonal eigensystem with divide and conquer method


  ! Timing results, set by every call to solve_evp_xxx

  real(kind=c_double), public :: time_evp_fwd    !< time for forward transformations (to tridiagonal form)
  real(kind=c_double), public :: time_evp_solve  !< time for solving the tridiagonal system
  real(kind=c_double), public :: time_evp_back   !< time for back transformations of eigenvectors

  logical, public :: elpa_print_times = .false. !< Set elpa_print_times to .true. for explicit timing outputs


!> \brief get_elpa_row_col_comms: old, deprecated Fortran function to create the MPI communicators for ELPA; use "elpa_get_communicators" instead
!> \details
!> The interface and variable definitions are the same as in "elpa_get_communicators"
!> \param  mpi_comm_global   Global communicator for the calculations (in)
!>
!> \param  my_prow           Row coordinate of the calling process in the process grid (in)
!>
!> \param  my_pcol           Column coordinate of the calling process in the process grid (in)
!>
!> \param  mpi_comm_rows     Communicator for communicating within rows of processes (out)
!>
!> \param  mpi_comm_cols     Communicator for communicating within columns of processes (out)
!> \result mpierr            integer error value of mpi_comm_split function
  interface get_elpa_row_col_comms
    module procedure get_elpa_communicators
  end interface

  interface elpa_get_communicators
    module procedure get_elpa_communicators
  end interface
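
! A usage sketch (not part of the original documentation): it assumes an already
! initialized MPI environment. The grid factorization and the column-major
! assignment of (my_prow, my_pcol) below are illustrative choices; any consistent
! assignment of process coordinates (e.g. the one returned by blacs_gridinfo)
! works equally well.
!
!   use elpa1, only : elpa_get_communicators
!   use mpi
!   integer :: mpierr, myid, nprocs, np_rows, np_cols, my_prow, my_pcol
!   integer :: mpi_comm_rows, mpi_comm_cols
!
!   call mpi_comm_rank(mpi_comm_world, myid,   mpierr)
!   call mpi_comm_size(mpi_comm_world, nprocs, mpierr)
!
!   do np_cols = nint(sqrt(real(nprocs))), 2, -1    ! choose a near-square process grid
!     if (mod(nprocs, np_cols) == 0) exit
!   enddo
!   np_rows = nprocs / np_cols
!
!   my_prow = mod(myid, np_rows)                    ! column-major grid ordering
!   my_pcol = myid / np_rows
!
!   mpierr = elpa_get_communicators(mpi_comm_world, my_prow, my_pcol, &
!                                   mpi_comm_rows, mpi_comm_cols)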

!> \brief solve_evp_real: old, deprecated Fortran function to solve the real eigenvalue problem with the 1-stage solver; use "solve_evp_real_1stage" instead
!>
!> \details
!>  The interface and variable definitions are the same as in "elpa_solve_evp_real_1stage"
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in Scalapack.
!>                              The full matrix must be set (not only one half like in scalapack).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must be always dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success


  interface solve_evp_real
    module procedure solve_evp_real_1stage
  end interface
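
! A usage sketch (illustrative, not part of the original documentation): it assumes
! the row/column communicators created above, a block-cyclically distributed matrix
! a(lda,matrixCols) set up as in ScaLAPACK with both triangles filled, and
! caller-allocated arrays ev(na) and q(ldq,matrixCols).
!
!   logical :: success
!
!   success = solve_evp_real_1stage(na, nev, a, lda, ev, q, ldq, nblk, &
!                                   matrixCols, mpi_comm_rows, mpi_comm_cols)
!   if (.not. success) print *, 'solve_evp_real_1stage failed'
!
! On success every process holds all na eigenvalues in ev, the first nev
! eigenvectors are stored in the distributed matrix q, and a is destroyed.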

!> \brief solve_evp_complex: old, deprecated Fortran function to solve the complex eigenvalue problem with the 1-stage solver; use "solve_evp_complex_1stage" instead
!>
!> \details
!> The interface and variable definitions are the same as in "elpa_solve_evp_complex_1stage"
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in Scalapack.
!>                              The full matrix must be set (not only one half like in scalapack).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must be always dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success


  interface solve_evp_complex
    module procedure solve_evp_complex_1stage
  end interface
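
! A usage sketch (illustrative): the call mirrors the real case above, except that
! a and q are complex matrices (kind ck) and the input matrix a is assumed to be
! Hermitian; the eigenvalues in ev remain real.
!
!   logical :: success
!
!   success = solve_evp_complex_1stage(na, nev, a, lda, ev, q, ldq, nblk, &
!                                      matrixCols, mpi_comm_rows, mpi_comm_cols)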

contains

!-------------------------------------------------------------------------------

!> \brief Fortran function to create the MPI communicators for ELPA.
! All ELPA routines need MPI communicators for communicating within
! rows or columns of processes; these are set up here.
! mpi_comm_rows/mpi_comm_cols can be freed with MPI_Comm_free when they are no longer needed.
!
!  Parameters
!
!> \param  mpi_comm_global   Global communicator for the calculations (in)
!>
!> \param  my_prow           Row coordinate of the calling process in the process grid (in)
!>
!> \param  my_pcol           Column coordinate of the calling process in the process grid (in)
!>
!> \param  mpi_comm_rows     Communicator for communicating within rows of processes (out)
!>
!> \param  mpi_comm_cols     Communicator for communicating within columns of processes (out)
!> \result mpierr            integer error value of mpi_comm_split function


function get_elpa_communicators(mpi_comm_global, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols) result(mpierr)
   use precision
   use elpa_mpi
   implicit none

   integer(kind=ik), intent(in)  :: mpi_comm_global, my_prow, my_pcol
   integer(kind=ik), intent(out) :: mpi_comm_rows, mpi_comm_cols

   integer(kind=ik)              :: mpierr

   ! mpi_comm_rows is used for communicating WITHIN rows, i.e. all processes
   ! having the same column coordinate share one mpi_comm_rows.
   ! So the "color" for splitting is my_pcol and the "key" is my row coordinate.
   ! Analogous for mpi_comm_cols

   call mpi_comm_split(mpi_comm_global,my_pcol,my_prow,mpi_comm_rows,mpierr)
   call mpi_comm_split(mpi_comm_global,my_prow,my_pcol,mpi_comm_cols,mpierr)

end function get_elpa_communicators


!> \brief solve_evp_real_1stage: Fortran function to solve the real eigenvalue problem with 1-stage solver
!>
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in Scalapack.
!>                              The full matrix must be set (not only one half like in scalapack).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must be always dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success


function solve_evp_real_1stage(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success)
   use precision
#ifdef HAVE_DETAILED_TIMINGS
   use timings
#endif
   use elpa_mpi
   use elpa1_compute
   implicit none

   integer(kind=ik), intent(in)  :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
   real(kind=rk)                 :: ev(na)
#ifdef USE_ASSUMED_SIZE
   real(kind=rk)                 :: a(lda,*), q(ldq,*)
#else
   real(kind=rk)                 :: a(lda,matrixCols), q(ldq,matrixCols)
#endif

   integer(kind=ik)              :: my_prow, my_pcol, mpierr
   real(kind=rk), allocatable    :: e(:), tau(:)
   real(kind=rk)                 :: ttt0, ttt1
   logical                       :: success
   logical, save                 :: firstCall = .true.
   logical                       :: wantDebug

#ifdef HAVE_DETAILED_TIMINGS
   call timer%start("solve_evp_real_1stage")
#endif

   call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
   call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)

   success = .true.

   wantDebug = .false.
   if (firstCall) then
     ! are debug messages desired?
     wantDebug = debug_messages_via_environment_variable()
     firstCall = .false.
   endif

   allocate(e(na), tau(na))

   ttt0 = MPI_Wtime()
   ! Step 1: reduce a to tridiagonal form (ev receives the diagonal, e the off-diagonal, tau the Householder factors)
   call tridiag_real(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, ev, e, tau)

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time tridiag_real :',ttt1-ttt0
   time_evp_fwd = ttt1-ttt0

   ttt0 = MPI_Wtime()
   ! Step 2: solve the tridiagonal eigenproblem with the divide-and-conquer method
   call solve_tridi(na, nev, ev, e, q, ldq, nblk, matrixCols, mpi_comm_rows, &
                    mpi_comm_cols, wantDebug, success)
   if (.not.(success)) return

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time solve_tridi  :',ttt1-ttt0
   time_evp_solve = ttt1-ttt0

   ttt0 = MPI_Wtime()
   ! Step 3: back-transform the eigenvectors of the tridiagonal matrix to those of a
   call trans_ev_real(na, nev, a, lda, tau, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time trans_ev_real:',ttt1-ttt0
   time_evp_back = ttt1-ttt0

   deallocate(e, tau)

#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("solve_evp_real_1stage")
#endif

end function solve_evp_real_1stage


!> \brief solve_evp_complex_1stage: Fortran function to solve the complex eigenvalue problem with 1-stage solver
!>
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in Scalapack.
!>                              The full matrix must be set (not only one half like in scalapack).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must be always dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success

function solve_evp_complex_1stage(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success)
#ifdef HAVE_DETAILED_TIMINGS
   use timings
#endif
   use precision
   use elpa_mpi
   use elpa1_compute
   implicit none

   integer(kind=ik), intent(in)     :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
#ifdef USE_ASSUMED_SIZE
   complex(kind=ck)                 :: a(lda,*), q(ldq,*)
#else
   complex(kind=ck)                 :: a(lda,matrixCols), q(ldq,matrixCols)
#endif
   real(kind=rk)                    :: ev(na)

   integer(kind=ik)                 :: my_prow, my_pcol, np_rows, np_cols, mpierr
   integer(kind=ik)                 :: l_rows, l_cols, l_cols_nev
   real(kind=rk), allocatable       :: q_real(:,:), e(:)
   complex(kind=ck), allocatable    :: tau(:)
   real(kind=rk)                    :: ttt0, ttt1

   logical                          :: success
   logical, save                    :: firstCall = .true.
   logical                          :: wantDebug

#ifdef HAVE_DETAILED_TIMINGS
   call timer%start("solve_evp_complex_1stage")
#endif

   call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
   call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
   call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
   call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)

   success = .true.

   wantDebug = .false.
   if (firstCall) then
     ! are debug messages desired?
     wantDebug = debug_messages_via_environment_variable()
     firstCall = .false.
   endif


   l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
   l_cols = local_index(na, my_pcol, np_cols, nblk, -1) ! Local columns of q

   l_cols_nev = local_index(nev, my_pcol, np_cols, nblk, -1) ! Local columns corresponding to nev

   allocate(e(na), tau(na))
   allocate(q_real(l_rows,l_cols))

   ttt0 = MPI_Wtime()
   ! Step 1: reduce the Hermitian matrix a to real tridiagonal form
   call tridiag_complex(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, ev, e, tau)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time tridiag_complex :',ttt1-ttt0
   time_evp_fwd = ttt1-ttt0

   ttt0 = MPI_Wtime()
   ! Step 2: solve the real tridiagonal eigenproblem; the eigenvectors are computed in q_real
   call solve_tridi(na, nev, ev, e, q_real, l_rows, nblk, matrixCols, mpi_comm_rows, &
                    mpi_comm_cols, wantDebug, success)
   if (.not.(success)) return

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time solve_tridi     :',ttt1-ttt0
   time_evp_solve = ttt1-ttt0

   ttt0 = MPI_Wtime()
   ! copy the real eigenvectors into the complex array q before the back transformation
   q(1:l_rows,1:l_cols_nev) = q_real(1:l_rows,1:l_cols_nev)

   ! Step 3: back-transform to the eigenvectors of the original complex matrix a
   call trans_ev_complex(na, nev, a, lda, tau, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time trans_ev_complex:',ttt1-ttt0
   time_evp_back = ttt1-ttt0

   deallocate(q_real)
   deallocate(e, tau)
#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("solve_evp_complex_1stage")
#endif

end function solve_evp_complex_1stage

end module ELPA1