!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.rzg.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".

!> \mainpage
!> Eigenvalue SoLvers for Petaflop-Applications (ELPA)
!> \par
!> http://elpa.rzg.mpg.de
!>
!> \par
!>    The ELPA library was originally created by the ELPA consortium,
!>    consisting of the following organizations:
!>
!>    - Max Planck Computing and Data Facility (MPCDF) formerly known as
!>      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!>    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!>      Informatik,
!>    - Technische Universität München, Lehrstuhl für Informatik mit
!>      Schwerpunkt Wissenschaftliches Rechnen,
!>    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!>    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!>      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!>      and
!>    - IBM Deutschland GmbH
!>
!>   Some parts and enhancements of ELPA have been contributed and authored
!>   by the Intel Corporation which is not part of the ELPA consortium.
!>
!>   Contributions to the ELPA source have been authored by (in alphabetical order):
!>
!> \author T. Auckenthaler, Volker Blum, A. Heinecke, L. Huedepohl, R. Johanni, Werner Jürgens, and A. Marek

#include "config-f90.h"
!> \brief Fortran module which provides the routines to use the one-stage ELPA solver
module ELPA1

  use elpa_utilities
  use elpa1_compute

#ifdef HAVE_DETAILED_TIMINGS
  use timings
#endif
  implicit none

  PRIVATE ! By default, all routines contained are private

  ! The following routines are public:

  public :: get_elpa_row_col_comms     !< Sets MPI row/col communicators

  public :: solve_evp_real             !< Driver routine for real eigenvalue problem
  public :: solve_evp_complex          !< Driver routine for complex eigenvalue problem

  ! Timing results, set by every call to solve_evp_xxx

  real*8, public :: time_evp_fwd    !< time for forward transformations (to tridiagonal form)
  real*8, public :: time_evp_solve  !< time for solving the tridiagonal system
  real*8, public :: time_evp_back   !< time for back transformations of eigenvectors

  logical, public :: elpa_print_times = .false. !< Set elpa_print_times to .true. for explicit timing outputs

  include 'mpif.h'

contains

!-------------------------------------------------------------------------------

!> \brief Fortran function to create the MPI communicators for ELPA
! get_elpa_row_col_comms:
! All ELPA routines need MPI communicators for communicating within
! rows or columns of processes; these are set here.
! mpi_comm_rows/mpi_comm_cols can be freed with MPI_Comm_free when no longer needed.
!
!  Parameters
!
!> \param  mpi_comm_global   Global communicator for the calculations (in)
!>
!> \param  my_prow           Row coordinate of the calling process in the process grid (in)
!>
!> \param  my_pcol           Column coordinate of the calling process in the process grid (in)
!>
!> \param  mpi_comm_rows     Communicator for communicating within rows of processes (out)
!>
!> \param  mpi_comm_cols     Communicator for communicating within columns of processes (out)
!> \result mpierr            integer error value of mpi_comm_split function


function get_elpa_row_col_comms(mpi_comm_global, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols) result(mpierr)

   implicit none

   integer, intent(in)  :: mpi_comm_global, my_prow, my_pcol
   integer, intent(out) :: mpi_comm_rows, mpi_comm_cols

   integer :: mpierr

   ! mpi_comm_rows is used for communicating WITHIN rows, i.e. all processes
   ! having the same column coordinate share one mpi_comm_rows.
   ! So the "color" for splitting is my_pcol and the "key" is my row coordinate.
   ! Analogously for mpi_comm_cols.

   call mpi_comm_split(mpi_comm_global,my_pcol,my_prow,mpi_comm_rows,mpierr)
   call mpi_comm_split(mpi_comm_global,my_prow,my_pcol,mpi_comm_cols,mpierr)

end function get_elpa_row_col_comms
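
!-------------------------------------------------------------------------------
!> \par Example
!> A minimal usage sketch (illustrative, not part of this file): it assumes
!> my_prow/my_pcol are the coordinates of the calling process in a process
!> grid set up beforehand, e.g. via BLACS.
!>
!> \code
!>   integer :: my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols, mpierr
!>   ! obtain my_prow/my_pcol from the process grid, then split:
!>   mpierr = get_elpa_row_col_comms(MPI_COMM_WORLD, my_prow, my_pcol, &
!>                                   mpi_comm_rows, mpi_comm_cols)
!> \endcode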


!> \brief solve_evp_real: Fortran function to solve the real eigenvalue problem with 1-stage solver
!>
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in ScaLAPACK.
!>                              The full matrix must be set (not only one half like in ScaLAPACK).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must always be dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success


function solve_evp_real(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success)

#ifdef HAVE_DETAILED_TIMINGS
  use timings
#endif
   implicit none

   integer, intent(in)  :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
   real*8               :: a(lda,matrixCols), ev(na), q(ldq,matrixCols)

   integer              :: my_prow, my_pcol, mpierr
   real*8, allocatable  :: e(:), tau(:)
   real*8               :: ttt0, ttt1
   logical              :: success
   logical, save        :: firstCall = .true.
   logical              :: wantDebug

#ifdef HAVE_DETAILED_TIMINGS
   call timer%start("solve_evp_real")
#endif

   call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
   call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)

   success = .true.

   wantDebug = .false.
   if (firstCall) then
     ! are debug messages desired?
     wantDebug = debug_messages_via_environment_variable()
     firstCall = .false.
   endif

   allocate(e(na), tau(na))

   ttt0 = MPI_Wtime()
   call tridiag_real(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, ev, e, tau)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time tridiag_real :',ttt1-ttt0
   time_evp_fwd = ttt1-ttt0

   ttt0 = MPI_Wtime()
   call solve_tridi(na, nev, ev, e, q, ldq, nblk, matrixCols, mpi_comm_rows, &
                    mpi_comm_cols, wantDebug, success)
   if (.not.(success)) return

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time solve_tridi  :',ttt1-ttt0
   time_evp_solve = ttt1-ttt0

   ttt0 = MPI_Wtime()
   call trans_ev_real(na, nev, a, lda, tau, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time trans_ev_real:',ttt1-ttt0
   time_evp_back = ttt1-ttt0

   deallocate(e, tau)

#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("solve_evp_real")
#endif

end function solve_evp_real
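
!-------------------------------------------------------------------------------
!> \par Example
!> A minimal calling sketch (a hypothetical setup, not part of this file):
!> a is a block-cyclically distributed matrix as in ScaLAPACK, and
!> mpi_comm_rows/mpi_comm_cols were created with get_elpa_row_col_comms.
!>
!> \code
!>   real*8  :: a(lda,matrixCols), ev(na), q(ldq,matrixCols)
!>   logical :: success
!>   success = solve_evp_real(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, &
!>                            mpi_comm_rows, mpi_comm_cols)
!>   if (.not. success) then
!>     ! the tridiagonal solver failed; handle the error
!>   endif
!> \endcode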


!> \brief solve_evp_complex: Fortran function to solve the complex eigenvalue problem with 1-stage solver
!>
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in ScaLAPACK.
!>                              The full matrix must be set (not only one half like in ScaLAPACK).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must always be dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success

function solve_evp_complex(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success)
#ifdef HAVE_DETAILED_TIMINGS
  use timings
#endif

   implicit none

   integer, intent(in)     :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
   complex*16              :: a(lda,matrixCols), q(ldq,matrixCols)
   real*8                  :: ev(na)

   integer                 :: my_prow, my_pcol, np_rows, np_cols, mpierr
   integer                 :: l_rows, l_cols, l_cols_nev
   real*8, allocatable     :: q_real(:,:), e(:)
   complex*16, allocatable :: tau(:)
   real*8                  :: ttt0, ttt1

   logical                 :: success
   logical, save           :: firstCall = .true.
   logical                 :: wantDebug

#ifdef HAVE_DETAILED_TIMINGS
   call timer%start("solve_evp_complex")
#endif

   call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
   call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
   call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
   call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)

   success = .true.

   wantDebug = .false.
   if (firstCall) then
     ! are debug messages desired?
     wantDebug = debug_messages_via_environment_variable()
     firstCall = .false.
   endif


   l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
   l_cols = local_index(na, my_pcol, np_cols, nblk, -1) ! Local columns of q

   l_cols_nev = local_index(nev, my_pcol, np_cols, nblk, -1) ! Local columns corresponding to nev

   allocate(e(na), tau(na))
   allocate(q_real(l_rows,l_cols))

   ttt0 = MPI_Wtime()
   call tridiag_complex(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, ev, e, tau)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time tridiag_complex :',ttt1-ttt0
   time_evp_fwd = ttt1-ttt0

   ttt0 = MPI_Wtime()
   call solve_tridi(na, nev, ev, e, q_real, l_rows, nblk, matrixCols, mpi_comm_rows, &
                    mpi_comm_cols, wantDebug, success)
   if (.not.(success)) return

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time solve_tridi     :',ttt1-ttt0
   time_evp_solve = ttt1-ttt0

   ttt0 = MPI_Wtime()
   q(1:l_rows,1:l_cols_nev) = q_real(1:l_rows,1:l_cols_nev)

   call trans_ev_complex(na, nev, a, lda, tau, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time trans_ev_complex:',ttt1-ttt0
   time_evp_back = ttt1-ttt0

   deallocate(q_real)
   deallocate(e, tau)
#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("solve_evp_complex")
#endif

end function solve_evp_complex
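
!-------------------------------------------------------------------------------
!> \par Example
!> The complex driver is called analogously to solve_evp_real; a sketch with
!> illustrative names (note that the eigenvalues ev are real even for the
!> complex problem):
!>
!> \code
!>   complex*16 :: a(lda,matrixCols), q(ldq,matrixCols)
!>   real*8     :: ev(na)
!>   logical    :: success
!>   success = solve_evp_complex(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, &
!>                               mpi_comm_rows, mpi_comm_cols)
!> \endcode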



end module ELPA1