!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of version 3 of the GNU Lesser General
!    Public License as published by the Free Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".

!> \mainpage
!> Eigenvalue SoLvers for Petaflop-Applications (ELPA)
!> \par
!> http://elpa.mpcdf.mpg.de
!>
!> \par
!>    The ELPA library was originally created by the ELPA consortium,
!>    consisting of the following organizations:
!>
!>    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!>      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!>    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!>      Informatik,
!>    - Technische Universität München, Lehrstuhl für Informatik mit
!>      Schwerpunkt Wissenschaftliches Rechnen,
!>    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!>    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!>      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!>      and
!>    - IBM Deutschland GmbH
!>
!>   Some parts and enhancements of ELPA have been contributed and authored
!>   by the Intel Corporation which is not part of the ELPA consortium.
!>
!>   Contributions to the ELPA source have been authored by (in alphabetical order):
!>
!> \author T. Auckenthaler, Volker Blum, A. Heinecke, L. Huedepohl, R. Johanni, Werner Jürgens, and A. Marek


#include "config-f90.h"

!> \brief Fortran module which provides the routines to use the one-stage ELPA solver
module elpa1_impl
  use, intrinsic :: iso_c_binding
  use elpa_utilities
  use elpa1_auxiliary_impl
#ifdef HAVE_LIKWID
  use likwid
#endif

  implicit none

  ! The following routines are public:
  private

  public :: elpa_solve_evp_real_1stage_double_impl    !< Driver routine for real double-precision 1-stage eigenvalue problem

#ifdef WANT_SINGLE_PRECISION_REAL
  public :: elpa_solve_evp_real_1stage_single_impl    !< Driver routine for real single-precision 1-stage eigenvalue problem

#endif
  public :: elpa_solve_evp_complex_1stage_double_impl !< Driver routine for complex double-precision 1-stage eigenvalue problem

#ifdef WANT_SINGLE_PRECISION_COMPLEX
  public :: elpa_solve_evp_complex_1stage_single_impl !< Driver routine for complex single-precision 1-stage eigenvalue problem
#endif

#ifdef HAVE_SKEWSYMMETRIC
  public :: elpa_solve_skew_evp_real_1stage_double_impl    !< Driver routine for real double-precision 1-stage skew-symmetric eigenvalue problem

#ifdef WANT_SINGLE_PRECISION_REAL
  public :: elpa_solve_skew_evp_real_1stage_single_impl    !< Driver routine for real single-precision 1-stage skew-symmetric eigenvalue problem

#endif
#endif /* HAVE_SKEWSYMMETRIC */


  ! imported from elpa1_auxiliary

  public :: elpa_mult_at_b_real_double_impl       !< Multiply double-precision real matrices A**T * B

  public :: elpa_mult_ah_b_complex_double_impl    !< Multiply double-precision complex matrices A**H * B

  public :: elpa_invert_trm_real_double_impl      !< Invert double-precision real triangular matrix

  public :: elpa_invert_trm_complex_double_impl   !< Invert double-precision complex triangular matrix

  public :: elpa_cholesky_real_double_impl        !< Cholesky factorization of a double-precision real matrix

  public :: elpa_cholesky_complex_double_impl     !< Cholesky factorization of a double-precision complex matrix

  public :: elpa_solve_tridi_double_impl          !< Solve a double-precision tridiagonal eigensystem with divide and conquer method

#ifdef WANT_SINGLE_PRECISION_REAL
  public :: elpa_mult_at_b_real_single_impl       !< Multiply single-precision real matrices A**T * B
  public :: elpa_invert_trm_real_single_impl      !< Invert single-precision real triangular matrix
  public :: elpa_cholesky_real_single_impl        !< Cholesky factorization of a single-precision real matrix
  public :: elpa_solve_tridi_single_impl          !< Solve a single-precision tridiagonal eigensystem with divide and conquer method
#endif

#ifdef WANT_SINGLE_PRECISION_COMPLEX
  public :: elpa_mult_ah_b_complex_single_impl    !< Multiply single-precision complex matrices A**H * B
  public :: elpa_invert_trm_complex_single_impl   !< Invert single-precision complex triangular matrix
  public :: elpa_cholesky_complex_single_impl     !< Cholesky factorization of a single-precision complex matrix
#endif
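
! A minimal usage sketch for the auxiliary routines above (illustration only,
! not part of this module). The argument lists are an assumption based on the
! driver documentation below (an elpa_t handle plus the distributed matrix,
! returning a logical success flag); obj and a are hypothetical variables, and
! the authoritative interfaces live in elpa1_auxiliary_impl.
!
!   logical :: ok
!   ! Cholesky factorization of the distributed matrix a
!   ! (a is overwritten with the triangular factor)
!   ok = elpa_cholesky_real_double_impl(obj, a)
!   ! invert the triangular factor in place
!   if (ok) ok = elpa_invert_trm_real_double_impl(obj, a)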

contains


!> \brief elpa_solve_evp_real_1stage_double_impl: Fortran function to solve the real double-precision eigenvalue problem with the 1-stage solver
!>
!> \details
!> \param  obj                      elpa_t object contains:
!> \param     - obj%na              Order of matrix
!> \param     - obj%nev             number of eigenvalues/vectors to be computed
!>                                  The smallest nev eigenvalues/eigenvectors are calculated.
!> \param     - obj%local_nrows     Leading dimension of a
!> \param     - obj%local_ncols     local columns of matrix q
!> \param     - obj%nblk            blocksize of cyclic distribution, must be the same in both directions!
!> \param     - obj%mpi_comm_rows   MPI communicator for rows
!> \param     - obj%mpi_comm_cols   MPI communicator for columns
!> \param     - obj%mpi_comm_parent Global (parent) MPI communicator
!> \param     - obj%gpu             use GPU version (1 or 0)
!>
!> \param  a(lda,matrixCols)        Distributed matrix for which eigenvalues are to be computed.
!>                                  Distribution is like in ScaLAPACK.
!>                                  The full matrix must be set (not only one half like in ScaLAPACK).
!>                                  Destroyed on exit (upper and lower half).
!>
!>  \param ev(na)                   On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)        On output: Eigenvectors of a
!>                                  Distribution is like in ScaLAPACK.
!>                                  Must always be dimensioned to the full size (corresponding to (na,na))
!>                                  even if only a part of the eigenvalues is needed.
!>
!>  \result                         success
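!>
!> \par Example
!> A minimal call sketch (illustration only; the actual routine body is
!> generated from elpa1_template.F90 below). The argument list follows the
!> parameters documented above, and obj, a, ev and q are assumed to be
!> allocated and distributed by the caller:
!> \code
!>   logical :: success
!>   success = elpa_solve_evp_real_1stage_double_impl(obj, a, ev, q)
!>   if (.not. success) then
!>      ! react on the failure of the solver
!>   endif
!> \endcode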
#define REALCASE 1
#define DOUBLE_PRECISION 1
#undef ACTIVATE_SKEW
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef REALCASE
#undef DOUBLE_PRECISION

#ifdef WANT_SINGLE_PRECISION_REAL
!> \brief elpa_solve_evp_real_1stage_single_impl: Fortran function to solve the real single-precision eigenvalue problem with the 1-stage solver
!>
!> \details
!> \param  obj                      elpa_t object contains:
!> \param     - obj%na              Order of matrix
!> \param     - obj%nev             number of eigenvalues/vectors to be computed
!>                                  The smallest nev eigenvalues/eigenvectors are calculated.
!> \param     - obj%local_nrows     Leading dimension of a
!> \param     - obj%local_ncols     local columns of matrix q
!> \param     - obj%nblk            blocksize of cyclic distribution, must be the same in both directions!
!> \param     - obj%mpi_comm_rows   MPI communicator for rows
!> \param     - obj%mpi_comm_cols   MPI communicator for columns
!> \param     - obj%mpi_comm_parent Global (parent) MPI communicator
!> \param     - obj%gpu             use GPU version (1 or 0)
!>
!> \param  a(lda,matrixCols)        Distributed matrix for which eigenvalues are to be computed.
!>                                  Distribution is like in ScaLAPACK.
!>                                  The full matrix must be set (not only one half like in ScaLAPACK).
!>                                  Destroyed on exit (upper and lower half).
!>
!>  \param ev(na)                   On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)        On output: Eigenvectors of a
!>                                  Distribution is like in ScaLAPACK.
!>                                  Must always be dimensioned to the full size (corresponding to (na,na))
!>                                  even if only a part of the eigenvalues is needed.
!>
!>  \result                         success

#define REALCASE 1
#define SINGLE_PRECISION 1
#undef ACTIVATE_SKEW
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef REALCASE
#undef SINGLE_PRECISION
#endif /* WANT_SINGLE_PRECISION_REAL */

!> \brief elpa_solve_evp_complex_1stage_double_impl: Fortran function to solve the complex double-precision eigenvalue problem with the 1-stage solver
!>
!> \details
!> \param  obj                      elpa_t object contains:
!> \param     - obj%na              Order of matrix
!> \param     - obj%nev             number of eigenvalues/vectors to be computed
!>                                  The smallest nev eigenvalues/eigenvectors are calculated.
!> \param     - obj%local_nrows     Leading dimension of a
!> \param     - obj%local_ncols     local columns of matrix q
!> \param     - obj%nblk            blocksize of cyclic distribution, must be the same in both directions!
!> \param     - obj%mpi_comm_rows   MPI communicator for rows
!> \param     - obj%mpi_comm_cols   MPI communicator for columns
!> \param     - obj%mpi_comm_parent Global (parent) MPI communicator
!> \param     - obj%gpu             use GPU version (1 or 0)
!>
!> \param  a(lda,matrixCols)        Distributed matrix for which eigenvalues are to be computed.
!>                                  Distribution is like in ScaLAPACK.
!>                                  The full matrix must be set (not only one half like in ScaLAPACK).
!>                                  Destroyed on exit (upper and lower half).
!>
!>  \param ev(na)                   On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)        On output: Eigenvectors of a
!>                                  Distribution is like in ScaLAPACK.
!>                                  Must always be dimensioned to the full size (corresponding to (na,na))
!>                                  even if only a part of the eigenvalues is needed.
!>
!>  \result                         success
#define COMPLEXCASE 1
#define DOUBLE_PRECISION 1
#undef ACTIVATE_SKEW
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef DOUBLE_PRECISION
#undef COMPLEXCASE


#ifdef WANT_SINGLE_PRECISION_COMPLEX

!> \brief elpa_solve_evp_complex_1stage_single_impl: Fortran function to solve the complex single-precision eigenvalue problem with the 1-stage solver
!>
!> \details
!> \param  obj                      elpa_t object contains:
!> \param     - obj%na              Order of matrix
!> \param     - obj%nev             number of eigenvalues/vectors to be computed
!>                                  The smallest nev eigenvalues/eigenvectors are calculated.
!> \param     - obj%local_nrows     Leading dimension of a
!> \param     - obj%local_ncols     local columns of matrix q
!> \param     - obj%nblk            blocksize of cyclic distribution, must be the same in both directions!
!> \param     - obj%mpi_comm_rows   MPI communicator for rows
!> \param     - obj%mpi_comm_cols   MPI communicator for columns
!> \param     - obj%mpi_comm_parent Global (parent) MPI communicator
!> \param     - obj%gpu             use GPU version (1 or 0)
!>
!> \param  a(lda,matrixCols)        Distributed matrix for which eigenvalues are to be computed.
!>                                  Distribution is like in ScaLAPACK.
!>                                  The full matrix must be set (not only one half like in ScaLAPACK).
!>                                  Destroyed on exit (upper and lower half).
!>
!>  \param ev(na)                   On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)        On output: Eigenvectors of a
!>                                  Distribution is like in ScaLAPACK.
!>                                  Must always be dimensioned to the full size (corresponding to (na,na))
!>                                  even if only a part of the eigenvalues is needed.
!>
!>  \result                         success

#define COMPLEXCASE 1
#define SINGLE_PRECISION 1
#undef ACTIVATE_SKEW
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef COMPLEXCASE
#undef SINGLE_PRECISION
#endif /* WANT_SINGLE_PRECISION_COMPLEX */


#ifdef HAVE_SKEWSYMMETRIC
!> \brief elpa_solve_skew_evp_real_1stage_double_impl: Fortran function to solve the real double-precision skew-symmetric eigenvalue problem with the 1-stage solver
!>
!> \details
!> \param  obj                      elpa_t object contains:
!> \param     - obj%na              Order of matrix
!> \param     - obj%nev             number of eigenvalues/vectors to be computed
!>                                  The smallest nev eigenvalues/eigenvectors are calculated.
!> \param     - obj%local_nrows     Leading dimension of a
!> \param     - obj%local_ncols     local columns of matrix q
!> \param     - obj%nblk            blocksize of cyclic distribution, must be the same in both directions!
!> \param     - obj%mpi_comm_rows   MPI communicator for rows
!> \param     - obj%mpi_comm_cols   MPI communicator for columns
!> \param     - obj%mpi_comm_parent Global (parent) MPI communicator
!> \param     - obj%gpu             use GPU version (1 or 0)
!>
!> \param  a(lda,matrixCols)        Distributed matrix for which eigenvalues are to be computed.
!>                                  Distribution is like in ScaLAPACK.
!>                                  The full matrix must be set (not only one half like in ScaLAPACK).
!>                                  Destroyed on exit (upper and lower half).
!>
!>  \param ev(na)                   On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)        On output: Eigenvectors of a
!>                                  Distribution is like in ScaLAPACK.
!>                                  Must always be dimensioned to the full size (corresponding to (na,na))
!>                                  even if only a part of the eigenvalues is needed.
!>
!>  \result                         success
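!>
!> \par Example
!> Analogous to the symmetric driver above; a hypothetical call sketch with
!> the same assumed argument list:
!> \code
!>   success = elpa_solve_skew_evp_real_1stage_double_impl(obj, a, ev, q)
!> \endcode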
#define REALCASE 1
#define DOUBLE_PRECISION 1
#define ACTIVATE_SKEW
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef ACTIVATE_SKEW
#undef REALCASE
#undef DOUBLE_PRECISION

#ifdef WANT_SINGLE_PRECISION_REAL
!> \brief elpa_solve_skew_evp_real_1stage_single_impl: Fortran function to solve the real single-precision skew-symmetric eigenvalue problem with the 1-stage solver
!>
!> \details
!> \param  obj                      elpa_t object contains:
!> \param     - obj%na              Order of matrix
!> \param     - obj%nev             number of eigenvalues/vectors to be computed
!>                                  The smallest nev eigenvalues/eigenvectors are calculated.
!> \param     - obj%local_nrows     Leading dimension of a
!> \param     - obj%local_ncols     local columns of matrix q
!> \param     - obj%nblk            blocksize of cyclic distribution, must be the same in both directions!
!> \param     - obj%mpi_comm_rows   MPI communicator for rows
!> \param     - obj%mpi_comm_cols   MPI communicator for columns
!> \param     - obj%mpi_comm_parent Global (parent) MPI communicator
!> \param     - obj%gpu             use GPU version (1 or 0)
!>
!> \param  a(lda,matrixCols)        Distributed matrix for which eigenvalues are to be computed.
!>                                  Distribution is like in ScaLAPACK.
!>                                  The full matrix must be set (not only one half like in ScaLAPACK).
!>                                  Destroyed on exit (upper and lower half).
!>
!>  \param ev(na)                   On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)        On output: Eigenvectors of a
!>                                  Distribution is like in ScaLAPACK.
!>                                  Must always be dimensioned to the full size (corresponding to (na,na))
!>                                  even if only a part of the eigenvalues is needed.
!>
!>  \result                         success

#define REALCASE 1
#define SINGLE_PRECISION 1
#define ACTIVATE_SKEW
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef REALCASE
#undef ACTIVATE_SKEW
#undef SINGLE_PRECISION
#endif /* WANT_SINGLE_PRECISION_REAL */

#endif /* HAVE_SKEWSYMMETRIC */

end module elpa1_impl