!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".

!> \mainpage
!> Eigenvalue SoLvers for Petaflop-Applications (ELPA)
!> \par
!> http://elpa.mpcdf.mpg.de
!>
!> \par
!>    The ELPA library was originally created by the ELPA consortium,
!>    consisting of the following organizations:
!>
!>    - Max Planck Computing and Data Facility (MPCDF) formerly known as
!>      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!>    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!>      Informatik,
!>    - Technische Universität München, Lehrstuhl für Informatik mit
!>      Schwerpunkt Wissenschaftliches Rechnen,
!>    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!>    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!>      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!>      and
!>    - IBM Deutschland GmbH
!>
!>   Some parts and enhancements of ELPA have been contributed and authored
!>   by the Intel Corporation which is not part of the ELPA consortium.
!>
!>   Contributions to the ELPA source have been authored by (in alphabetical order):
!>
!> \author T. Auckenthaler, Volker Blum, A. Heinecke, L. Huedepohl, R. Johanni, Werner Jürgens, and A. Marek


#include "config-f90.h"
!> \brief Fortran module which provides the routines to use the one-stage ELPA solver
module ELPA1
  use, intrinsic :: iso_c_binding, only : c_double
  use elpa_utilities
  use elpa1_auxiliary

  implicit none

  ! The following routines are public:
  private

  public :: get_elpa_row_col_comms     !< old, deprecated interface: Sets MPI row/col communicators
  public :: get_elpa_communicators     !< Sets MPI row/col communicators

  public :: solve_evp_real             !< old, deprecated interface: Driver routine for real eigenvalue problem
  public :: solve_evp_real_1stage      !< Driver routine for real eigenvalue problem
  public :: solve_evp_complex          !< old, deprecated interface:  Driver routine for complex eigenvalue problem
  public :: solve_evp_complex_1stage   !< Driver routine for complex eigenvalue problem

  ! imported from elpa1_auxiliary

  public :: elpa_mult_at_b_real        !< Multiply real matrices A**T * B
  public :: mult_at_b_real             !< old, deprecated interface to multiply real matrices A**T * B

  public :: elpa_mult_ah_b_complex     !< Multiply complex matrices A**H * B
  public :: mult_ah_b_complex          !< old, deprecated interface to multiply complex matrices A**H * B

  public :: elpa_invert_trm_real       !< Invert real triangular matrix
  public :: invert_trm_real            !< old, deprecated interface to invert real triangular matrix

  public :: elpa_invert_trm_complex    !< Invert complex triangular matrix
  public :: invert_trm_complex         !< old, deprecated interface to invert complex triangular matrix

  public :: elpa_cholesky_real         !< Cholesky factorization of a real matrix
  public :: cholesky_real              !< old, deprecated interface to do Cholesky factorization of a real matrix

  public :: elpa_cholesky_complex      !< Cholesky factorization of a complex matrix
  public :: cholesky_complex           !< old, deprecated interface to do Cholesky factorization of a complex matrix

  public :: elpa_solve_tridi           !< Solve tridiagonal eigensystem with divide and conquer method


  ! Timing results, set by every call to solve_evp_xxx

  real(kind=c_double), public :: time_evp_fwd    !< time for forward transformations (to tridiagonal form)
  real(kind=c_double), public :: time_evp_solve  !< time for solving the tridiagonal system
  real(kind=c_double), public :: time_evp_back   !< time for back transformations of eigenvectors

  logical, public :: elpa_print_times = .false. !< Set elpa_print_times to .true. for explicit timing outputs
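
  ! Illustrative sketch (not part of the ELPA interface): enable the per-step
  ! timing output and read the accumulated timings after a solve. The solver
  ! arguments are the ones documented for solve_evp_real_1stage below.
  !
  !   elpa_print_times = .true.
  !   success = solve_evp_real_1stage(na, nev, a, lda, ev, q, ldq, nblk, &
  !                                   matrixCols, mpi_comm_rows, mpi_comm_cols)
  !   print *, time_evp_fwd, time_evp_solve, time_evp_back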


!> \brief get_elpa_row_col_comms: old, deprecated Fortran function to create the MPI communicators for ELPA. Better use "get_elpa_communicators"
!> \details
!> The interface and variable definition is the same as in "get_elpa_communicators"
!> \param  mpi_comm_global   Global communicator for the calculations (in)
!>
!> \param  my_prow           Row coordinate of the calling process in the process grid (in)
!>
!> \param  my_pcol           Column coordinate of the calling process in the process grid (in)
!>
!> \param  mpi_comm_rows     Communicator for communicating within rows of processes (out)
!>
!> \param  mpi_comm_cols     Communicator for communicating within columns of processes (out)
!> \result mpierr            integer error value of mpi_comm_split function
  interface get_elpa_row_col_comms
    module procedure get_elpa_communicators
  end interface

!> \brief solve_evp_real: old, deprecated Fortran function to solve the real eigenvalue problem with 1-stage solver. Better use "solve_evp_real_1stage"
!>
!> \details
!>  The interface and variable definition is the same as in "solve_evp_real_1stage"
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in Scalapack.
!>                              The full matrix must be set (not only one half like in scalapack).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must be always dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success


  interface solve_evp_real
    module procedure solve_evp_real_1stage
  end interface

!> \brief solve_evp_complex: old, deprecated Fortran function to solve the complex eigenvalue problem with 1-stage solver. Better use "solve_evp_complex_1stage"
!>
!> \details
!> The interface and variable definition is the same as in "solve_evp_complex_1stage"
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in Scalapack.
!>                              The full matrix must be set (not only one half like in scalapack).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must be always dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success


  interface solve_evp_complex
    module procedure solve_evp_complex_1stage
  end interface

contains

!-------------------------------------------------------------------------------

!> \brief Fortran function to create the MPI communicators for ELPA.
! All ELPA routines need MPI communicators for communicating within
! rows or columns of processes; these are set here.
! mpi_comm_rows/mpi_comm_cols can be freed with MPI_Comm_free when no longer needed.
!
!  Parameters
!
!> \param  mpi_comm_global   Global communicator for the calculations (in)
!>
!> \param  my_prow           Row coordinate of the calling process in the process grid (in)
!>
!> \param  my_pcol           Column coordinate of the calling process in the process grid (in)
!>
!> \param  mpi_comm_rows     Communicator for communicating within rows of processes (out)
!>
!> \param  mpi_comm_cols     Communicator for communicating within columns of processes (out)
!> \result mpierr            integer error value of mpi_comm_split function
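!>
!> \par Usage sketch
!> A minimal, illustrative call sequence. The BLACS context my_blacs_ctxt and
!> the grid sizes np_rows/np_cols are assumptions of this sketch and not part
!> of the ELPA interface; any global MPI communicator may replace MPI_COMM_WORLD.
!> \code
!>   use elpa1
!>   use mpi
!>   integer :: my_blacs_ctxt, np_rows, np_cols, my_prow, my_pcol
!>   integer :: mpi_comm_rows, mpi_comm_cols, mpierr
!>
!>   ! obtain the 2D process grid coordinates from an already initialized BLACS grid
!>   call BLACS_Gridinfo(my_blacs_ctxt, np_rows, np_cols, my_prow, my_pcol)
!>
!>   ! split the global communicator into per-row and per-column communicators
!>   mpierr = get_elpa_communicators(MPI_COMM_WORLD, my_prow, my_pcol, &
!>                                   mpi_comm_rows, mpi_comm_cols)
!> \endcode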


function get_elpa_communicators(mpi_comm_global, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols) result(mpierr)
   use precision
   use elpa_mpi
   implicit none

   integer(kind=ik), intent(in)  :: mpi_comm_global, my_prow, my_pcol
   integer(kind=ik), intent(out) :: mpi_comm_rows, mpi_comm_cols

   integer(kind=ik)              :: mpierr

   ! mpi_comm_rows is used for communicating WITHIN rows, i.e. all processes
   ! having the same column coordinate share one mpi_comm_rows.
   ! So the "color" for splitting is my_pcol and the "key" is my row coordinate.
   ! Analogous for mpi_comm_cols

   call mpi_comm_split(mpi_comm_global,my_pcol,my_prow,mpi_comm_rows,mpierr)
   call mpi_comm_split(mpi_comm_global,my_prow,my_pcol,mpi_comm_cols,mpierr)

end function get_elpa_communicators


!> \brief solve_evp_real_1stage: Fortran function to solve the real eigenvalue problem with 1-stage solver
!>
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in Scalapack.
!>                              The full matrix must be set (not only one half like in scalapack).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must be always dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success
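!>
!> \par Usage sketch
!> An illustrative call on a matrix that is already block-cyclically
!> distributed; the setup of a, ev, q and of mpi_comm_rows/mpi_comm_cols
!> (via get_elpa_communicators) is assumed to have been done by the caller,
!> and a double-precision build is assumed for the real kind.
!> \code
!>   use elpa1
!>   use, intrinsic :: iso_c_binding, only : c_double
!>   real(kind=c_double) :: a(lda,matrixCols), q(ldq,matrixCols), ev(na)
!>   logical             :: success
!>
!>   ! compute the nev smallest eigenvalues and the corresponding eigenvectors
!>   success = solve_evp_real_1stage(na, nev, a, lda, ev, q, ldq, nblk, &
!>                                   matrixCols, mpi_comm_rows, mpi_comm_cols)
!>   if (.not. success) then
!>     ! handle the error in the calling code, e.g. abort
!>   endif
!> \endcode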


function solve_evp_real_1stage(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success)
   use precision
#ifdef HAVE_DETAILED_TIMINGS
   use timings
#endif
   use elpa_mpi
   use elpa1_compute
   implicit none

   integer(kind=ik), intent(in)  :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
   real(kind=rk)                 :: ev(na)
#ifdef USE_ASSUMED_SIZE
   real(kind=rk)                 :: a(lda,*), q(ldq,*)
#else
   real(kind=rk)                 :: a(lda,matrixCols), q(ldq,matrixCols)
#endif

   integer(kind=ik)              :: my_prow, my_pcol, mpierr
   real(kind=rk), allocatable    :: e(:), tau(:)
   real(kind=rk)                 :: ttt0, ttt1
   logical                       :: success
   logical, save                 :: firstCall = .true.
   logical                       :: wantDebug

#ifdef HAVE_DETAILED_TIMINGS
   call timer%start("solve_evp_real_1stage")
#endif

   call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
   call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)

   success = .true.

   wantDebug = .false.
   if (firstCall) then
     ! are debug messages desired?
     wantDebug = debug_messages_via_environment_variable()
     firstCall = .false.
   endif

   allocate(e(na), tau(na))

   ttt0 = MPI_Wtime()
   call tridiag_real(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, ev, e, tau)

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time tridiag_real :',ttt1-ttt0
   time_evp_fwd = ttt1-ttt0

   ttt0 = MPI_Wtime()
   call solve_tridi(na, nev, ev, e, q, ldq, nblk, matrixCols, mpi_comm_rows, &
                    mpi_comm_cols, wantDebug, success)
   if (.not.(success)) return

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time solve_tridi  :',ttt1-ttt0
   time_evp_solve = ttt1-ttt0

   ttt0 = MPI_Wtime()
   call trans_ev_real(na, nev, a, lda, tau, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time trans_ev_real:',ttt1-ttt0
   time_evp_back = ttt1-ttt0

   deallocate(e, tau)

#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("solve_evp_real_1stage")
#endif

end function solve_evp_real_1stage


!> \brief solve_evp_complex_1stage: Fortran function to solve the complex eigenvalue problem with 1-stage solver
!>
!  Parameters
!
!> \param  na                   Order of matrix a
!>
!> \param  nev                  Number of eigenvalues needed.
!>                              The smallest nev eigenvalues/eigenvectors are calculated.
!>
!> \param  a(lda,matrixCols)    Distributed matrix for which eigenvalues are to be computed.
!>                              Distribution is like in Scalapack.
!>                              The full matrix must be set (not only one half like in scalapack).
!>                              Destroyed on exit (upper and lower half).
!>
!>  \param lda                  Leading dimension of a
!>
!>  \param ev(na)               On output: eigenvalues of a, every processor gets the complete set
!>
!>  \param q(ldq,matrixCols)    On output: Eigenvectors of a
!>                              Distribution is like in Scalapack.
!>                              Must be always dimensioned to the full size (corresponding to (na,na))
!>                              even if only a part of the eigenvalues is needed.
!>
!>  \param ldq                  Leading dimension of q
!>
!>  \param nblk                 blocksize of cyclic distribution, must be the same in both directions!
!>
!>  \param matrixCols           distributed number of matrix columns
!>
!>  \param mpi_comm_rows        MPI-Communicator for rows
!>  \param mpi_comm_cols        MPI-Communicator for columns
!>
!>  \result                     success
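!>
!> \par Usage sketch
!> Analogous to solve_evp_real_1stage: a and q are double complex while the
!> eigenvalues ev stay real; the distribution setup is again assumed to have
!> been done by the caller (illustrative only).
!> \code
!>   use elpa1
!>   use, intrinsic :: iso_c_binding, only : c_double, c_double_complex
!>   complex(kind=c_double_complex) :: a(lda,matrixCols), q(ldq,matrixCols)
!>   real(kind=c_double)            :: ev(na)
!>   logical                        :: success
!>
!>   success = solve_evp_complex_1stage(na, nev, a, lda, ev, q, ldq, nblk, &
!>                                      matrixCols, mpi_comm_rows, mpi_comm_cols)
!> \endcode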

function solve_evp_complex_1stage(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success)
#ifdef HAVE_DETAILED_TIMINGS
   use timings
#endif
   use precision
   use elpa_mpi
   use elpa1_compute
   implicit none

   integer(kind=ik), intent(in)     :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
#ifdef USE_ASSUMED_SIZE
   complex(kind=ck)                 :: a(lda,*), q(ldq,*)
#else
   complex(kind=ck)                 :: a(lda,matrixCols), q(ldq,matrixCols)
#endif
   real(kind=rk)                    :: ev(na)

   integer(kind=ik)                 :: my_prow, my_pcol, np_rows, np_cols, mpierr
   integer(kind=ik)                 :: l_rows, l_cols, l_cols_nev
   real(kind=rk), allocatable       :: q_real(:,:), e(:)
   complex(kind=ck), allocatable    :: tau(:)
   real(kind=rk)                    :: ttt0, ttt1

   logical                          :: success
   logical, save                    :: firstCall = .true.
   logical                          :: wantDebug

#ifdef HAVE_DETAILED_TIMINGS
   call timer%start("solve_evp_complex_1stage")
#endif

   call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
   call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
   call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
   call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)

   success = .true.

   wantDebug = .false.
   if (firstCall) then
     ! are debug messages desired?
     wantDebug = debug_messages_via_environment_variable()
     firstCall = .false.
   endif


464 465 466 467 468 469 470 471 472
   l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
   l_cols = local_index(na, my_pcol, np_cols, nblk, -1) ! Local columns of q

   l_cols_nev = local_index(nev, my_pcol, np_cols, nblk, -1) ! Local columns corresponding to nev

   allocate(e(na), tau(na))
   allocate(q_real(l_rows,l_cols))

   ttt0 = MPI_Wtime()
   call tridiag_complex(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, ev, e, tau)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time tridiag_complex :',ttt1-ttt0
   time_evp_fwd = ttt1-ttt0

   ttt0 = MPI_Wtime()
   call solve_tridi(na, nev, ev, e, q_real, l_rows, nblk, matrixCols, mpi_comm_rows, &
                    mpi_comm_cols, wantDebug, success)
   if (.not.(success)) return

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time solve_tridi     :',ttt1-ttt0
   time_evp_solve = ttt1-ttt0

   ttt0 = MPI_Wtime()
   q(1:l_rows,1:l_cols_nev) = q_real(1:l_rows,1:l_cols_nev)

490
   call trans_ev_complex(na, nev, a, lda, tau, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)
491
   ttt1 = MPI_Wtime()
492
   if(my_prow==0 .and. my_pcol==0 .and. elpa_print_times) write(error_unit,*) 'Time trans_ev_complex:',ttt1-ttt0
493 494 495 496
   time_evp_back = ttt1-ttt0

   deallocate(q_real)
   deallocate(e, tau)
#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("solve_evp_complex_1stage")
#endif

end function solve_evp_complex_1stage

end module ELPA1