!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of version 3 of the GNU Lesser General Public
!    License as published by the Free Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
! Author: Andreas Marek, MPCDF
#include "config-f90.h"
  !c> #include <complex.h>

  !c> /*! \brief Old, deprecated C interface to create the MPI communicators for ELPA
  !c> *
  !c> * \param mpi_comm_world   MPI global communicator (in)
  !c> * \param my_prow          Row coordinate of the calling process in the process grid (in)
  !c> * \param my_pcol          Column coordinate of the calling process in the process grid (in)
  !c> * \param mpi_comm_rows    Communicator for communicating within rows of processes (out)
  !c> * \param mpi_comm_cols    Communicator for communicating within columns of processes (out)
  !c> * \result int             integer error value of mpi_comm_split function
  !c> */
  !c> int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
  function get_elpa_row_col_comms_wrapper_c_name1(mpi_comm_world, my_prow, my_pcol, &
                                          mpi_comm_rows, mpi_comm_cols)     &
                                          result(mpierr) bind(C,name="elpa_get_communicators")
    use, intrinsic :: iso_c_binding
    use elpa1, only : get_elpa_row_col_comms

    implicit none
    integer(kind=c_int)         :: mpierr
    integer(kind=c_int), value  :: mpi_comm_world, my_prow, my_pcol
    integer(kind=c_int)         :: mpi_comm_rows, mpi_comm_cols

    mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)

  end function

  !c> /*! \brief C interface to create the MPI communicators for ELPA
  !c> *
  !c> * \param mpi_comm_world   MPI global communicator (in)
  !c> * \param my_prow          Row coordinate of the calling process in the process grid (in)
  !c> * \param my_pcol          Column coordinate of the calling process in the process grid (in)
  !c> * \param mpi_comm_rows    Communicator for communicating within rows of processes (out)
  !c> * \param mpi_comm_cols    Communicator for communicating within columns of processes (out)
  !c> * \result int             integer error value of mpi_comm_split function
  !c> */
  !c> int get_elpa_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
  function get_elpa_row_col_comms_wrapper_c_name2(mpi_comm_world, my_prow, my_pcol, &
                                          mpi_comm_rows, mpi_comm_cols)     &
                                          result(mpierr) bind(C,name="get_elpa_communicators")
    use, intrinsic :: iso_c_binding
    use elpa1, only : get_elpa_row_col_comms

    implicit none
    integer(kind=c_int)         :: mpierr
    integer(kind=c_int), value  :: mpi_comm_world, my_prow, my_pcol
    integer(kind=c_int)         :: mpi_comm_rows, mpi_comm_cols

    mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)

  end function
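
  ! Illustrative usage from C (a sketch only; plain "!" comments are not
  ! extracted into the generated header). It assumes the usual ELPA convention
  ! that communicator arguments are Fortran handles, so a C communicator is
  ! converted with MPI_Comm_c2f first, and that my_prow/my_pcol are this
  ! process' coordinates in the 2D (BLACS-style) process grid:
  !
  !   #include <mpi.h>
  !   int my_prow, my_pcol;              /* grid coordinates of this task */
  !   int mpi_comm_rows, mpi_comm_cols;  /* filled by the call            */
  !   int err = get_elpa_communicators((int) MPI_Comm_c2f(MPI_COMM_WORLD),
  !                                    my_prow, my_pcol,
  !                                    &mpi_comm_rows, &mpi_comm_cols);
  !   /* err is the return value of MPI_Comm_split, i.e. 0 on success */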



  !c>  /*! \brief C interface to solve the real eigenvalue problem with 1-stage solver
  !c>  *
  !c> *  \param  na                   Order of matrix a
  !c> *  \param  nev                  Number of eigenvalues needed.
  !c> *                               The smallest nev eigenvalues/eigenvectors are calculated.
  !c> *  \param  a                    Distributed matrix for which eigenvalues are to be computed.
  !c> *                               Distribution is like in Scalapack.
  !c> *                               The full matrix must be set (not only one half like in scalapack).
  !c> *  \param lda                   Leading dimension of a
  !c> *  \param ev(na)                On output: eigenvalues of a, every processor gets the complete set
  !c> *  \param q                     On output: Eigenvectors of a
  !c> *                               Distribution is like in Scalapack.
  !c> *                               Must be always dimensioned to the full size (corresponding to (na,na))
  !c> *                               even if only a part of the eigenvalues is needed.
  !c> *  \param ldq                   Leading dimension of q
  !c> *  \param nblk                  blocksize of cyclic distribution, must be the same in both directions!
  !c> *  \param matrixCols           distributed number of matrix columns
  !c> *  \param mpi_comm_rows        MPI-Communicator for rows
  !c> *  \param mpi_comm_cols        MPI-Communicator for columns
  !c> *
  !c> *  \result                     int: 1 on success, 0 on failure
  !c> */
  !c> int elpa_solve_evp_real_1stage(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols);
  function solve_elpa1_evp_real_wrapper(na, nev, a, lda, ev, q, ldq, nblk, &
                                  matrixCols, mpi_comm_rows, mpi_comm_cols)      &
                                  result(success) bind(C,name="elpa_solve_evp_real_1stage")

    use, intrinsic :: iso_c_binding
    use elpa1, only : solve_evp_real

    implicit none
    integer(kind=c_int)                    :: success
    integer(kind=c_int), value, intent(in) :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_cols, mpi_comm_rows
    real(kind=c_double)                    :: ev(1:na)
#ifdef USE_ASSUMED_SIZE
    real(kind=c_double)                    :: a(lda,*), q(ldq,*)
#else
    real(kind=c_double)                    :: a(1:lda,1:matrixCols), q(1:ldq,1:matrixCols)
#endif
    logical                                :: successFortran

    successFortran = solve_evp_real(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)

    if (successFortran) then
      success = 1
    else
      success = 0
    endif

  end function
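
  ! Illustrative C usage (a sketch, assuming the block-cyclic ScaLAPACK-style
  ! layout described above was set up beforehand and mpi_comm_rows /
  ! mpi_comm_cols were obtained from get_elpa_communicators):
  !
  !   #include <stdlib.h>
  !   double *a  = malloc((size_t) lda * matrixCols * sizeof(double));
  !   double *q  = malloc((size_t) ldq * matrixCols * sizeof(double));
  !   double *ev = malloc((size_t) na * sizeof(double));
  !   /* ... fill the local part of the full symmetric matrix a ... */
  !   int ok = elpa_solve_evp_real_1stage(na, nev, a, lda, ev, q, ldq, nblk,
  !                                       matrixCols, mpi_comm_rows,
  !                                       mpi_comm_cols);
  !   if (ok != 1) { /* the solver reported failure */ }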


  !c> /*! \brief C interface to solve the complex eigenvalue problem with 1-stage solver
  !c> *
  !c> *  \param  na                   Order of matrix a
  !c> *  \param  nev                  Number of eigenvalues needed.
  !c> *                               The smallest nev eigenvalues/eigenvectors are calculated.
  !c> *  \param  a                    Distributed matrix for which eigenvalues are to be computed.
  !c> *                               Distribution is like in Scalapack.
  !c> *                               The full matrix must be set (not only one half like in scalapack).
  !c> *  \param lda                   Leading dimension of a
  !c> *  \param ev(na)                On output: eigenvalues of a, every processor gets the complete set
  !c> *  \param q                     On output: Eigenvectors of a
  !c> *                               Distribution is like in Scalapack.
  !c> *                               Must be always dimensioned to the full size (corresponding to (na,na))
  !c> *                               even if only a part of the eigenvalues is needed.
  !c> *  \param ldq                   Leading dimension of q
  !c> *  \param nblk                  blocksize of cyclic distribution, must be the same in both directions!
  !c> *  \param matrixCols           distributed number of matrix columns
  !c> *  \param mpi_comm_rows        MPI-Communicator for rows
  !c> *  \param mpi_comm_cols        MPI-Communicator for columns
  !c> *
  !c> *  \result                     int: 1 on success, 0 on failure
  !c> */
  !c> int elpa_solve_evp_complex_1stage(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols);
  function solve_elpa1_evp_complex_wrapper(na, nev, a, lda, ev, q, ldq, nblk, &
                                  matrixCols, mpi_comm_rows, mpi_comm_cols)      &
                                  result(success) bind(C,name="elpa_solve_evp_complex_1stage")

    use, intrinsic :: iso_c_binding
    use elpa1, only : solve_evp_complex

    implicit none
    integer(kind=c_int)                    :: success
    integer(kind=c_int), value, intent(in) :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_cols, mpi_comm_rows
#ifdef USE_ASSUMED_SIZE
    complex(kind=c_double_complex)         :: a(lda,*), q(ldq,*)
#else
    complex(kind=c_double_complex)         :: a(1:lda,1:matrixCols), q(1:ldq,1:matrixCols)
#endif
    real(kind=c_double)                    :: ev(1:na)

    logical                                :: successFortran

    successFortran = solve_evp_complex(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)

    if (successFortran) then
      success = 1
    else
      success = 0
    endif

  end function
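
  ! The complex driver is called analogously from C; a and q are C99 double
  ! complex arrays, while the eigenvalues ev stay real (again only a sketch):
  !
  !   #include <complex.h>
  !   double complex *a = malloc((size_t) lda * matrixCols * sizeof(double complex));
  !   double complex *q = malloc((size_t) ldq * matrixCols * sizeof(double complex));
  !   double *ev        = malloc((size_t) na * sizeof(double));
  !   int ok = elpa_solve_evp_complex_1stage(na, nev, a, lda, ev, q, ldq, nblk,
  !                                          matrixCols, mpi_comm_rows,
  !                                          mpi_comm_cols);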
  !c> /*! \brief C interface to solve the real eigenvalue problem with 2-stage solver
  !c> *
  !c> *  \param  na                        Order of matrix a
  !c> *  \param  nev                       Number of eigenvalues needed.
  !c> *                                    The smallest nev eigenvalues/eigenvectors are calculated.
  !c> *  \param  a                         Distributed matrix for which eigenvalues are to be computed.
  !c> *                                    Distribution is like in Scalapack.
  !c> *                                    The full matrix must be set (not only one half like in scalapack).
  !c> *  \param lda                        Leading dimension of a
  !c> *  \param ev(na)                     On output: eigenvalues of a, every processor gets the complete set
  !c> *  \param q                          On output: Eigenvectors of a
  !c> *                                    Distribution is like in Scalapack.
  !c> *                                    Must be always dimensioned to the full size (corresponding to (na,na))
  !c> *                                    even if only a part of the eigenvalues is needed.
  !c> *  \param ldq                        Leading dimension of q
  !c> *  \param nblk                       blocksize of cyclic distribution, must be the same in both directions!
  !c> *  \param matrixCols                 distributed number of matrix columns
  !c> *  \param mpi_comm_rows              MPI-Communicator for rows
  !c> *  \param mpi_comm_cols              MPI-Communicator for columns
  !c> *  \param mpi_comm_all               MPI communicator for the total processor set
  !c> *  \param THIS_REAL_ELPA_KERNEL_API  specify the ELPA2 kernel to be used via the API
  !c> *  \param useQR                      use QR decomposition 1 = yes, 0 = no
  !c> *
  !c> *  \result                     int: 1 on success, 0 on failure
  !c> */
  !c> int elpa_solve_evp_real_2stage(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR);
  function solve_elpa2_evp_real_wrapper(na, nev, a, lda, ev, q, ldq, nblk,    &
                                  matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
                                  THIS_REAL_ELPA_KERNEL_API, useQR)           &
                                  result(success) bind(C,name="elpa_solve_evp_real_2stage")

    use, intrinsic :: iso_c_binding
    use elpa2, only : solve_evp_real_2stage

    implicit none
    integer(kind=c_int)                    :: success
    integer(kind=c_int), value, intent(in) :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_cols, mpi_comm_rows, &
                                              mpi_comm_all
    integer(kind=c_int), value, intent(in) :: THIS_REAL_ELPA_KERNEL_API, useQR
    real(kind=c_double)                    :: ev(1:na)
#ifdef USE_ASSUMED_SIZE
    real(kind=c_double)                    :: a(lda,*), q(ldq,*)
#else
    real(kind=c_double)                    :: a(1:lda,1:matrixCols), q(1:ldq,1:matrixCols)
#endif
    logical                                :: successFortran, useQRFortran

    if (useQR .eq. 0) then
      useQRFortran = .false.
    else
      useQRFortran = .true.
    endif

    successFortran = solve_evp_real_2stage(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, &
                                           mpi_comm_cols, mpi_comm_all,                                  &
                                           THIS_REAL_ELPA_KERNEL_API, useQRFortran)

    if (successFortran) then
      success = 1
    else
      success = 0
    endif

  end function
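
  ! Sketch of a C call to the 2-stage real solver. The kernel constant below
  ! is an assumed name standing in for one of the constants shipped in the
  ! generated ELPA kernel header; mpi_comm_all is again passed as a Fortran
  ! handle via MPI_Comm_c2f:
  !
  !   int kernel = ELPA2_REAL_KERNEL_GENERIC;   /* assumed constant name   */
  !   int useQR  = 0;                           /* 0 = no QR decomposition */
  !   int ok = elpa_solve_evp_real_2stage(na, nev, a, lda, ev, q, ldq, nblk,
  !                                       matrixCols, mpi_comm_rows,
  !                                       mpi_comm_cols,
  !                                       (int) MPI_Comm_c2f(MPI_COMM_WORLD),
  !                                       kernel, useQR);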


  !c> /*! \brief C interface to solve the complex eigenvalue problem with 2-stage solver
  !c> *
  !c> *  \param  na                        Order of matrix a
  !c> *  \param  nev                       Number of eigenvalues needed.
  !c> *                                    The smallest nev eigenvalues/eigenvectors are calculated.
  !c> *  \param  a                         Distributed matrix for which eigenvalues are to be computed.
  !c> *                                    Distribution is like in Scalapack.
  !c> *                                    The full matrix must be set (not only one half like in scalapack).
  !c> *  \param lda                        Leading dimension of a
  !c> *  \param ev(na)                     On output: eigenvalues of a, every processor gets the complete set
  !c> *  \param q                          On output: Eigenvectors of a
  !c> *                                    Distribution is like in Scalapack.
  !c> *                                    Must be always dimensioned to the full size (corresponding to (na,na))
  !c> *                                    even if only a part of the eigenvalues is needed.
  !c> *  \param ldq                        Leading dimension of q
  !c> *  \param nblk                       blocksize of cyclic distribution, must be the same in both directions!
  !c> *  \param matrixCols                 distributed number of matrix columns
  !c> *  \param mpi_comm_rows              MPI-Communicator for rows
  !c> *  \param mpi_comm_cols              MPI-Communicator for columns
  !c> *  \param mpi_comm_all                  MPI communicator for the total processor set
  !c> *  \param THIS_COMPLEX_ELPA_KERNEL_API  specify the ELPA2 kernel to be used via the API
  !c> *
  !c> *  \result                     int: 1 on success, 0 on failure
  !c> */
  !c> int elpa_solve_evp_complex_2stage(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API);
  function solve_elpa2_evp_complex_wrapper(na, nev, a, lda, ev, q, ldq, nblk,    &
                                  matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all,    &
                                  THIS_COMPLEX_ELPA_KERNEL_API)                  &
                                  result(success) bind(C,name="elpa_solve_evp_complex_2stage")

    use, intrinsic :: iso_c_binding
    use elpa2, only : solve_evp_complex_2stage

    implicit none
    integer(kind=c_int)                    :: success
    integer(kind=c_int), value, intent(in) :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_cols, mpi_comm_rows, &
                                              mpi_comm_all
    integer(kind=c_int), value, intent(in) :: THIS_COMPLEX_ELPA_KERNEL_API
#ifdef USE_ASSUMED_SIZE
    complex(kind=c_double_complex)         :: a(lda,*), q(ldq,*)
#else
    complex(kind=c_double_complex)         :: a(1:lda,1:matrixCols), q(1:ldq,1:matrixCols)
#endif
    real(kind=c_double)                    :: ev(1:na)
    logical                                :: successFortran

    successFortran = solve_evp_complex_2stage(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, &
                                              mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API)

    if (successFortran) then
      success = 1
    else
      success = 0
    endif

  end function
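
  ! The complex 2-stage call mirrors the real one but takes no useQR flag;
  ! the kernel constant name is again an assumption:
  !
  !   int ok = elpa_solve_evp_complex_2stage(na, nev, a, lda, ev, q, ldq, nblk,
  !                                          matrixCols, mpi_comm_rows,
  !                                          mpi_comm_cols,
  !                                          (int) MPI_Comm_c2f(MPI_COMM_WORLD),
  !                                          ELPA2_COMPLEX_KERNEL_GENERIC);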

  !c> /*
  !c> \brief  C interface to solve tridiagonal eigensystem with divide and conquer method
  !c> \details
  !c>
  !c> \param na                    Matrix dimension
  !c> \param nev                   number of eigenvalues/vectors to be computed
  !c> \param d                     array d(na) on input diagonal elements of tridiagonal matrix, on
  !c>                              output the eigenvalues in ascending order
  !c> \param e                     array e(na) on input subdiagonal elements of matrix, on exit destroyed
  !c> \param q                     on exit: matrix q(ldq,matrixCols) contains the eigenvectors
  !c> \param ldq                   leading dimension of matrix q
  !c> \param nblk                  blocksize of cyclic distribution, must be the same in both directions!
  !c> \param matrixCols            columns of matrix q
  !c> \param mpi_comm_rows         MPI communicator for rows
  !c> \param mpi_comm_cols         MPI communicator for columns
  !c> \param wantDebug             give more debug information if 1, else 0
  !c> \result success              int 1 on success, else 0
  !c> */
  !c> int elpa_solve_tridi(int na, int nev, double *d, double *e, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
  function elpa_solve_tridi_wrapper(na, nev, d, e, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug) &
           result(success) bind(C,name="elpa_solve_tridi")

    use, intrinsic :: iso_c_binding
    use elpa1_auxiliary, only : elpa_solve_tridi

    implicit none
    integer(kind=c_int)                    :: success
    integer(kind=c_int), value, intent(in) :: na, nev, ldq, nblk, matrixCols,  mpi_comm_cols, mpi_comm_rows
    integer(kind=c_int), value             :: wantDebug
    real(kind=c_double)                    :: d(1:na), e(1:na)
#ifdef USE_ASSUMED_SIZE
    real(kind=c_double)                    :: q(ldq,*)
#else
    real(kind=c_double)                    :: q(1:ldq, 1:matrixCols)
#endif
    logical                                :: successFortran, wantDebugFortran

    if (wantDebug .ne. 0) then
      wantDebugFortran = .true.
    else
      wantDebugFortran = .false.
    endif

    successFortran = elpa_solve_tridi(na, nev, d, e, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebugFortran)

    if (successFortran) then
      success = 1
    else
      success = 0
    endif

  end function
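
  ! Illustrative C call (a sketch): d and e hold the complete diagonal and
  ! subdiagonal on every task, while q is distributed block-cyclically:
  !
  !   double *d = malloc((size_t) na * sizeof(double));  /* in: diagonal, out: eigenvalues */
  !   double *e = malloc((size_t) na * sizeof(double));  /* in: subdiagonal, destroyed     */
  !   double *q = malloc((size_t) ldq * matrixCols * sizeof(double));
  !   int ok = elpa_solve_tridi(na, nev, d, e, q, ldq, nblk, matrixCols,
  !                             mpi_comm_rows, mpi_comm_cols, 0 /* wantDebug */);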

  !c> /*
  !c> \brief  C interface for elpa_mult_at_b_real: Performs C := A**T * B
  !c>         where   A is a square matrix (na,na) which is optionally upper or lower triangular
  !c>                 B is a (na,ncb) matrix
  !c>                 C is a (na,ncb) matrix where optionally only the upper or lower
  !c>                   triangle may be computed
  !c> \details
  !c> \param  uplo_a               'U' if A is upper triangular
  !c>                              'L' if A is lower triangular
  !c>                              anything else if A is a full matrix
  !c>                              Please note: This pertains to the original A (as set in the calling program)
  !c>                                           whereas the transpose of A is used for calculations
  !c>                              If uplo_a is 'U' or 'L', the other triangle is not used at all,
  !c>                              i.e. it may contain arbitrary numbers
  !c> \param uplo_c                'U' if only the upper diagonal part of C is needed
  !c>                              'L' if only the lower diagonal part of C is needed
  !c>                              anything else if the full matrix C is needed
  !c>                              Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
  !c>                                            written to a certain extent, i.e. one shouldn't rely on the content there!
  !c> \param na                    Number of rows/columns of A, number of rows of B and C
  !c> \param ncb                   Number of columns  of B and C
  !c> \param a                     matrix a
  !c> \param lda                   leading dimension of matrix a
  !c> \param ldaCols               columns of matrix a
  !c> \param b                     matrix b
  !c> \param ldb                   leading dimension of matrix b
  !c> \param ldbCols               columns of matrix b
  !c> \param nblk                  blocksize of cyclic distribution, must be the same in both directions!
  !c> \param  mpi_comm_rows        MPI communicator for rows
  !c> \param  mpi_comm_cols        MPI communicator for columns
  !c> \param c                     matrix c
  !c> \param ldc                   leading dimension of matrix c
  !c> \param ldcCols               columns of matrix c
  !c> \result success              int reports success (1) or failure (0)
  !c> */

  !c> int elpa_mult_at_b_real(char uplo_a, char uplo_c, int na, int ncb, double *a, int lda, int ldaCols, double *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double *c, int ldc, int ldcCols);
  function elpa_mult_at_b_real_wrapper(uplo_a, uplo_c, na, ncb, a, lda, ldaCols, b, ldb, ldbCols, &
                                       nblk, mpi_comm_rows, mpi_comm_cols, c, ldc, ldcCols) &
                                       bind(C,name="elpa_mult_at_b_real") result(success)
    use, intrinsic :: iso_c_binding
    use elpa1_auxiliary, only : elpa_mult_at_b_real

    implicit none

    character(1,C_CHAR), value  :: uplo_a, uplo_c
    integer(kind=c_int), value  :: na, ncb, lda, ldb, nblk, mpi_comm_rows, mpi_comm_cols, ldc, &
                                   ldaCols, ldbCols, ldcCols
    integer(kind=c_int)         :: success
#ifdef USE_ASSUMED_SIZE
    real(kind=c_double)         :: a(lda,*), b(ldb,*), c(ldc,*)
#else
    real(kind=c_double)         :: a(lda,ldaCols), b(ldb,ldbCols), c(ldc,ldcCols)
#endif
    logical                     :: successFortran

    successFortran = elpa_mult_at_b_real(uplo_a, uplo_c, na, ncb, a, lda, ldaCols, b, ldb, ldbCols, nblk, &
                                         mpi_comm_rows, mpi_comm_cols, c, ldc, ldcCols)

    if (successFortran) then
      success = 1
    else
      success = 0
    endif

  end function
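
  ! Sketch of computing C := A**T * B from C, treating A as a full matrix and
  ! computing all of C ('F' here is just "anything else than 'U' or 'L'"):
  !
  !   int ok = elpa_mult_at_b_real('F', 'F', na, ncb,
  !                                a, lda, ldaCols,
  !                                b, ldb, ldbCols,
  !                                nblk, mpi_comm_rows, mpi_comm_cols,
  !                                c, ldc, ldcCols);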

  !c> /*
  !c> \brief C interface for elpa_mult_ah_b_complex: Performs C := A**H * B
  !c>         where   A is a square matrix (na,na) which is optionally upper or lower triangular
  !c>                 B is a (na,ncb) matrix
  !c>                 C is a (na,ncb) matrix where optionally only the upper or lower
  !c>                   triangle may be computed
  !c> \details
  !c>
  !c> \param  uplo_a               'U' if A is upper triangular
  !c>                              'L' if A is lower triangular
  !c>                              anything else if A is a full matrix
  !c>                              Please note: This pertains to the original A (as set in the calling program)
  !c>                                           whereas the conjugate transpose of A is used for calculations
  !c>                              If uplo_a is 'U' or 'L', the other triangle is not used at all,
  !c>                              i.e. it may contain arbitrary numbers
  !c> \param uplo_c                'U' if only the upper diagonal part of C is needed
  !c>                              'L' if only the lower diagonal part of C is needed
  !c>                              anything else if the full matrix C is needed
  !c>                              Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
  !c>                                            written to a certain extent, i.e. one shouldn't rely on the content there!
  !c> \param na                    Number of rows/columns of A, number of rows of B and C
  !c> \param ncb                   Number of columns  of B and C
  !c> \param a                     matrix a
  !c> \param lda                   leading dimension of matrix a
  !c> \param ldaCols               columns of matrix a
  !c> \param b                     matrix b
  !c> \param ldb                   leading dimension of matrix b
  !c> \param ldbCols               columns of matrix b
  !c> \param nblk                  blocksize of cyclic distribution, must be the same in both directions!
  !c> \param  mpi_comm_rows        MPI communicator for rows
  !c> \param  mpi_comm_cols        MPI communicator for columns
  !c> \param c                     matrix c
  !c> \param ldc                   leading dimension of matrix c
  !c> \param ldcCols               columns of matrix c
  !c> \result success              int reports success (1) or failure (0)
  !c> */

  !c> int elpa_mult_ah_b_complex(char uplo_a, char uplo_c, int na, int ncb, double complex *a, int lda, int ldaCols, double complex *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double complex *c, int ldc, int ldcCols);
  function elpa_mult_ah_b_complex_wrapper( uplo_a, uplo_c, na, ncb, a, lda, ldaCols, b, ldb, ldbCols, nblk, &
                                           mpi_comm_rows, mpi_comm_cols, c, ldc, ldcCols) &
                                           result(success) bind(C,name="elpa_mult_ah_b_complex")
    use, intrinsic :: iso_c_binding
    use elpa1_auxiliary, only : elpa_mult_ah_b_complex

    implicit none

    character(1,C_CHAR), value     :: uplo_a, uplo_c
    integer(kind=c_int), value     :: na, ncb, lda, ldb, nblk, mpi_comm_rows, mpi_comm_cols, ldc, ldaCols, ldbCols, ldcCols
    integer(kind=c_int)            :: success
#ifdef USE_ASSUMED_SIZE
    complex(kind=c_double_complex) :: a(lda,*), b(ldb,*), c(ldc,*)
#else
    complex(kind=c_double_complex) :: a(lda,ldaCols), b(ldb,ldbCols), c(ldc,ldcCols)
#endif
    logical                        :: successFortran

    successFortran = elpa_mult_ah_b_complex(uplo_a, uplo_c, na, ncb, a, lda, ldaCols, b, ldb, ldbCols, nblk, &
                                            mpi_comm_rows, mpi_comm_cols, c, ldc, ldcCols)

    if (successFortran) then
      success = 1
    else
      success = 0
    endif

  end function
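
  ! The Hermitian counterpart is called the same way with double complex
  ! arrays, e.g. using only the upper triangle of A and computing all of C:
  !
  !   int ok = elpa_mult_ah_b_complex('U', 'F', na, ncb,
  !                                   a, lda, ldaCols,
  !                                   b, ldb, ldbCols,
  !                                   nblk, mpi_comm_rows, mpi_comm_cols,
  !                                   c, ldc, ldcCols);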

  !c> /*
  !c> \brief  C interface to elpa_invert_trm_real: Inverts a real upper triangular matrix
  !c> \details
  !c> \param  na                   Order of matrix
  !c> \param  a(lda,matrixCols)    Distributed matrix which should be inverted
  !c>                              Distribution is like in Scalapack.
  !c>                              Only the upper triangle needs to be set.
  !c>                              The lower triangle is not referenced.
  !c> \param  lda                  Leading dimension of a
  !c> \param  matrixCols           local columns of matrix a
  !c> \param  nblk                 blocksize of cyclic distribution, must be the same in both directions!
  !c> \param  mpi_comm_rows        MPI communicator for rows
  !c> \param  mpi_comm_cols        MPI communicator for columns
  !c> \param  wantDebug            int more debug information on failure if 1, else 0
  !c> \result success              int reports success (1) or failure (0)
  !c> */

  !c> int elpa_invert_trm_real(int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
  function elpa_invert_trm_real_wrapper(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug) &
        result(success) bind(C,name="elpa_invert_trm_real")
   use, intrinsic :: iso_c_binding
   use elpa1_auxiliary, only : elpa_invert_trm_real

   implicit none

   integer(kind=c_int), value  :: na, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
   integer(kind=c_int), value  :: wantDebug
   integer(kind=c_int)         :: success
#ifdef USE_ASSUMED_SIZE
   real(kind=c_double)         :: a(lda,*)
#else
   real(kind=c_double)         :: a(lda,matrixCols)
#endif
   logical                     :: wantDebugFortran, successFortran

   if (wantDebug .ne. 0) then
     wantDebugFortran = .true.
   else
     wantDebugFortran = .false.
   endif

   successFortran = elpa_invert_trm_real(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebugFortran)

   if (successFortran) then
     success = 1
   else
     success = 0
   endif

 end function
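
 ! Example C call (a sketch): a holds the distributed upper triangular matrix
 ! and is overwritten by its inverse:
 !
 !   int ok = elpa_invert_trm_real(na, a, lda, nblk, matrixCols,
 !                                 mpi_comm_rows, mpi_comm_cols, 1 /* wantDebug */);
 !   if (ok != 1) { /* inversion failed; debug output was requested */ }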

 !c> /*
 !c> \brief  C interface to elpa_invert_trm_complex: Inverts a complex upper triangular matrix
 !c> \details
 !c> \param  na                   Order of matrix
 !c> \param  a(lda,matrixCols)    Distributed matrix which should be inverted
 !c>                              Distribution is like in Scalapack.
 !c>                              Only the upper triangle needs to be set.
 !c>                              The lower triangle is not referenced.
 !c> \param  lda                  Leading dimension of a
 !c> \param  matrixCols           local columns of matrix a
 !c> \param  nblk                 blocksize of cyclic distribution, must be the same in both directions!
 !c> \param  mpi_comm_rows        MPI communicator for rows
 !c> \param  mpi_comm_cols        MPI communicator for columns
 !c> \param  wantDebug            int more debug information on failure if 1, else 0
 !c> \result success              int reports success (1) or failure (0)
 !c> */

 !c> int elpa_invert_trm_complex(int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
 function elpa_invert_trm_complex_wrapper(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug) result(success) &
   bind(C,name="elpa_invert_trm_complex")

   use, intrinsic :: iso_c_binding
   use elpa1_auxiliary, only : elpa_invert_trm_complex

   implicit none

   integer(kind=c_int), value     :: na, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols
   integer(kind=c_int), value     :: wantDebug
   integer(kind=c_int)            :: success
#ifdef USE_ASSUMED_SIZE
   complex(kind=c_double_complex) :: a(lda, *)
#else
   complex(kind=c_double_complex) :: a(lda, matrixCols)
#endif
   logical                        :: successFortran, wantDebugFortran


   if (wantDebug .ne. 0) then
     wantDebugFortran = .true.
   else
     wantDebugFortran = .false.
   endif

   successFortran = elpa_invert_trm_complex(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebugFortran)

   if (successFortran) then
     success = 1
   else
     success = 0
   endif
 end function
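
 ! The complex variant is invoked identically with a double complex matrix:
 !
 !   int ok = elpa_invert_trm_complex(na, a, lda, nblk, matrixCols,
 !                                    mpi_comm_rows, mpi_comm_cols, 0 /* wantDebug */);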

 !c> /*
 !c> \brief  C interface to elpa_cholesky_real: Cholesky factorization of a real symmetric matrix
 !c> \details
 !c>
 !c> \param  na                   Order of matrix
 !c> \param  a(lda,matrixCols)    Distributed matrix which should be factorized.
 !c>                              Distribution is like in Scalapack.
 !c>                              Only the upper triangle needs to be set.
 !c>                              On return, the upper triangle contains the Cholesky factor
 !c>                              and the lower triangle is set to 0.
 !c> \param  lda                  Leading dimension of a
 !c> \param  matrixCols           local columns of matrix a
 !c> \param  nblk                 blocksize of cyclic distribution, must be the same in both directions!
 !c> \param  mpi_comm_rows        MPI communicator for rows
 !c> \param  mpi_comm_cols        MPI communicator for columns
 !c> \param  wantDebug            int more debug information on failure if 1, else 0
 !c> \result success              int reports success (1) or failure (0)
 !c> */

 !c> int elpa_cholesky_real(int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
 function elpa_cholesky_real_wrapper(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug) result(success) &
       bind(C,name="elpa_cholesky_real")

   use, intrinsic :: iso_c_binding
   use elpa1_auxiliary, only : elpa_cholesky_real

   implicit none

   integer(kind=c_int), value :: na, lda, nblk, matrixCols,  mpi_comm_rows, mpi_comm_cols, wantDebug
   integer(kind=c_int)        :: success
#ifdef USE_ASSUMED_SIZE
   real(kind=c_double)        :: a(lda,*)
#else
   real(kind=c_double)        :: a(lda,matrixCols)
#endif
   logical                    :: successFortran, wantDebugFortran

   if (wantDebug .ne. 0) then
     wantDebugFortran = .true.
   else
     wantDebugFortran = .false.
   endif

   successFortran = elpa_cholesky_real(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebugFortran)

   if (successFortran) then
     success = 1
   else
     success = 0
   endif

 end function
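
 ! Typical C usage (a sketch): factorize a distributed symmetric positive
 ! definite matrix in place, upper triangle in, Cholesky factor out:
 !
 !   int ok = elpa_cholesky_real(na, a, lda, nblk, matrixCols,
 !                               mpi_comm_rows, mpi_comm_cols, 0 /* wantDebug */);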

 !c> /*
 !c> \brief  C interface to elpa_cholesky_complex: Cholesky factorization of a complex Hermitian matrix
 !c> \details
 !c> \param  na                   Order of matrix
 !c> \param  a(lda,matrixCols)    Distributed matrix which should be factorized.
 !c>                              Distribution is like in Scalapack.
 !c>                              Only the upper triangle needs to be set.
 !c>                              On return, the upper triangle contains the Cholesky factor
 !c>                              and the lower triangle is set to 0.
 !c> \param  lda                  Leading dimension of a
 !c> \param  matrixCols           local columns of matrix a
 !c> \param  nblk                 blocksize of cyclic distribution, must be the same in both directions!
 !c> \param  mpi_comm_rows        MPI communicator for rows
 !c> \param  mpi_comm_cols        MPI communicator for columns
 !c> \param  wantDebug            int more debug information on failure if 1, else 0
 !c> \result success              int reports success (1) or failure (0)
 !c> */

 !c> int elpa_cholesky_complex(int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
 function elpa_cholesky_complex_wrapper(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug) result(success) &
       bind(C,name="elpa_cholesky_complex")
   use, intrinsic :: iso_c_binding
   use elpa1_auxiliary, only : elpa_cholesky_complex

   implicit none

   integer(kind=c_int), value     :: na, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug
   integer(kind=c_int)            :: success
#ifdef USE_ASSUMED_SIZE
   complex(kind=c_double_complex) :: a(lda,*)
#else
   complex(kind=c_double_complex) :: a(lda,matrixCols)
#endif
   logical                        :: wantDebugFortran, successFortran

   if (wantDebug .ne. 0) then
     wantDebugFortran = .true.
   else
     wantDebugFortran = .false.
   endif

   successFortran = elpa_cholesky_complex(na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebugFortran)

   if (successFortran) then
     success = 1
   else
     success = 0
   endif

 end function
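
 ! And the Hermitian counterpart from C, again factorizing in place:
 !
 !   int ok = elpa_cholesky_complex(na, a, lda, nblk, matrixCols,
 !                                  mpi_comm_rows, mpi_comm_cols, 0 /* wantDebug */);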