!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen ,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
! Author: Andreas Marek, MPCDF
#include "config-f90.h"

  !c> /*! \brief C interface to driver function "elpa_solve_evp_real_double"
  !c> *
  !c> *  \param  na                        Order of matrix a
  !c> *  \param  nev                       Number of eigenvalues needed.
  !c> *                                    The smallest nev eigenvalues/eigenvectors are calculated.
  !c> *  \param  a                         Distributed matrix for which eigenvalues are to be computed.
  !c> *                                    Distribution is like in Scalapack.
  !c> *                                    The full matrix must be set (not only one half like in scalapack).
  !c> *  \param lda                        Leading dimension of a
  !c> *  \param ev(na)                     On output: eigenvalues of a, every processor gets the complete set
  !c> *  \param q                          On output: Eigenvectors of a
  !c> *                                    Distribution is like in Scalapack.
  !c> *                                    Must be always dimensioned to the full size (corresponding to (na,na))
  !c> *                                    even if only a part of the eigenvalues is needed.
  !c> *  \param ldq                        Leading dimension of q
  !c> *  \param nblk                       blocksize of cyclic distribution, must be the same in both directions!
  !c> *  \param matrixCols                 distributed number of matrix columns
  !c> *  \param mpi_comm_rows              MPI-Communicator for rows
  !c> *  \param mpi_comm_cols              MPI-Communicator for columns
  !c> *  \param mpi_comm_all               MPI communicator for the total processor set
  !c> *  \param THIS_REAL_ELPA_KERNEL_API  specify used ELPA2 kernel via API
  !c> *  \param useQR                      use QR decomposition 1 = yes, 0 = no
  !c> *  \param useGPU                     use GPU (1=yes, 0=No)
  !c> *  \param method                     choose whether to use ELPA 1stage or 2stage solver
  !c> *                                    possible values: "1stage" => use ELPA 1stage solver
  !c> *                                                     "2stage" => use ELPA 2stage solver
  !c> *                                                     "auto"   => (at the moment) use ELPA 2stage solver
  !c> *
  !c> *  \result                     int: 1 if error occurred, otherwise 0
  !c> */
#define REALCASE 1
#define DOUBLE_PRECISION 1
#if DOUBLE_PRECISION == 1
  !c> int elpa_solve_evp_real_double(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, 
  !c> int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method);
#else
  !c> int elpa_solve_evp_real_single(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, 
  !c> int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method);
#endif
#include "../../general/precision_macros.h"
#include "./elpa_driver_c_interface_template.X90"
#undef DOUBLE_PRECISION
#undef REALCASE

#ifdef WANT_SINGLE_PRECISION_REAL
  !c> /*! \brief C interface to driver function "elpa_solve_evp_real_single"
  !c> *
  !c> *  \param  na                        Order of matrix a
  !c> *  \param  nev                       Number of eigenvalues needed.
  !c> *                                    The smallest nev eigenvalues/eigenvectors are calculated.
  !c> *  \param  a                         Distributed matrix for which eigenvalues are to be computed.
  !c> *                                    Distribution is like in Scalapack.
  !c> *                                    The full matrix must be set (not only one half like in scalapack).
  !c> *  \param lda                        Leading dimension of a
  !c> *  \param ev(na)                     On output: eigenvalues of a, every processor gets the complete set
  !c> *  \param q                          On output: Eigenvectors of a
  !c> *                                    Distribution is like in Scalapack.
  !c> *                                    Must be always dimensioned to the full size (corresponding to (na,na))
  !c> *                                    even if only a part of the eigenvalues is needed.
  !c> *  \param ldq                        Leading dimension of q
  !c> *  \param nblk                       blocksize of cyclic distribution, must be the same in both directions!
  !c> *  \param matrixCols                 distributed number of matrix columns
  !c> *  \param mpi_comm_rows              MPI-Communicator for rows
  !c> *  \param mpi_comm_cols              MPI-Communicator for columns
  !c> *  \param mpi_comm_all               MPI communicator for the total processor set
  !c> *  \param THIS_REAL_ELPA_KERNEL_API  specify used ELPA2 kernel via API
  !c> *  \param useQR                      use QR decomposition 1 = yes, 0 = no
  !c> *  \param useGPU                     use GPU (1=yes, 0=No)
  !c> *  \param method                     choose whether to use ELPA 1stage or 2stage solver
  !c> *                                    possible values: "1stage" => use ELPA 1stage solver
  !c> *                                                     "2stage" => use ELPA 2stage solver
  !c> *                                                     "auto"   => (at the moment) use ELPA 2stage solver
  !c> *
  !c> *  \result                     int: 1 if error occurred, otherwise 0
  !c> */
#define REALCASE 1
#define SINGLE_PRECISION 1
#undef DOUBLE_PRECISION
#if DOUBLE_PRECISION == 1
  !c> int elpa_solve_evp_real_double(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, 
  !c> int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method);
#else
  !c> int elpa_solve_evp_real_single(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, 
  !c> int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method);
#endif
#include "../../general/precision_macros.h"
#include "elpa_driver_c_interface_template.X90"
#undef SINGLE_PRECISION
#undef DOUBLE_PRECISION
#undef REALCASE
#endif /* WANT_SINGLE_PRECISION_REAL */

  !c> #include <complex.h>
  !c> /*! \brief C interface to driver function "elpa_solve_evp_complex_double"
  !c> *
  !c> *  \param  na                           Order of matrix a
  !c> *  \param  nev                          Number of eigenvalues needed.
  !c> *                                       The smallest nev eigenvalues/eigenvectors are calculated.
  !c> *  \param  a                            Distributed matrix for which eigenvalues are to be computed.
  !c> *                                       Distribution is like in Scalapack.
  !c> *                                       The full matrix must be set (not only one half like in scalapack).
  !c> *  \param lda                           Leading dimension of a
  !c> *  \param ev(na)                        On output: eigenvalues of a, every processor gets the complete set
  !c> *  \param q                             On output: Eigenvectors of a
  !c> *                                       Distribution is like in Scalapack.
  !c> *                                       Must be always dimensioned to the full size (corresponding to (na,na))
  !c> *                                       even if only a part of the eigenvalues is needed.
  !c> *  \param ldq                           Leading dimension of q
  !c> *  \param nblk                          blocksize of cyclic distribution, must be the same in both directions!
  !c> *  \param matrixCols                    distributed number of matrix columns
  !c> *  \param mpi_comm_rows                 MPI-Communicator for rows
  !c> *  \param mpi_comm_cols                 MPI-Communicator for columns
  !c> *  \param mpi_comm_all                  MPI communicator for the total processor set
  !c> *  \param THIS_COMPLEX_ELPA_KERNEL_API  specify used ELPA2 kernel via API
  !c> *  \param useGPU                        use GPU (1=yes, 0=No)
  !c> *  \param method                        choose whether to use ELPA 1stage or 2stage solver
  !c> *                                       possible values: "1stage" => use ELPA 1stage solver
  !c> *                                                        "2stage" => use ELPA 2stage solver
  !c> *                                                        "auto"   => (at the moment) use ELPA 2stage solver
  !c> *
  !c> *  \result                     int: 1 if error occurred, otherwise 0
  !c> */
#define COMPLEXCASE 1
#define DOUBLE_PRECISION 1
#if DOUBLE_PRECISION == 1
  !c> int elpa_solve_evp_complex_double(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, 
  !c> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method);
#else
  !c> int elpa_solve_evp_complex_single(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, 
  !c> int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method);
#endif
#include "../../general/precision_macros.h"
#include "./elpa_driver_c_interface_template.X90"
#undef DOUBLE_PRECISION
#undef COMPLEXCASE


#ifdef WANT_SINGLE_PRECISION_COMPLEX
  !c> #include <complex.h>
  !c> /*! \brief C interface to driver function "elpa_solve_evp_complex_single"
  !c> *
  !c> *  \param  na                           Order of matrix a
  !c> *  \param  nev                          Number of eigenvalues needed.
  !c> *                                       The smallest nev eigenvalues/eigenvectors are calculated.
  !c> *  \param  a                            Distributed matrix for which eigenvalues are to be computed.
  !c> *                                       Distribution is like in Scalapack.
  !c> *                                       The full matrix must be set (not only one half like in scalapack).
  !c> *  \param lda                           Leading dimension of a
  !c> *  \param ev(na)                        On output: eigenvalues of a, every processor gets the complete set
  !c> *  \param q                             On output: Eigenvectors of a
  !c> *                                       Distribution is like in Scalapack.
  !c> *                                       Must be always dimensioned to the full size (corresponding to (na,na))
  !c> *                                       even if only a part of the eigenvalues is needed.
  !c> *  \param ldq                           Leading dimension of q
  !c> *  \param nblk                          blocksize of cyclic distribution, must be the same in both directions!
  !c> *  \param matrixCols                    distributed number of matrix columns
  !c> *  \param mpi_comm_rows                 MPI-Communicator for rows
  !c> *  \param mpi_comm_cols                 MPI-Communicator for columns
  !c> *  \param mpi_comm_all                  MPI communicator for the total processor set
  !c> *  \param THIS_COMPLEX_ELPA_KERNEL_API  specify used ELPA2 kernel via API
  !c> *  \param useGPU                        use GPU (1=yes, 0=No)
  !c> *  \param method                        choose whether to use ELPA 1stage or 2stage solver
  !c> *                                       possible values: "1stage" => use ELPA 1stage solver
  !c> *                                                        "2stage" => use ELPA 2stage solver
  !c> *                                                        "auto"   => (at the moment) use ELPA 2stage solver
  !c> *
  !c> *  \result                     int: 1 if error occurred, otherwise 0
  !c> */
#define COMPLEXCASE 1
#define SINGLE_PRECISION 1
#undef DOUBLE_PRECISION
#if DOUBLE_PRECISION == 1
  !c> int elpa_solve_evp_complex_double(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, 
  !c> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method);
#else
  !c> int elpa_solve_evp_complex_single(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, 
  !c> int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method);
#endif
#include "../../general/precision_macros.h"
#include "./elpa_driver_c_interface_template.X90"
#undef SINGLE_PRECISION
#undef DOUBLE_PRECISION
#undef COMPLEXCASE

#endif /* WANT_SINGLE_PRECISION_COMPLEX */