!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param mpi_comm_all             MPI communicator for the total processor set
!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param useQR                    use QR decomposition: 1 = yes, 0 = no
!c> * \param THIS_COMPLEX_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> *
!c> * \result int: 1 if error occurred, otherwise 0
!c> */
...
...
@@ -778,7 +777,342 @@
endif
endfunction
#endif /* WANT_SINGLE_PRECISION_COMPLEX */
!c> /*! \brief C interface to driver function "elpa_solve_evp_real_double"
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> *                              The full matrix must be set (not only one half like in Scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> *                              Must always be dimensioned to the full size (corresponding to (na,na)),
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param mpi_comm_all             MPI communicator for the total processor set
!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param useQR                    use QR decomposition: 1 = yes, 0 = no
!c> * \param method choose whether to use ELPA 1stage or 2stage solver
!c> * possible values: "1stage" => use ELPA 1stage solver
!c> * "2stage" => use ELPA 2stage solver
!c> * "auto" => (at the moment) use ELPA 2stage solver
!c> *
!c> * \result int: 1 if error occurred, otherwise 0
!c> */
!c> int elpa_solve_evp_real_double(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, char *method);
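!c> /*! \brief Usage sketch for elpa_solve_evp_real_double from C
!c> *
!c> * A minimal, hedged sketch of a call to the driver documented above, run with a
!c> * single MPI task so that the "distributed" matrix coincides with the local one.
!c> * The kernel value 0 is a placeholder (use one of the ELPA2 kernel constants of
!c> * the installed version), and passing Fortran communicator handles obtained with
!c> * MPI_Comm_c2f is an assumption to be checked against the documentation of the
!c> * installed ELPA version.
!c> *
!c> * \code
!c> * #include <mpi.h>
!c> * #include <stdio.h>
!c> * #include <stdlib.h>
!c> *
!c> * // Prototype as documented above; normally provided by the ELPA C header.
!c> * int elpa_solve_evp_real_double(int na, int nev, double *a, int lda,
!c> *                                double *ev, double *q, int ldq, int nblk,
!c> *                                int matrixCols, int mpi_comm_rows,
!c> *                                int mpi_comm_cols, int mpi_comm_all,
!c> *                                int THIS_REAL_ELPA_KERNEL_API, int useQR,
!c> *                                char *method);
!c> *
!c> * int main(int argc, char **argv) {
!c> *   MPI_Init(&argc, &argv);
!c> *
!c> *   // With one task, MPI_COMM_WORLD serves as row, column and global
!c> *   // communicator; the handles are converted to Fortran integers.
!c> *   int comm_all  = (int) MPI_Comm_c2f(MPI_COMM_WORLD);
!c> *   int comm_rows = comm_all;
!c> *   int comm_cols = comm_all;
!c> *
!c> *   int na = 4, nev = 4, nblk = 2;
!c> *   int lda = na, ldq = na, matrixCols = na;
!c> *
!c> *   double *a  = calloc((size_t) lda * matrixCols, sizeof(double));
!c> *   double *q  = calloc((size_t) ldq * matrixCols, sizeof(double));
!c> *   double *ev = calloc((size_t) na, sizeof(double));
!c> *
!c> *   // Simple symmetric test matrix: diagonal entries 1..na (column-major).
!c> *   for (int i = 0; i < na; i++) a[i + i * lda] = (double) (i + 1);
!c> *
!c> *   int kernel = 0;  // placeholder: use an ELPA2 real kernel constant here
!c> *   int useQR  = 0;  // 1 = use QR decomposition, 0 = no
!c> *
!c> *   int ok = elpa_solve_evp_real_double(na, nev, a, lda, ev, q, ldq, nblk,
!c> *                                       matrixCols, comm_rows, comm_cols,
!c> *                                       comm_all, kernel, useQR, "2stage");
!c> *
!c> *   printf("driver returned %d\n", ok);  // see the result description above
!c> *   for (int i = 0; i < nev; i++) printf("ev[%d] = %f\n", i, ev[i]);
!c> *
!c> *   free(a); free(q); free(ev);
!c> *   MPI_Finalize();
!c> *   return 0;
!c> * }
!c> * \endcode
!c> */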
!c> /*! \brief C interface to driver function "elpa_solve_evp_real_single"
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> *                              The full matrix must be set (not only one half like in Scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> *                              Must always be dimensioned to the full size (corresponding to (na,na)),
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param mpi_comm_all             MPI communicator for the total processor set
!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param useQR                    use QR decomposition: 1 = yes, 0 = no
!c> * \param method choose whether to use ELPA 1stage or 2stage solver
!c> * possible values: "1stage" => use ELPA 1stage solver
!c> * "2stage" => use ELPA 2stage solver
!c> * "auto" => (at the moment) use ELPA 2stage solver
!c> *
!c> * \result int: 1 if error occurred, otherwise 0
!c> */
!c> int elpa_solve_evp_real_single(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, char *method);
!c> /*! \brief C interface to driver function "elpa_solve_evp_complex_double"
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> *                              The full matrix must be set (not only one half like in Scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> *                              Must always be dimensioned to the full size (corresponding to (na,na)),
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param mpi_comm_all             MPI communicator for the total processor set
!c> * \param THIS_COMPLEX_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param method choose whether to use ELPA 1stage or 2stage solver
!c> * possible values: "1stage" => use ELPA 1stage solver
!c> * "2stage" => use ELPA 2stage solver
!c> * "auto" => (at the moment) use ELPA 2stage solver
!c> *
!c> * \result int: 1 if error occurred, otherwise 0
!c> */
!c> int elpa_solve_evp_complex_double(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, char *method);
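!c> /*! \brief Usage sketch for elpa_solve_evp_complex_double from C
!c> *
!c> * The call mirrors the real-valued sketch above: the matrix and the eigenvector
!c> * buffers are C99 double complex values (complex.h), the eigenvalues stay real,
!c> * and there is no QR option. The fragment assumes the setup of that sketch
!c> * (MPI, communicators, problem sizes, stdlib.h); the kernel value is again a
!c> * placeholder for an ELPA2 complex kernel constant.
!c> *
!c> * \code
!c> * #include <complex.h>
!c> *
!c> * double complex *a = calloc((size_t) lda * matrixCols, sizeof(double complex));
!c> * double complex *q = calloc((size_t) ldq * matrixCols, sizeof(double complex));
!c> * double *ev        = calloc((size_t) na, sizeof(double));
!c> *
!c> * int ok = elpa_solve_evp_complex_double(na, nev, a, lda, ev, q, ldq, nblk,
!c> *                                        matrixCols, comm_rows, comm_cols,
!c> *                                        comm_all, kernel, "auto");
!c> * \endcode
!c> */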
!c> /*! \brief C interface to driver function "elpa_solve_evp_complex_single"
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> *                              The full matrix must be set (not only one half like in Scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> *                              Must always be dimensioned to the full size (corresponding to (na,na)),
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param mpi_comm_all             MPI communicator for the total processor set
!c> * \param THIS_COMPLEX_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param method choose whether to use ELPA 1stage or 2stage solver
!c> * possible values: "1stage" => use ELPA 1stage solver
!c> * "2stage" => use ELPA 2stage solver
!c> * "auto" => (at the moment) use ELPA 2stage solver
!c> *
!c> * \result int: 1 if error occurred, otherwise 0
!c> */
!c> int elpa_solve_evp_complex_single(int na, int nev, float complex *a, int lda, float *ev, float complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, char *method);