 !c> * \param matrixCols distributed number of matrix columns
 !c> * \param mpi_comm_rows MPI-Communicator for rows
 !c> * \param mpi_comm_cols MPI-Communicator for columns
-!c> * \parmam useGPU use GPU (1=yes, 0=No)
+!c> * \param useGPU use GPU (1=yes, 0=No)
 !c> *
 !c> * \result int: 1 if error occured, otherwise 0
 !c>*/
...
...
@@ -229,7 +229,7 @@
 !c> * \param matrixCols distributed number of matrix columns
 !c> * \param mpi_comm_rows MPI-Communicator for rows
 !c> * \param mpi_comm_cols MPI-Communicator for columns
-!c> * \parmam useGPU use GPU (1=yes, 0=No)
+!c> * \param useGPU use GPU (1=yes, 0=No)
 !c> *
 !c> * \result int: 1 if error occured, otherwise 0
 !c>*/
...
...
@@ -304,7 +304,7 @@
 !c> * \param matrixCols distributed number of matrix columns
 !c> * \param mpi_comm_rows MPI-Communicator for rows
 !c> * \param mpi_comm_cols MPI-Communicator for columns
-!c> * \parmam useGPU use GPU (1=yes, 0=No)
+!c> * \param useGPU use GPU (1=yes, 0=No)
 !c> *
 !c> * \result int: 1 if error occured, otherwise 0
 !c> */
...
...
@@ -391,7 +391,7 @@
 !c> * \param matrixCols distributed number of matrix columns
 !c> * \param mpi_comm_rows MPI-Communicator for rows
 !c> * \param mpi_comm_cols MPI-Communicator for columns
-!c> * \parmam useGPU use GPU (1=yes, 0=No)
+!c> * \param useGPU use GPU (1=yes, 0=No)
 !c> *
 !c> * \result int: 1 if error occured, otherwise 0
 !c> */
...
...
@@ -471,27 +471,28 @@
 !c> * \param mpi_comm_cols MPI-Communicator for columns
 !c> * \param mpi_coll_all MPI communicator for the total processor set
 !c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
-!c> * \param useQR use QR decomposition 1 = yes, 0 = no
-!c> * \parmam useGPU use GPU (1=yes, 0=No)
+!c> * \param useQR use QR decomposition 1 = yes, 0 = no
+!c> * \param useGPU use GPU (1=yes, 0=No)
+!c> * \param bandwidth bandwidth of already banded matrix (-1 = No banding)
 !c> *
 !c> * \result int: 1 if error occured, otherwise 0
 !c> */
 #define DOUBLE_PRECISION_REAL 1
 #ifdef DOUBLE_PRECISION_REAL
-!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
+!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, int bandwidth);
 #else
-!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
+!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, int bandwidth);
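
Not part of the diff itself: the following is a minimal C calling sketch for the extended real 2-stage prototype added above. It assumes the matrix is already block-cyclically distributed, that the three communicator arguments are Fortran integer handles (obtained here via MPI_Comm_c2f, which is an assumption about the legacy interface), and that a real code would include the generated ELPA header instead of the local declaration shown. The kernel id and the useQR/useGPU switches are placeholder values; bandwidth = -1 requests "no banding" exactly as documented in the parameter list above.

    #include <mpi.h>

    /* Prototype copied from the generated !c> block above; in a real build it
     * would come from the installed ELPA header instead. */
    int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda,
        double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows,
        int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API,
        int useQR, int useGPU, int bandwidth);

    /* Sketch: solve for nev eigenpairs of a block-cyclically distributed real matrix. */
    int solve_real_2stage(int na, int nev, double *a, int lda,
                          double *ev, double *q, int ldq,
                          int nblk, int matrixCols,
                          MPI_Comm rows, MPI_Comm cols, MPI_Comm all)
    {
      int mpi_comm_rows = (int) MPI_Comm_c2f(rows);  /* assumed: Fortran communicator handles */
      int mpi_comm_cols = (int) MPI_Comm_c2f(cols);
      int mpi_comm_all  = (int) MPI_Comm_c2f(all);

      int kernel    = 0;   /* placeholder kernel id; a real code would pass an ELPA2 kernel constant */
      int useQR     = 0;   /* 0 = no QR decomposition */
      int useGPU    = 0;   /* 0 = CPU only */
      int bandwidth = -1;  /* -1 = matrix is not already banded (the new trailing argument) */

      /* Status code as described by the \result note above. */
      return elpa_solve_evp_real_2stage_double_precision(na, nev, a, lda, ev, q, ldq,
                                                          nblk, matrixCols,
                                                          mpi_comm_rows, mpi_comm_cols,
                                                          mpi_comm_all, kernel,
                                                          useQR, useGPU, bandwidth);
    }
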
...
 !c> * \param mpi_comm_cols MPI-Communicator for columns
 !c> * \param mpi_coll_all MPI communicator for the total processor set
 !c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
-!c> * \param useQR use QR decomposition 1 = yes, 0 = no
-!c> * \parmam useGPU use GPU (1=yes, 0=No)
+!c> * \param useQR use QR decomposition 1 = yes, 0 = no
+!c> * \param useGPU use GPU (1=yes, 0=No)
+!c> * \param bandwidth bandwidth of already banded matrix (-1 = No banding)
 !c> *
 !c> * \result int: 1 if error occured, otherwise 0
 !c> */
 #undef DOUBLE_PRECISION_REAL
 #ifdef DOUBLE_PRECISION_REAL
-!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
+!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, int bandwidth);
 #else
-!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
+!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, int bandwidth);
...
 !c> * \param mpi_comm_cols MPI-Communicator for columns
 !c> * \param mpi_coll_all MPI communicator for the total processor set
 !c> * \param THIS_COMPLEX_ELPA_KERNEL_API specify used ELPA2 kernel via API
-!c> * \parmam useGPU use GPU (1=yes, 0=No)
+!c> * \param useGPU use GPU (1=yes, 0=No)
+!c> * \param bandwidth bandwidth of already banded matrix (-1 = No banding)
 !c> *
 !c> * \result int: 1 if error occured, otherwise 0
 !c> */
 #define DOUBLE_PRECISION_COMPLEX 1
 #ifdef DOUBLE_PRECISION_COMPLEX
-!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
+!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);
 #else
-!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
+!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);
...
 !c> * \param mpi_comm_cols MPI-Communicator for columns
 !c> * \param mpi_coll_all MPI communicator for the total processor set
 !c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
-!c> * \parmam useGPU use GPU (1=yes, 0=No)
+!c> * \param useGPU use GPU (1=yes, 0=No)
+!c> * \param bandwidth bandwidth of already banded matrix (-1 = No banding)
 !c> *
 !c> * \result int: 1 if error occured, otherwise 0
 !c> */
 #undef DOUBLE_PRECISION_COMPLEX
 #ifdef DOUBLE_PRECISION_COMPLEX
-!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
+!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);
 #else
-!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
+!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);
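
Again not part of the diff: a corresponding calling sketch for the complex 2-stage prototype, under the same assumptions as the real example above (block-cyclic distribution, Fortran integer communicator handles via MPI_Comm_c2f, placeholder kernel id and switches). The interface differences are the complex matrix/eigenvector arrays and the absence of the useQR switch; bandwidth = -1 again means the input is not pre-banded.

    #include <complex.h>
    #include <mpi.h>

    /* Prototype copied from the generated !c> block above. */
    int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a,
        int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols,
        int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all,
        int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);

    /* Sketch: solve for nev eigenpairs of a block-cyclically distributed complex matrix. */
    int solve_complex_2stage(int na, int nev, double complex *a, int lda,
                             double *ev, double complex *q, int ldq,
                             int nblk, int matrixCols,
                             MPI_Comm rows, MPI_Comm cols, MPI_Comm all)
    {
      int mpi_comm_rows = (int) MPI_Comm_c2f(rows);  /* assumed: Fortran communicator handles */
      int mpi_comm_cols = (int) MPI_Comm_c2f(cols);
      int mpi_comm_all  = (int) MPI_Comm_c2f(all);

      int kernel    = 0;   /* placeholder kernel id; a real code would pass an ELPA2 kernel constant */
      int useGPU    = 0;   /* 0 = CPU only */
      int bandwidth = -1;  /* -1 = matrix is not already banded */

      /* Status code as described by the \result note above. */
      return elpa_solve_evp_complex_2stage_double_precision(na, nev, a, lda, ev, q, ldq,
                                                             nblk, matrixCols,
                                                             mpi_comm_rows, mpi_comm_cols,
                                                             mpi_comm_all, kernel,
                                                             useGPU, bandwidth);
    }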