Commit dba18ce9 authored by Andreas Marek

Remove bandwidth from signature of ELPA2

parent fdcc3339
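For callers, this is a pure signature change: the trailing bandwidth argument (formerly -1 for a matrix that is not banded) is dropped from the ELPA2 solvers, their C bindings, and the test programs. A minimal before/after sketch of a C call site, based on the double-precision real prototype in this diff (argument values are placeholders):

    /* before this commit: trailing bandwidth argument, -1 = no banding */
    success = elpa_solve_evp_real_2stage_double_precision(na, nev, a, lda, ev, q, ldq,
                  nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all,
                  THIS_REAL_ELPA_KERNEL_API, useQR, useGPU, -1);

    /* after this commit: bandwidth is gone from the signature */
    success = elpa_solve_evp_real_2stage_double_precision(na, nev, a, lda, ev, q, ldq,
                  nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all,
                  THIS_REAL_ELPA_KERNEL_API, useQR, useGPU);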
@@ -10,7 +10,7 @@
#if COMPLEXCASE == 1
THIS_ELPA_KERNEL_API, &
#endif
useGPU, bandwidth) result(success)
useGPU) result(success)
#ifdef HAVE_DETAILED_TIMINGS
use timings
@@ -31,7 +31,7 @@
#endif
logical :: useQRActual, useQREnvironment
integer(kind=c_int), intent(in), optional :: bandwidth
integer(kind=c_int) :: bandwidth
integer(kind=c_int), intent(in), optional :: THIS_ELPA_KERNEL_API
integer(kind=c_int) :: THIS_ELPA_KERNEL
@@ -260,8 +260,8 @@
endif
endif
if(present(bandwidth)) then
bandwidth = -1
if (bandwidth .ne. -1) then
nbw = bandwidth
if ((nbw == 0) .or. (mod(nbw, nblk) .ne. 0)) then
@@ -449,7 +449,7 @@
stop 1
endif
if(present(bandwidth)) then
if( bandwidth .ne. -1) then
time_evp_back = ttt1-ttts
else
@@ -492,7 +492,7 @@
&PRECISION " // ": error when deallocating tmat"//errorMessage
stop 1
endif
endif ! not present(bandwidth)
endif
call timer%stop("solve_evp_&
&MATH_DATATYPE&
......
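The net change in this Fortran template: bandwidth is demoted from an optional dummy argument to a local variable that is set to -1 before the existing check, so the banded-matrix path can no longer be selected by a caller. A condensed sketch of the resulting logic (pieced together from the hunks above, not the literal template):

    integer(kind=c_int) :: bandwidth   ! was: integer(kind=c_int), intent(in), optional

    bandwidth = -1                     ! no caller-supplied bandwidth anymore
    if (bandwidth .ne. -1) then        ! this branch is now unreachable
      nbw = bandwidth
    endif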
@@ -473,26 +473,25 @@
!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param useQR use QR decomposition 1 = yes, 0 = no
!c> * \param useGPU use GPU (1=yes, 0=No)
!c> * \param bandwidth bandwidth of already banded matrix (-1 = No banding)
!c> *
!c> * \result int: 1 if an error occurred, otherwise 0
!c> */
#define DOUBLE_PRECISION_REAL 1
#ifdef DOUBLE_PRECISION_REAL
!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, int bandwidth);
!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
#else
!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, int bandwidth);
!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
#endif
#ifdef DOUBLE_PRECISION_REAL
function solve_elpa2_evp_real_wrapper_double(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQR, useGPU, bandwidth) &
THIS_REAL_ELPA_KERNEL_API, useQR, useGPU) &
result(success) bind(C,name="elpa_solve_evp_real_2stage_double_precision")
#else
function solve_elpa2_evp_real_wrapper_single(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQR, useGPU, bandwidth) &
THIS_REAL_ELPA_KERNEL_API, useQR, useGPU) &
result(success) bind(C,name="elpa_solve_evp_real_2stage_double_precision")
result(success) bind(C,name="elpa_solve_evp_real_2stage_single_precision")
@@ -504,7 +503,7 @@
integer(kind=c_int) :: success
integer(kind=c_int), value, intent(in) :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_cols, mpi_comm_rows, &
mpi_comm_all
integer(kind=c_int), value, intent(in) :: THIS_REAL_ELPA_KERNEL_API, useQR, useGPU, bandwidth
integer(kind=c_int), value, intent(in) :: THIS_REAL_ELPA_KERNEL_API, useQR, useGPU
#ifdef DOUBLE_PRECISION_REAL
real(kind=c_double) :: ev(1:na)
#ifdef USE_ASSUMED_SIZE
@@ -532,9 +531,6 @@
useQRFortran = .true.
endif
if (bandwidth .eq. -1) then
! no banded matrix
#ifdef DOUBLE_PRECISION_REAL
successFortran = elpa_solve_evp_real_2stage_double(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, &
mpi_comm_cols, mpi_comm_all, &
@@ -544,19 +540,6 @@
mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQRFortran, useGPU == 1)
#endif
else
! bandwidth given
#ifdef DOUBLE_PRECISION_REAL
successFortran = elpa_solve_evp_real_2stage_double(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, &
mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQRFortran, useGPU == 1, bandwidth)
#else
successFortran = elpa_solve_evp_real_2stage_single(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, &
mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQRFortran, useGPU == 1, bandwidth)
#endif
endif
if (successFortran) then
success = 1
@@ -591,26 +574,25 @@
!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param useQR use QR decomposition 1 = yes, 0 = no
!c> * \param useGPU use GPU (1=yes, 0=No)
!c> * \param bandwidth bandwidth of already banded matrix (-1 = No banding)
!c> *
!c> * \result int: 1 if an error occurred, otherwise 0
!c> */
#undef DOUBLE_PRECISION_REAL
#ifdef DOUBLE_PRECISION_REAL
!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, int bandwidth);
!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
#else
!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, int bandwidth);
!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
#endif
#ifdef DOUBLE_PRECISION_REAL
function solve_elpa2_evp_real_wrapper_double(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQR, useGPU, bandwidth) &
THIS_REAL_ELPA_KERNEL_API, useQR, useGPU) &
result(success) bind(C,name="elpa_solve_evp_real_2stage_double_precision")
#else
function solve_elpa2_evp_real_wrapper_single(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQR, useGPU, bandwidth) &
THIS_REAL_ELPA_KERNEL_API, useQR, useGPU) &
result(success) bind(C,name="elpa_solve_evp_real_2stage_single_precision")
#endif
use, intrinsic :: iso_c_binding
@@ -620,7 +602,7 @@
integer(kind=c_int) :: success
integer(kind=c_int), value, intent(in) :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_cols, mpi_comm_rows, &
mpi_comm_all
integer(kind=c_int), value, intent(in) :: THIS_REAL_ELPA_KERNEL_API, useQR, useGPU, bandwidth
integer(kind=c_int), value, intent(in) :: THIS_REAL_ELPA_KERNEL_API, useQR, useGPU
#ifdef DOUBLE_PRECISION_REAL
real(kind=c_double) :: ev(1:na)
#ifdef USE_ASSUMED_SIZE
@@ -647,8 +629,6 @@
useQRFortran = .true.
endif
if (bandwidth .eq. -1) then
! matrix is not banded
#ifdef DOUBLE_PRECISION_REAL
successFortran = elpa_solve_evp_real_2stage_double(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, &
mpi_comm_cols, mpi_comm_all, &
@@ -658,19 +638,6 @@
mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQRFortran, useGPU == 1)
#endif
else
! bandwidth is given
#ifdef DOUBLE_PRECISION_REAL
successFortran = elpa_solve_evp_real_2stage_double(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, &
mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQRFortran, useGPU == 1, bandwidth)
#else
successFortran = elpa_solve_evp_real_2stage_single(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, &
mpi_comm_cols, mpi_comm_all, &
THIS_REAL_ELPA_KERNEL_API, useQRFortran, useGPU == 1, bandwidth)
#endif
endif
if (successFortran) then
success = 1
else
@@ -703,27 +670,26 @@
!c> * \param mpi_comm_all MPI communicator for the total processor set
!c> * \param THIS_COMPLEX_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param useGPU use GPU (1=yes, 0=No)
!c> * \param bandwidth bandwidth of already banded matrix (-1 = No banding)
!c> *
!c> * \result int: 1 if an error occurred, otherwise 0
!c> */
#define DOUBLE_PRECISION_COMPLEX 1
#ifdef DOUBLE_PRECISION_COMPLEX
!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);
!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
#else
!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);
!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
#endif
#ifdef DOUBLE_PRECISION_COMPLEX
function solve_elpa2_evp_complex_wrapper_double(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth) &
THIS_COMPLEX_ELPA_KERNEL_API, useGPU) &
result(success) bind(C,name="elpa_solve_evp_complex_2stage_double_precision")
#else
function solve_elpa2_evp_complex_wrapper_single(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth) &
THIS_COMPLEX_ELPA_KERNEL_API, useGPU) &
result(success) bind(C,name="elpa_solve_evp_complex_2stage_single_precision")
#endif
@@ -734,7 +700,7 @@
integer(kind=c_int) :: success
integer(kind=c_int), value, intent(in) :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_cols, mpi_comm_rows, &
mpi_comm_all
integer(kind=c_int), value, intent(in) :: THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth
integer(kind=c_int), value, intent(in) :: THIS_COMPLEX_ELPA_KERNEL_API, useGPU
#ifdef DOUBLE_PRECISION_COMPLEX
real(kind=c_double) :: ev(1:na)
#ifdef USE_ASSUMED_SIZE
@@ -755,7 +721,6 @@
logical :: successFortran
if (bandwidth .eq. -1) then
! matrix is not banded
#ifdef DOUBLE_PRECISION_COMPLEX
@@ -767,19 +732,7 @@
mpi_comm_rows, mpi_comm_cols, &
mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API, useGPU == 1)
#endif
else
! matrix is banded
#ifdef DOUBLE_PRECISION_COMPLEX
successFortran = elpa_solve_evp_complex_2stage_double(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, &
mpi_comm_rows, mpi_comm_cols, &
mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API, useGPU == 1, bandwidth)
#else
successFortran = elpa_solve_evp_complex_2stage_single(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, &
mpi_comm_rows, mpi_comm_cols, &
mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API, useGPU == 1, bandwidth)
#endif
endif
if (successFortran) then
success = 1
else
@@ -812,27 +765,26 @@
!c> * \param mpi_comm_all MPI communicator for the total processor set
!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
!c> * \param useGPU use GPU (1=yes, 0=No)
!c> * \param bandwidth bandwidth of already banded matrix (-1 = No banding)
!c> *
!c> * \result int: 1 if an error occurred, otherwise 0
!c> */
#undef DOUBLE_PRECISION_COMPLEX
#ifdef DOUBLE_PRECISION_COMPLEX
!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);
!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
#else
!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, int bandwidth);
!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
#endif
#ifdef DOUBLE_PRECISION_COMPLEX
function solve_elpa2_evp_complex_wrapper_double(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth) &
THIS_COMPLEX_ELPA_KERNEL_API, useGPU) &
result(success) bind(C,name="elpa_solve_evp_complex_2stage_double_precision")
#else
function solve_elpa2_evp_complex_wrapper_single(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth) &
THIS_COMPLEX_ELPA_KERNEL_API, useGPU) &
result(success) bind(C,name="elpa_solve_evp_complex_2stage_single_precision")
#endif
@@ -843,7 +795,7 @@
integer(kind=c_int) :: success
integer(kind=c_int), value, intent(in) :: na, nev, lda, ldq, nblk, matrixCols, mpi_comm_cols, mpi_comm_rows, &
mpi_comm_all
integer(kind=c_int), value, intent(in) :: THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth
integer(kind=c_int), value, intent(in) :: THIS_COMPLEX_ELPA_KERNEL_API, useGPU
#ifdef DOUBLE_PRECISION_COMPLEX
complex(kind=c_double_complex) :: a(1:lda,1:matrixCols), q(1:ldq,1:matrixCols)
real(kind=c_double) :: ev(1:na)
@@ -854,9 +806,6 @@
logical :: successFortran
if (bandwidth .eq. -1) then
! matrix is not banded
#ifdef DOUBLE_PRECISION_COMPLEX
successFortran = elpa_solve_evp_complex_2stage_double(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, &
mpi_comm_rows, mpi_comm_cols, &
@@ -866,20 +815,7 @@
mpi_comm_rows, mpi_comm_cols, &
mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API, useGPU == 1)
#endif
else
! bandwidth is given
#ifdef DOUBLE_PRECISION_COMPLEX
successFortran = elpa_solve_evp_complex_2stage_double(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, &
mpi_comm_rows, mpi_comm_cols, &
mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API, useGPU == 1, bandwidth)
#else
successFortran = elpa_solve_evp_complex_2stage_single(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, &
mpi_comm_rows, mpi_comm_cols, &
mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API, useGPU == 1, bandwidth)
#endif
endif
if (successFortran) then
success = 1
else
......
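Net effect in the C-binding wrappers above: the run-time dispatch on bandwidth .eq. -1 is gone, and each wrapper now calls the corresponding Fortran solver unconditionally, e.g. in the double-precision real case:

    successFortran = elpa_solve_evp_real_2stage_double(na, nev, a, lda, ev, q, ldq, nblk, &
                       matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all,            &
                       THIS_REAL_ELPA_KERNEL_API, useQRFortran, useGPU == 1)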
@@ -92,7 +92,7 @@ int main(int argc, char** argv) {
int success;
int THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth;
int THIS_COMPLEX_ELPA_KERNEL_API, useGPU;
#ifdef WITH_MPI
MPI_Init(&argc, &argv);
@@ -231,12 +231,11 @@ int main(int argc, char** argv) {
mpierr = MPI_Barrier(MPI_COMM_WORLD);
#endif
useGPU = 0;
bandwidth = -1;
THIS_COMPLEX_ELPA_KERNEL_API = ELPA2_COMPLEX_KERNEL_GENERIC;
#ifdef DOUBLE_PRECISION_COMPLEX
success = elpa_solve_evp_complex_2stage_double_precision(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, my_mpi_comm_world, THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth);
success = elpa_solve_evp_complex_2stage_double_precision(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, my_mpi_comm_world, THIS_COMPLEX_ELPA_KERNEL_API, useGPU);
#else
success = elpa_solve_evp_complex_2stage_single_precision(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, my_mpi_comm_world, THIS_COMPLEX_ELPA_KERNEL_API, useGPU, bandwidth);
success = elpa_solve_evp_complex_2stage_single_precision(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, my_mpi_comm_world, THIS_COMPLEX_ELPA_KERNEL_API, useGPU);
#endif
if (success != 1) {
......
@@ -87,7 +87,7 @@ int main(int argc, char** argv) {
int success;
int useQr, THIS_REAL_ELPA_KERNEL_API, useGPU, bandwidth;
int useQr, THIS_REAL_ELPA_KERNEL_API, useGPU;
#ifdef WITH_MPI
MPI_Init(&argc, &argv);
@@ -218,12 +218,11 @@ int main(int argc, char** argv) {
#endif
useGPU = 0;
useQr = 0;
bandwidth = -1;
THIS_REAL_ELPA_KERNEL_API = ELPA2_REAL_KERNEL_GENERIC;
#ifdef DOUBLE_PRECISION_REAL
success = elpa_solve_evp_real_2stage_double_precision(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, my_mpi_comm_world, THIS_REAL_ELPA_KERNEL_API, useQr, useGPU, bandwidth);
success = elpa_solve_evp_real_2stage_double_precision(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, my_mpi_comm_world, THIS_REAL_ELPA_KERNEL_API, useQr, useGPU);
#else
success = elpa_solve_evp_real_2stage_single_precision(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, my_mpi_comm_world, THIS_REAL_ELPA_KERNEL_API, useQr, useGPU, bandwidth);
success = elpa_solve_evp_real_2stage_single_precision(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, my_mpi_comm_world, THIS_REAL_ELPA_KERNEL_API, useQr, useGPU);
#endif
if (success != 1) {
printf("error in ELPA solve \n");
......
@@ -290,7 +290,7 @@ program test_complex2_double_precision
call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
#endif
successELPA = elpa_solve_evp_complex_2stage_double(na, nev, a, na_rows, ev, z, na_rows, nblk, &
na_cols, mpi_comm_rows, mpi_comm_cols, mpi_comm_world, bandwidth=bandwidth)
na_cols, mpi_comm_rows, mpi_comm_cols, mpi_comm_world)
if (.not.(successELPA)) then
write(error_unit,*) "solve_evp_complex_2stage produced an error! Aborting..."
......
@@ -282,7 +282,7 @@ program test_real2_double_precision
call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
#endif
successELPA = elpa_solve_evp_real_2stage_double(na, nev, a, na_rows, ev, z, na_rows, nblk, na_cols, &
mpi_comm_rows, mpi_comm_cols, mpi_comm_world, bandwidth = bandwidth)
mpi_comm_rows, mpi_comm_cols, mpi_comm_world)
if (.not.(successELPA)) then
write(error_unit,*) "solve_evp_real_2stage produced an error! Aborting..."
......