diff --git a/man/elpa_solve_evp_complex.3 b/man/elpa_solve_evp_complex.3 index 8a02e7f4193c51d148b6e88cbdff096bd8ca418c..7ab40de85c952665f4dc954f41160538ebaa8574 100644 --- a/man/elpa_solve_evp_complex.3 +++ b/man/elpa_solve_evp_complex.3 @@ -11,42 +11,44 @@ elpa_solve_evp_complex \- solve the complex eigenvalue problem with either the 1 use elpa .br .br -.RI "success = \fBelpa_solve_evp_complex\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL=THIS_COMPLEX_ELPA_KERNEL, method=method)" +.RI "success = \fBelpa_solve_evp_complex\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL, useGPU, method)" .br .RI " " .br .RI "With the definintions of the input and output variables:" .br -.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" .br -.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" .br -.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" .br -.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" .br -.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" .br -.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" .br -.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" .br -.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" .br -.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" .br -.RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" +.RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" .br -.RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br +.RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " -.RI "logical \fBsuccess\fP: return value indicating success or failure" +.RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SS C INTERFACE #include "elpa.h" @@ -54,7 +56,7 @@ use elpa #include .br -.RI "success = \fBelpa_solve_evp_complex\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_COMPLEX_KERNEL \fB char *\fPmethod);" +.RI "success = \fBelpa_solve_evp_complex\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_COMPLEX_KERNEL , \fBint\fP useGPU, \fB char *\fPmethod);" .br .RI " " .br @@ -87,6 +89,8 @@ use elpa .br .RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" .br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "char *\fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/elpa_solve_evp_complex_1stage.3 b/man/elpa_solve_evp_complex_1stage.3 index 1e30eb51f10276eaa9840ba7b30038f48e3ba9c9..e2bab4eb7dbf2d959c102c3a0e5ea5450fef9867 100644 --- a/man/elpa_solve_evp_complex_1stage.3 +++ b/man/elpa_solve_evp_complex_1stage.3 @@ -11,7 +11,7 @@ elpa_solve_evp_complex_1stage \- solve the complex eigenvalue problem with the 1 use elpa1 .br .br -.RI "success = \fBelpa_solve_evp_complex_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBelpa_solve_evp_complex_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br @@ -40,7 +40,10 @@ use elpa1 .br .RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SS C INTERFACE @@ -49,7 +52,7 @@ use elpa1 #include .br -.RI "success = \fBelpa_solve_evp_complex_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBelpa_solve_evp_complex_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -78,7 +81,10 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "int \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) .SH DESCRIPTION diff --git a/man/elpa_solve_evp_complex_1stage_double.3 b/man/elpa_solve_evp_complex_1stage_double.3 index 2157b05bb8fa2723045d9c177813002829fdc716..05ec7dad81f1454fd2578df49a29f803f0834e04 100644 --- a/man/elpa_solve_evp_complex_1stage_double.3 +++ b/man/elpa_solve_evp_complex_1stage_double.3 @@ -11,7 +11,7 @@ elpa_solve_evp_complex_1stage_double \- solve the double-precision complex eigen use elpa1 .br .br -.RI "success = \fBelpa_solve_evp_complex_1stage_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBelpa_solve_evp_complex_1stage_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br @@ -40,7 +40,10 @@ use elpa1 .br .RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SS C INTERFACE @@ -49,7 +52,7 @@ use elpa1 #include .br -.RI "success = \fBelpa_solve_evp_complex_1stage_double_precision\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBelpa_solve_evp_complex_1stage_double_precision\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -78,7 +81,10 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "int \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) .SH DESCRIPTION diff --git a/man/elpa_solve_evp_complex_1stage_single.3 b/man/elpa_solve_evp_complex_1stage_single.3 index b4992016b4e81cc882e68f64bd5d35480082c334..e6884b0a149153541e7ffce2ae3031391296f33d 100644 --- a/man/elpa_solve_evp_complex_1stage_single.3 +++ b/man/elpa_solve_evp_complex_1stage_single.3 @@ -11,7 +11,7 @@ elpa_solve_evp_complex_1stage_single \- solve the complex eigenvalue problem wit use elpa1 .br .br -.RI "success = \fBelpa_solve_evp_complex_1stage_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBelpa_solve_evp_complex_1stage_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br @@ -40,7 +40,10 @@ use elpa1 .br .RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SS C INTERFACE @@ -49,7 +52,7 @@ use elpa1 #include .br -.RI "success = \fBelpa_solve_evp_complex_1stage_single\fP (\fBint\fP na, \fBint\fP nev, \fB complex *\fPa, \fBint\fP lda, \fB float *\fPev, \fBcomplex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBelpa_solve_evp_complex_1stage_single\fP (\fBint\fP na, \fBint\fP nev, \fB complex *\fPa, \fBint\fP lda, \fB float *\fPev, \fBcomplex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -78,7 +81,10 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "int \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) .SH DESCRIPTION diff --git a/man/elpa_solve_evp_complex_double.3 b/man/elpa_solve_evp_complex_double.3 index cf07994cd65cc1acd2f514b4057f2325262a570d..db460a27b9b38351e06bb9e7046f3205737b2762 100644 --- a/man/elpa_solve_evp_complex_double.3 +++ b/man/elpa_solve_evp_complex_double.3 @@ -11,42 +11,44 @@ elpa_solve_evp_complex_double \- solve the complex eigenvalue problem with eithe use elpa .br .br -.RI "success = \fBelpa_solve_evp_complex_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL=THIS_COMPLEX_ELPA_KERNEL, method=method)" +.RI "success = \fBelpa_solve_evp_complex_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL, useGPU, method)" .br .RI " " .br .RI "With the definintions of the input and output variables:" .br -.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" .br -.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" .br -.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" .br -.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" .br -.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" .br -.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" .br -.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" .br -.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" .br -.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. 
Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" .br -.RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" +.RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" .br -.RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not" +.br +.RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " -.RI "logical \fBsuccess\fP: return value indicating success or failure" +.RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SS C INTERFACE #include "elpa.h" @@ -54,7 +56,7 @@ use elpa #include .br -.RI "success = \fBelpa_solve_evp_complex_double\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_COMPLEX_KERNEL \fB char *\fPmethod);" +.RI "success = \fBelpa_solve_evp_complex_double\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_COMPLEX_KERNEL, \fBint\fP useGPU, \fB char *\fPmethod);" .br .RI " " .br @@ -87,6 +89,8 @@ use elpa .br .RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" .br +.RI "int \fBuseGPU\fP: specify whether GPUs should be used or not" +.br .RI "char *\fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/elpa_solve_evp_complex_single.3 b/man/elpa_solve_evp_complex_single.3 index e4999eaee87c493c4db9ae39449a337f5deb48b0..da0d5a4784f2e9e5ac74339ac3aeb3f2306a7a1a 100644 --- a/man/elpa_solve_evp_complex_single.3 +++ b/man/elpa_solve_evp_complex_single.3 @@ -11,42 +11,44 @@ elpa_solve_evp_complex_single \- solve the complex eigenvalue problem with eithe use elpa .br .br -.RI "success = \fBelpa_solve_evp_complex_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL=THIS_COMPLEX_ELPA_KERNEL, method=method)" +.RI "success = \fBelpa_solve_evp_complex_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL, useGPU, method)" .br .RI " " .br .RI "With the definintions of the input and output variables:" .br -.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.RI 
"integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" .br -.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" .br -.RI "complex*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.RI "complex*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" .br -.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" .br -.RI "real*4, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.RI "real*4, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" .br -.RI "complex*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.RI "complex*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" .br -.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" .br -.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" .br -.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" .br -.RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not" .br -.RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " +.RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" +.br +.RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " -.RI "logical \fBsuccess\fP: return value indicating success or failure" +.RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SS C INTERFACE #include "elpa.h" @@ -54,7 +56,7 @@ use elpa #include .br -.RI "success = \fBelpa_solve_evp_complex_single\fP (\fBint\fP na, \fBint\fP nev, \fB complex *\fPa, \fBint\fP lda, \fB float *\fPev, \fBcomplex *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_COMPLEX_KERNEL \fB char *\fPmethod);" +.RI "success = \fBelpa_solve_evp_complex_single\fP (\fBint\fP na, \fBint\fP nev, \fB complex *\fPa, \fBint\fP lda, \fB float *\fPev, \fBcomplex *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_COMPLEX_KERNEL, \fBint\fP useGPU, \fB char *\fPmethod);" .br .RI " " .br @@ -87,6 +89,8 @@ use elpa .br .RI "int \fBTHIS_ELPA_COMPLEX_KERNEL\fp: choose the compute kernel for 2-stage solver" .br +.RI "int \fBuseGPU\fP: specify whether GPUs should be used or not" +.br .RI "char *\fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/elpa_solve_evp_real.3 b/man/elpa_solve_evp_real.3 index 6bd519151cc23b30d685ebf346b74b94bedd48d7..b31ace7d07fda4e75ff0762dcd46c084aa9ef16e 100644 --- a/man/elpa_solve_evp_real.3 +++ b/man/elpa_solve_evp_real.3 @@ -11,7 +11,7 @@ elpa_solve_evp_real \- solve the real eigenvalue problem use elpa .br .br -.RI "success = \fBelpa_solve_evp_real\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_REAL_ELPA_KERNEL=THIS_REAL_ELPA_KERNEL, useQr=useQR, method=method)" +.RI "success = \fBelpa_solve_evp_real\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_REAL_ELPA_KERNEL, useQr, useGPU, method)" .br .RI " " .br @@ -48,6 +48,8 @@ use elpa .br .RI "logical, intent(in), optional: \fBuseQR\fP: optional argument; switches to QR-decomposition if set to .true." 
.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "logical \fBsuccess\fP: return value indicating success or failure" @@ -56,7 +58,7 @@ use elpa #include "elpa.h" .br -.RI "success = \fBelpa_solve_evp_real\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL, \fBint\fP useQr, \fbchar *\fPmethod);" +.RI "success = \fBelpa_solve_evp_real\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL, \fBint\fP useQr, \fBint\fP useGPU, \fbchar *\fPmethod);" .br .RI " " .br @@ -91,6 +93,8 @@ use elpa .br .RI "int \fBuseQR\fP: if set to 1 switch to QR-decomposition" .br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "char *\fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/elpa_solve_evp_real_1stage.3 b/man/elpa_solve_evp_real_1stage.3 index 6894656969cd5bb1781a7a3751708072cd56bf74..22c32e17298a6aac1794d2cf6d9defcac2aa444a 100644 --- a/man/elpa_solve_evp_real_1stage.3 +++ b/man/elpa_solve_evp_real_1stage.3 @@ -11,7 +11,7 @@ elpa_solve_evp_real_1stage \- solve the real eigenvalue problem with the 1-stage use elpa1 .br .br -.RI "success = \fBelpa_solve_evp_real_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBelpa_solve_evp_real_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br @@ -40,6 +40,10 @@ use elpa1 .br .RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "logical \fBsuccess\fP: return value indicating success or failure" .br @@ -47,7 +51,7 @@ use elpa1 #include "elpa.h" .br -.RI "success = \fBelpa_solve_evp_real_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBelpa_solve_evp_real_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -76,7 +80,10 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "int \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) .SH DESCRIPTION diff --git a/man/elpa_solve_evp_real_1stage_double.3 b/man/elpa_solve_evp_real_1stage_double.3 index b5ab63c6ea889f97ff5fd50121746fb099d948a1..9f7bcb7f05ad44bf6420354d1646e5edacb62f33 100644 --- a/man/elpa_solve_evp_real_1stage_double.3 +++ b/man/elpa_solve_evp_real_1stage_double.3 @@ -11,7 +11,7 @@ elpa_solve_evp_real_1stage_double \- solve the real eigenvalue problem with the use elpa1 .br .br -.RI "success = \fBelpa_solve_evp_real_1stage_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBelpa_solve_evp_real_1stage_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br @@ -40,14 +40,17 @@ use elpa1 .br .RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SS C INTERFACE #include "elpa.h" .br -.RI "success = \fBelpa_solve_evp_real_1stage_double\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBelpa_solve_evp_real_1stage_double\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -76,6 +79,10 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br +.RI "int \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/elpa_solve_evp_real_1stage_single.3 b/man/elpa_solve_evp_real_1stage_single.3 index 5db7abf654c706f4cd73110d52e462f8d92f1a5c..778908faf1e8120696fd27189a5706c5a1472462 100644 --- a/man/elpa_solve_evp_real_1stage_single.3 +++ b/man/elpa_solve_evp_real_1stage_single.3 @@ -11,7 +11,7 @@ elpa_solve_evp_real_1stage_single \- solve the real eigenvalue problem with the use elpa1 .br .br -.RI "success = \fBelpa_solve_evp_real_1stage_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBelpa_solve_evp_real_1stage_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br @@ -40,6 +40,10 @@ use elpa1 .br .RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "logical \fBsuccess\fP: return value indicating success or failure" .br @@ -47,7 +51,7 @@ use elpa1 #include "elpa.h" .br -.RI "success = \fBelpa_solve_evp_real_1stage_single\fP (\fBint\fP na, \fBint\fP nev, \fB float *\fPa, \fBint\fP lda, \fB float *\fPev, \fBfloat *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBelpa_solve_evp_real_1stage_single\fP (\fBint\fP na, \fBint\fP nev, \fB float *\fPa, \fBint\fP lda, \fB float *\fPev, \fBfloat *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -76,6 +80,11 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br +.RI "int \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" +.br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br + .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/elpa_solve_evp_real_double.3 b/man/elpa_solve_evp_real_double.3 index e48e9eafb15b6e67a087b4e1067bdf808c1b87f8..e1b2d07e41f75fe0fc5ca0244d637b2879a46205 100644 --- a/man/elpa_solve_evp_real_double.3 +++ b/man/elpa_solve_evp_real_double.3 @@ -11,7 +11,7 @@ elpa_solve_evp_real_double \- solve the real eigenvalue problem use elpa .br .br -.RI "success = \fBelpa_solve_evp_real_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_REAL_ELPA_KERNEL=THIS_REAL_ELPA_KERNEL, useQr=useQR, method=method)" +.RI "success = \fBelpa_solve_evp_real_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_REAL_ELPA_KERNEL, useQr, useGPU, method=method)" .br .RI " " .br @@ -48,6 +48,8 @@ use elpa .br .RI "logical, intent(in), optional: \fBuseQR\fP: optional argument; switches to QR-decomposition if set to .true." 
.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "logical \fBsuccess\fP: return value indicating success or failure" @@ -56,7 +58,7 @@ use elpa #include "elpa.h" .br -.RI "success = \fBelpa_solve_evp_real_double\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL, \fBint\fP useQr, \fbchar *\fPmethod);" +.RI "success = \fBelpa_solve_evp_real_double\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL, \fBint\fP useQr, \fBint\fP useGPU, \fbchar *\fPmethod);" .br .RI " " .br @@ -91,6 +93,8 @@ use elpa .br .RI "int \fBuseQR\fP: if set to 1 switch to QR-decomposition" .br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "char *\fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/elpa_solve_evp_real_single.3 b/man/elpa_solve_evp_real_single.3 index 729e0685ffb5ddbd2a2207e6f4394a2e2d0ebb3c..96fcdf9a86a39c1b302dc709e7acea8b794e6541 100644 --- a/man/elpa_solve_evp_real_single.3 +++ b/man/elpa_solve_evp_real_single.3 @@ -11,7 +11,7 @@ elpa_solve_evp_real_single \- solve the real eigenvalue problem use elpa .br .br -.RI "success = \fBelpa_solve_evp_real_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_REAL_ELPA_KERNEL=THIS_REAL_ELPA_KERNEL, useQr=useQR, method=method)" +.RI "success = \fBelpa_solve_evp_real_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, THIS_REAL_ELPA_KERNE, useQr, useGPU, method)" .br .RI " " .br @@ -48,6 +48,8 @@ use elpa .br .RI "logical, intent(in), optional: \fBuseQR\fP: optional argument; switches to QR-decomposition if set to .true." 
.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: specify whether GPUs should be used or not used" +.br .RI "character(*), optional \fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "logical \fBsuccess\fP: return value indicating success or failure" @@ -56,7 +58,7 @@ use elpa #include "elpa.h" .br -.RI "success = \fBelpa_solve_evp_real_single\fP (\fBint\fP na, \fBint\fP nev, \fB float *\fPa, \fBint\fP lda, \fB float *\fPev, \fBfloat *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL, \fBint\fP useQr, \fbchar *\fPmethod);" +.RI "success = \fBelpa_solve_evp_real_single\fP (\fBint\fP na, \fBint\fP nev, \fB float *\fPa, \fBint\fP lda, \fB float *\fPev, \fBfloat *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL, \fBint\fP useQr, \fBint\fP useGPU, \fbchar *\fPmethod);" .br .RI " " .br @@ -91,6 +93,8 @@ use elpa .br .RI "int \fBuseQR\fP: if set to 1 switch to QR-decomposition" .br +.RI "int \fBuseGPU\fP: specify whether GPUS should be used or not +.br .RI "char *\fBmethod\fP: use 1stage solver if "1stage", use 2stage solver if "2stage", (at the moment) use 2stage solver if "auto" " .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/solve_evp_complex.3 b/man/solve_evp_complex.3 index 21782a78197fda61dc36b23a8fb2eed694691b6f..a14da47b8f17d0eb62c8bb86e4d5fb6c1355bbb2 100644 --- a/man/solve_evp_complex.3 +++ b/man/solve_evp_complex.3 @@ -12,7 +12,7 @@ This interface is old and deprecated. It is recommended to use \fBsolve_evp_comp use elpa1 .br .br -.RI "success = \fBsolve_evp_complex\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBsolve_evp_complex\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br @@ -23,26 +23,30 @@ use elpa1 .br .RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" .br -.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. 
The local dimensions are \fBlda\fP x \fBmatrixCols\fP" .br -.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" .br -.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" .br -.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" .br -.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" .br -.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" .br -.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.br +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: decide whether GPUs should be used or not" .br -.RI "logical \fBsuccess\fP: return value indicating success or failure" +.RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SH DESCRIPTION Old, deprecated interface, which will be deleted at some point. Use \fBsolve_evp_complex_1stage\fP(3) or \fBelpa_solve_evp_complex\fP(3). diff --git a/man/solve_evp_complex_1stage.3 b/man/solve_evp_complex_1stage.3 index a8045d97bb30a0771f97a9d3316e6eb2ebc996c3..92a4f6a8930ca9d8a462754bdb56476cf22160ad 100644 --- a/man/solve_evp_complex_1stage.3 +++ b/man/solve_evp_complex_1stage.3 @@ -12,7 +12,7 @@ This interface is old and deprecated. It is recommended to use \fBelpa_solve_evp use elpa1 .br .br -.RI "success = \fBsolve_evp_complex_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBsolve_evp_complex_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br @@ -23,23 +23,27 @@ use elpa1 .br .RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" .br -.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. 
The local dimensions are \fBlda\fP x \fBmatrixCols\fP" .br -.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" .br -.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" .br -.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" .br -.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" .br -.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" .br -.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.br +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: decide whether GPUs should be used or not" .br .RI "logical \fBsuccess\fP: return value indicating success or failure" @@ -50,7 +54,7 @@ use elpa1 #include .br -.RI "success = \fBsolve_evp_complex_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBsolve_evp_complex_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -79,7 +83,10 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "int \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA" +.br +.RI "int \fBuseGPU\fP: decide whether GPUs should be used or not" +.br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) .SH DESCRIPTION diff --git a/man/solve_evp_complex_1stage_double.3 b/man/solve_evp_complex_1stage_double.3 index b1c88e054608cf71fbbebe3fa6229c55cd8b1965..7000667664ef486d0ad146671d15594c84a2a686 100644 --- a/man/solve_evp_complex_1stage_double.3 +++ b/man/solve_evp_complex_1stage_double.3 @@ -11,34 +11,38 @@ solve_evp_complex_1stage_double \- solve the double-precision complex eigenvalue use elpa1 .br .br -.RI "success = \fBsolve_evp_complex_1stage_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBsolve_evp_complex_1stage_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br .RI "With the definintions of the input and output variables:" .br -.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" .br -.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" .br -.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" .br -.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" .br -.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" .br -.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" .br -.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" .br -.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" .br -.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.br +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: decide whether GPUs should be used or not" .br .RI "logical \fBsuccess\fP: return value indicating success or failure" @@ -49,7 +53,7 @@ use elpa1 #include .br -.RI "success = \fBsolve_evp_complex_1stage_double_precision\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBsolve_evp_complex_1stage_double_precision\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -78,6 +82,10 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br +.RI "int \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA" +.br +.RI "int \fBuseGPU\fP: decide whether GPUs should be used or not" +.br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) diff --git a/man/solve_evp_complex_1stage_single.3 b/man/solve_evp_complex_1stage_single.3 index d36198ce89564fb9cf0f19ddcd5fc7897946f7e5..e61bfd9350b77c83ecaeac8ac3620a520032a9de 100644 --- a/man/solve_evp_complex_1stage_single.3 +++ b/man/solve_evp_complex_1stage_single.3 @@ -11,37 +11,40 @@ solve_evp_complex_1stage_single \- solve the single-precision complex eigenvalue use elpa1 .br .br -.RI "success = \fBsolve_evp_complex_1stage_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBsolve_evp_complex_1stage_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br .RI "With the definintions of the input and output variables:" .br -.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" .br -.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" .br -.RI "complex*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.RI "complex*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. 
The local dimensions are \fBlda\fP x \fBmatrixCols\fP" .br -.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" .br -.RI "real*4, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.RI "real*4, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" .br -.RI "complex*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.RI "complex*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" .br -.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" .br -.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" .br -.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" .br - -.RI "logical \fBsuccess\fP: return value indicating success or failure" +.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA" +.br +.RI "logical, optional, intent(in) \fBuseGPU\fP: decide whether GPUs should be used or not" +.br +.RI "logical \fBsuccess\fP: return value indicating success or failure" .br .SS C INTERFACE #include "elpa.h" @@ -49,7 +52,7 @@ use elpa1 #include .br -.RI "success = \fBsolve_evp_complex_1stage_single_precision\fP (\fBint\fP na, \fBint\fP nev, \fB complex *\fPa, \fBint\fP lda, \fB float *\fPev, \fBcomplex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.RI "success = \fBsolve_evp_complex_1stage_single_precision\fP (\fBint\fP na, \fBint\fP nev, \fB complex *\fPa, \fBint\fP lda, \fB float *\fPev, \fBcomplex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);" .br .RI " " .br @@ -78,7 +81,10 @@ use elpa1 .br .RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBelpa_get_communicators\fP(3)" .br - +.RI "int \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA" +.br +.RI "int \fBuseGPU\fP: decide whether GPUs should be used or not" +.br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) .SH DESCRIPTION diff --git a/man/solve_evp_real.3 b/man/solve_evp_real.3 index 6cb12ae4fe1d8ec6ce7150507a31f85d3c180570..3d598a030f75c46b7c7f4d54ac2b1d3d533cee80 100644 --- a/man/solve_evp_real.3 +++ b/man/solve_evp_real.3 @@ -12,34 +12,38 @@ This is an old and deprecated interface. It is recommendet to use \fBsolve_evp_r use elpa1 .br .br -.RI "success = \fBsolve_evp_real\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.RI "success = \fBsolve_evp_real\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)" .br .RI " " .br .RI "With the definintions of the input and output variables:" .br -.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" .br -.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" .br -.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" .br -.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" .br -.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" .br -.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" .br -.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" .br -.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" .br -.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
diff --git a/man/solve_evp_real.3 b/man/solve_evp_real.3
index 6cb12ae4fe1d8ec6ce7150507a31f85d3c180570..3d598a030f75c46b7c7f4d54ac2b1d3d533cee80 100644
--- a/man/solve_evp_real.3
+++ b/man/solve_evp_real.3
@@ -12,34 +12,38 @@ This is an old and deprecated interface. It is recommendet to use \fBsolve_evp_r
use elpa1
.br
.br
-.RI "success = \fBsolve_evp_real\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)"
+.RI "success = \fBsolve_evp_real\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)"
.br
.RI " "
.br
.RI "With the definintions of the input and output variables:"
.br
-.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve"
+.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve"
.br
-.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated"
+.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalues are calculated"
.br
-.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP"
+.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP"
.br
-.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP"
+.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP"
.br
-.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues"
+.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues"
.br
-.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors"
+.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors"
.br
-.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors"
+.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors"
.br
-.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions"
+.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distribution, must be the same in both directions"
.br
-.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP"
+.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP"
.br
-.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)"
+.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)"
.br
-.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)"
+.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in columns. Constructed with \fBelpa_get_communicators\fP(3)"
+.br
+.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA"
+.br
+.RI "logical, optional, intent(in) \fBuseGPU\fP: decide whether GPUs should be used or not"
.br
.RI "logical \fBsuccess\fP: return value indicating success or failure"
diff --git a/man/solve_evp_real_1stage.3 b/man/solve_evp_real_1stage.3
index 20645614eb294be3037b70b649742f2310d486b7..dc34ded7f730afa182847d792458d00968ccb771 100644
--- a/man/solve_evp_real_1stage.3
+++ b/man/solve_evp_real_1stage.3
@@ -12,43 +12,47 @@ This interface is old and deprecated. It is recommended to use \fBelpa_solve_evp
use elpa1
.br
.br
-.RI "success = \fBsolve_evp_real_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)"
+.RI "success = \fBsolve_evp_real_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)"
.br
.RI " "
.br
.RI "With the definintions of the input and output variables:"
.br
-.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve"
+.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve"
.br
-.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated"
+.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalues are calculated"
.br
-.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP"
+.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP"
.br
-.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP"
+.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP"
.br
-.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues"
+.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues"
.br
-.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors"
+.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors"
.br
-.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors"
+.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors"
.br
-.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions"
+.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distribution, must be the same in both directions"
.br
-.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP"
+.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP"
.br
-.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)"
+.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)"
.br
-.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)"
+.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in columns. Constructed with \fBelpa_get_communicators\fP(3)"
+.br
+.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA"
+.br
+.RI "logical, optional, intent(in) \fBuseGPU\fP: decide whether GPUs should be used or not"
.br
-.RI "logical \fBsuccess\fP: return value indicating success or failure"
+.RI "logical \fBsuccess\fP: return value indicating success or failure"
.br
.SS C INTERFACE
#include "elpa.h"
.br
-.RI "success = \fBsolve_evp_real_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);"
+.RI "success = \fBsolve_evp_real_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);"
.br
.RI " "
.br
@@ -77,6 +81,10 @@ use elpa1
.br
.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)"
.br
+.RI "int \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA"
+.br
+.RI "int \fBuseGPU\fP: decide whether GPUs should be used or not"
+.br
.RI "int \fBsuccess\fP: return value indicating success (1) or failure (0)
diff --git a/man/solve_evp_real_1stage_double.3 b/man/solve_evp_real_1stage_double.3
index a69b80cab04cabf49d1534da861b617b5b29fd00..21e5a65e4dff4af4bac9470ba949a80ff669718b 100644
--- a/man/solve_evp_real_1stage_double.3
+++ b/man/solve_evp_real_1stage_double.3
@@ -11,34 +11,38 @@ solve_evp_real_1stage_double \- solve the double-precision real eigenvalue probl
use elpa1
.br
.br
-.RI "success = \fBsolve_evp_real_1stage_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)"
+.RI "success = \fBsolve_evp_real_1stage_double\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)"
.br
.RI " "
.br
.RI "With the definintions of the input and output variables:"
.br
-.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve"
+.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve"
.br
-.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated"
+.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalues are calculated"
.br
-.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP"
+.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP"
.br
-.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP"
+.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP"
.br
-.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues"
+.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues"
.br
-.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors"
+.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors"
.br
-.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors"
+.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors"
.br
-.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions"
+.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distribution, must be the same in both directions"
.br
-.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP"
+.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP"
.br
-.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)"
+.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)"
.br
-.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)"
+.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in columns. Constructed with \fBelpa_get_communicators\fP(3)"
+.br
+.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA"
+.br
+.RI "logical, optional, intent(in) \fBuseGPU\fP: decide whether GPUs should be used or not"
.br
.RI "logical \fBsuccess\fP: return value indicating success or failure"
@@ -47,7 +51,7 @@ use elpa1
#include "elpa.h"
.br
-.RI "success = \fBsolve_evp_real_1stage_double_precision\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);"
+.RI "success = \fBsolve_evp_real_1stage_double_precision\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);"
.br
.RI " "
.br
@@ -76,6 +80,10 @@ use elpa1
.br
.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)"
.br
+.RI "int \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA"
+.br
+.RI "int \fBuseGPU\fP: decide whether GPUs should be used or not"
+.br
.RI "int \fBsuccess\fP: return value indicating success (1) or failure (0)
diff --git a/man/solve_evp_real_1stage_single.3 b/man/solve_evp_real_1stage_single.3
index f863324e95f0a1348afc41fa45a43a8774ac5fdf..5a3da6863152fe610f9ac724ebeadbdd1b66ca1e 100644
--- a/man/solve_evp_real_1stage_single.3
+++ b/man/solve_evp_real_1stage_single.3
@@ -11,43 +11,47 @@ solve_evp_real_1stage_single \- solve the single-precision real eigenvalue probl
use elpa1
.br
.br
-.RI "success = \fBsolve_evp_real_1stage_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)"
+.RI "success = \fBsolve_evp_real_1stage_single\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU)"
.br
.RI " "
.br
.RI "With the definintions of the input and output variables:"
.br
-.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve"
+.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve"
.br
-.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated"
+.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalues are calculated"
.br
-.RI "real*4, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP"
+.RI "real*4, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP"
.br
-.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP"
+.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP"
.br
-.RI "real*4, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues"
+.RI "real*4, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues"
.br
-.RI "real*4, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors"
+.RI "real*4, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors"
.br
-.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors"
+.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors"
.br
-.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions"
+.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distribution, must be the same in both directions"
.br
-.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP"
+.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP"
.br
-.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)"
+.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBelpa_get_communicators\fP(3)"
.br
-.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)"
+.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in columns. Constructed with \fBelpa_get_communicators\fP(3)"
+.br
+.RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA"
+.br
+.RI "logical, optional, intent(in) \fBuseGPU\fP: decide whether GPUs should be used or not"
.br
-.RI "logical \fBsuccess\fP: return value indicating success or failure"
+.RI "logical \fBsuccess\fP: return value indicating success or failure"
.br
.SS C INTERFACE
#include "elpa.h"
.br
-.RI "success = \fBsolve_evp_real_1stage_single_precision\fP (\fBint\fP na, \fBint\fP nev, \fB float *\fPa, \fBint\fP lda, \fB float *\fPev, \fBfloat *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);"
+.RI "success = \fBsolve_evp_real_1stage_single_precision\fP (\fBint\fP na, \fBint\fP nev, \fB float *\fPa, \fBint\fP lda, \fB float *\fPev, \fBfloat *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP useGPU);"
.br
.RI " "
.br
@@ -76,6 +80,10 @@ use elpa1
.br
.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBelpa_get_communicators\fP(3)"
.br
+.RI "int \fBmpi_comm_all\fP: communicator for all MPI processes used in ELPA"
+.br
+.RI "int \fBuseGPU\fP: decide whether GPUs should be used or not"
+.br
.RI "int \fBsuccess\fP: return value indicating success (1) or failure (0)
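
To close, an analogous illustrative sketch for the single-precision real solver documented in this last page. It follows the C prototype shown above and is otherwise built on assumptions: a single MPI task, the elpa_get_communicators(3) call sketched in the earlier example, and useGPU treated as a 0/1 flag (the man page only states that it selects whether GPUs are used).

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include "elpa.h"

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int na = 500, nev = 50, nblk = 16;           /* illustrative problem sizes  */
  int mpi_comm_all = (int) MPI_Comm_c2f(MPI_COMM_WORLD);
  int mpi_comm_rows, mpi_comm_cols;
  /* Assumed signature and 1 x 1 grid; see elpa_get_communicators(3). */
  elpa_get_communicators(mpi_comm_all, 0, 0,
                         &mpi_comm_rows, &mpi_comm_cols);

  int lda = na, ldq = na, matrixCols = na;     /* single task: local = global */
  float *a  = calloc((size_t)lda * matrixCols, sizeof(float));
  float *q  = calloc((size_t)ldq * matrixCols, sizeof(float));
  float *ev = calloc((size_t)na, sizeof(float));
  /* ... fill a with the distributed symmetric matrix here ... */

  int useGPU  = 1;   /* request GPU kernels; assumed 0 = off, 1 = on */
  int success = solve_evp_real_1stage_single_precision(
      na, nev, a, lda, ev, q, ldq, nblk, matrixCols,
      mpi_comm_rows, mpi_comm_cols, mpi_comm_all, useGPU);

  if (success != 1)
    fprintf(stderr, "solve_evp_real_1stage_single_precision failed\n");

  free(a); free(q); free(ev);
  MPI_Finalize();
  return success == 1 ? 0 : 1;
}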