From bb046d1cb389c5ced2659c7f58836503d99aed62 Mon Sep 17 00:00:00 2001
From: Andreas Marek
Date: Tue, 15 Dec 2015 14:48:32 +0100
Subject: [PATCH] Add interface to unify C and Fortran names

This commit does not change the interfaces defined in ELPA_2015.11.001! All functionality is available via the interface names and definitions as in ELPA_2015.11.001. But some new interfaces have been added in order to unify the references from C and Fortran codes:

- The procedures to create the ELPA (row/column) communicators are now available from C _and_ Fortran with the name "get_elpa_communicators". The old Fortran name "get_elpa_row_col_comms" and the old C name "elpa_get_communicators" are from now on deprecated but still available.

- The 1-stage solver routines are available from C _and_ Fortran via the names "solve_evp_real_1stage" and "solve_evp_complex_1stage". The old Fortran names "solve_evp_real" and "solve_evp_complex" are from now on deprecated but still functional.

All documentation (man pages, doxygen, and example test programs) has been changed accordingly.

This commit implies a change in the API versioning number, but no changes to codes calling ELPA (if they have already been updated to the API of ELPA_2015.11.001).
---
 Makefile.am | 10 ++ configure.ac | 8 +- man/get_elpa_communicators.3 | 59 ++++++++ man/get_elpa_row_col_comms.3 | 6 +- man/print_available_elpa2_kernels.1 | 2 +- man/solve_evp_complex.3 | 49 +------ man/solve_evp_complex_1stage.3 | 88 ++++++++++++ man/solve_evp_complex_2stage.3 | 14 +- man/solve_evp_real.3 | 47 +----- man/solve_evp_real_1stage.3 | 86 +++++++++++ man/solve_evp_real_2stage.3 | 14 +- src/elpa1.F90 | 135 +++++++++++++++--- src/elpa_c_interface.F90 | 43 +++++- .../elpa1_test_complex_c_version.c | 4 +- .../elpa1_test_real_c_version.c | 6 +- .../elpa2_test_complex_c_version.c | 4 +- .../elpa2_test_real_c_version.c | 4 +- test/fortran_test_programs/read_real.F90 | 6 +- test/fortran_test_programs/test_complex.F90 | 6 +- test/fortran_test_programs/test_complex2.F90 | 4 +- .../test_complex2_choose_kernel_with_api.F90 | 4 +- .../test_complex2_default_kernel.F90 | 4 +- test/fortran_test_programs/test_real.F90 | 8 +- test/fortran_test_programs/test_real2.F90 | 4 +- .../test_real2_choose_kernel_with_api.F90 | 4 +- .../test_real2_default_kernel.F90 | 4 +- ..._real2_default_kernel_qr_decomposition.F90 | 4 +- .../test_real_with_c.F90 | 8 +- test/shared_sources/blacs_infrastructure.F90 | 9 ++ test/shared_sources/call_elpa1.c | 4 +- test/shared_sources/check_correctnes.F90 | 9 +- test/shared_sources/prepare_matrix.F90 | 7 +- 32 files changed, 498 insertions(+), 166 deletions(-) create mode 100644 man/get_elpa_communicators.3 create mode 100644 man/solve_evp_complex_1stage.3 create mode 100644 man/solve_evp_real_1stage.3 diff --git a/Makefile.am b/Makefile.am index 9220e48..abb2278 100644 --- a/Makefile.am +++ b/Makefile.am @@ -95,10 +95,13 @@ nobase_elpa_include_HEADERS = $(wildcard modules/*) nobase_elpa_include_HEADERS += elpa/elpa.h elpa/elpa_kernel_constants.h elpa/elpa_generated.h man_MANS = man/solve_evp_real.3 \ + man/solve_evp_real_1stage.3 \ man/solve_evp_complex.3 \ + man/solve_evp_complex_1stage.3 \ man/solve_evp_real_2stage.3 \ man/solve_evp_complex_2stage.3 \ man/get_elpa_row_col_comms.3 \ + man/get_elpa_communicators.3 \ man/print_available_elpa2_kernels.1 # other files to distribute @@ -342,6 +345,13 @@ EXTRA_DIST = \ src/elpa_transpose_vectors.X90 \ src/redist_band.X90 +# Rules to re-generated the headers elpa/elpa_generated.h: 
$(top_srcdir)/src/elpa_c_interface.F90 + grep -h "^ *!c>" $^ | sed 's/^ *!c>//;' > $@ || { rm $@; exit 1; } + +test/shared_sources/generated.h: $(wildcard $(top_srcdir)/test/shared_sources/*.F90) + grep -h "^ *!c>" $^ | sed 's/^ *!c>//;' > $@ || { rm $@; exit 1; } + LIBTOOL_DEPS = @LIBTOOL_DEPS@ libtool: $(LIBTOOL_DEPS) $(SHELL) ./config.status libtool diff --git a/configure.ac b/configure.ac index 56c0d59..9ee00b6 100644 --- a/configure.ac +++ b/configure.ac @@ -34,7 +34,7 @@ rm -rf config.h config-f90.h # by the current interface, as they are ABI compatible (e.g. only new symbols # were added by the new interface) # -AC_SUBST([ELPA_SO_VERSION], [4:1:0]) +AC_SUBST([ELPA_SO_VERSION], [5:0:1]) # @@ -725,7 +725,11 @@ fi echo "Generating elpa/elpa_generated.h..." mkdir -p elpa -grep "^ *!c>" $srcdir/src/elpa_c_interface.F90 | sed 's/^ *!c>//;' > elpa/elpa_generated.h || exit 1 +grep -h "^ *!c>" $srcdir/src/elpa_c_interface.F90 | sed 's/^ *!c>//;' > elpa/elpa_generated.h || exit 1 + +echo "Generating test/shared_sources/generated.h..." +mkdir -p test/shared_sources +grep -h "^ *!c>" $srcdir/test/shared_sources/*.F90 | sed 's/^ *!c>//;' > test/shared_sources/generated.h || exit 1 if test "${can_compile_avx}" = "no" ; then if test x"${want_avx}" = x"yes" ; then diff --git a/man/get_elpa_communicators.3 b/man/get_elpa_communicators.3 new file mode 100644 index 0000000..d60429b --- /dev/null +++ b/man/get_elpa_communicators.3 @@ -0,0 +1,59 @@ +.TH "get_elpa_communicators" 3 "Wed Dec 2 2015" "ELPA" \" -*- nroff -*- +.ad l +.nh +.SH NAME +get_elpa_communicators \- get the MPI row and column communicators needed in ELPA +.br + +.SH SYNOPSIS +.br +.SS FORTRAN INTERFACE +use elpa1 + +.br +.RI "success = \fBget_elpa_communicators\fP (mpi_comm_global, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols)" + +.br +.br +.RI "integer, intent(in) \fBmpi_comm_global\fP: global communicator for the calculation" +.br +.RI "integer, intent(in) \fBmy_prow\fP: row coordinate of the calling process in the process grid" +.br +.RI "integer, intent(in) \fBmy_pcol\fP: column coordinate of the calling process in the process grid" +.br +.RI "integer, intent(out) \fBmpi_comm_row\fP: communicator for communication within rows of processes" +.br +.RI "integer, intent(out) \fBmpi_comm_row\fP: communicator for communication within columns of processes" +.br + +.RI "integer \fBsuccess\fP: return value indicating success or failure of the underlying MPI_COMM_SPLIT function" + +.SS C INTERFACE +#include "elpa_generated.h" + +.br +.RI "success = \fBget_elpa_communicators\fP (int mpi_comm_world, int my_prow, my_pcol, int *mpi_comm_rows, int *Pmpi_comm_cols);" + +.br +.br +.RI "int \fBmpi_comm_global\fP: global communicator for the calculation" +.br +.RI "int \fBmy_prow\fP: row coordinate of the calling process in the process grid" +.br +.RI "int \fBmy_pcol\fP: column coordinate of the calling process in the process grid" +.br +.RI "int *\fBmpi_comm_row\fP: pointer to the communicator for communication within rows of processes" +.br +.RI "int *\fBmpi_comm_row\fP: pointer to the communicator for communication within columns of processes" +.br + +.RI "int \fBsuccess\fP: return value indicating success or failure of the underlying MPI_COMM_SPLIT function" + + + + +.SH DESCRIPTION +All ELPA routines need MPI communicators for communicating within rows or columns of processes. These communicators are created from the \fBmpi_comm_global\fP communicator. 
It is assumed that the matrix used in ELPA is distributed with \fBmy_prow\fP rows and \fBmy_pcol\fP columns on the calling process. This function has to be envoked by all involved processes before any other calls to ELPA routines. +.br +.SH "SEE ALSO" +\fBsolve_evp_real\fP(3) \fBsolve_evp_complex\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) diff --git a/man/get_elpa_row_col_comms.3 b/man/get_elpa_row_col_comms.3 index 1ded36d..ebae482 100644 --- a/man/get_elpa_row_col_comms.3 +++ b/man/get_elpa_row_col_comms.3 @@ -2,7 +2,8 @@ .ad l .nh .SH NAME -get_elpa_row_col_comms \- get the MPI row and column communicators needed in ELPA +get_elpa_row_col_comms \- old, deprecated interface to get the MPI row and column communicators needed in ELPA. +It is recommended to use \fBget_elpa_communicators\fP(3) .br .SH SYNOPSIS @@ -52,8 +53,9 @@ use elpa1 + .SH DESCRIPTION All ELPA routines need MPI communicators for communicating within rows or columns of processes. These communicators are created from the \fBmpi_comm_global\fP communicator. It is assumed that the matrix used in ELPA is distributed with \fBmy_prow\fP rows and \fBmy_pcol\fP columns on the calling process. This function has to be envoked by all involved processes before any other calls to ELPA routines. .br .SH "SEE ALSO" -\fBsolve_evp_real\fP(3) \fBsolve_evp_complex\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) +\fBget_elpa_communicators\fP(3) \fBsolve_evp_real\fP(3) \fBsolve_evp_complex\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) diff --git a/man/print_available_elpa2_kernels.1 b/man/print_available_elpa2_kernels.1 index e391fb3..36fdc61 100644 --- a/man/print_available_elpa2_kernels.1 +++ b/man/print_available_elpa2_kernels.1 @@ -23,5 +23,5 @@ A. Marek, MPCDF .SH "Reporting bugs" Report bugs to the ELPA mail elpa-library@mpcdf.mpg.de .SH "SEE ALSO" -\fBget_elpa_row_col_comms\fP(3) \fBsolve_evp_real\fP(3) \fBsolve_evp_complex\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) +\fBget_elpa_communicators\fP(3) \fBsolve_evp_real\fP(3) \fBsolve_evp_complex\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) diff --git a/man/solve_evp_complex.3 b/man/solve_evp_complex.3 index 13c0aa2..3984ca0 100644 --- a/man/solve_evp_complex.3 +++ b/man/solve_evp_complex.3 @@ -2,7 +2,8 @@ .ad l .nh .SH NAME -solve_evp_complex \- solve the complex eigenvalue problem with the 1-stage ELPA solver +solve_evp_complex \- solve the complex eigenvalue problem with the 1-stage ELPA solver. +This interface is old and deprecated. It is recommended to use \fBsolve_evp_complex_1stage\fP(3) .br .SH SYNOPSIS @@ -36,53 +37,15 @@ use elpa1 .br .RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBget_elpa_communicators\fP(3)" .br .RI "logical \fBsuccess\fP: return value indicating success or failure" .br -.SS C INTERFACE -#include "elpa.h" -.br -#include - -.br -.RI "success = \fBsolve_evp_complex_stage1\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" -.br -.RI " " -.br -.RI "With the definintions of the input and output variables:" - -.br -.RI "int \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" -.br -.RI "int \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" -.br -.RI "double complex *\fBa\fP: pointer to locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" -.br -.RI "int \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" -.br -.RI "double *\fBev\fP: pointer to memory containing on output the first \fBnev\fP computed eigenvalues" -.br -.RI "double complex *\fBq\fP: pointer to memory containing on output the first \fBnev\fP computed eigenvectors" -.br -.RI "int \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" -.br -.RI "int \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" -.br -.RI "int \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" -.br -.RI "int \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_row_col_comms\fP(3)" -.br -.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_row_col_comms\fP(3)" -.br - -.RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) - .SH DESCRIPTION -Solve the complex eigenvalue problem with the 1-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_row_col_comms\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. +Solve the complex eigenvalue problem with the 1-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_communicators\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. 
.br .SH "SEE ALSO" -\fBget_elpa_row_col_comms\fP(3) \fBsolve_evp_real\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) +\fBget_elpa_communicators\fP(3) \fBsolve_evp_real_1stage\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) diff --git a/man/solve_evp_complex_1stage.3 b/man/solve_evp_complex_1stage.3 new file mode 100644 index 0000000..a859d20 --- /dev/null +++ b/man/solve_evp_complex_1stage.3 @@ -0,0 +1,88 @@ +.TH "solve_evp_complex_1stage" 3 "Wed Dec 2 2015" "ELPA" \" -*- nroff -*- +.ad l +.nh +.SH NAME +solve_evp_complex_1stage \- solve the complex eigenvalue problem with the 1-stage ELPA solver +.br + +.SH SYNOPSIS +.br +.SS FORTRAN INTERFACE +use elpa1 +.br +.br +.RI "success = \fBsolve_evp_complex_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.br +.RI " " +.br +.RI "With the definintions of the input and output variables:" + +.br +.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.br +.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.br +.RI "complex*16, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.br +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.br +.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.br +.RI "complex*16, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.br +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.br +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.br +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.br +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" +.br +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_communicators\fP(3)" +.br + +.RI "logical \fBsuccess\fP: return value indicating success or failure" +.br +.SS C INTERFACE +#include "elpa.h" +.br +#include + +.br +.RI "success = \fBsolve_evp_complex_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex*\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.br +.RI " " +.br +.RI "With the definintions of the input and output variables:" + +.br +.RI "int \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.br +.RI "int \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.br +.RI "double complex *\fBa\fP: pointer to locally distributed part of the matrix \fBa\fP. 
The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.br +.RI "int \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.br +.RI "double *\fBev\fP: pointer to memory containing on output the first \fBnev\fP computed eigenvalues" +.br +.RI "double complex *\fBq\fP: pointer to memory containing on output the first \fBnev\fP computed eigenvectors" +.br +.RI "int \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.br +.RI "int \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.br +.RI "int \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.br +.RI "int \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" +.br +.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_communicators\fP(3)" +.br + +.RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) + +.SH DESCRIPTION +Solve the complex eigenvalue problem with the 1-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_communicators\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. +.br +.SH "SEE ALSO" +\fBget_elpa_communicators\fP(3) \fBsolve_evp_real_1stage\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) diff --git a/man/solve_evp_complex_2stage.3 b/man/solve_evp_complex_2stage.3 index 73198ec..71be1b5 100644 --- a/man/solve_evp_complex_2stage.3 +++ b/man/solve_evp_complex_2stage.3 @@ -37,9 +37,9 @@ use elpa2 .br .RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBget_elpa_communicators\fP(3)" .br .RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" .br @@ -51,7 +51,7 @@ use elpa2 #include .br -.RI "success = \fBsolve_evp_complex_stage2\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL);" +.RI "success = \fBsolve_evp_complex_2stage\fP (\fBint\fP na, \fBint\fP nev, \fB double complex *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble complex *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL);" .br .RI " " .br @@ -76,16 +76,16 @@ use elpa2 .br .RI "int \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "int \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "int \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" .br -.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_communicators\fP(3)" .br .RI "int \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" .br .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) .SH DESCRIPTION -Solve the complex eigenvalue problem with the 2-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_row_col_comms\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. +Solve the complex eigenvalue problem with the 2-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_communicators\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. .br .SH "SEE ALSO" -\fBget_elpa_row_col_comms\fP(3) \fBsolve_evp_real\fP(3) \fBsolve_evp_complex\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) +\fBget_elpa_communicators\fP(3) \fBsolve_evp_real_1stage\fP(3) \fBsolve_evp_complex_1stage\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) diff --git a/man/solve_evp_real.3 b/man/solve_evp_real.3 index 61ee69c..a6762cd 100644 --- a/man/solve_evp_real.3 +++ b/man/solve_evp_real.3 @@ -2,7 +2,8 @@ .ad l .nh .SH NAME -solve_evp_real \- solve the real eigenvalue problem with the 1-stage ELPA solver +solve_evp_real \- solve the real eigenvalue problem with the 1-stage ELPA solver. 
+This is an old and deprecated interface. It is recommendet to use \fBsolve_evp_real_1stage\fP(3) .br .SH SYNOPSIS @@ -36,51 +37,15 @@ use elpa1 .br .RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_communicators\fP(3)" .br .RI "logical \fBsuccess\fP: return value indicating success or failure" .br -.SS C INTERFACE -#include "elpa.h" - -.br -.RI "success = \fBsolve_evp_real_stage1\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" -.br -.RI " " -.br -.RI "With the definintions of the input and output variables:" - -.br -.RI "int \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" -.br -.RI "int \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" -.br -.RI "double *\fBa\fP: pointer to locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" -.br -.RI "int \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" -.br -.RI "double *\fBev\fP: pointer to memory containing on output the first \fBnev\fP computed eigenvalues" -.br -.RI "double *\fBq\fP: pointer to memory containing on output the first \fBnev\fP computed eigenvectors" -.br -.RI "int \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" -.br -.RI "int \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" -.br -.RI "int \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" -.br -.RI "int \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_row_col_comms\fP(3)" -.br -.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_row_col_comms\fP(3)" -.br - -.RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) - .SH DESCRIPTION -Solve the real eigenvalue problem with the 1-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_row_col_comms\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. +Solve the real eigenvalue problem with the 1-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_communicators\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. 
The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. .br .SH "SEE ALSO" -\fBget_elpa_row_col_comms\fP(3) \fBsolve_evp_complex\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) +\fBget_elpa_communicators\fP(3) \fBsolve_evp_complex_1stage\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) diff --git a/man/solve_evp_real_1stage.3 b/man/solve_evp_real_1stage.3 new file mode 100644 index 0000000..aa7431d --- /dev/null +++ b/man/solve_evp_real_1stage.3 @@ -0,0 +1,86 @@ +.TH "solve_evp_real_1stage" 3 "Wed Dec 2 2015" "ELPA" \" -*- nroff -*- +.ad l +.nh +.SH NAME +solve_evp_real_1stage \- solve the real eigenvalue problem with the 1-stage ELPA solver +.br + +.SH SYNOPSIS +.br +.SS FORTRAN INTERFACE +use elpa1 +.br +.br +.RI "success = \fBsolve_evp_real_1stage\fP (na, nev, a(lda,matrixCols), ev(nev), q(ldq, matrixCols), ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols)" +.br +.RI " " +.br +.RI "With the definintions of the input and output variables:" + +.br +.RI "integer, intent(in) \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.br +.RI "integer, intent(in) \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.br +.RI "real*8, intent(inout) \fBa\fP: locally distributed part of the matrix \fBa\fP. The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.br +.RI "integer, intent(in) \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.br +.RI "real*8, intent(inout) \fBev\fP: on output the first \fBnev\fP computed eigenvalues" +.br +.RI "real*8, intent(inout) \fBq\fP: on output the first \fBnev\fP computed eigenvectors" +.br +.RI "integer, intent(in) \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.br +.RI "integer, intent(in) \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.br +.RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.br +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" +.br +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_communicators\fP(3)" +.br + +.RI "logical \fBsuccess\fP: return value indicating success or failure" +.br +.SS C INTERFACE +#include "elpa.h" + +.br +.RI "success = \fBsolve_evp_real_1stage\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols);" +.br +.RI " " +.br +.RI "With the definintions of the input and output variables:" + +.br +.RI "int \fBna\fP: global dimension of quadratic matrix \fBa\fP to solve" +.br +.RI "int \fBnev\fP: number of eigenvalues to be computed; the first \fBnev\fP eigenvalules are calculated" +.br +.RI "double *\fBa\fP: pointer to locally distributed part of the matrix \fBa\fP. 
The local dimensions are \fBlda\fP x \fBmatrixCols\fP" +.br +.RI "int \fBlda\fP: leading dimension of locally distributed matrix \fBa\fP" +.br +.RI "double *\fBev\fP: pointer to memory containing on output the first \fBnev\fP computed eigenvalues" +.br +.RI "double *\fBq\fP: pointer to memory containing on output the first \fBnev\fP computed eigenvectors" +.br +.RI "int \fBldq\fP: leading dimension of matrix \fBq\fP which stores the eigenvectors" +.br +.RI "int \fBnblk\fP: blocksize of block cyclic distributin, must be the same in both directions" +.br +.RI "int \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" +.br +.RI "int \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" +.br +.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_communicators\fP(3)" +.br + +.RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) + +.SH DESCRIPTION +Solve the real eigenvalue problem with the 1-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_communicators\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. +.br +.SH "SEE ALSO" +\fBget_elpa_communicators\fP(3) \fBsolve_evp_complex_1stage\fP(3) \fBsolve_evp_real_2stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) diff --git a/man/solve_evp_real_2stage.3 b/man/solve_evp_real_2stage.3 index c0b30eb..207216f 100644 --- a/man/solve_evp_real_2stage.3 +++ b/man/solve_evp_real_2stage.3 @@ -37,9 +37,9 @@ use elpa2 .br .RI "integer, intent(in) \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" .br -.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "integer, intent(in) \fBmpi_comm_cols\fP: communicator for communication in colums. 
Constructed with \fBget_elpa_communicators\fP(3)" .br .RI "integer, intent(in) \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" .br @@ -51,7 +51,7 @@ use elpa2 #include "elpa.h" .br -.RI "success = \fBsolve_evp_real_stage2\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL, \fBint\fP useQr);" +.RI "success = \fBsolve_evp_real_2stage\fP (\fBint\fP na, \fBint\fP nev, \fB double *\fPa, \fBint\fP lda, \fB double *\fPev, \fBdouble *\fPq, \fBint\fP ldq, \fBint\fP nblk, \fBint\fP matrixCols, \fBint\fP mpi_comm_rows, \fBint\fP mpi_comm_cols, \fBint\fP mpi_comm_all, \fBint\fP THIS_ELPA_REAL_KERNEL, \fBint\fP useQr);" .br .RI " " .br @@ -76,9 +76,9 @@ use elpa2 .br .RI "int \fBmatrixCols\fP: number of columns of locally distributed matrices \fBa\fP and \fBq\fP" .br -.RI "int \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "int \fBmpi_comm_rows\fP: communicator for communication in rows. Constructed with \fBget_elpa_communicators\fP(3)" .br -.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_row_col_comms\fP(3)" +.RI "int \fBmpi_comm_cols\fP: communicator for communication in colums. Constructed with \fBget_elpa_communicators\fP(3)" .br .RI "int \fBmpi_comm_all\fP: communicator for all processes in the processor set involved in ELPA" .br @@ -87,7 +87,7 @@ use elpa2 .RI "int \fBsuccess\fP: return value indicating success (1) or failure (0) .SH DESCRIPTION -Solve the real eigenvalue problem with the 2-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_row_col_comms\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. +Solve the real eigenvalue problem with the 2-stage solver. The ELPA communicators \fBmpi_comm_rows\fP and \fBmpi_comm_cols\fP are obtained with the \fBget_elpa_communicators\fP(3) function. The distributed quadratic marix \fBa\fP has global dimensions \fBna\fP x \fBna\fP, and a local size \fBlda\fP x \fBmatrixCols\fP. The solver will compute the first \fBnev\fP eigenvalues, which will be stored on exit in \fBev\fP. The eigenvectors corresponding to the eigenvalues will be stored in \fBq\fP. All memory of the arguments must be allocated outside the call to the solver. .br .SH "SEE ALSO" -\fBget_elpa_row_col_comms\fP(3) \fBsolve_evp_real\fP(3) \fBsolve_evp_complex\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) +\fBget_elpa_communicators\fP(3) \fBsolve_evp_real_1stage\fP(3) \fBsolve_evp_complex_1stage\fP(3) \fBsolve_evp_complex_2stage\fP(3) \fBprint_available_elpa2_kernels\fP(1) diff --git a/src/elpa1.F90 b/src/elpa1.F90 index 5596b2a..86ff564 100644 --- a/src/elpa1.F90 +++ b/src/elpa1.F90 @@ -94,10 +94,13 @@ module ELPA1 ! 
The following routines are public: - public :: get_elpa_row_col_comms !< Sets MPI row/col communicators + public :: get_elpa_row_col_comms !< old, deprecated interface: Sets MPI row/col communicators + public :: get_elpa_communicators !< Sets MPI row/col communicators - public :: solve_evp_real !< Driver routine for real eigenvalue problem - public :: solve_evp_complex !< Driver routine for complex eigenvalue problem + public :: solve_evp_real !< old, deprecated interface: Driver routine for real eigenvalue problem + public :: solve_evp_real_1stage !< Driver routine for real eigenvalue problem + public :: solve_evp_complex !< old, deprecated interface: Driver routine for complex eigenvalue problem + public :: solve_evp_complex_1stage !< Driver routine for complex eigenvalue problem ! Timing results, set by every call to solve_evp_xxx @@ -109,12 +112,110 @@ module ELPA1 include 'mpif.h' +!> \brief get_elpa_row_col_comms: old, deprecated Fortran function to create the MPI communicators for ELPA. Better use "elpa_get_communicators" +!> \detail +!> The interface and variable definition is the same as in "elpa_get_communicators" +!> \param mpi_comm_global Global communicator for the calculations (in) +!> +!> \param my_prow Row coordinate of the calling process in the process grid (in) +!> +!> \param my_pcol Column coordinate of the calling process in the process grid (in) +!> +!> \param mpi_comm_rows Communicator for communicating within rows of processes (out) +!> +!> \param mpi_comm_cols Communicator for communicating within columns of processes (out) +!> \result mpierr integer error value of mpi_comm_split function + interface get_elpa_row_col_comms + module procedure get_elpa_communicators + end interface + +!> \brief solve_evp_real: old, deprecated Fortran function to solve the real eigenvalue problem with 1-stage solver. Better use "solve_evp_real_1stage" +!> +!> \detail +!> The interface and variable definition is the same as in "elpa_solve_evp_real_1stage" +! Parameters +! +!> \param na Order of matrix a +!> +!> \param nev Number of eigenvalues needed. +!> The smallest nev eigenvalues/eigenvectors are calculated. +!> +!> \param a(lda,matrixCols) Distributed matrix for which eigenvalues are to be computed. +!> Distribution is like in Scalapack. +!> The full matrix must be set (not only one half like in scalapack). +!> Destroyed on exit (upper and lower half). +!> +!> \param lda Leading dimension of a +!> +!> \param ev(na) On output: eigenvalues of a, every processor gets the complete set +!> +!> \param q(ldq,matrixCols) On output: Eigenvectors of a +!> Distribution is like in Scalapack. +!> Must be always dimensioned to the full size (corresponding to (na,na)) +!> even if only a part of the eigenvalues is needed. +!> +!> \param ldq Leading dimension of q +!> +!> \param nblk blocksize of cyclic distribution, must be the same in both directions! +!> +!> \param matrixCols distributed number of matrix columns +!> +!> \param mpi_comm_rows MPI-Communicator for rows +!> \param mpi_comm_cols MPI-Communicator for columns +!> +!> \result success + + + interface solve_evp_real + module procedure solve_evp_real_1stage + end interface + +!> \brief solve_evp_complex: old, deprecated Fortran function to solve the complex eigenvalue problem with 1-stage solver. Better use "solve_evp_complex_1stage" +!> +!> \detail +!> The interface and variable definition is the same as in "elpa_solve_evp_complex_1stage" +! Parameters +! +!> \param na Order of matrix a +!> +!> \param nev Number of eigenvalues needed. 
+!> The smallest nev eigenvalues/eigenvectors are calculated. +!> +!> \param a(lda,matrixCols) Distributed matrix for which eigenvalues are to be computed. +!> Distribution is like in Scalapack. +!> The full matrix must be set (not only one half like in scalapack). +!> Destroyed on exit (upper and lower half). +!> +!> \param lda Leading dimension of a +!> +!> \param ev(na) On output: eigenvalues of a, every processor gets the complete set +!> +!> \param q(ldq,matrixCols) On output: Eigenvectors of a +!> Distribution is like in Scalapack. +!> Must be always dimensioned to the full size (corresponding to (na,na)) +!> even if only a part of the eigenvalues is needed. +!> +!> \param ldq Leading dimension of q +!> +!> \param nblk blocksize of cyclic distribution, must be the same in both directions! +!> +!> \param matrixCols distributed number of matrix columns +!> +!> \param mpi_comm_rows MPI-Communicator for rows +!> \param mpi_comm_cols MPI-Communicator for columns +!> +!> \result success + + + interface solve_evp_complex + module procedure solve_evp_complex_1stage + end interface + contains !------------------------------------------------------------------------------- -!> \brief Fortran function to create the MPI communicators for ELPA -! get_elpa_row_col_comms: +!> \brief Fortran function to create the MPI communicators for ELPA. ! All ELPA routines need MPI communicators for communicating within ! rows or columns of processes, these are set here. ! mpi_comm_rows/mpi_comm_cols can be free'd with MPI_Comm_free if not used any more. @@ -133,7 +234,7 @@ contains !> \result mpierr integer error value of mpi_comm_split function -function get_elpa_row_col_comms(mpi_comm_global, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols) result(mpierr) +function get_elpa_communicators(mpi_comm_global, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols) result(mpierr) implicit none @@ -150,10 +251,10 @@ function get_elpa_row_col_comms(mpi_comm_global, my_prow, my_pcol, mpi_comm_rows call mpi_comm_split(mpi_comm_global,my_pcol,my_prow,mpi_comm_rows,mpierr) call mpi_comm_split(mpi_comm_global,my_prow,my_pcol,mpi_comm_cols,mpierr) -end function get_elpa_row_col_comms +end function get_elpa_communicators -! \brief solve_evp_real: Fortran function to solve the real eigenvalue problem with 1-stage solver +!> \brief solve_evp_real_1stage: Fortran function to solve the real eigenvalue problem with 1-stage solver !> ! Parameters ! @@ -188,7 +289,7 @@ end function get_elpa_row_col_comms !> \result success -function solve_evp_real(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success) +function solve_evp_real_1stage(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success) #ifdef HAVE_DETAILED_TIMINGS use timings @@ -206,7 +307,7 @@ function solve_evp_real(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_ logical :: wantDebug #ifdef HAVE_DETAILED_TIMINGS - call timer%start("solve_evp_real") + call timer%start("solve_evp_real_1stage") #endif call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr) @@ -247,13 +348,13 @@ function solve_evp_real(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_ deallocate(e, tau) #ifdef HAVE_DETAILED_TIMINGS - call timer%stop("solve_evp_real") + call timer%stop("solve_evp_real_1stage") #endif -end function solve_evp_real +end function solve_evp_real_1stage -! 
\brief solve_evp_real: Fortran function to solve the complex eigenvalue problem with 1-stage solver +!> \brief solve_evp_complex_1stage: Fortran function to solve the complex eigenvalue problem with 1-stage solver !> ! Parameters ! @@ -287,7 +388,7 @@ end function solve_evp_real !> !> \result success -function solve_evp_complex(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success) +function solve_evp_complex_1stage(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols) result(success) #ifdef HAVE_DETAILED_TIMINGS use timings #endif @@ -309,7 +410,7 @@ function solve_evp_complex(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_co logical :: wantDebug #ifdef HAVE_DETAILED_TIMINGS - call timer%start("solve_evp_complex") + call timer%start("solve_evp_complex_1stage") #endif call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr) @@ -361,10 +462,10 @@ function solve_evp_complex(na, nev, a, lda, ev, q, ldq, nblk, matrixCols, mpi_co deallocate(q_real) deallocate(e, tau) #ifdef HAVE_DETAILED_TIMINGS - call timer%stop("solve_evp_complex") + call timer%stop("solve_evp_complex_1stage") #endif -end function solve_evp_complex +end function solve_evp_complex_1stage diff --git a/src/elpa_c_interface.F90 b/src/elpa_c_interface.F90 index 339bcdc..c4d14e4 100644 --- a/src/elpa_c_interface.F90 +++ b/src/elpa_c_interface.F90 @@ -50,7 +50,7 @@ #include "config-f90.h" !c> #include - !c> /*! \brief C interface to create the MPI communicators for ELPA + !c> /*! \brief C old, deprecated interface to create the MPI communicators for ELPA !c> * !c> * \param mpi_comm_word MPI global communicator (in) !c> * \param my_prow Row coordinate of the calling process in the process grid (in) @@ -59,7 +59,7 @@ !c> * \result int integer error value of mpi_comm_split function !c> */ !c> int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols); - function get_elpa_row_col_comms_wrapper(mpi_comm_world, my_prow, my_pcol, & + function get_elpa_row_col_comms_wrapper_c_name1(mpi_comm_world, my_prow, my_pcol, & mpi_comm_rows, mpi_comm_cols) & result(mpierr) bind(C,name="elpa_get_communicators") use, intrinsic :: iso_c_binding @@ -74,6 +74,35 @@ mpi_comm_rows, mpi_comm_cols) end function + !c> #include + + !c> /*! \brief C interface to create the MPI communicators for ELPA + !c> * + !c> * \param mpi_comm_word MPI global communicator (in) + !c> * \param my_prow Row coordinate of the calling process in the process grid (in) + !c> * \param my_pcol Column coordinate of the calling process in the process grid (in) + !c> * \param mpi_comm_rows Communicator for communicating within rows of processes (out) + !c> * \result int integer error value of mpi_comm_split function + !c> */ + !c> int get_elpa_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols); + function get_elpa_row_col_comms_wrapper_c_name2(mpi_comm_world, my_prow, my_pcol, & + mpi_comm_rows, mpi_comm_cols) & + result(mpierr) bind(C,name="get_elpa_communicators") + use, intrinsic :: iso_c_binding + use elpa1, only : get_elpa_row_col_comms + + implicit none + integer(kind=c_int) :: mpierr + integer(kind=c_int), value :: mpi_comm_world, my_prow, my_pcol + integer(kind=c_int) :: mpi_comm_rows, mpi_comm_cols + + mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, & + mpi_comm_rows, mpi_comm_cols) + + end function + + + !c> /*! 
\brief C interface to solve the real eigenvalue problem with 1-stage solver !c> * !c> * \param na Order of matrix a @@ -96,7 +125,7 @@ !c> * !c> * \result int: 1 if error occured, otherwise 0 !c>*/ - !c> int elpa_solve_evp_real_stage1(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols); + !c> int elpa_solve_evp_real_1stage(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols); function solve_elpa1_evp_real_wrapper(na, nev, a, lda, ev, q, ldq, nblk, & matrixCols, mpi_comm_rows, mpi_comm_cols) & result(success) bind(C,name="elpa_solve_evp_real_1stage") @@ -120,6 +149,8 @@ endif end function + + !c> /*! \brief C interface to solve the complex eigenvalue problem with 1-stage solver !c> * !c> * \param na Order of matrix a @@ -142,7 +173,7 @@ !c> * !c> * \result int: 1 if error occured, otherwise 0 !c> */ - !c> int elpa_solve_evp_complex_stage1(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols); + !c> int elpa_solve_evp_complex_1stage(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols); function solve_evp_real_wrapper(na, nev, a, lda, ev, q, ldq, nblk, & matrixCols, mpi_comm_rows, mpi_comm_cols) & result(success) bind(C,name="elpa_solve_evp_complex_1stage") @@ -192,7 +223,7 @@ !c> * !c> * \result int: 1 if error occured, otherwise 0 !c> */ - !c> int elpa_solve_evp_real_stage2(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR); + !c> int elpa_solve_evp_real_2stage(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR); function solve_elpa2_evp_real_wrapper(na, nev, a, lda, ev, q, ldq, nblk, & matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, & THIS_REAL_ELPA_KERNEL_API, useQR) & @@ -256,7 +287,7 @@ !c> * !c> * \result int: 1 if error occured, otherwise 0 !c> */ - !c> int elpa_solve_evp_complex_stage2(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API); + !c> int elpa_solve_evp_complex_2stage(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API); function solve_elpa2_evp_complex_wrapper(na, nev, a, lda, ev, q, ldq, nblk, & matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, & THIS_COMPLEX_ELPA_KERNEL_API) & diff --git a/test/c_test_programs/elpa1_test_complex_c_version.c b/test/c_test_programs/elpa1_test_complex_c_version.c index 5a99df2..b067cc2 100644 --- a/test/c_test_programs/elpa1_test_complex_c_version.c +++ b/test/c_test_programs/elpa1_test_complex_c_version.c @@ -51,7 +51,7 @@ #include #include -main(int argc, char** argv) { +int main(int argc, char** argv) { int myid; int nprocs; @@ -135,7 +135,7 @@ main(int argc, char** argv) { /* get the ELPA row and col communicators. */ /* These are NOT usable in C without calling the MPI_Comm_f2c function on them !! 
*/ my_mpi_comm_world = MPI_Comm_c2f(MPI_COMM_WORLD); - mpierr = elpa_get_communicators(my_mpi_comm_world, my_prow, my_pcol, &mpi_comm_rows, &mpi_comm_cols); + mpierr = get_elpa_communicators(my_mpi_comm_world, my_prow, my_pcol, &mpi_comm_rows, &mpi_comm_cols); if (myid == 0) { printf("\n"); diff --git a/test/c_test_programs/elpa1_test_real_c_version.c b/test/c_test_programs/elpa1_test_real_c_version.c index b50019f..547a3e7 100644 --- a/test/c_test_programs/elpa1_test_real_c_version.c +++ b/test/c_test_programs/elpa1_test_real_c_version.c @@ -50,7 +50,9 @@ #include -main(int argc, char** argv) { +#include "test/shared_sources/generated.h" + +int main(int argc, char** argv) { int myid; int nprocs; @@ -131,7 +133,7 @@ main(int argc, char** argv) { /* get the ELPA row and col communicators. */ /* These are NOT usable in C without calling the MPI_Comm_f2c function on them !! */ my_mpi_comm_world = MPI_Comm_c2f(MPI_COMM_WORLD); - mpierr = elpa_get_communicators(my_mpi_comm_world, my_prow, my_pcol, &mpi_comm_rows, &mpi_comm_cols); + mpierr = get_elpa_communicators(my_mpi_comm_world, my_prow, my_pcol, &mpi_comm_rows, &mpi_comm_cols); if (myid == 0) { printf("\n"); diff --git a/test/c_test_programs/elpa2_test_complex_c_version.c b/test/c_test_programs/elpa2_test_complex_c_version.c index f45f58f..f8dc4e3 100644 --- a/test/c_test_programs/elpa2_test_complex_c_version.c +++ b/test/c_test_programs/elpa2_test_complex_c_version.c @@ -51,7 +51,7 @@ #include #include -main(int argc, char** argv) { +int main(int argc, char** argv) { int myid; int nprocs; @@ -137,7 +137,7 @@ main(int argc, char** argv) { /* get the ELPA row and col communicators. */ /* These are NOT usable in C without calling the MPI_Comm_f2c function on them !! */ my_mpi_comm_world = MPI_Comm_c2f(MPI_COMM_WORLD); - mpierr = elpa_get_communicators(my_mpi_comm_world, my_prow, my_pcol, &mpi_comm_rows, &mpi_comm_cols); + mpierr = get_elpa_communicators(my_mpi_comm_world, my_prow, my_pcol, &mpi_comm_rows, &mpi_comm_cols); if (myid == 0) { printf("\n"); diff --git a/test/c_test_programs/elpa2_test_real_c_version.c b/test/c_test_programs/elpa2_test_real_c_version.c index f496be2..3cd16b1 100644 --- a/test/c_test_programs/elpa2_test_real_c_version.c +++ b/test/c_test_programs/elpa2_test_real_c_version.c @@ -50,7 +50,7 @@ #include -main(int argc, char** argv) { +int main(int argc, char** argv) { int myid; int nprocs; @@ -133,7 +133,7 @@ main(int argc, char** argv) { /* get the ELPA row and col communicators. */ /* These are NOT usable in C without calling the MPI_Comm_f2c function on them !! */ my_mpi_comm_world = MPI_Comm_c2f(MPI_COMM_WORLD); - mpierr = elpa_get_communicators(my_mpi_comm_world, my_prow, my_pcol, &mpi_comm_rows, &mpi_comm_cols); + mpierr = get_elpa_communicators(my_mpi_comm_world, my_prow, my_pcol, &mpi_comm_rows, &mpi_comm_cols); if (myid == 0) { printf("\n"); diff --git a/test/fortran_test_programs/read_real.F90 b/test/fortran_test_programs/read_real.F90 index e1a049c..c5e66d8 100644 --- a/test/fortran_test_programs/read_real.F90 +++ b/test/fortran_test_programs/read_real.F90 @@ -220,9 +220,9 @@ program read_real call BLACS_Gridinfo( my_blacs_ctxt, nprow, npcol, my_prow, my_pcol ) ! All ELPA routines need MPI communicators for communicating within - ! rows or columns of processes, these are set in get_elpa_row_col_comms. + ! 
rows or columns of processes, these are set in get_elpa_communicators
 
-   call get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   call get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                mpi_comm_rows, mpi_comm_cols)
 
    ! Read matrix size
@@ -279,7 +279,7 @@ program read_real
    ! Calculate eigenvalues/eigenvectors
 
    call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
-   call solve_evp_real(na, nev, a, na_rows, ev, z, na_rows, nblk, &
+   call solve_evp_real_1stage(na, nev, a, na_rows, ev, z, na_rows, nblk, &
                        mpi_comm_rows, mpi_comm_cols)
 
    if(myid == 0) print *,'Time tridiag_real :',time_evp_fwd
diff --git a/test/fortran_test_programs/test_complex.F90 b/test/fortran_test_programs/test_complex.F90
index 30f15b2..53e1fb9 100644
--- a/test/fortran_test_programs/test_complex.F90
+++ b/test/fortran_test_programs/test_complex.F90
@@ -229,9 +229,9 @@ program test_complex
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators.
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
@@ -277,7 +277,7 @@ program test_complex
    end if
 
    call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
-   success = solve_evp_complex(na, nev, a, na_rows, ev, z, na_rows, nblk, &
+   success = solve_evp_complex_1stage(na, nev, a, na_rows, ev, z, na_rows, nblk, &
                                na_cols, mpi_comm_rows, mpi_comm_cols)
 
    if (.not.(success)) then
diff --git a/test/fortran_test_programs/test_complex2.F90 b/test/fortran_test_programs/test_complex2.F90
index a570168..69c9e52 100644
--- a/test/fortran_test_programs/test_complex2.F90
+++ b/test/fortran_test_programs/test_complex2.F90
@@ -261,9 +261,9 @@ program test_complex2
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators.
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
diff --git a/test/fortran_test_programs/test_complex2_choose_kernel_with_api.F90 b/test/fortran_test_programs/test_complex2_choose_kernel_with_api.F90
index 154a0de..bf11c28 100644
--- a/test/fortran_test_programs/test_complex2_choose_kernel_with_api.F90
+++ b/test/fortran_test_programs/test_complex2_choose_kernel_with_api.F90
@@ -256,9 +256,9 @@ program test_complex2
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
diff --git a/test/fortran_test_programs/test_complex2_default_kernel.F90 b/test/fortran_test_programs/test_complex2_default_kernel.F90
index 62189d9..42c664e 100644
--- a/test/fortran_test_programs/test_complex2_default_kernel.F90
+++ b/test/fortran_test_programs/test_complex2_default_kernel.F90
@@ -256,9 +256,9 @@ program test_complex2
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
diff --git a/test/fortran_test_programs/test_real.F90 b/test/fortran_test_programs/test_real.F90
index 842a48b..1029b61 100644
--- a/test/fortran_test_programs/test_real.F90
+++ b/test/fortran_test_programs/test_real.F90
@@ -236,9 +236,9 @@ program test_real
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators.
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
@@ -278,11 +278,11 @@ program test_real
    end if
 
    call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
-   success = solve_evp_real(na, nev, a, na_rows, ev, z, na_rows, nblk, &
+   success = solve_evp_real_1stage(na, nev, a, na_rows, ev, z, na_rows, nblk, &
                             na_cols, mpi_comm_rows, mpi_comm_cols)
 
    if (.not.(success)) then
-      write(error_unit,*) "solve_evp_real produced an error! Aborting..."
+      write(error_unit,*) "solve_evp_real_1stage produced an error! Aborting..."
       call MPI_ABORT(mpi_comm_world, 1, mpierr)
    endif
diff --git a/test/fortran_test_programs/test_real2.F90 b/test/fortran_test_programs/test_real2.F90
index 5535777..0760ddd 100644
--- a/test/fortran_test_programs/test_real2.F90
+++ b/test/fortran_test_programs/test_real2.F90
@@ -264,9 +264,9 @@ program test_real2
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators.
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
diff --git a/test/fortran_test_programs/test_real2_choose_kernel_with_api.F90 b/test/fortran_test_programs/test_real2_choose_kernel_with_api.F90
index ea1bf6a..3fa289f 100644
--- a/test/fortran_test_programs/test_real2_choose_kernel_with_api.F90
+++ b/test/fortran_test_programs/test_real2_choose_kernel_with_api.F90
@@ -252,9 +252,9 @@ program test_real2
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators.
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
diff --git a/test/fortran_test_programs/test_real2_default_kernel.F90 b/test/fortran_test_programs/test_real2_default_kernel.F90
index 98b728f..492cb16 100644
--- a/test/fortran_test_programs/test_real2_default_kernel.F90
+++ b/test/fortran_test_programs/test_real2_default_kernel.F90
@@ -249,9 +249,9 @@ program test_real2
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators.
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
diff --git a/test/fortran_test_programs/test_real2_default_kernel_qr_decomposition.F90 b/test/fortran_test_programs/test_real2_default_kernel_qr_decomposition.F90
index 231ed0a..eb9aa88 100644
--- a/test/fortran_test_programs/test_real2_default_kernel_qr_decomposition.F90
+++ b/test/fortran_test_programs/test_real2_default_kernel_qr_decomposition.F90
@@ -260,9 +260,9 @@ program test_real2
    end if
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators.
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    if (myid==0) then
diff --git a/test/fortran_test_programs/test_real_with_c.F90 b/test/fortran_test_programs/test_real_with_c.F90
index e30965e..6750ccc 100644
--- a/test/fortran_test_programs/test_real_with_c.F90
+++ b/test/fortran_test_programs/test_real_with_c.F90
@@ -244,9 +244,9 @@ program test_real
    my_pcolFromC = my_pcol
 
    ! All ELPA routines need MPI communicators for communicating within
-   ! rows or columns of processes, these are set in get_elpa_row_col_comms.
+   ! rows or columns of processes, these are set in get_elpa_communicators.
 
-   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
+   mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
                                    mpi_comm_rows, mpi_comm_cols)
 
    ! call here a c function, which via the c-interface in turn calls the
@@ -300,11 +300,11 @@ program test_real
    end if
 
    call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
-   success = solve_evp_real(na, nev, a, na_rows, ev, z, na_rows, nblk, &
+   success = solve_evp_real_1stage(na, nev, a, na_rows, ev, z, na_rows, nblk, &
                             na_cols, mpi_comm_rows, mpi_comm_cols)
 
    if (.not.(success)) then
-      write(error_unit,*) "solve_evp_real produced an error! Aborting..."
+      write(error_unit,*) "solve_evp_real_1stage produced an error! Aborting..."
       call MPI_ABORT(mpi_comm_world, 1, mpierr)
    endif
diff --git a/test/shared_sources/blacs_infrastructure.F90 b/test/shared_sources/blacs_infrastructure.F90
index f9a79a9..95c96f9 100644
--- a/test/shared_sources/blacs_infrastructure.F90
+++ b/test/shared_sources/blacs_infrastructure.F90
@@ -57,6 +57,9 @@ module mod_blacs_infrastructure
      call BLACS_Gridinfo(my_blacs_ctxt, nprow, npcol, my_prow, my_pcol)
    end subroutine
 
+   !c> void set_up_blacsgrid_from_fortran(int mpi_comm_world, int* my_blacs_ctxt,
+   !c>                                    int *np_rows, int *np_cols, int *nprow, int *npcol,
+   !c>                                    int *my_prow, int *my_pcol);
    subroutine set_up_blacsgrid_wrapper(mpi_comm_world, my_blacs_ctxt, np_rows, &
                                        np_cols, nprow, npcol, my_prow, my_pcol) &
                                        bind(C, name="set_up_blacsgrid_from_fortran")
@@ -112,6 +115,12 @@ module mod_blacs_infrastructure
 
    end subroutine
 
+   !c> void set_up_blacs_descriptor_from_fortran(int na, int nblk, int my_prow, int my_pcol,
+   !c>                                           int np_rows, int np_cols,
+   !c>                                           int *na_rows, int *na_cols,
+   !c>                                           int sc_desc[9],
+   !c>                                           int my_blacs_ctxt,
+   !c>                                           int *info);
    subroutine set_up_blacs_descriptor_wrapper(na, nblk, my_prow, my_pcol, &
                                               np_rows, np_cols, na_rows, &
                                               na_cols, sc_desc, &
diff --git a/test/shared_sources/call_elpa1.c b/test/shared_sources/call_elpa1.c
index 66206c0..9afb20f 100644
--- a/test/shared_sources/call_elpa1.c
+++ b/test/shared_sources/call_elpa1.c
@@ -48,13 +48,13 @@
 int call_elpa1_real_solver_from_c(int na, int nev, int ncols, double *a, int lda, double *ev, double *q, int ldq, int nblk, int mpi_comm_rows, int mpi_comm_cols) {
   int result;
-  result = elpa_solve_evp_real_1stage(na, nev, ncols, a, lda, ev, q, ldq, nblk, mpi_comm_rows, mpi_comm_cols);
+  result = elpa_solve_evp_real_1stage(na, nev, a, lda, ev, q, ldq, nblk, ncols, mpi_comm_rows, mpi_comm_cols);
   return result;
 }
 
 int call_elpa1_complex_solver_from_c(int na, int nev, int ncols, complex double *a, int lda, double *ev, complex double *q, int ldq, int nblk, int mpi_comm_rows, int mpi_comm_cols) {
   int result;
-  result = elpa_solve_evp_complex_1stage(na, nev, ncols, a, lda, ev, q, ldq, nblk, mpi_comm_rows, mpi_comm_cols);
+  result = elpa_solve_evp_complex_1stage(na, nev, a, lda, ev, q, ldq, nblk, ncols, mpi_comm_rows, mpi_comm_cols);
   return result;
 }
diff --git a/test/shared_sources/check_correctnes.F90 b/test/shared_sources/check_correctnes.F90
index 85c6da9..a667662 100644
--- a/test/shared_sources/check_correctnes.F90
+++ b/test/shared_sources/check_correctnes.F90
@@ -203,6 +203,10 @@ module mod_check_correctness
      endif
    end function
 
+   !c> int check_correctness_real_from_fortran(int na, int nev, int na_rows, int na_cols,
+   !c>                                         double *as, double *z, double *ev,
+   !c>                                         int sc_desc[9], int myid,
+   !c>                                         double *tmp1, double *tmp2);
    function check_correctness_real_wrapper(na, nev, na_rows, na_cols, as, z, ev, sc_desc, myid, tmp1, tmp2) result(status) &
      bind(C,name="check_correctness_real_from_fortran")
 
@@ -220,7 +224,10 @@ module mod_check_correctness
      status = check_correctness_real(na, nev, as, z, ev, sc_desc, myid, tmp1, tmp2)
    end function
 
-
+   !c> int check_correctness_complex_from_fortran(int na, int nev, int na_rows, int na_cols,
+   !c>                                            complex double *as, complex double *z, double *ev,
+   !c>                                            int sc_desc[9], int myid,
+   !c>                                            complex double *tmp1, complex double *tmp2);
    function check_correctness_complex_wrapper(na, nev, na_rows, na_cols, as, z, ev, sc_desc, myid, tmp1, tmp2) result(status) &
      bind(C,name="check_correctness_complex_from_fortran")
 
diff --git a/test/shared_sources/prepare_matrix.F90 b/test/shared_sources/prepare_matrix.F90
index 43eaa54..9d36408 100644
--- a/test/shared_sources/prepare_matrix.F90
+++ b/test/shared_sources/prepare_matrix.F90
@@ -127,6 +127,9 @@ module mod_prepare_matrix
 
    end subroutine
 
+   !c> void prepare_matrix_real_from_fortran(int na, int myid, int na_rows, int na_cols,
+   !c>                                       int sc_desc[9], int iseed[4096],
+   !c>                                       double *a, double *z, double *as);
    subroutine prepare_matrix_real_wrapper(na, myid, na_rows, na_cols, sc_desc, iseed, a, z, as) &
      bind(C, name="prepare_matrix_real_from_fortran")
      use iso_c_binding
@@ -141,7 +144,9 @@ module mod_prepare_matrix
      call prepare_matrix_real(na, myid, sc_desc, iseed, a, z, as)
    end subroutine
 
-
+   !c> void prepare_matrix_complex_from_fortran(int na, int myid, int na_rows, int na_cols,
+   !c>                                          int sc_desc[9], int iseed[4096],
+   !c>                                          complex double *a, complex double *z, complex double *as);
    subroutine prepare_matrix_complex_wrapper(na, myid, na_rows, na_cols, sc_desc, iseed, xr, a, z, as) &
      bind(C, name="prepare_matrix_complex_from_fortran")
      use iso_c_binding
-- 
GitLab
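
Usage sketch for the renamed C entry points: a minimal, illustrative example of how a caller would use the unified names after this change. It assumes the prototypes collected into elpa_generated.h by this patch; the wrapper name solve_with_elpa1, its argument list, and the error handling are hypothetical, and BLACS grid setup and matrix distribution are omitted.

#include <mpi.h>
#include "elpa_generated.h"

/* Hypothetical helper: compute the first nev eigenpairs of a distributed
 * real symmetric matrix via the unified 1-stage interface names. */
int solve_with_elpa1(int na, int nev, int na_rows, int na_cols, int nblk,
                     int my_prow, int my_pcol,
                     double *a, double *ev, double *z)
{
  int mpi_comm_rows, mpi_comm_cols;

  /* Unified name; the old elpa_get_communicators (C) and
   * get_elpa_row_col_comms (Fortran) entry points remain available but are
   * deprecated.  The global communicator is passed as a Fortran handle. */
  int ierr = get_elpa_communicators((int) MPI_Comm_c2f(MPI_COMM_WORLD),
                                    my_prow, my_pcol,
                                    &mpi_comm_rows, &mpi_comm_cols);
  if (ierr != MPI_SUCCESS)
    return 0;

  /* Argument order as used in test/shared_sources/call_elpa1.c:
   * the local column count (na_cols) follows nblk. */
  return elpa_solve_evp_real_1stage(na, nev, a, na_rows, ev, z, na_rows,
                                    nblk, na_cols,
                                    mpi_comm_rows, mpi_comm_cols);
}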