Commit c88242d4 authored by Andreas Marek

Rename header file of legacy C interface

parent c27fe3a6
......@@ -1177,6 +1177,7 @@ distclean-local:
EXTRA_DIST = \
elpa/elpa.h \
elpa/elpa_legacy.h \
fdep/fortran_dependencies.pl \
fdep/fortran_dependencies.mk \
test/Fortran/elpa_print_headers.X90 \
......
......@@ -19,6 +19,12 @@ elpa/elpa_generated.h: $(top_srcdir)/src/elpa_driver/legacy_interface/elpa_drive
@rm -f $@
$(call extract_interface,!c>)
elpa/elpa_generated_legacy.h: $(top_srcdir)/src/elpa_driver/legacy_interface/elpa_driver_c_interface.F90 \
$(top_srcdir)/src/elpa1/legacy_interface/elpa_1stage_c_interface.F90 \
$(top_srcdir)/src/elpa2/legacy_interface/elpa_2stage_c_interface.F90 | elpa
@rm -f $@
$(call extract_interface,!lc>)
test/shared/generated.h: $(wildcard $(top_srcdir)/test/shared/*.*90) | test/shared
@rm -f $@
$(call extract_interface,!c>)
......@@ -34,7 +40,7 @@ src/fortran_constants.X90: $(top_srcdir)/src/fortran_constants.h
@rm $@_
generated_headers= config-f90.h elpa/elpa_generated.h test/shared/generated.h elpa/elpa_generated_fortran_interfaces.h src/fortran_constants.X90
generated_headers= config-f90.h elpa/elpa_generated.h elpa/elpa_generated_legacy.h test/shared/generated.h elpa/elpa_generated_fortran_interfaces.h src/fortran_constants.X90
generated-headers: $(generated_headers)
......
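The new rule above mirrors the existing elpa/elpa_generated.h rule but collects the lines tagged !lc> (legacy C interface) instead of !c>, so the legacy prototypes end up in their own generated header, elpa/elpa_generated_legacy.h. As a hedged illustration only, the excerpt below sketches the kind of content extract_interface would emit into that header, using prototypes that appear verbatim further down in this commit; the exact layout of the real generated file may differ.

/* elpa/elpa_generated_legacy.h -- illustrative excerpt only; the real file is
 * generated by the extract_interface rule from all "!lc>"-tagged lines. */
#include <complex.h>

/*! \brief C interface to create ELPA communicators */
int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol,
                           int *mpi_comm_rows, int *mpi_comm_cols);

/*! \brief C interface to solve the double-precision real eigenvalue problem
 *         with the 1-stage solver */
int elpa_solve_evp_real_1stage_double_precision(int na, int nev, double *a,
    int lda, double *ev, double *q, int ldq, int nblk, int matrixCols,
    int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
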
......@@ -41,17 +41,17 @@
!
! Author: Andreas Marek, MCPDF
#include "config-f90.h"
!c> #include <complex.h>
!c> /*! \brief C old, deprecated interface, will be deleted. Use "elpa_get_communicators"
!c> *
!c> * \param mpi_comm_word MPI global communicator (in)
!c> * \param my_prow Row coordinate of the calling process in the process grid (in)
!c> * \param my_pcol Column coordinate of the calling process in the process grid (in)
!c> * \param mpi_comm_rows Communicator for communicating within rows of processes (out)
!c> * \result int integer error value of mpi_comm_split function
!c> */
!c> int get_elpa_row_col_comms(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
!lc> #include <complex.h>
!lc> /*! \brief C old, deprecated interface, will be deleted. Use "elpa_get_communicators"
!lc> *
!lc> * \param mpi_comm_word MPI global communicator (in)
!lc> * \param my_prow Row coordinate of the calling process in the process grid (in)
!lc> * \param my_pcol Column coordinate of the calling process in the process grid (in)
!lc> * \param mpi_comm_rows Communicator for communicating within rows of processes (out)
!lc> * \result int integer error value of mpi_comm_split function
!lc> */
!lc> int get_elpa_row_col_comms(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
function get_elpa_row_col_comms_wrapper_c_name1(mpi_comm_world, my_prow, my_pcol, &
mpi_comm_rows, mpi_comm_cols) &
result(mpierr) bind(C,name="get_elpa_row_col_comms")
......@@ -67,17 +67,17 @@
mpi_comm_rows, mpi_comm_cols)
end function
!c> #include <complex.h>
!c> /*! \brief C old, deprecated interface, will be deleted. Use "elpa_get_communicators"
!c> *
!c> * \param mpi_comm_word MPI global communicator (in)
!c> * \param my_prow Row coordinate of the calling process in the process grid (in)
!c> * \param my_pcol Column coordinate of the calling process in the process grid (in)
!c> * \param mpi_comm_rows Communicator for communicating within rows of processes (out)
!c> * \result int integer error value of mpi_comm_split function
!c> */
!c> int get_elpa_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
!lc> #include <complex.h>
!lc> /*! \brief C old, deprecated interface, will be deleted. Use "elpa_get_communicators"
!lc> *
!lc> * \param mpi_comm_word MPI global communicator (in)
!lc> * \param my_prow Row coordinate of the calling process in the process grid (in)
!lc> * \param my_pcol Column coordinate of the calling process in the process grid (in)
!lc> * \param mpi_comm_rows Communicator for communicating within rows of processes (out)
!lc> * \result int integer error value of mpi_comm_split function
!lc> */
!lc> int get_elpa_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
function get_elpa_row_col_comms_wrapper_c_name2(mpi_comm_world, my_prow, my_pcol, &
mpi_comm_rows, mpi_comm_cols) &
result(mpierr) bind(C,name="get_elpa_communicators")
......@@ -94,17 +94,17 @@
end function
!c> #include <complex.h>
!c> /*! \brief C interface to create ELPA communicators
!c> *
!c> * \param mpi_comm_word MPI global communicator (in)
!c> * \param my_prow Row coordinate of the calling process in the process grid (in)
!c> * \param my_pcol Column coordinate of the calling process in the process grid (in)
!c> * \param mpi_comm_rows Communicator for communicating within rows of processes (out)
!c> * \result int integer error value of mpi_comm_split function
!c> */
!c> int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
!lc> #include <complex.h>
!lc> /*! \brief C interface to create ELPA communicators
!lc> *
!lc> * \param mpi_comm_word MPI global communicator (in)
!lc> * \param my_prow Row coordinate of the calling process in the process grid (in)
!lc> * \param my_pcol Column coordinate of the calling process in the process grid (in)
!lc> * \param mpi_comm_rows Communicator for communicating within rows of processes (out)
!lc> * \result int integer error value of mpi_comm_split function
!lc> */
!lc> int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
function elpa_get_communicators_wrapper_c(mpi_comm_world, my_prow, my_pcol, &
mpi_comm_rows, mpi_comm_cols) &
result(mpierr) bind(C,name="elpa_get_communicators")
......@@ -122,33 +122,33 @@
end function
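The elpa_get_communicators declaration above is the non-deprecated way to set up the row and column communicators in the legacy C interface. Below is a minimal usage sketch, not a definitive example: it assumes a single MPI process (a trivial 1x1 process grid), that including elpa/elpa_legacy.h pulls in the generated legacy prototypes, and that MPI communicators are passed as Fortran integer handles, as the int arguments in the prototype suggest.

/* Sketch only: header name, 1x1 grid and Fortran-handle convention are assumptions. */
#include <mpi.h>
#include <stdio.h>
#include <elpa/elpa_legacy.h>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  /* Trivial 1x1 process grid: this rank is row 0, column 0. */
  int my_prow = 0, my_pcol = 0;
  int mpi_comm_rows, mpi_comm_cols;

  int mpierr = elpa_get_communicators((int) MPI_Comm_c2f(MPI_COMM_WORLD),
                                      my_prow, my_pcol,
                                      &mpi_comm_rows, &mpi_comm_cols);
  if (mpierr != 0)
    fprintf(stderr, "elpa_get_communicators returned %d\n", mpierr);

  MPI_Finalize();
  return 0;
}

The same call pattern applies to the two deprecated aliases declared above, get_elpa_row_col_comms and get_elpa_communicators.
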
!c> /*! \brief C interface to solve the double-precision real eigenvalue problem with 1-stage solver
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> * The full matrix must be set (not only one half like in scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> * Must be always dimensioned to the full size (corresponding to (na,na))
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param useGPU use GPU (1=yes, 0=No)
!c> *
!c> * \result int: 1 if error occured, otherwise 0
!c>*/
!lc> /*! \brief C interface to solve the double-precision real eigenvalue problem with 1-stage solver
!lc> *
!lc> * \param na Order of matrix a
!lc> * \param nev Number of eigenvalues needed.
!lc> * The smallest nev eigenvalues/eigenvectors are calculated.
!lc> * \param a Distributed matrix for which eigenvalues are to be computed.
!lc> * Distribution is like in Scalapack.
!lc> * The full matrix must be set (not only one half like in scalapack).
!lc> * \param lda Leading dimension of a
!lc> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!lc> * \param q On output: Eigenvectors of a
!lc> * Distribution is like in Scalapack.
!lc> * Must be always dimensioned to the full size (corresponding to (na,na))
!lc> * even if only a part of the eigenvalues is needed.
!lc> * \param ldq Leading dimension of q
!lc> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!lc> * \param matrixCols distributed number of matrix columns
!lc> * \param mpi_comm_rows MPI-Communicator for rows
!lc> * \param mpi_comm_cols MPI-Communicator for columns
!lc> * \param useGPU use GPU (1=yes, 0=No)
!lc> *
!lc> * \result int: 1 if error occured, otherwise 0
!lc>*/
#define REALCASE 1
#define DOUBLE_PRECISION 1
#include "../../general/precision_macros.h"
!c> int elpa_solve_evp_real_1stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
!lc> int elpa_solve_evp_real_1stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
#include "./elpa1_c_interface_template.X90"
#undef REALCASE
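Taking the prototype above at face value, the following hedged sketch calls the double-precision real 1-stage solver for a tiny symmetric matrix on a single MPI process, so that lda = ldq = matrixCols = na and the block-cyclic distribution is trivial; the header name and communicator setup are the same assumptions as in the previous sketch.

/* Sketch only: single-process run, header name assumed. */
#include <mpi.h>
#include <stdio.h>
#include <elpa/elpa_legacy.h>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  enum { na = 4, nev = 4, nblk = 2 };
  /* Full symmetric matrix, column-major as in ScaLAPACK. */
  double a[na * na] = {
    4, 1, 0, 0,
    1, 3, 1, 0,
    0, 1, 2, 1,
    0, 0, 1, 1
  };
  double ev[na], q[na * na];

  int mpi_comm_rows, mpi_comm_cols;
  int comm_all = (int) MPI_Comm_c2f(MPI_COMM_WORLD);
  elpa_get_communicators(comm_all, 0, 0, &mpi_comm_rows, &mpi_comm_cols);

  /* useGPU = 0: stay on the CPU. Return value semantics as documented above. */
  int err = elpa_solve_evp_real_1stage_double_precision(
      na, nev, a, na, ev, q, na, nblk, na,
      mpi_comm_rows, mpi_comm_cols, comm_all, 0);

  printf("solver returned %d, smallest eigenvalue %g\n", err, ev[0]);
  MPI_Finalize();
  return 0;
}

The single-precision variant elpa_solve_evp_real_1stage_single_precision, declared below behind WANT_SINGLE_PRECISION_REAL, takes the same argument list with float arrays for a, ev and q.
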
......@@ -156,70 +156,70 @@
#ifdef WANT_SINGLE_PRECISION_REAL
!c> /*! \brief C interface to solve the single-precision real eigenvalue problem with 1-stage solver
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> * The full matrix must be set (not only one half like in scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> * Must be always dimensioned to the full size (corresponding to (na,na))
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param useGPU use GPU (1=yes, 0=No)
!c> *
!c> * \result int: 1 if error occured, otherwise 0
!c>*/
!lc> /*! \brief C interface to solve the single-precision real eigenvalue problem with 1-stage solver
!lc> *
!lc> * \param na Order of matrix a
!lc> * \param nev Number of eigenvalues needed.
!lc> * The smallest nev eigenvalues/eigenvectors are calculated.
!lc> * \param a Distributed matrix for which eigenvalues are to be computed.
!lc> * Distribution is like in Scalapack.
!lc> * The full matrix must be set (not only one half like in scalapack).
!lc> * \param lda Leading dimension of a
!lc> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!lc> * \param q On output: Eigenvectors of a
!lc> * Distribution is like in Scalapack.
!lc> * Must be always dimensioned to the full size (corresponding to (na,na))
!lc> * even if only a part of the eigenvalues is needed.
!lc> * \param ldq Leading dimension of q
!lc> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!lc> * \param matrixCols distributed number of matrix columns
!lc> * \param mpi_comm_rows MPI-Communicator for rows
!lc> * \param mpi_comm_cols MPI-Communicator for columns
!lc> * \param useGPU use GPU (1=yes, 0=No)
!lc> *
!lc> * \result int: 1 if error occured, otherwise 0
!lc>*/
#define REALCASE 1
#undef DOUBLE_PRECISION
#define SINGLE_PRECISION 1
#include "../../general/precision_macros.h"
!c> int elpa_solve_evp_real_1stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
!lc> int elpa_solve_evp_real_1stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
#include "./elpa1_c_interface_template.X90"
#undef SINGLE_PRECISION
#undef REALCASE
#endif /* WANT_SINGLE_PRECISION_REAL */
!c> /*! \brief C interface to solve the double-precision complex eigenvalue problem with 1-stage solver
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> * The full matrix must be set (not only one half like in scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> * Must be always dimensioned to the full size (corresponding to (na,na))
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param useGPU use GPU (1=yes, 0=No)
!c> *
!c> * \result int: 1 if error occured, otherwise 0
!c> */
!lc> /*! \brief C interface to solve the double-precision complex eigenvalue problem with 1-stage solver
!lc> *
!lc> * \param na Order of matrix a
!lc> * \param nev Number of eigenvalues needed.
!lc> * The smallest nev eigenvalues/eigenvectors are calculated.
!lc> * \param a Distributed matrix for which eigenvalues are to be computed.
!lc> * Distribution is like in Scalapack.
!lc> * The full matrix must be set (not only one half like in scalapack).
!lc> * \param lda Leading dimension of a
!lc> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!lc> * \param q On output: Eigenvectors of a
!lc> * Distribution is like in Scalapack.
!lc> * Must be always dimensioned to the full size (corresponding to (na,na))
!lc> * even if only a part of the eigenvalues is needed.
!lc> * \param ldq Leading dimension of q
!lc> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!lc> * \param matrixCols distributed number of matrix columns
!lc> * \param mpi_comm_rows MPI-Communicator for rows
!lc> * \param mpi_comm_cols MPI-Communicator for columns
!lc> * \param useGPU use GPU (1=yes, 0=No)
!lc> *
!lc> * \result int: 1 if error occured, otherwise 0
!lc> */
#define COMPLEXCASE 1
#define DOUBLE_PRECISION 1
#include "../../general/precision_macros.h"
!c> int elpa_solve_evp_complex_1stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
!lc> int elpa_solve_evp_complex_1stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
#include "./elpa1_c_interface_template.X90"
#undef COMPLEXCASE
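The complex counterpart differs only in the matrix and eigenvector types (C99 double complex, which is why <complex.h> is emitted into the legacy header), while the eigenvalues remain real doubles. A hedged single-process sketch for a small Hermitian matrix, under the same header and grid assumptions as above:

/* Sketch only: single-process run, header name assumed. */
#include <mpi.h>
#include <stdio.h>
#include <complex.h>
#include <elpa/elpa_legacy.h>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  enum { na = 2, nev = 2, nblk = 1 };
  /* Small Hermitian matrix, column-major; eigenvalues come back as doubles. */
  double complex a[na * na] = { 2.0,        1.0 - 1.0*I,
                                1.0 + 1.0*I, 3.0 };
  double ev[na];
  double complex q[na * na];

  int mpi_comm_rows, mpi_comm_cols;
  int comm_all = (int) MPI_Comm_c2f(MPI_COMM_WORLD);
  elpa_get_communicators(comm_all, 0, 0, &mpi_comm_rows, &mpi_comm_cols);

  int err = elpa_solve_evp_complex_1stage_double_precision(
      na, nev, a, na, ev, q, na, nblk, na,
      mpi_comm_rows, mpi_comm_cols, comm_all, /* useGPU = */ 0);

  printf("solver returned %d, ev[0] = %g\n", err, ev[0]);
  MPI_Finalize();
  return 0;
}
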
......@@ -227,35 +227,35 @@
#ifdef WANT_SINGLE_PRECISION_COMPLEX
!c> /*! \brief C interface to solve the single-precision complex eigenvalue problem with 1-stage solver
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> * The full matrix must be set (not only one half like in scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> * Must be always dimensioned to the full size (corresponding to (na,na))
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param useGPU use GPU (1=yes, 0=No)
!c> *
!c> * \result int: 1 if error occured, otherwise 0
!c> */
!lc> /*! \brief C interface to solve the single-precision complex eigenvalue problem with 1-stage solver
!lc> *
!lc> * \param na Order of matrix a
!lc> * \param nev Number of eigenvalues needed.
!lc> * The smallest nev eigenvalues/eigenvectors are calculated.
!lc> * \param a Distributed matrix for which eigenvalues are to be computed.
!lc> * Distribution is like in Scalapack.
!lc> * The full matrix must be set (not only one half like in scalapack).
!lc> * \param lda Leading dimension of a
!lc> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!lc> * \param q On output: Eigenvectors of a
!lc> * Distribution is like in Scalapack.
!lc> * Must be always dimensioned to the full size (corresponding to (na,na))
!lc> * even if only a part of the eigenvalues is needed.
!lc> * \param ldq Leading dimension of q
!lc> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!lc> * \param matrixCols distributed number of matrix columns
!lc> * \param mpi_comm_rows MPI-Communicator for rows
!lc> * \param mpi_comm_cols MPI-Communicator for columns
!lc> * \param useGPU use GPU (1=yes, 0=No)
!lc> *
!lc> * \result int: 1 if error occured, otherwise 0
!lc> */
#define COMPLEXCASE 1
#undef DOUBLE_PRECISION
#define SINGLE_PRECISION
#include "../../general/precision_macros.h"
!c> int elpa_solve_evp_complex_1stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
!lc> int elpa_solve_evp_complex_1stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
#include "./elpa1_c_interface_template.X90"
......@@ -263,25 +263,25 @@
#undef COMPLEXCASE
#endif /* WANT_SINGLE_PRECISION_COMPLEX */
!c> /*
!c> \brief C interface to solve double-precision tridiagonal eigensystem with divide and conquer method
!c> \details
!c>
!c> *\param na Matrix dimension
!c> *\param nev number of eigenvalues/vectors to be computed
!c> *\param d array d(na) on input diagonal elements of tridiagonal matrix, on
!c> * output the eigenvalues in ascending order
!c> *\param e array e(na) on input subdiagonal elements of matrix, on exit destroyed
!c> *\param q on exit : matrix q(ldq,matrixCols) contains the eigenvectors
!c> *\param ldq leading dimension of matrix q
!c> *\param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> *\param matrixCols columns of matrix q
!c> *\param mpi_comm_rows MPI communicator for rows
!c> *\param mpi_comm_cols MPI communicator for columns
!c> *\param wantDebug give more debug information if 1, else 0
!c> *\result success int 1 on success, else 0
!c> */
!c> int elpa_solve_tridi_double(int na, int nev, double *d, double *e, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
!lc> /*
!lc> \brief C interface to solve double-precision tridiagonal eigensystem with divide and conquer method
!lc> \details
!lc>
!lc> *\param na Matrix dimension
!lc> *\param nev number of eigenvalues/vectors to be computed
!lc> *\param d array d(na) on input diagonal elements of tridiagonal matrix, on
!lc> * output the eigenvalues in ascending order
!lc> *\param e array e(na) on input subdiagonal elements of matrix, on exit destroyed
!lc> *\param q on exit : matrix q(ldq,matrixCols) contains the eigenvectors
!lc> *\param ldq leading dimension of matrix q
!lc> *\param nblk blocksize of cyclic distribution, must be the same in both directions!
!lc> *\param matrixCols columns of matrix q
!lc> *\param mpi_comm_rows MPI communicator for rows
!lc> *\param mpi_comm_cols MPI communicator for columns
!lc> *\param wantDebug give more debug information if 1, else 0
!lc> *\result success int 1 on success, else 0
!lc> */
!lc> int elpa_solve_tridi_double(int na, int nev, double *d, double *e, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
#define REALCASE 1
#define DOUBLE_PRECISION 1
#include "../../general/precision_macros.h"
......@@ -291,25 +291,25 @@
#ifdef WANT_SINGLE_PRECISION_REAL
!c> /*
!c> \brief C interface to solve single-precision tridiagonal eigensystem with divide and conquer method
!c> \details
!c>
!c> \param na Matrix dimension
!c> \param nev number of eigenvalues/vectors to be computed
!c> \param d array d(na) on input diagonal elements of tridiagonal matrix, on
!c> output the eigenvalues in ascending order
!c> \param e array e(na) on input subdiagonal elements of matrix, on exit destroyed
!c> \param q on exit : matrix q(ldq,matrixCols) contains the eigenvectors
!c> \param ldq leading dimension of matrix q
!c> \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> \param matrixCols columns of matrix q
!c> \param mpi_comm_rows MPI communicator for rows
!c> \param mpi_comm_cols MPI communicator for columns
!c> \param wantDebug give more debug information if 1, else 0
!c> \result success int 1 on success, else 0
!c> */
!c> int elpa_solve_tridi_single(int na, int nev, float *d, float *e, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
!lc> /*
!lc> \brief C interface to solve single-precision tridiagonal eigensystem with divide and conquer method
!lc> \details
!lc>
!lc> \param na Matrix dimension
!lc> \param nev number of eigenvalues/vectors to be computed
!lc> \param d array d(na) on input diagonal elements of tridiagonal matrix, on
!lc> output the eigenvalues in ascending order
!lc> \param e array e(na) on input subdiagonal elements of matrix, on exit destroyed
!lc> \param q on exit : matrix q(ldq,matrixCols) contains the eigenvectors
!lc> \param ldq leading dimension of matrix q
!lc> \param nblk blocksize of cyclic distribution, must be the same in both directions!
!lc> \param matrixCols columns of matrix q
!lc> \param mpi_comm_rows MPI communicator for rows
!lc> \param mpi_comm_cols MPI communicator for columns
!lc> \param wantDebug give more debug information if 1, else 0
!lc> \result success int 1 on success, else 0
!lc> */
!lc> int elpa_solve_tridi_single(int na, int nev, float *d, float *e, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
#define REALCASE 1
#define SINGLE_PRECISION 1
#include "../../general/precision_macros.h"
......@@ -319,43 +319,43 @@
#endif /* WANT_SINGLE_PRECISION_REAL */
!c> /*
!c> \brief C interface for elpa_mult_at_b_real_double: Performs C : = A**T * B for double-precision matrices
!c> where A is a square matrix (na,na) which is optionally upper or lower triangular
!c> B is a (na,ncb) matrix
!c> C is a (na,ncb) matrix where optionally only the upper or lower
!c> triangle may be computed
!c> \details
!c> \param uplo_a 'U' if A is upper triangular
!c> 'L' if A is lower triangular
!c> anything else if A is a full matrix
!c> Please note: This pertains to the original A (as set in the calling program)
!c> whereas the transpose of A is used for calculations
!c> If uplo_a is 'U' or 'L', the other triangle is not used at all,
!c> i.e. it may contain arbitrary numbers
!c> \param uplo_c 'U' if only the upper diagonal part of C is needed
!c> 'L' if only the upper diagonal part of C is needed
!c> anything else if the full matrix C is needed
!c> Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
!c> written to a certain extent, i.e. one shouldn't rely on the content there!
!c> \param na Number of rows/columns of A, number of rows of B and C
!c> \param ncb Number of columns of B and C
!c> \param a matrix a
!c> \param lda leading dimension of matrix a
!c> \param ldaCols columns of matrix a
!c> \param b matrix b
!c> \param ldb leading dimension of matrix b
!c> \param ldbCols columns of matrix b
!c> \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> \param mpi_comm_rows MPI communicator for rows
!c> \param mpi_comm_cols MPI communicator for columns
!c> \param c matrix c
!c> \param ldc leading dimension of matrix c
!c> \param ldcCols columns of matrix c
!c> \result success int report success (1) or failure (0)
!c> */
!c> int elpa_mult_at_b_real_double(char uplo_a, char uplo_c, int na, int ncb, double *a, int lda, int ldaCols, double *b, int ldb, int ldbCols, int nlbk, int mpi_comm_rows, int mpi_comm_cols, double *c, int ldc, int ldcCols);
!lc> /*
!lc> \brief C interface for elpa_mult_at_b_real_double: Performs C : = A**T * B for double-precision matrices
!lc> where A is a square matrix (na,na) which is optionally upper or lower triangular
!lc> B is a (na,ncb) matrix
!lc> C is a (na,ncb) matrix where optionally only the upper or lower
!lc> triangle may be computed
!lc> \details
!lc> \param uplo_a 'U' if A is upper triangular
!lc> 'L' if A is lower triangular
!lc> anything else if A is a full matrix
!lc> Please note: This pertains to the original A (as set in the calling program)
!lc> whereas the transpose of A is used for calculations
!lc> If uplo_a is 'U' or 'L', the other triangle is not used at all,
!lc> i.e. it may contain arbitrary numbers
!lc> \param uplo_c 'U' if only the upper diagonal part of C is needed
!lc> 'L' if only the upper diagonal part of C is needed
!lc> anything else if the full matrix C is needed
!lc> Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
!lc> written to a certain extent, i.e. one shouldn't rely on the content there!
!lc> \param na Number of rows/columns of A, number of rows of B and C
!lc> \param ncb Number of columns of B and C
!lc> \param a matrix a
!lc> \param lda leading dimension of matrix a
!lc> \param ldaCols columns of matrix a
!lc> \param b matrix b
!lc> \param ldb leading dimension of matrix b
!lc> \param ldbCols columns of matrix b
!lc> \param nblk blocksize of cyclic distribution, must be the same in both directions!
!lc> \param mpi_comm_rows MPI communicator for rows
!lc> \param mpi_comm_cols MPI communicator for columns
!lc> \param c matrix c
!lc> \param ldc leading dimension of matrix c
!lc> \param ldcCols columns of matrix c
!lc> \result success int report success (1) or failure (0)
!lc> */
!lc> int elpa_mult_at_b_real_double(char uplo_a, char uplo_c, int na, int ncb, double *a, int lda, int ldaCols, double *b, int ldb, int ldbCols, int nlbk, int mpi_comm_rows, int mpi_comm_cols, double *c, int ldc, int ldcCols);
#define REALCASE 1
#define DOUBLE_PRECISION 1
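elpa_mult_at_b_real_double computes C := A**T * B for block-cyclically distributed matrices. The sketch below is a hedged illustration only, again assuming a single process and the legacy header name; it passes a character other than 'U' or 'L' for both uplo arguments, which according to the documentation above means A is treated as a full matrix and all of C is computed.

/* Sketch only: single-process run, header name assumed. */
#include <mpi.h>
#include <stdio.h>
#include <elpa/elpa_legacy.h>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  enum { na = 2, ncb = 2, nblk = 1 };
  double a[na * na]  = { 1.0, 2.0, 3.0, 4.0 };  /* column-major */
  double b[na * ncb] = { 1.0, 0.0, 0.0, 1.0 };  /* identity, so C = A**T */
  double c[na * ncb];

  int mpi_comm_rows, mpi_comm_cols;
  elpa_get_communicators((int) MPI_Comm_c2f(MPI_COMM_WORLD), 0, 0,
                         &mpi_comm_rows, &mpi_comm_cols);

  /* 'F' (neither 'U' nor 'L'): treat A as full, compute the full matrix C. */
  int success = elpa_mult_at_b_real_double('F', 'F', na, ncb,
                                           a, na, na,    /* a, lda, ldaCols */
                                           b, na, ncb,   /* b, ldb, ldbCols */
                                           nblk, mpi_comm_rows, mpi_comm_cols,
                                           c, na, ncb);  /* c, ldc, ldcCols */
  printf("success = %d, c[0] = %g\n", success, c[0]);

  MPI_Finalize();
  return 0;
}

The single-precision variant declared below takes the same arguments with float arrays for a, b and c.
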
......@@ -365,42 +365,42 @@
#undef REALCASE
#ifdef WANT_SINGLE_PRECISION_REAL
!c> /*
!c> \brief C interface for elpa_mult_at_b_real_single: Performs C : = A**T * B for single-precision matrices
!c> where A is a square matrix (na,na) which is optionally upper or lower triangular
!c> B is a (na,ncb) matrix
!c> C is a (na,ncb) matrix where optionally only the upper or lower
!c> triangle may be computed
!c> \details
!c> \param uplo_a 'U' if A is upper triangular
!c> 'L' if A is lower triangular
!c> anything else if A is a full matrix
!c> Please note: This pertains to the original A (as set in the calling program)
!c> whereas the transpose of A is used for calculations
!c> If uplo_a is 'U' or 'L', the other triangle is not used at all,
!c> i.e. it may contain arbitrary numbers
!c> \param uplo_c 'U' if only the upper diagonal part of C is needed
!c> 'L' if only the upper diagonal part of C is needed
!c> anything else if the full matrix C is needed
!c> Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
!c> written to a certain extent, i.e. one shouldn't rely on the content there!
!c> \param na Number of rows/columns of A, number of rows of B and C
!c> \param ncb Number of columns of B and C
!c> \param a matrix a
!c> \param lda leading dimension of matrix a
!c> \param ldaCols columns of matrix a
!c> \param b matrix b
!c> \param ldb leading dimension of matrix b
!c> \param ldbCols columns of matrix b
!c> \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> \param mpi_comm_rows MPI communicator for rows
!c> \param mpi_comm_cols MPI communicator for columns
!c> \param c matrix c
!c> \param ldc leading dimension of matrix c
!c> \result success int report success (1) or failure (0)
!c> */
!c> int elpa_mult_at_b_real_single(char uplo_a, char uplo_c, int na, int ncb, float *a, int lda, int ldaCols, float *b, int ldb, int ldbCols, int nlbk, int mpi_comm_rows, int mpi_comm_cols, float *c, int ldc, int ldcCols);
!lc> /*
!lc> \brief C interface for elpa_mult_at_b_real_single: Performs C : = A**T * B for single-precision matrices
!lc> where A is a square matrix (na,na) which is optionally upper or lower triangular
!lc> B is a (na,ncb) matrix
!lc> C is a (na,ncb) matrix where optionally only the upper or lower
!lc> triangle may be computed
!lc> \details
!lc> \param uplo_a 'U' if A is upper triangular
!lc> 'L' if A is lower triangular
!lc> anything else if A is a full matrix
!lc> Please note: This pertains to the original A (as set in the calling program)
!lc> whereas the transpose of A is used for calculations
!lc> If uplo_a is 'U' or 'L', the other triangle is not used at all,
!lc> i.e. it may contain arbitrary numbers
!lc> \param uplo_c 'U' if only the upper diagonal part of C is needed
!lc> 'L' if only the upper diagonal part of C is needed
!lc> anything else if the full matrix C is needed
!lc> Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
!lc> written to a certain extent, i.e. one shouldn't rely on the content there!
!lc> \param na Number of rows/columns of A, number of rows of B and C
!lc> \param ncb Number of columns of B and C
!lc> \param a matrix a
!lc> \param lda leading dimension of matrix a
!lc> \param ldaCols columns of matrix a
!lc> \param b matrix b
!lc> \param ldb leading dimension of matrix b
!lc> \param ldbCols columns of matrix b
!lc> \param nblk blocksize of cyclic distribution, must be the same in both directions!
!lc> \param mpi_comm_rows MPI communicator for rows
!lc> \param mpi_comm_cols MPI communicator for columns
!lc> \param c matrix c