Commit c88242d4 authored by Andreas Marek

Rename header file of legacy C interface

parent c27fe3a6
@@ -1177,6 +1177,7 @@ distclean-local:
 EXTRA_DIST = \
   elpa/elpa.h \
+  elpa/elpa_legacy.h \
   fdep/fortran_dependencies.pl \
   fdep/fortran_dependencies.mk \
   test/Fortran/elpa_print_headers.X90 \
...
@@ -19,6 +19,12 @@ elpa/elpa_generated.h: $(top_srcdir)/src/elpa_driver/legacy_interface/elpa_drive
 	@rm -f $@
 	$(call extract_interface,!c>)
 
+elpa/elpa_generated_legacy.h: $(top_srcdir)/src/elpa_driver/legacy_interface/elpa_driver_c_interface.F90 \
+                              $(top_srcdir)/src/elpa1/legacy_interface/elpa_1stage_c_interface.F90 \
+                              $(top_srcdir)/src/elpa2/legacy_interface/elpa_2stage_c_interface.F90 | elpa
+	@rm -f $@
+	$(call extract_interface,!lc>)
+
 test/shared/generated.h: $(wildcard $(top_srcdir)/test/shared/*.*90) | test/shared
 	@rm -f $@
 	$(call extract_interface,!c>)

@@ -34,7 +40,7 @@ src/fortran_constants.X90: $(top_srcdir)/src/fortran_constants.h
 	@rm $@_
 
-generated_headers= config-f90.h elpa/elpa_generated.h test/shared/generated.h elpa/elpa_generated_fortran_interfaces.h src/fortran_constants.X90
+generated_headers= config-f90.h elpa/elpa_generated.h elpa/elpa_generated_legacy.h test/shared/generated.h elpa/elpa_generated_fortran_interfaces.h src/fortran_constants.X90
 
 generated-headers: $(generated_headers)
...
@@ -42,42 +42,37 @@
 ! Author: Andreas Marek, MCPDF
 #include "config-f90.h"
-!c> /*! \brief C interface to solve the double-precision real eigenvalue problem with 2-stage solver
-!c> *
-!c> * \param na Order of matrix a
-!c> * \param nev Number of eigenvalues needed.
-!c> * The smallest nev eigenvalues/eigenvectors are calculated.
-!c> * \param a Distributed matrix for which eigenvalues are to be computed.
-!c> * Distribution is like in Scalapack.
-!c> * The full matrix must be set (not only one half like in scalapack).
-!c> * \param lda Leading dimension of a
-!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
-!c> * \param q On output: Eigenvectors of a
-!c> * Distribution is like in Scalapack.
-!c> * Must be always dimensioned to the full size (corresponding to (na,na))
-!c> * even if only a part of the eigenvalues is needed.
-!c> * \param ldq Leading dimension of q
-!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
-!c> * \param matrixCols distributed number of matrix columns
-!c> * \param mpi_comm_rows MPI-Communicator for rows
-!c> * \param mpi_comm_cols MPI-Communicator for columns
-!c> * \param mpi_coll_all MPI communicator for the total processor set
-!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
-!c> * \param useQR use QR decomposition 1 = yes, 0 = no
-!c> * \param useGPU use GPU (1=yes, 0=No)
-!c> *
-!c> * \result int: 1 if error occured, otherwise 0
-!c> */
+!lc> /*! \brief C interface to solve the double-precision real eigenvalue problem with 2-stage solver
+!lc> *
+!lc> * \param na Order of matrix a
+!lc> * \param nev Number of eigenvalues needed.
+!lc> * The smallest nev eigenvalues/eigenvectors are calculated.
+!lc> * \param a Distributed matrix for which eigenvalues are to be computed.
+!lc> * Distribution is like in Scalapack.
+!lc> * The full matrix must be set (not only one half like in scalapack).
+!lc> * \param lda Leading dimension of a
+!lc> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
+!lc> * \param q On output: Eigenvectors of a
+!lc> * Distribution is like in Scalapack.
+!lc> * Must be always dimensioned to the full size (corresponding to (na,na))
+!lc> * even if only a part of the eigenvalues is needed.
+!lc> * \param ldq Leading dimension of q
+!lc> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
+!lc> * \param matrixCols distributed number of matrix columns
+!lc> * \param mpi_comm_rows MPI-Communicator for rows
+!lc> * \param mpi_comm_cols MPI-Communicator for columns
+!lc> * \param mpi_coll_all MPI communicator for the total processor set
+!lc> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
+!lc> * \param useQR use QR decomposition 1 = yes, 0 = no
+!lc> * \param useGPU use GPU (1=yes, 0=No)
+!lc> *
+!lc> * \result int: 1 if error occured, otherwise 0
+!lc> */
 #define REALCASE 1
 #define DOUBLE_PRECISION 1
-#if DOUBLE_PRECISION == 1
-!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk,
-!c> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
-#else
-!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk,
-!c> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
-#endif
+!lc> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk,
+!lc> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
 #include "../../general/precision_macros.h"
 #include "./elpa2_c_interface_template.X90"
@@ -86,43 +81,38 @@
 #ifdef WANT_SINGLE_PRECISION_REAL
-!c> /*! \brief C interface to solve the single-precision real eigenvalue problem with 2-stage solver
-!c> *
-!c> * \param na Order of matrix a
-!c> * \param nev Number of eigenvalues needed.
-!c> * The smallest nev eigenvalues/eigenvectors are calculated.
-!c> * \param a Distributed matrix for which eigenvalues are to be computed.
-!c> * Distribution is like in Scalapack.
-!c> * The full matrix must be set (not only one half like in scalapack).
-!c> * \param lda Leading dimension of a
-!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
-!c> * \param q On output: Eigenvectors of a
-!c> * Distribution is like in Scalapack.
-!c> * Must be always dimensioned to the full size (corresponding to (na,na))
-!c> * even if only a part of the eigenvalues is needed.
-!c> * \param ldq Leading dimension of q
-!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
-!c> * \param matrixCols distributed number of matrix columns
-!c> * \param mpi_comm_rows MPI-Communicator for rows
-!c> * \param mpi_comm_cols MPI-Communicator for columns
-!c> * \param mpi_coll_all MPI communicator for the total processor set
-!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
-!c> * \param useQR use QR decomposition 1 = yes, 0 = no
-!c> * \param useGPU use GPU (1=yes, 0=No)
-!c> *
-!c> * \result int: 1 if error occured, otherwise 0
-!c> */
+!lc> /*! \brief C interface to solve the single-precision real eigenvalue problem with 2-stage solver
+!lc> *
+!lc> * \param na Order of matrix a
+!lc> * \param nev Number of eigenvalues needed.
+!lc> * The smallest nev eigenvalues/eigenvectors are calculated.
+!lc> * \param a Distributed matrix for which eigenvalues are to be computed.
+!lc> * Distribution is like in Scalapack.
+!lc> * The full matrix must be set (not only one half like in scalapack).
+!lc> * \param lda Leading dimension of a
+!lc> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
+!lc> * \param q On output: Eigenvectors of a
+!lc> * Distribution is like in Scalapack.
+!lc> * Must be always dimensioned to the full size (corresponding to (na,na))
+!lc> * even if only a part of the eigenvalues is needed.
+!lc> * \param ldq Leading dimension of q
+!lc> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
+!lc> * \param matrixCols distributed number of matrix columns
+!lc> * \param mpi_comm_rows MPI-Communicator for rows
+!lc> * \param mpi_comm_cols MPI-Communicator for columns
+!lc> * \param mpi_coll_all MPI communicator for the total processor set
+!lc> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
+!lc> * \param useQR use QR decomposition 1 = yes, 0 = no
+!lc> * \param useGPU use GPU (1=yes, 0=No)
+!lc> *
+!lc> * \result int: 1 if error occured, otherwise 0
+!lc> */
 #define REALCASE 1
 #define SINGLE_PRECISION 1
 #undef DOUBLE_PRECISION
-#if DOUBLE_PRECISION == 1
-!c> int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk,
-!c> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
-#else
-!c> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk,
-!c> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
-#endif
+!lc> int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk,
+!lc> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
 #include "../../general/precision_macros.h"
 #include "./elpa2_c_interface_template.X90"
@@ -132,43 +122,38 @@
 #endif /* WANT_SINGLE_PRECISION_REAL */
-!c> #include <complex.h>
-!c> /*! \brief C interface to solve the double-precision complex eigenvalue problem with 2-stage solver
-!c> *
-!c> * \param na Order of matrix a
-!c> * \param nev Number of eigenvalues needed.
-!c> * The smallest nev eigenvalues/eigenvectors are calculated.
-!c> * \param a Distributed matrix for which eigenvalues are to be computed.
-!c> * Distribution is like in Scalapack.
-!c> * The full matrix must be set (not only one half like in scalapack).
-!c> * \param lda Leading dimension of a
-!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
-!c> * \param q On output: Eigenvectors of a
-!c> * Distribution is like in Scalapack.
-!c> * Must be always dimensioned to the full size (corresponding to (na,na))
-!c> * even if only a part of the eigenvalues is needed.
-!c> * \param ldq Leading dimension of q
-!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
-!c> * \param matrixCols distributed number of matrix columns
-!c> * \param mpi_comm_rows MPI-Communicator for rows
-!c> * \param mpi_comm_cols MPI-Communicator for columns
-!c> * \param mpi_coll_all MPI communicator for the total processor set
-!c> * \param THIS_COMPLEX_ELPA_KERNEL_API specify used ELPA2 kernel via API
-!c> * \param useGPU use GPU (1=yes, 0=No)
-!c> *
-!c> * \result int: 1 if error occured, otherwise 0
-!c> */
+!lc> #include <complex.h>
+!lc> /*! \brief C interface to solve the double-precision complex eigenvalue problem with 2-stage solver
+!lc> *
+!lc> * \param na Order of matrix a
+!lc> * \param nev Number of eigenvalues needed.
+!lc> * The smallest nev eigenvalues/eigenvectors are calculated.
+!lc> * \param a Distributed matrix for which eigenvalues are to be computed.
+!lc> * Distribution is like in Scalapack.
+!lc> * The full matrix must be set (not only one half like in scalapack).
+!lc> * \param lda Leading dimension of a
+!lc> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
+!lc> * \param q On output: Eigenvectors of a
+!lc> * Distribution is like in Scalapack.
+!lc> * Must be always dimensioned to the full size (corresponding to (na,na))
+!lc> * even if only a part of the eigenvalues is needed.
+!lc> * \param ldq Leading dimension of q
+!lc> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
+!lc> * \param matrixCols distributed number of matrix columns
+!lc> * \param mpi_comm_rows MPI-Communicator for rows
+!lc> * \param mpi_comm_cols MPI-Communicator for columns
+!lc> * \param mpi_coll_all MPI communicator for the total processor set
+!lc> * \param THIS_COMPLEX_ELPA_KERNEL_API specify used ELPA2 kernel via API
+!lc> * \param useGPU use GPU (1=yes, 0=No)
+!lc> *
+!lc> * \result int: 1 if error occured, otherwise 0
+!lc> */
 #define COMPLEXCASE 1
 #define DOUBLE_PRECISION 1
-#if DOUBLE_PRECISION == 1
-!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq,
-!c> int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
-#else
-!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk,
-!c> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
-#endif
+!lc> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq,
+!lc> int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
 #include "../../general/precision_macros.h"
 #include "./elpa2_c_interface_template.X90"
@@ -177,43 +162,38 @@
 #ifdef WANT_SINGLE_PRECISION_COMPLEX
-!c> #include <complex.h>
-!c> /*! \brief C interface to solve the single-precision complex eigenvalue problem with 2-stage solver
-!c> *
-!c> * \param na Order of matrix a
-!c> * \param nev Number of eigenvalues needed.
-!c> * The smallest nev eigenvalues/eigenvectors are calculated.
-!c> * \param a Distributed matrix for which eigenvalues are to be computed.
-!c> * Distribution is like in Scalapack.
-!c> * The full matrix must be set (not only one half like in scalapack).
-!c> * \param lda Leading dimension of a
-!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
-!c> * \param q On output: Eigenvectors of a
-!c> * Distribution is like in Scalapack.
-!c> * Must be always dimensioned to the full size (corresponding to (na,na))
-!c> * even if only a part of the eigenvalues is needed.
-!c> * \param ldq Leading dimension of q
-!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
-!c> * \param matrixCols distributed number of matrix columns
-!c> * \param mpi_comm_rows MPI-Communicator for rows
-!c> * \param mpi_comm_cols MPI-Communicator for columns
-!c> * \param mpi_coll_all MPI communicator for the total processor set
-!c> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
-!c> * \param useGPU use GPU (1=yes, 0=No)
-!c> *
-!c> * \result int: 1 if error occured, otherwise 0
-!c> */
+!lc> #include <complex.h>
+!lc> /*! \brief C interface to solve the single-precision complex eigenvalue problem with 2-stage solver
+!lc> *
+!lc> * \param na Order of matrix a
+!lc> * \param nev Number of eigenvalues needed.
+!lc> * The smallest nev eigenvalues/eigenvectors are calculated.
+!lc> * \param a Distributed matrix for which eigenvalues are to be computed.
+!lc> * Distribution is like in Scalapack.
+!lc> * The full matrix must be set (not only one half like in scalapack).
+!lc> * \param lda Leading dimension of a
+!lc> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
+!lc> * \param q On output: Eigenvectors of a
+!lc> * Distribution is like in Scalapack.
+!lc> * Must be always dimensioned to the full size (corresponding to (na,na))
+!lc> * even if only a part of the eigenvalues is needed.
+!lc> * \param ldq Leading dimension of q
+!lc> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
+!lc> * \param matrixCols distributed number of matrix columns
+!lc> * \param mpi_comm_rows MPI-Communicator for rows
+!lc> * \param mpi_comm_cols MPI-Communicator for columns
+!lc> * \param mpi_coll_all MPI communicator for the total processor set
+!lc> * \param THIS_REAL_ELPA_KERNEL_API specify used ELPA2 kernel via API
+!lc> * \param useGPU use GPU (1=yes, 0=No)
+!lc> *
+!lc> * \result int: 1 if error occured, otherwise 0
+!lc> */
 #define COMPLEXCASE 1
 #undef DOUBLE_PRECISION
 #define SINGLE_PRECISION 1
-#if DOUBLE_PRECISION == 1
-!c> int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq,
-!c> int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
-#else
-!c> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk,
-!c> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
-#endif
+!lc> int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex *a, int lda, float *ev, complex *q, int ldq, int nblk,
+!lc> int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
 #include "../../general/precision_macros.h"
 #include "./elpa2_c_interface_template.X90"
...
@@ -50,7 +50,7 @@
 #endif
 #include <math.h>
-#include <elpa/elpa.h>
+#include <elpa/elpa_legacy.h>
 #include <test/shared/generated.h>
 #include <complex.h>
...
@@ -50,7 +50,7 @@
 #endif
 #include <math.h>
-#include <elpa/elpa.h>
+#include <elpa/elpa_legacy.h>
 #include "test/shared/generated.h"
...
@@ -50,7 +50,7 @@
 #endif
 #include <math.h>
-#include <elpa/elpa.h>
+#include <elpa/elpa_legacy.h>
 #include <test/shared/generated.h>
 #include <complex.h>
...
@@ -50,7 +50,7 @@
 #endif
 #include <math.h>
-#include <elpa/elpa.h>
+#include <elpa/elpa_legacy.h>
 #include <test/shared/generated.h>
 #define DOUBLE_PRECISION_REAL 1
...
@@ -50,7 +50,7 @@
 #endif
 #include <math.h>
-#include <elpa/elpa.h>
+#include <elpa/elpa_legacy.h>
 #include <test/shared/generated.h>
 #include <complex.h>
...
@@ -50,7 +50,7 @@
 #endif
 #include <math.h>
-#include <elpa/elpa.h>
+#include <elpa/elpa_legacy.h>
 #include <test/shared/generated.h>
 #include <complex.h>
...
@@ -50,7 +50,7 @@
 #endif
 #include <math.h>
-#include <elpa/elpa.h>
+#include <elpa/elpa_legacy.h>
 #include <test/shared/generated.h>
 int main(int argc, char** argv) {
...
@@ -50,7 +50,7 @@
 #endif
 #include <math.h>
-#include <elpa/elpa.h>
+#include <elpa/elpa_legacy.h>
 #include <test/shared/generated.h>
 int main(int argc, char** argv) {
...