Commit 927f988a authored by Andreas Marek

Create doxygen documentation for ELPA

The user functions of ELPA are now documented with doxygen tags.
At the moment the interface of ELPA 2015.11.001 is described.

The documentation still has to be implemented step by step for all functions
and test programs.
parent 2998fac3
......@@ -292,6 +292,8 @@ elpa2.i: $(top_srcdir)/src/elpa2.F90
elpa1.i: $(top_srcdir)/src/elpa1.F90
$(CPP) $(CPPFLAGS) -I$(top_builddir)/ -c $(top_srcdir)/src/elpa1.F90 -o $@
include doxygen.am
CLEANFILES = \
elpa-generated.h \
elpa1_test_real.sh \
......
......@@ -660,6 +660,12 @@ fi
LT_INIT
DX_PDF_FEATURE(OFF)
DX_PS_FEATURE(OFF)
DX_MAN_FEATURE(ON)
DX_HTML_FEATURE(ON)
DX_INIT_DOXYGEN([ELPA], [Doxyfile], [docs])
AC_SUBST([WITH_MKL])
AC_SUBST([WITH_BLACS])
AC_SUBST([with_amd_bulldozer_kernel])
......@@ -669,6 +675,7 @@ AC_SUBST([OPENMP_CFLAGS])
AC_SUBST([OPENMP_FCFLAGS])
AC_SUBST([OPENMP_LDFLAGS])
#AC_SUBST(OPT_FCFLAGS)
AC_SUBST([DOXYGEN_OUTPUT_DIR], [docs])
rm -rf modules/ .fortran_dependencies/
mkdir modules
......@@ -702,6 +709,7 @@ AC_SUBST([PKG_CONFIG_FILE],[elpa${SUFFIX}-${PACKAGE_VERSION}.pc])
AC_CONFIG_FILES([
Makefile
Doxyfile
${PKG_CONFIG_FILE}:elpa.pc.in
])
......
## --------------------------------- ##
## Format-independent Doxygen rules. ##
## --------------------------------- ##
if DX_COND_doc
## ------------------------------- ##
## Rules specific for HTML output. ##
## ------------------------------- ##
if DX_COND_html
DX_CLEAN_HTML = @DX_DOCDIR@/html
endif DX_COND_html
## ------------------------------ ##
## Rules specific for CHM output. ##
## ------------------------------ ##
if DX_COND_chm
DX_CLEAN_CHM = @DX_DOCDIR@/chm
if DX_COND_chi
DX_CLEAN_CHI = @DX_DOCDIR@/@PACKAGE@.chi
endif DX_COND_chi
endif DX_COND_chm
## ------------------------------ ##
## Rules specific for MAN output. ##
## ------------------------------ ##
if DX_COND_man
DX_CLEAN_MAN = @DX_DOCDIR@/man
endif DX_COND_man
## ------------------------------ ##
## Rules specific for RTF output. ##
## ------------------------------ ##
if DX_COND_rtf
DX_CLEAN_RTF = @DX_DOCDIR@/rtf
endif DX_COND_rtf
## ------------------------------ ##
## Rules specific for XML output. ##
## ------------------------------ ##
if DX_COND_xml
DX_CLEAN_XML = @DX_DOCDIR@/xml
endif DX_COND_xml
## ----------------------------- ##
## Rules specific for PS output. ##
## ----------------------------- ##
if DX_COND_ps
DX_CLEAN_PS = @DX_DOCDIR@/@PACKAGE@.ps
DX_PS_GOAL = doxygen-ps
doxygen-ps: @DX_DOCDIR@/@PACKAGE@.ps
@DX_DOCDIR@/@PACKAGE@.ps: @DX_DOCDIR@/@PACKAGE@.tag
cd @DX_DOCDIR@/latex; \
rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
$(DX_LATEX) refman.tex; \
$(MAKEINDEX_PATH) refman.idx; \
$(DX_LATEX) refman.tex; \
countdown=5; \
while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
refman.log > /dev/null 2>&1 \
&& test $$countdown -gt 0; do \
$(DX_LATEX) refman.tex; \
countdown=`expr $$countdown - 1`; \
done; \
$(DX_DVIPS) -o ../@PACKAGE@.ps refman.dvi
endif DX_COND_ps
## ------------------------------ ##
## Rules specific for PDF output. ##
## ------------------------------ ##
if DX_COND_pdf
DX_CLEAN_PDF = @DX_DOCDIR@/@PACKAGE@.pdf
DX_PDF_GOAL = doxygen-pdf
doxygen-pdf: @DX_DOCDIR@/@PACKAGE@.pdf
@DX_DOCDIR@/@PACKAGE@.pdf: @DX_DOCDIR@/@PACKAGE@.tag
cd @DX_DOCDIR@/latex; \
rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \
$(DX_PDFLATEX) refman.tex; \
$(DX_MAKEINDEX) refman.idx; \
$(DX_PDFLATEX) refman.tex; \
countdown=5; \
while $(DX_EGREP) 'Rerun (LaTeX|to get cross-references right)' \
refman.log > /dev/null 2>&1 \
&& test $$countdown -gt 0; do \
$(DX_PDFLATEX) refman.tex; \
countdown=`expr $$countdown - 1`; \
done; \
mv refman.pdf ../@PACKAGE@.pdf
endif DX_COND_pdf
## ------------------------------------------------- ##
## Rules specific for LaTeX (shared for PS and PDF). ##
## ------------------------------------------------- ##
if DX_COND_latex
DX_CLEAN_LATEX = @DX_DOCDIR@/latex
endif DX_COND_latex
.PHONY: doxygen-run doxygen-doc $(DX_PS_GOAL) $(DX_PDF_GOAL)
.INTERMEDIATE: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
doxygen-run: @DX_DOCDIR@/@PACKAGE@.tag
doxygen-doc: doxygen-run $(DX_PS_GOAL) $(DX_PDF_GOAL)
@DX_DOCDIR@/@PACKAGE@.tag: $(DX_CONFIG) $(pkginclude_HEADERS)
rm -rf @DX_DOCDIR@
$(DX_ENV) $(DX_DOXYGEN) $(DX_CONFIG)
DX_CLEANFILES = \
@DX_DOCDIR@/@PACKAGE@.tag \
-r \
$(DX_CLEAN_HTML) \
$(DX_CLEAN_CHM) \
$(DX_CLEAN_CHI) \
$(DX_CLEAN_MAN) \
$(DX_CLEAN_RTF) \
$(DX_CLEAN_XML) \
$(DX_CLEAN_PS) \
$(DX_CLEAN_PDF) \
$(DX_CLEAN_LATEX)
endif DX_COND_doc
......@@ -60,7 +60,7 @@
#include "config-f90.h"
!> \brief Fortran module which provides the routines to use the two-stage ELPA solver
module ELPA2
! Version 1.1.2, 2011-02-21
......@@ -81,13 +81,49 @@ module ELPA2
public :: solve_evp_real_2stage
public :: solve_evp_complex_2stage
include 'mpif.h'
!******
contains
!-------------------------------------------------------------------------------
!> \brief solve_evp_real_2stage: Fortran function to solve the real eigenvalue problem with a 2 stage approach
!>
!> Parameters
!>
!> \param na Order of matrix a
!>
!> \param nev Number of eigenvalues needed
!>
!> \param a(lda,matrixCols) Distributed matrix for which eigenvalues are to be computed.
!> Distribution is like in Scalapack.
!> The full matrix must be set (not only one half like in scalapack).
!> Destroyed on exit (upper and lower half).
!>
!> \param lda Leading dimension of a
!>
!> \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!>
!> \param q(ldq,matrixCols) On output: Eigenvectors of a
!> Distribution is like in Scalapack.
!> Must be always dimensioned to the full size (corresponding to (na,na))
!> even if only a part of the eigenvalues is needed.
!>
!> \param ldq Leading dimension of q
!>
!> \param nblk blocksize of cyclic distribution, must be the same in both directions!
!>
!> \param matrixCols local columns of matrix a and q
!>
!> \param mpi_comm_rows MPI communicator for rows
!> \param mpi_comm_cols MPI communicator for columns
!> \param mpi_comm_all MPI communicator for the total processor set
!>
!> \param THIS_REAL_ELPA_KERNEL_API (optional) specify the ELPA2 kernel to be used via the API
!>
!> \param useQR (optional) use QR decomposition
!>
!> \result success logical, false if an error occurred
!-------------------------------------------------------------------------------
function solve_evp_real_2stage(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, &
......@@ -95,41 +131,6 @@ function solve_evp_real_2stage(na, nev, a, lda, ev, q, ldq, nblk, &
mpi_comm_all, THIS_REAL_ELPA_KERNEL_API,&
useQR) result(success)
!-------------------------------------------------------------------------------
! solve_evp_real_2stage: Solves the real eigenvalue problem with a 2 stage approach
!
! Parameters
!
! na Order of matrix a
!
! nev Number of eigenvalues needed
!
! a(lda,matrixCols) Distributed matrix for which eigenvalues are to be computed.
! Distribution is like in Scalapack.
! The full matrix must be set (not only one half like in scalapack).
! Destroyed on exit (upper and lower half).
!
! lda Leading dimension of a
! matrixCols local columns of matrix a and q
!
! ev(na) On output: eigenvalues of a, every processor gets the complete set
!
! q(ldq,matrixCols) On output: Eigenvectors of a
! Distribution is like in Scalapack.
! Must be always dimensioned to the full size (corresponding to (na,na))
! even if only a part of the eigenvalues is needed.
!
! ldq Leading dimension of q
!
! nblk blocksize of cyclic distribution, must be the same in both directions!
!
! mpi_comm_rows
! mpi_comm_cols
! MPI-Communicators for rows/columns
! mpi_comm_all
! MPI-Communicator for the total processor set
!
!-------------------------------------------------------------------------------
#ifdef HAVE_DETAILED_TIMINGS
use timings
#endif
......@@ -318,49 +319,48 @@ function solve_evp_real_2stage(na, nev, a, lda, ev, q, ldq, nblk, &
end function solve_evp_real_2stage
!-------------------------------------------------------------------------------
!-------------------------------------------------------------------------------
!> \brief solve_evp_complex_2stage: Fortran function to solve the complex eigenvalue problem with a 2 stage approach
!>
!> Parameters
!>
!> \param na Order of matrix a
!>
!> \param nev Number of eigenvalues needed
!>
!> \param a(lda,matrixCols) Distributed matrix for which eigenvalues are to be computed.
!> Distribution is like in Scalapack.
!> The full matrix must be set (not only one half like in scalapack).
!> Destroyed on exit (upper and lower half).
!>
!> \param lda Leading dimension of a
!>
!> \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!>
!> \param q(ldq,matrixCols) On output: Eigenvectors of a
!> Distribution is like in Scalapack.
!> Must be always dimensioned to the full size (corresponding to (na,na))
!> even if only a part of the eigenvalues is needed.
!>
!> \param ldq Leading dimension of q
!>
!> \param nblk blocksize of cyclic distribution, must be the same in both directions!
!>
!> \param matrixCols local columns of matrix a and q
!>
!> \param mpi_comm_rows MPI communicator for rows
!> \param mpi_comm_cols MPI communicator for columns
!> \param mpi_comm_all MPI communicator for the total processor set
!>
!> \param THIS_COMPLEX_ELPA_KERNEL_API (optional) specify the ELPA2 kernel to be used via the API
!>
!> \result success logical, false if an error occurred
!-------------------------------------------------------------------------------
function solve_evp_complex_2stage(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, &
mpi_comm_all, THIS_COMPLEX_ELPA_KERNEL_API) result(success)
!-------------------------------------------------------------------------------
! solve_evp_complex_2stage: Solves the complex eigenvalue problem with a 2 stage approach
!
! Parameters
!
! na Order of matrix a
!
! nev Number of eigenvalues needed
!
! a(lda,matrixCols) Distributed matrix for which eigenvalues are to be computed.
! Distribution is like in Scalapack.
! The full matrix must be set (not only one half like in scalapack).
! Destroyed on exit (upper and lower half).
!
! lda Leading dimension of a
! matrixCols local columns of matrix a and q
!
! ev(na) On output: eigenvalues of a, every processor gets the complete set
!
! q(ldq,matrixCols) On output: Eigenvectors of a
! Distribution is like in Scalapack.
! Must be always dimensioned to the full size (corresponding to (na,na))
! even if only a part of the eigenvalues is needed.
!
! ldq Leading dimension of q
!
! nblk blocksize of cyclic distribution, must be the same in both directions!
!
! mpi_comm_rows
! mpi_comm_cols
! MPI-Communicators for rows/columns
! mpi_comm_all
! MPI-Communicator for the total processor set
!
!-------------------------------------------------------------------------------
#ifdef HAVE_DETAILED_TIMINGS
use timings
#endif
......
......@@ -49,6 +49,14 @@
#include "config-f90.h"
!c> #include <complex.h>
!c> /*! \brief C interface to create the MPI communicators for ELPA
!c> *
!c> * \param mpi_comm_world MPI global communicator (in)
!c> * \param my_prow Row coordinate of the calling process in the process grid (in)
!c> * \param my_pcol Column coordinate of the calling process in the process grid (in)
!c> * \param mpi_comm_rows Communicator for communicating within rows of processes (out)
!c> * \param mpi_comm_cols Communicator for communicating within columns of processes (out)
!c> * \result int integer error value of the mpi_comm_split function
!c> */
!c> int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
function get_elpa_row_col_comms_wrapper(mpi_comm_world, my_prow, my_pcol, &
mpi_comm_rows, mpi_comm_cols) &
......@@ -65,7 +73,28 @@
mpi_comm_rows, mpi_comm_cols)
end function
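The prototype above is the one emitted into the generated C header. As a minimal usage sketch (not part of this commit; the prototype is declared inline here, and passing the global communicator as a Fortran integer handle via MPI_Comm_c2f is an assumption about the intended calling convention):

/* Sketch: obtaining the ELPA row/column communicators from C. */
#include <mpi.h>
#include <stdio.h>

/* Prototype as documented above (normally provided by the generated ELPA header). */
int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol,
                           int *mpi_comm_rows, int *mpi_comm_cols);

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int nprocs, myid;
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &myid);

  /* Choose a near-square process grid with np_rows * np_cols == nprocs. */
  int np_cols = 1;
  while (np_cols * np_cols <= nprocs) np_cols++;
  np_cols--;
  while (nprocs % np_cols != 0) np_cols--;
  int np_rows = nprocs / np_cols;

  int my_prow = myid % np_rows;   /* row coordinate of this process    */
  int my_pcol = myid / np_rows;   /* column coordinate of this process */

  int mpi_comm_rows, mpi_comm_cols;
  int ierr = elpa_get_communicators((int) MPI_Comm_c2f(MPI_COMM_WORLD),
                                    my_prow, my_pcol,
                                    &mpi_comm_rows, &mpi_comm_cols);
  if (ierr != 0)
    fprintf(stderr, "elpa_get_communicators failed with error %d\n", ierr);

  MPI_Finalize();
  return ierr;
}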
!c> /*! \brief C interface to solve the real eigenvalue problem with 1-stage solver
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> * The full matrix must be set (not only one half like in scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> * Must be always dimensioned to the full size (corresponding to (na,na))
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> *
!c> * \result int: 1 if an error occurred, otherwise 0
!c>*/
!c> int elpa_solve_evp_real_stage1(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols);
function solve_elpa1_evp_real_wrapper(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols) &
......@@ -90,7 +119,28 @@
endif
end function
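For illustration, a hedged sketch of calling the 1-stage real solver through the C interface documented above (not part of this commit; it assumes the distributed arrays, block size, and communicators have already been set up, e.g. with elpa_get_communicators as sketched earlier, and the helper name is made up):

#include <stdio.h>

/* Prototype as documented above. */
int elpa_solve_evp_real_stage1(int na, int nev, double *a, int lda, double *ev,
                               double *q, int ldq, int nblk, int matrixCols,
                               int mpi_comm_rows, int mpi_comm_cols);

/* a: local part of the full symmetric matrix (both halves set), destroyed on exit.
 * ev: receives all na eigenvalues on every process.
 * q: local part of the eigenvector matrix, dimensioned for the full (na,na) size. */
int solve_lowest_eigenpairs(int na, int nev, double *a, int lda, double *ev,
                            double *q, int ldq, int nblk, int matrixCols,
                            int mpi_comm_rows, int mpi_comm_cols)
{
  int err = elpa_solve_evp_real_stage1(na, nev, a, lda, ev, q, ldq, nblk,
                                       matrixCols, mpi_comm_rows, mpi_comm_cols);
  if (err != 0)   /* documented as "1 if an error occurred, otherwise 0" */
    fprintf(stderr, "elpa_solve_evp_real_stage1 reported an error\n");
  return err;
}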
!c> /*! \brief C interface to solve the complex eigenvalue problem with 1-stage solver
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> * The full matrix must be set (not only one half like in scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> * Must be always dimensioned to the full size (corresponding to (na,na))
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> *
!c> * \result int: 1 if an error occurred, otherwise 0
!c> */
!c> int elpa_solve_evp_complex_stage1(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols);
function solve_evp_real_wrapper(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols) &
......@@ -116,7 +166,31 @@
endif
end function
!c> /*! \brief C interface to solve the real eigenvalue problem with 2-stage solver
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> * The full matrix must be set (not only one half like in scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> * Must be always dimensioned to the full size (corresponding to (na,na))
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param mpi_comm_all MPI communicator for the total processor set
!c> * \param THIS_REAL_ELPA_KERNEL_API specify the ELPA2 kernel to be used via the API
!c> * \param useQR use QR decomposition: 1 = yes, 0 = no
!c> *
!c> * \result int: 1 if an error occurred, otherwise 0
!c> */
!c> int elpa_solve_evp_real_stage2(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR);
function solve_elpa2_evp_real_wrapper(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
......@@ -155,6 +229,32 @@
end function
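Analogously, a hedged sketch for the 2-stage real solver (not part of this commit; the wrapper name is invented, and where the integer value for THIS_REAL_ELPA_KERNEL_API comes from is left to the caller, e.g. a kernel constant of the installed ELPA version):

#include <stdio.h>

/* Prototype as documented above. */
int elpa_solve_evp_real_stage2(int na, int nev, double *a, int lda, double *ev,
                               double *q, int ldq, int nblk, int matrixCols,
                               int mpi_comm_rows, int mpi_comm_cols,
                               int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API,
                               int useQR);

int solve_with_elpa2(int na, int nev, double *a, int lda, double *ev,
                     double *q, int ldq, int nblk, int matrixCols,
                     int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all,
                     int kernel, int useQR)
{
  int err = elpa_solve_evp_real_stage2(na, nev, a, lda, ev, q, ldq, nblk,
                                       matrixCols, mpi_comm_rows, mpi_comm_cols,
                                       mpi_comm_all, kernel, useQR);
  if (err != 0)   /* documented as "1 if an error occurred, otherwise 0" */
    fprintf(stderr, "elpa_solve_evp_real_stage2 reported an error\n");
  return err;
}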
!c> /*! \brief C interface to solve the complex eigenvalue problem with 2-stage solver
!c> *
!c> * \param na Order of matrix a
!c> * \param nev Number of eigenvalues needed.
!c> * The smallest nev eigenvalues/eigenvectors are calculated.
!c> * \param a Distributed matrix for which eigenvalues are to be computed.
!c> * Distribution is like in Scalapack.
!c> * The full matrix must be set (not only one half like in scalapack).
!c> * \param lda Leading dimension of a
!c> * \param ev(na) On output: eigenvalues of a, every processor gets the complete set
!c> * \param q On output: Eigenvectors of a
!c> * Distribution is like in Scalapack.
!c> * Must be always dimensioned to the full size (corresponding to (na,na))
!c> * even if only a part of the eigenvalues is needed.
!c> * \param ldq Leading dimension of q
!c> * \param nblk blocksize of cyclic distribution, must be the same in both directions!
!c> * \param matrixCols distributed number of matrix columns
!c> * \param mpi_comm_rows MPI-Communicator for rows
!c> * \param mpi_comm_cols MPI-Communicator for columns
!c> * \param mpi_comm_all MPI communicator for the total processor set
!c> * \param THIS_COMPLEX_ELPA_KERNEL_API specify the ELPA2 kernel to be used via the API
!c> *
!c> * \result int: 1 if an error occurred, otherwise 0
!c> */
!c> int elpa_solve_evp_complex_stage2(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API);
function solve_elpa2_evp_complex_wrapper(na, nev, a, lda, ev, q, ldq, nblk, &
matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all, &
......
......@@ -56,6 +56,21 @@
! distributed along with the original code in the file "COPYING".
#include "config-f90.h"
!> \file print_available_elpa2_kernels.F90
!> \par
!> \brief Provide information which ELPA2 kernels are available on this system
!>
!> \details
!> It is possible to configure ELPA2 such that different compute-intensive
!> "ELPA2 kernels" can be chosen at runtime.
!> The service binary print_available_elpa2_kernels will query the library and tell
!> whether ELPA2 has been configured in this way, and, if this is the case, which kernels can be
!> chosen at runtime.
!> It will furthermore detail whether ELPA has been configured with OpenMP support.
!>
!> Synopsis: print_available_elpa2_kernels
!>
program print_available_elpa2_kernels
use ELPA1
......