Commit 7123a5c9 authored by Andreas Marek

Rename ELPA1 driver function

parent 5834d0c6
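
For context, a minimal sketch (not part of this commit) of how the drivers touched here are reached through the public ELPA interface. It runs on a single MPI rank with a trivial 1x1 process grid so no BLACS setup is needed; the matrix sizes, the ELPA API version number, and the exact argument lists of elpa_allocate/elpa_deallocate/elpa_uninit (which vary slightly between ELPA releases) are illustrative assumptions, not taken from this commit.

program elpa1_usage_sketch
  use elpa      ! public ELPA interface (elpa_t, elpa_init, ELPA_SOLVER_1STAGE, ...)
  use mpi
  implicit none
  integer, parameter :: na = 250, nev = 50, nblk = 16   ! assumed problem sizes
  integer :: status, ierr, i
  real(kind=8), allocatable :: a(:,:), q(:,:), ev(:)
  class(elpa_t), pointer :: e

  call MPI_Init(ierr)

  ! On a single rank the local block-cyclic matrix is the full matrix.
  allocate(a(na,na), q(na,na), ev(na))
  a = 0.0d0
  do i = 1, na
    a(i,i) = real(i, kind=8)        ! simple symmetric test matrix
  end do

  if (elpa_init(20170403) /= ELPA_OK) then
    print *, "ELPA API version not supported"
    stop 1
  end if
  e => elpa_allocate(status)

  ! Describe the matrix and its (trivial) MPI distribution.
  call e%set("na",          na,   status)
  call e%set("nev",         nev,  status)
  call e%set("local_nrows", na,   status)
  call e%set("local_ncols", na,   status)
  call e%set("nblk",        nblk, status)
  call e%set("mpi_comm_parent", MPI_COMM_WORLD, status)
  call e%set("process_row", 0,    status)
  call e%set("process_col", 0,    status)
  status = e%setup()

  call e%set("solver", ELPA_SOLVER_1STAGE, status)

  ! Host arrays: this resolves to the *_all_host_arrays_* drivers renamed in
  ! this commit. With GPU support enabled and device-resident data, passing
  ! type(c_ptr) handles instead would resolve to the new *_device_pointer_*
  ! drivers introduced below.
  call e%eigenvectors(a, ev, q, status)

  call elpa_deallocate(e, status)
  call elpa_uninit(status)
  call MPI_Finalize(ierr)
end program elpa1_usage_sketch
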
......@@ -147,7 +147,7 @@ module elpa1_impl
contains
!> \brief elpa_solve_evp_real_1stage_double_impl: Fortran function to solve the real double-precision eigenvalue problem with 1-stage solver
!> \brief elpa_solve_evp_real_1stage_host_arrays_double_impl: Fortran function to solve the real double-precision eigenvalue problem with 1-stage solver
!>
!> \details
!> \param obj elpa_t object contains:
......@@ -179,13 +179,55 @@ contains
#define REALCASE 1
#define DOUBLE_PRECISION 1
#undef ACTIVATE_SKEW
#undef DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef REALCASE
#undef DOUBLE_PRECISION
!> \brief elpa_solve_evp_real_1stage_device_pointer_double_impl: Fortran function to solve the real double-precision eigenvalue problem with 1-stage solver
!>
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
!> \param - obj%nev number of eigenvalues/vectors to be computed
!> The smallest nev eigenvalues/eigenvectors are calculated.
!> \param - obj%local_nrows Leading dimension of a
!> \param - obj%local_ncols local columns of matrix q
!> \param - obj%nblk blocksize of cyclic distribution, must be the same in both directions!
!> \param - obj%mpi_comm_rows MPI communicator for rows
!> \param - obj%mpi_comm_cols MPI communicator for columns
!> \param - obj%mpi_comm_parent         parent (global) MPI communicator
!> \param - obj%gpu use GPU version (1 or 0)
!>
!> \param a Distributed matrix for which eigenvalues are to be computed.
!> Distribution is like in Scalapack.
!> The full matrix must be set (not only one half like in scalapack).
!> Destroyed on exit (upper and lower half).
!>
!> \param ev On output: eigenvalues of a, every processor gets the complete set
!>
!> \param q On output: Eigenvectors of a
!> Distribution is like in Scalapack.
!>                                   Must always be dimensioned to the full size (corresponding to (na,na))
!> even if only a part of the eigenvalues is needed.
!>
!>
!> \result success
#define REALCASE 1
#define DOUBLE_PRECISION 1
#undef ACTIVATE_SKEW
#define DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef DEVICE_POINTER
#undef REALCASE
#undef DOUBLE_PRECISION
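! Note: with REALCASE, DOUBLE_PRECISION and DEVICE_POINTER defined, the included
! elpa1_template.F90 expands to elpa_solve_evp_real_1stage_device_pointer_double_impl,
! whose a, q and ev arguments are type(c_ptr) device handles rather than host arrays.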
#ifdef WANT_SINGLE_PRECISION_REAL
!> \brief elpa_solve_evp_real_1stage_single_impl: Fortran function to solve the real single-precision eigenvalue problem with 1-stage solver
!> \brief elpa_solve_evp_real_1stage_host_arrays_single_impl: Fortran function to solve the real single-precision eigenvalue problem with 1-stage solver
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
......@@ -217,13 +259,54 @@ contains
#define REALCASE 1
#define SINGLE_PRECISION 1
#undef ACTIVATE_SKEW
#undef DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef REALCASE
#undef SINGLE_PRECISION
!> \brief elpa_solve_evp_real_1stage_device_pointer_single_impl: Fortran function to solve the real single-precision eigenvalue problem with 1-stage solver
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
!> \param - obj%nev number of eigenvalues/vectors to be computed
!> The smallest nev eigenvalues/eigenvectors are calculated.
!> \param - obj%local_nrows Leading dimension of a
!> \param - obj%local_ncols local columns of matrix q
!> \param - obj%nblk blocksize of cyclic distribution, must be the same in both directions!
!> \param - obj%mpi_comm_rows MPI communicator for rows
!> \param - obj%mpi_comm_cols MPI communicator for columns
!> \param - obj%mpi_comm_parent         parent (global) MPI communicator
!> \param - obj%gpu use GPU version (1 or 0)
!>
!> \param a Distributed matrix for which eigenvalues are to be computed.
!> Distribution is like in Scalapack.
!> The full matrix must be set (not only one half like in scalapack).
!> Destroyed on exit (upper and lower half).
!>
!> \param ev On output: eigenvalues of a, every processor gets the complete set
!>
!> \param q On output: Eigenvectors of a
!> Distribution is like in Scalapack.
!>                                   Must always be dimensioned to the full size (corresponding to (na,na))
!> even if only a part of the eigenvalues is needed.
!>
!>
!> \result success
#define REALCASE 1
#define SINGLE_PRECISION 1
#undef ACTIVATE_SKEW
#define DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef DEVICE_POINTER
#undef REALCASE
#undef SINGLE_PRECISION
#endif /* WANT_SINGLE_PRECISION_REAL */
!> \brief elpa_solve_evp_complex_1stage_double_impl: Fortran function to solve the complex double-precision eigenvalue problem with 1-stage solver
!> \brief elpa_solve_evp_complex_1stage_host_arrays_double_impl: Fortran function to solve the complex double-precision eigenvalue problem with 1-stage solver
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
......@@ -254,15 +337,54 @@ contains
#define COMPLEXCASE 1
#define DOUBLE_PRECISION 1
#undef ACTIVATE_SKEW
#undef DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef DOUBLE_PRECISION
#undef COMPLEXCASE
!> \brief elpa_solve_evp_complex_1stage_device_pointer_double_impl: Fortran function to solve the complex double-precision eigenvalue problem with 1-stage solver
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
!> \param - obj%nev number of eigenvalues/vectors to be computed
!> The smallest nev eigenvalues/eigenvectors are calculated.
!> \param - obj%local_nrows Leading dimension of a
!> \param - obj%local_ncols local columns of matrix q
!> \param - obj%nblk blocksize of cyclic distribution, must be the same in both directions!
!> \param - obj%mpi_comm_rows MPI communicator for rows
!> \param - obj%mpi_comm_cols MPI communicator for columns
!> \param - obj%mpi_comm_parent         parent (global) MPI communicator
!> \param - obj%gpu use GPU version (1 or 0)
!>
!> \param a Distributed matrix for which eigenvalues are to be computed.
!> Distribution is like in Scalapack.
!> The full matrix must be set (not only one half like in scalapack).
!> Destroyed on exit (upper and lower half).
!>
!> \param ev On output: eigenvalues of a, every processor gets the complete set
!>
!> \param q On output: Eigenvectors of a
!> Distribution is like in Scalapack.
!>                                   Must always be dimensioned to the full size (corresponding to (na,na))
!> even if only a part of the eigenvalues is needed.
!>
!>
!> \result success
#define COMPLEXCASE 1
#define DOUBLE_PRECISION 1
#undef ACTIVATE_SKEW
#define DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef DEVICE_POINTER
#undef DOUBLE_PRECISION
#undef COMPLEXCASE
#ifdef WANT_SINGLE_PRECISION_COMPLEX
!> \brief elpa_solve_evp_complex_1stage_single_impl: Fortran function to solve the complex single-precision eigenvalue problem with 1-stage solver
!> \brief elpa_solve_evp_complex_1stage_host_arrays_single_impl: Fortran function to solve the complex single-precision eigenvalue problem with 1-stage solver
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
......@@ -294,15 +416,55 @@ contains
#define COMPLEXCASE 1
#define SINGLE_PRECISION
#undef ACTIVATE_SKEW
#undef DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef COMPLEXCASE
#undef SINGLE_PRECISION
!> \brief elpa_solve_evp_complex_1stage_device_pointer_single_impl: Fortran function to solve the complex single-precision eigenvalue problem with 1-stage solver
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
!> \param - obj%nev number of eigenvalues/vectors to be computed
!> The smallest nev eigenvalues/eigenvectors are calculated.
!> \param - obj%local_nrows Leading dimension of a
!> \param - obj%local_ncols local columns of matrix q
!> \param - obj%nblk blocksize of cyclic distribution, must be the same in both directions!
!> \param - obj%mpi_comm_rows MPI communicator for rows
!> \param - obj%mpi_comm_cols MPI communicator for columns
!> \param - obj%mpi_comm_parent         parent (global) MPI communicator
!> \param - obj%gpu use GPU version (1 or 0)
!>
!> \param a Distributed matrix for which eigenvalues are to be computed.
!> Distribution is like in Scalapack.
!> The full matrix must be set (not only one half like in scalapack).
!> Destroyed on exit (upper and lower half).
!>
!> \param ev On output: eigenvalues of a, every processor gets the complete set
!>
!> \param q On output: Eigenvectors of a
!> Distribution is like in Scalapack.
!>                                   Must always be dimensioned to the full size (corresponding to (na,na))
!> even if only a part of the eigenvalues is needed.
!>
!>
!> \result success
#define COMPLEXCASE 1
#define SINGLE_PRECISION
#undef ACTIVATE_SKEW
#define DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef DEVICE_POINTER
#undef COMPLEXCASE
#undef SINGLE_PRECISION
#endif /* WANT_SINGLE_PRECISION_COMPLEX */
#ifdef HAVE_SKEWSYMMETRIC
!> \brief elpa_solve_skew_evp_real_1stage_double_impl: Fortran function to solve the real double-precision skew-symmetric eigenvalue problem with 1-stage solver
!> \brief elpa_solve_skew_evp_real_1stage_host_arrays_double_impl: Fortran function to solve the real double-precision skew-symmetric eigenvalue problem with 1-stage solver
!>
!> \details
!> \param obj elpa_t object contains:
......@@ -334,14 +496,55 @@ contains
#define REALCASE 1
#define DOUBLE_PRECISION 1
#define ACTIVATE_SKEW
#undef DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef ACTIVATE_SKEW
#undef REALCASE
#undef DOUBLE_PRECISION
!> \brief elpa_solve_skew_evp_real_1stage_device_pointer_double_impl: Fortran function to solve the real double-precision skew-symmetric eigenvalue problem with 1-stage solver
!>
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
!> \param - obj%nev number of eigenvalues/vectors to be computed
!> The smallest nev eigenvalues/eigenvectors are calculated.
!> \param - obj%local_nrows Leading dimension of a
!> \param - obj%local_ncols local columns of matrix q
!> \param - obj%nblk blocksize of cyclic distribution, must be the same in both directions!
!> \param - obj%mpi_comm_rows MPI communicator for rows
!> \param - obj%mpi_comm_cols MPI communicator for columns
!> \param - obj%mpi_comm_parent         parent (global) MPI communicator
!> \param - obj%gpu use GPU version (1 or 0)
!>
!> \param a Distributed matrix for which eigenvalues are to be computed.
!> Distribution is like in Scalapack.
!> The full matrix must be set (not only one half like in scalapack).
!> Destroyed on exit (upper and lower half).
!>
!> \param ev On output: eigenvalues of a, every processor gets the complete set
!>
!> \param q On output: Eigenvectors of a
!> Distribution is like in Scalapack.
!>                                   Must always be dimensioned to the full size (corresponding to (na,na))
!> even if only a part of the eigenvalues is needed.
!>
!>
!> \result success
#define REALCASE 1
#define DOUBLE_PRECISION 1
#define ACTIVATE_SKEW
#define DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef ACTIVATE_SKEW
#undef DEVICE_POINTER
#undef REALCASE
#undef DOUBLE_PRECISION
#ifdef WANT_SINGLE_PRECISION_REAL
!> \brief elpa_solve_evp_real_1stage_single_impl: Fortran function to solve the real single-precision eigenvalue problem with 1-stage solver
!> \brief elpa_solve_skew_evp_real_1stage_host_arrays_single_impl: Fortran function to solve the real single-precision skew-symmetric eigenvalue problem with 1-stage solver
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
......@@ -373,9 +576,50 @@ contains
#define REALCASE 1
#define SINGLE_PRECISION 1
#define ACTIVATE_SKEW
#undef DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef REALCASE
#undef ACTIVATE_SKEW
#undef SINGLE_PRECISION
!> \brief elpa_solve_skew_evp_real_1stage_device_pointer_single_impl: Fortran function to solve the real single-precision skew-symmetric eigenvalue problem with 1-stage solver
!> \details
!> \param obj elpa_t object contains:
!> \param - obj%na Order of matrix
!> \param - obj%nev number of eigenvalues/vectors to be computed
!> The smallest nev eigenvalues/eigenvectors are calculated.
!> \param - obj%local_nrows Leading dimension of a
!> \param - obj%local_ncols local columns of matrix q
!> \param - obj%nblk blocksize of cyclic distribution, must be the same in both directions!
!> \param - obj%mpi_comm_rows MPI communicator for rows
!> \param - obj%mpi_comm_cols MPI communicator for columns
!> \param - obj%mpi_comm_parent         parent (global) MPI communicator
!> \param - obj%gpu use GPU version (1 or 0)
!>
!> \param a Distributed matrix for which eigenvalues are to be computed.
!> Distribution is like in Scalapack.
!> The full matrix must be set (not only one half like in scalapack).
!> Destroyed on exit (upper and lower half).
!>
!> \param ev On output: eigenvalues of a, every processor gets the complete set
!>
!> \param q On output: Eigenvectors of a
!> Distribution is like in Scalapack.
!>                                   Must always be dimensioned to the full size (corresponding to (na,na))
!> even if only a part of the eigenvalues is needed.
!>
!>
!> \result success
#define REALCASE 1
#define SINGLE_PRECISION 1
#define ACTIVATE_SKEW
#define DEVICE_POINTER
#include "../general/precision_macros.h"
#include "elpa1_template.F90"
#undef REALCASE
#undef DEVICE_POINTER
#undef ACTIVATE_SKEW
#undef SINGLE_PRECISION
#endif /* WANT_SINGLE_PRECISION_REAL */
......
......@@ -55,30 +55,61 @@
#include "../general/sanity.F90"
#include "../general/error_checking.inc"
#ifdef DEVICE_POINTER
#ifdef ACTIVATE_SKEW
function elpa_solve_skew_evp_&
&MATH_DATATYPE&
&_1stage_all_host_arrays_&
&_1stage_device_pointer_&
&PRECISION&
&_impl (obj, &
#else /* ACTIVATE_SKEW */
function elpa_solve_evp_&
&MATH_DATATYPE&
&_1stage_device_pointer_&
&PRECISION&
&_impl (obj, &
#endif /* ACTIVATE_SKEW */
#ifdef REDISTRIBUTE_MATRIX
aExtern, &
#else
a, &
#endif /* REDISTRIBUTE_MATRIX */
ev, &
#ifdef REDISTRIBUTE_MATRIX
qExtern) result(success)
#else
q) result(success)
#endif /* REDISTRIBUTE_MATRIX */
#else /* DEVICE_POINTER */
#ifdef ACTIVATE_SKEW
function elpa_solve_skew_evp_&
&MATH_DATATYPE&
&_1stage_all_host_arrays_&
&PRECISION&
&_impl (obj, &
#else /* ACTIVATE_SKEW */
function elpa_solve_evp_&
&MATH_DATATYPE&
&_1stage_all_host_arrays_&
&PRECISION&
&_impl (obj, &
#endif
#endif /* ACTIVATE_SKEW */
#ifdef REDISTRIBUTE_MATRIX
aExtern, &
#else
a, &
#endif
#endif /* REDISTRIBUTE_MATRIX */
ev, &
#ifdef REDISTRIBUTE_MATRIX
qExtern) result(success)
#else
q) result(success)
#endif
#endif /* REDISTRIBUTE_MATRIX */
#endif /* DEVICE_POINTER */
use precision
use cuda_functions
use hip_functions
......@@ -97,7 +128,21 @@ function elpa_solve_evp_&
implicit none
#include "../general/precision_kinds.F90"
class(elpa_abstract_impl_t), intent(inout) :: obj
#ifdef DEVICE_POINTER
type(c_ptr) :: ev
#else
real(kind=REAL_DATATYPE), intent(out) :: ev(obj%na)
#endif
#ifdef DEVICE_POINTER
#ifdef REDISTRIBUTE_MATRIX
type(c_ptr) :: aExtern, qExtern
#else /* REDISTRIBUTE_MATRIX */
type(c_ptr) :: a, q
#endif /* REDISTRIBUTE_MATRIX */
#else /* DEVICE_POINTER */
#ifdef REDISTRIBUTE_MATRIX
......@@ -129,10 +174,18 @@ function elpa_solve_evp_&
#endif /* REDISTRIBUTE_MATRIX */
#endif /* DEVICE_POINTER */
#ifdef DEVICE_POINTER
#ifdef REDISTRIBUTE_MATRIX
type(c_ptr) :: a, q
#endif
#else /* DEVICE_POINTER */
#ifdef REDISTRIBUTE_MATRIX
MATH_DATATYPE(kind=rck), pointer :: a(:,:)
MATH_DATATYPE(kind=rck), pointer :: q(:,:)
#endif
#endif /* DEVICE_POINTER */
#if REALCASE == 1
real(kind=C_DATATYPE_KIND), allocatable :: tau(:)
......@@ -258,6 +311,7 @@ function elpa_solve_evp_&
endif
success = .true.
#ifndef DEVICE_POINTER
#ifdef REDISTRIBUTE_MATRIX
if (present(qExtern)) then
#else
......@@ -267,6 +321,8 @@ function elpa_solve_evp_&
else
obj%eigenvalues_only = .true.
endif
#endif
na = obj%na
nev = obj%nev
......@@ -285,12 +341,15 @@ function elpa_solve_evp_&
stop
endif
#ifndef DEVICE_POINTER
#ifdef REDISTRIBUTE_MATRIX
#include "../helpers/elpa_redistribute_template.F90"
#endif /* REDISTRIBUTE_MATRIX */
#endif
! special case na = 1
if (na .eq. 1) then
#ifndef DEVICE_POINTER
#if REALCASE == 1
ev(1) = a(1,1)
#endif
......@@ -300,6 +359,7 @@ function elpa_solve_evp_&
if (.not.(obj%eigenvalues_only)) then
q(1,1) = ONE
endif
#endif
! restore original OpenMP settings
#ifdef WITH_OPENMP_TRADITIONAL
......@@ -461,7 +521,7 @@ function elpa_solve_evp_&
! for elpa1 the easy thing is that the individual phases of the algorithm
! do not share any data on the GPU.
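! (The phases are: tridiag_* reduces a to tridiagonal form, storing the
! Householder factors in a and tau; solve_tridi_* solves the tridiagonal
! eigenproblem for ev and q_actual; trans_ev_* back-transforms the
! eigenvectors using the stored Householder vectors.)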
#ifndef DEVICE_POINTER
! allocate a dummy q (q_dummy) if eigenvectors should not be computed and thus q is NOT present
if (.not.(obj%eigenvalues_only)) then
q_actual => q(1:matrixRows,1:matrixCols)
......@@ -470,6 +530,7 @@ function elpa_solve_evp_&
check_allocate("elpa1_template: q_dummy", istat, errorMessage)
q_actual => q_dummy
endif
#endif
#if COMPLEXCASE == 1
l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
......@@ -497,12 +558,15 @@ function elpa_solve_evp_&
#ifdef WITH_NVTX
call nvtxRangePush("tridi")
#endif
#ifndef DEVICE_POINTER
call tridiag_&
&MATH_DATATYPE&
&_&
&PRECISION&
& (obj, na, a, matrixRows, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, ev, e, tau, do_useGPU_tridiag, wantDebug, &
nrThreads, isSkewsymmetric)
#endif
#ifdef WITH_NVTX
call nvtxRangePop()
......@@ -521,6 +585,7 @@ function elpa_solve_evp_&
#ifdef WITH_NVTX
call nvtxRangePush("solve")
#endif
#ifndef DEVICE_POINTER
call solve_tridi_&
&PRECISION&
& (obj, na, nev, ev, e, &
......@@ -532,6 +597,7 @@ function elpa_solve_evp_&
#endif
nblk, matrixCols, mpi_comm_all, mpi_comm_rows, mpi_comm_cols, do_useGPU_solve_tridi, wantDebug, &
success, nrThreads)
#endif
#ifdef WITH_NVTX
call nvtxRangePop()
......@@ -563,11 +629,13 @@ function elpa_solve_evp_&
endif
check_pd = 0
#ifndef DEVICE_POINTER
do i = 1, na
if (ev(i) .gt. thres_pd) then
check_pd = check_pd + 1
endif
enddo
#endif
if (check_pd .lt. na) then
! not positive definite => eigenvectors needed
do_trans_ev = .true.
......@@ -579,13 +647,16 @@ function elpa_solve_evp_&
if (do_trans_ev) then
! q must be given, that's why from here on we can use q and not q_actual
#ifndef DEVICE_POINTER
#if COMPLEXCASE == 1
q(1:l_rows,1:l_cols_nev) = q_real(1:l_rows,1:l_cols_nev)
#endif
#endif
if (isSkewsymmetric) then
! Extra transformation step for skew-symmetric matrix. Multiplication with diagonal complex matrix D.
! This makes the eigenvectors complex.
! For now the real part of the eigenvectors is generated in the first half of q, the imaginary part in the second half.
#ifndef DEVICE_POINTER
q(1:matrixRows, matrixCols+1:2*matrixCols) = 0.0
do i = 1, matrixRows
! global_index = indxl2g(i, nblk, my_prow, 0, np_rows)
......@@ -605,6 +676,7 @@ function elpa_solve_evp_&
q(i,1:matrixCols) = 0
end if
end do
#endif
endif
call obj%timer%start("back")
......@@ -614,6 +686,8 @@ function elpa_solve_evp_&
#ifdef WITH_NVTX
call nvtxRangePush("trans_ev")
#endif
#ifndef DEVICE_POINTER
! In the skew-symmetric case this transforms the real part
call trans_ev_&
&MATH_DATATYPE&
......@@ -630,6 +704,7 @@ function elpa_solve_evp_&
& (obj, na, nev, a, matrixRows, tau, q(1:matrixRows, matrixCols+1:2*matrixCols), matrixRows, nblk, matrixCols, &
mpi_comm_rows, mpi_comm_cols, do_useGPU_trans_ev)
endif
#endif
#ifdef WITH_NVTX
call nvtxRangePop()
......@@ -661,6 +736,7 @@ function elpa_solve_evp_&
call omp_set_num_threads(omp_threads_caller)
#endif
#ifndef DEVICE_POINTER
#ifdef REDISTRIBUTE_MATRIX
! redistribute back if necessary
if (doRedistributeMatrix) then
......@@ -700,6 +776,8 @@ function elpa_solve_evp_&
call blacs_gridexit(blacs_ctxt_)
endif
#endif /* REDISTRIBUTE_MATRIX */
#endif
#ifdef ACTIVATE_SKEW
call obj%timer%stop("elpa_solve_skew_evp_&
#else
......
......@@ -120,16 +120,20 @@ module elpa_api
elpa_eigenvectors_all_host_arrays_fc
generic, public :: eigenvectors_double => & !< method eigenvectors for solving the full eigenvalue problem
elpa_eigenvectors_all_host_arrays_d !< the eigenvalues and (parts of) the eigenvectors are computed
elpa_eigenvectors_all_host_arrays_d, & !< for (real) double data, can be used with host arrays or
elpa_eigenvectors_device_pointer_d !< GPU device pointers in the GPU version
generic, public :: eigenvectors_float => & !< method eigenvectors for solving the full eigenvalue problem
elpa_eigenvectors_all_host_arrays_f !< the eigenvalues and (parts of) the eigenvectors are computed
elpa_eigenvectors_all_host_arrays_f, & !< for (real) float data, can be used with host arrays or
elpa_eigenvectors_device_pointer_f !< GPU device pointers in the GPU version
generic, public :: eigenvectors_double_complex => & !< method eigenvectors for solving the full eigenvalue problem
elpa_eigenvectors_all_host_arrays_dc !< the eigenvalues and (parts of) the eigenvectors are computed
elpa_eigenvectors_all_host_arrays_dc, & !< for complex_double data, can be used with host arrays or
elpa_eigenvectors_device_pointer_dc !< GPU device pointers in the GPU version
generic, public :: eigenvectors_float_complex => & !< method eigenvectors for solving the full eigenvalue problem
elpa_eigenvectors_all_host_arrays_fc !< the eigenvalues and (parts of) the eigenvectors are computed
generic, public :: eigenvectors_float_complex => & !< method eigenvectors for solving the full eigenvalue problem
elpa_eigenvectors_all_host_arrays_fc, & !< for complex_float data, can be used with host arrays or
elpa_eigenvectors_device_pointer_fc !< GPU device pointers in the GPU version
generic, public :: eigenvalues => & !< method eigenvalues for solving the eigenvalue problem
elpa_eigenvalues_all_host_arrays_d, & !< only the eigenvalues are computed
......@@ -138,17 +142,20 @@ module elpa_api
elpa_eigenvalues_all_host_arrays_fc
generic, public :: eigenvalues_double => & !< method eigenvalues for solving the eigenvalue problem
elpa_eigenvalues_all_host_arrays_d !< only the eigenvalues are computed
elpa_eigenvalues_all_host_arrays_d, & !< for (real) double data, can be used with host arrays or
elpa_eigenvalues_device_pointer_d !< GPU device pointers in the GPU version
generic, public :: eigenvalues_float => & !< method eigenvalues for solving the eigenvalue problem
elpa_eigenvalues_all_host_arrays_f !< only the eigenvalues are computed
elpa_eigenvalues_all_host_arrays_f, & !< for (real) float data, can be used with host arrays or
elpa_eigenvalues_device_pointer_f !< GPU device pointers in the GPU version
generic, public :: eigenvalues_double_complex => & !< method eigenvalues for solving the eigenvalue problem
elpa_eigenvalues_all_host_arrays_dc !< only the eigenvalues are computed