Commit 5deb8c12 authored by Andreas Marek's avatar Andreas Marek
Browse files

Simple test for elpa_cholesky_complex

parent e3203c67
......@@ -199,6 +199,7 @@ dist_files_DATA = \
test/Fortran/test_transpose_multiply_real.F90 \
test/Fortran/test_transpose_multiply_complex.F90 \
test/Fortran/test_cholesky_real.F90 \
test/Fortran/test_cholesky_complex.F90 \
dist_doc_DATA = LICENSE Changelog COPYING/COPYING COPYING/gpl.txt COPYING/lgpl.txt
......@@ -225,6 +226,7 @@ noinst_PROGRAMS = \
elpa1_real_transpose_multiply@SUFFIX@ \
elpa1_complex_transpose_multiply@SUFFIX@ \
elpa1_real_cholesky@SUFFIX@ \
elpa1_complex_cholesky@SUFFIX@ \
elpa1_test_real_with_c@SUFFIX@ \
elpa1_test_real_c_version@SUFFIX@ \
elpa1_test_complex_c_version@SUFFIX@ \
......@@ -299,6 +301,11 @@ elpa1_real_cholesky@SUFFIX@_LDADD = $(build_lib)
elpa1_real_cholesky@SUFFIX@_FCFLAGS = $(AM_FCFLAGS) @FC_MODOUT@private_modules @FC_MODINC@private_modules
EXTRA_elpa1_real_cholesky@SUFFIX@_DEPENDENCIES = test/Fortran/elpa_print_headers.X90
elpa1_complex_cholesky@SUFFIX@_SOURCES = test/Fortran/test_cholesky_complex.F90
elpa1_complex_cholesky@SUFFIX@_LDADD = $(build_lib)
elpa1_complex_cholesky@SUFFIX@_FCFLAGS = $(AM_FCFLAGS) @FC_MODOUT@private_modules @FC_MODINC@private_modules
EXTRA_elpa1_complex_cholesky@SUFFIX@_DEPENDENCIES = test/Fortran/elpa_print_headers.X90
elpa1_test_real_with_c@SUFFIX@_SOURCES = test/Fortran/test_real_with_c.F90
elpa1_test_real_with_c@SUFFIX@_LDADD = $(build_lib)
elpa1_test_real_with_c@SUFFIX@_FCFLAGS = $(AM_FCFLAGS) @FC_MODOUT@private_modules @FC_MODINC@private_modules
......@@ -365,6 +372,7 @@ check_SCRIPTS = \ \ \ \ \
elpa2_print_kernels@SUFFIX@ \ \ \
! This file is part of ELPA.
! The ELPA library was originally created by the ELPA consortium,
! consisting of the following organizations:
! - Max Planck Computing and Data Facility (MPCDF), formerly known as
! Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
! - Bergische Universität Wuppertal, Lehrstuhl für angewandte
! Informatik,
! - Technische Universität München, Lehrstuhl für Informatik mit
! Schwerpunkt Wissenschaftliches Rechnen ,
! - Fritz-Haber-Institut, Berlin, Abt. Theorie,
! - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
! Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
! and
! - IBM Deutschland GmbH
! More information can be found here: http://elpa.mpcdf.mpg.de/
! ELPA is free software: you can redistribute it and/or modify
! it under the terms of the version 3 of the license of the
! GNU Lesser General Public License as published by the Free
! Software Foundation.
! ELPA is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU Lesser General Public License for more details.
! You should have received a copy of the GNU Lesser General Public License
! along with ELPA. If not, see <http://www.gnu.org/licenses/>
! ELPA reflects a substantial effort on the part of the original
! ELPA consortium, and we ask you to respect the spirit of the
! license that we chose: i.e., please contribute any changes you
! may have back to the original ELPA library distribution, and keep
! any derivatives of ELPA under the same license that we chose for
! the original distribution, the GNU Lesser General Public License.
#include "config-f90.h"
program test_cholesky
use precision
use ELPA1
use elpa_utilities
use test_util
use mod_read_input_parameters
use mod_check_correctness
use mod_setup_mpi
use mod_blacs_infrastructure
use mod_prepare_matrix
use elpa_mpi
use redirect
use timings
use output_types
implicit none
! Please set system size parameters below!
! na: System size
! nev: Number of eigenvectors to be calculated
! nblk: Blocking factor in block cyclic distribution
integer(kind=ik) :: nblk
integer(kind=ik) :: na, nev
integer(kind=ik) :: np_rows, np_cols, na_rows, na_cols
integer(kind=ik) :: myid, nprocs, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols
integer(kind=ik) :: i, mpierr, my_blacs_ctxt, sc_desc(9), info, nprow, npcol
integer, external :: numroc
real(kind=rk), allocatable :: ev(:), xr(:,:)
complex(kind=ck), allocatable :: a(:,:), b(:,:), c(:,:), z(:,:), tmp1(:,:), tmp2(:,:), as(:,:)
complex(kind=ck), allocatable :: d(:), e(:)
complex(kind=rk) :: diagonalElement, subdiagonalElement
integer(kind=ik) :: loctmp ,rowLocal, colLocal
complex(kind=ck), parameter :: CZERO = (0.d0,0.d0), CONE = (1.d0,0.d0)
real(kind=rk) :: norm, normmax
#ifdef WITH_MPI
real(kind=rk) :: pzlange
real(kind=rk) :: zlange
integer(kind=ik) :: iseed(4096) ! Random seed, size should be sufficient for every generator
complex(kind=ck), parameter :: pi = (3.141592653589793238462643383279d0, 0.d0)
integer(kind=ik) :: STATUS
integer(kind=ik) :: omp_get_max_threads, required_mpi_thread_level, &
type(output_t) :: write_to_file
logical :: success
character(len=8) :: task_suffix
integer(kind=ik) :: j
success = .true.
call read_input_parameters(na, nev, nblk, write_to_file)
! MPI Initialization
call setup_mpi(myid, nprocs)
! initialise the timing functionality
call timer%measure_flops(.true.)
call timer%measure_allocated_memory(.true.)
call timer%measure_virtual_memory(.true.)
call timer%measure_max_allocated_memory(.true.)
call timer%set_print_options(&
print_flop_count=.true., &
print_flop_rate=.true., &
print_allocated_memory = .true. , &
print_virtual_memory=.true., &
call timer%enable()
call timer%start("program")
do np_cols = NINT(SQRT(REAL(nprocs))),2,-1
if(mod(nprocs,np_cols) == 0 ) exit
! at the end of the above loop, nprocs is always divisible by np_cols
np_rows = nprocs/np_cols
if(myid==0) then
print '(3(a,i0))','Matrix size=',na,', Block size=',nblk
print '(3(a,i0))','Number of processor rows=',np_rows,', cols=',np_cols,', total=',nprocs
print *
! Set up BLACS context and MPI communicators
! The BLACS context is only necessary for using Scalapack.
! For ELPA, the MPI communicators along rows/cols are sufficient,
! and the grid setup may be done in an arbitrary way as long as it is
! consistent (i.e. 0<=my_prow<np_rows, 0<=my_pcol<np_cols and every
! process has a unique (my_prow,my_pcol) pair).
call set_up_blacsgrid(mpi_comm_world, my_blacs_ctxt, np_rows, np_cols, &
nprow, npcol, my_prow, my_pcol)
if (myid==0) then
print '(a)','| Past BLACS_Gridinfo.'
end if
! All ELPA routines need MPI communicators for communicating within
! rows or columns of processes, these are set in get_elpa_communicators.
mpierr = get_elpa_communicators(mpi_comm_world, my_prow, my_pcol, &
mpi_comm_rows, mpi_comm_cols)
if (myid==0) then
print '(a)','| Past split communicator setup for rows and columns.'
end if
call set_up_blacs_descriptor(na ,nblk, my_prow, my_pcol, np_rows, np_cols, &
na_rows, na_cols, sc_desc, my_blacs_ctxt, info)
if (myid==0) then
print '(a)','| Past scalapack descriptor setup.'
end if
! Allocate matrices and set up a test matrix for the eigenvalue problem
call timer%start("set up matrix")
allocate(a (na_rows,na_cols))
allocate(b (na_rows,na_cols))
allocate(c (na_rows,na_cols))
allocate(z (na_rows,na_cols))
! allocate(xr(na_rows,na_cols))
! call prepare_matrix(na, myid, sc_desc, iseed, xr, a, z, as)
! deallocate(xr)
! b(:,:) = 2.0 * a(:,:)
! c(:,:) = 0.0
a(:,:) = CONE - CONE
diagonalElement = (2.546_rk, 0.d0)
do i = 1, na
if (map_global_array_index_to_local_index(i, i, rowLocal, colLocal, nblk, np_rows, np_cols, my_prow, my_pcol)) then
a(rowLocal,colLocal) = diagonalElement * abs(cos( pi*real(i,kind=rk)/ real(na+1,kind=rk) ))
as(:,:) = a(:,:)
call timer%stop("set up matrix")
! Calculate eigenvalues/eigenvectors
if (myid==0) then
print '(a)','| Compute cholesky decomposition ... '
print *
end if
#ifdef WITH_MPI
call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
success = elpa_cholesky_complex(na, a, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, .true.)
if (.not.(success)) then
write(error_unit,*) " elpa_cholesky_complex produced an error! Aborting..."
#ifdef WITH_MPI
call MPI_ABORT(mpi_comm_world, 1, mpierr)
if (myid==0) then
print '(a)','| Solve cholesky decomposition complete.'
print *
end if
! Test correctness of result (using plain scalapack routines)
tmp1(:,:) = 0.0_ck
! tmp1 = a**H
#ifdef WITH_MPI
call pztranc(na, na, 1.0_rk, a, 1, 1, sc_desc, CZERO, tmp1, 1, 1, sc_desc)
tmp1 = transpose(conjg(a))
! tmp2 = a * a**H
#ifdef WITH_MPI
call pzgemm("N","N", na, na, na, CONE, a, 1, 1, sc_desc, tmp1, 1, 1, &
sc_desc, CZERO, tmp2, 1, 1, sc_desc)
call zgemm("N","N", na, na, na, CONE, a, na, tmp1, na, CZERO, tmp2, na)
! compare tmp2 with c
tmp2(:,:) = tmp2(:,:) - as(:,:)
#ifdef WITH_MPI
norm = pzlange("M",na, na, tmp2, 1, 1, sc_desc, tmp1)
norm = zlange("M",na, na, tmp2, na_rows, tmp1)
#ifdef WITH_MPI
call mpi_allreduce(norm,normmax,1,MPI_REAL8,MPI_MAX,MPI_COMM_WORLD,mpierr)
normmax = norm
if (myid .eq. 0) then
print *," Maximum error of result: ", normmax
if (normmax .gt. 5e-11) then
status = 1
call timer%stop("program")
print *," "
print *,"Timings program:"
print *," "
call timer%print("program")
print *," "
print *,"End timings program"
print *," "
#ifdef WITH_MPI
call blacs_gridexit(my_blacs_ctxt)
call mpi_finalize(mpierr)
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment