!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
#include "config-f90.h"
!>

program test_invert_trm

   use precision
   use ELPA1
   use elpa_utilities
#ifdef WITH_OPENMP
   use test_util
#endif

   use mod_read_input_parameters
   use mod_check_correctness
   use mod_setup_mpi
   use mod_blacs_infrastructure
   use mod_prepare_matrix

   use elpa_mpi
#ifdef HAVE_REDIRECT
   use redirect
#endif
#ifdef HAVE_DETAILED_TIMINGS
  use timings
#endif
  use output_types

   implicit none

   !-------------------------------------------------------------------------------
   ! Test parameters (read from the command line via read_input_parameters):
   ! na:   System size
   ! nev:  Number of eigenvectors to be calculated
   ! nblk: Blocking factor in block cyclic distribution
   !-------------------------------------------------------------------------------
   integer(kind=ik)           :: nblk
   integer(kind=ik)           :: na, nev

   integer(kind=ik)           :: np_rows, np_cols, na_rows, na_cols

   integer(kind=ik)           :: myid, nprocs, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols
   integer(kind=ik)           :: i, mpierr, my_blacs_ctxt, sc_desc(9), info, nprow, npcol

   integer, external          :: numroc

   real(kind=rk8), allocatable    :: ev(:), xr(:,:)
   complex(kind=ck8), allocatable :: a(:,:), b(:,:), c(:,:), z(:,:), tmp1(:,:), tmp2(:,:), as(:,:)
   complex(kind=ck8), allocatable :: d(:), e(:), bs(:,:)
   complex(kind=ck8)              :: diagonalElement, subdiagonalElement
   integer(kind=ik)           :: loctmp ,rowLocal, colLocal
   complex(kind=ck8), parameter   :: CZERO = (0._rk8,0._rk8), CONE = (1._rk8,0._rk8)
   real(kind=rk8)              :: norm, normmax
#ifdef WITH_MPI
   real(kind=rk8)              :: pzlange
#else
   real(kind=rk8)              :: zlange
#endif

   integer(kind=ik)           :: iseed(4096) ! Random seed, size should be sufficient for every generator
   complex(kind=ck8), parameter   :: pi = (3.141592653589793238462643383279_rk8, 0._rk8)

   integer(kind=ik)           :: STATUS
#ifdef WITH_OPENMP
   integer(kind=ik)           :: omp_get_max_threads,  required_mpi_thread_level, &
                                 provided_mpi_thread_level
#endif
   type(output_t)             :: write_to_file
   logical                    :: success
   character(len=8)           :: task_suffix
   integer(kind=ik)           :: j
   !-------------------------------------------------------------------------------

   success = .true.

   call read_input_parameters(na, nev, nblk, write_to_file)

   !-------------------------------------------------------------------------------
   !  MPI Initialization
   call setup_mpi(myid, nprocs)

   STATUS = 0

#ifdef HAVE_DETAILED_TIMINGS

   ! initialise the timing functionality

#ifdef HAVE_LIBPAPI
   call timer%measure_flops(.true.)
#endif

   call timer%measure_allocated_memory(.true.)
   call timer%measure_virtual_memory(.true.)
   call timer%measure_max_allocated_memory(.true.)

   call timer%set_print_options(&
#ifdef HAVE_LIBPAPI
                print_flop_count=.true., &
                print_flop_rate=.true., &
#endif
                print_allocated_memory = .true. , &
                print_virtual_memory=.true., &
                print_max_allocated_memory=.true.)


  call timer%enable()

  call timer%start("program")
#endif

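   ! Choose np_cols as the largest divisor of nprocs not exceeding
   ! sqrt(nprocs), giving a process grid as close to square as possible.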
   do np_cols = NINT(SQRT(REAL(nprocs))),2,-1
      if(mod(nprocs,np_cols) == 0 ) exit
   enddo

   ! at the end of the above loop, nprocs is always divisible by np_cols

   np_rows = nprocs/np_cols

   if(myid==0) then
      print '(2(a,i0))','Matrix size=',na,', Block size=',nblk
      print '(3(a,i0))','Number of processor rows=',np_rows,', cols=',np_cols,', total=',nprocs
      print *
   endif

   !-------------------------------------------------------------------------------
   ! Set up BLACS context and MPI communicators
   !
   ! The BLACS context is only necessary for using Scalapack.
   !
   ! For ELPA, the MPI communicators along rows/cols are sufficient,
   ! and the grid setup may be done in an arbitrary way as long as it is
   ! consistent (i.e. 0<=my_prow<np_rows, 0<=my_pcol<np_cols and every
   ! process has a unique (my_prow,my_pcol) pair).

   call set_up_blacsgrid(mpi_comm_world, my_blacs_ctxt, np_rows, np_cols, &
                         nprow, npcol, my_prow, my_pcol)

   if (myid==0) then
     print '(a)','| Past BLACS_Gridinfo.'
   end if

   ! All ELPA routines need MPI communicators for communicating within
   ! rows or columns of processes; these are set up in elpa_get_communicators.

   mpierr = elpa_get_communicators(mpi_comm_world, my_prow, my_pcol, &
                                   mpi_comm_rows, mpi_comm_cols)

   if (myid==0) then
     print '(a)','| Past split communicator setup for rows and columns.'
   end if

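   ! Create the ScaLAPACK descriptor sc_desc (9 integers) that describes the
   ! block-cyclic distribution of the na x na matrices on the BLACS grid.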
   call set_up_blacs_descriptor(na ,nblk, my_prow, my_pcol, np_rows, np_cols, &
                                na_rows, na_cols, sc_desc, my_blacs_ctxt, info)

   if (myid==0) then
     print '(a)','| Past scalapack descriptor setup.'
   end if

   !-------------------------------------------------------------------------------
   ! Allocate matrices and set up the test matrices
#ifdef HAVE_DETAILED_TIMINGS
   call timer%start("set up matrix")
#endif
   allocate(a (na_rows,na_cols))
   allocate(b (na_rows,na_cols))
   allocate(bs(na_rows,na_cols))
   allocate(c (na_rows,na_cols))

   allocate(z (na_rows,na_cols))
   allocate(as(na_rows,na_cols))

   allocate(ev(na))
   allocate(xr(na_rows,na_cols))
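
   ! Fill b with a random test matrix; xr supplies random real numbers and
   ! bs receives a copy of b for the residual check at the end.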
   call prepare_matrix_double(na, myid, sc_desc, iseed, xr, b, z, bs)
   deallocate(xr)
   bs(:,:) = b(:,:)

    a(:,:) = CZERO
    diagonalElement = (2.546_rk8, 0.0_rk8)
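
    ! Build a as a real-valued diagonal (hence Hermitian) matrix with
    ! nonnegative entries for the Cholesky factorization below.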
    do i = 1, na
      if (map_global_array_index_to_local_index(i, i, rowLocal, colLocal, nblk, np_rows, np_cols, my_prow, my_pcol)) then
        a(rowLocal,colLocal) = diagonalElement * abs(cos( pi*real(i,kind=rk8)/ real(na+1,kind=rk8) ))
      endif
    enddo
    as(:,:) = a(:,:)

#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("set up matrix")
#endif

   !-------------------------------------------------------------------------------
   ! Cholesky factorization and inversion of the resulting triangular factor

   if (myid==0) then
     print '(a)','| Computing Cholesky factorization ... '
     print *
   end if
#ifdef WITH_MPI
   call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
#endif

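   ! Factor a in place: on success the upper triangle of a holds the
   ! Cholesky factor U with a = U^H * U.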
   success = elpa_cholesky_complex_double(na, a, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, .true.)

   if (.not.(success)) then
      write(error_unit,*) " elpa_cholesky_complex produced an error! Aborting..."
#ifdef WITH_MPI
      call MPI_ABORT(mpi_comm_world, 1, mpierr)
#endif
   endif

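   ! Keep a copy of the factor U; a itself is overwritten with U^-1 in the
   ! next step, and the residual check needs both.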
   as(:,:) = a(:,:)

   if (myid==0) then
     print '(a)','| Cholesky factorization complete.'
     print *
   end if

   if (myid==0) then
     print '(a)','| Inverting triangular matrix ... '
     print *
   end if
#ifdef WITH_MPI
   call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
#endif

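   ! Invert the triangular factor in place: a is overwritten with U^-1.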
   success = elpa_invert_trm_complex_double(na, a, na_rows, nblk, na_cols, mpi_comm_rows, mpi_comm_cols, .true.)

   if (.not.(success)) then
      write(error_unit,*) " elpa_invert_trm_complex produced an error! Aborting..."
#ifdef WITH_MPI
      call MPI_ABORT(mpi_comm_world, 1, mpierr)
#endif
   endif


   if (myid==0) then
     print '(a)','| Inversion of triangular matrix complete.'
     print *
   end if

   !-------------------------------------------------------------------------------
   ! Test correctness of result (using plain scalapack routines)
   allocate(tmp1(na_rows,na_cols))
   allocate(tmp2(na_rows,na_cols))

   tmp1(:,:) = CZERO

   ! tmp1 = as * a = U * U^-1, which should be the identity matrix
#ifdef WITH_MPI
   call pzgemm("N","N", na, na, na, CONE, as, 1, 1, sc_desc, a, 1, 1, &
               sc_desc, CZERO, tmp1, 1, 1, sc_desc)
#else
   call zgemm("N","N", na, na, na, CONE, as, na, a, na, CZERO, tmp1, na)
#endif

   ! tmp2 = b * tmp1
   tmp2(:,:) = CZERO
#ifdef WITH_MPI
   call pzgemm("N","N", na, na, na, CONE, b, 1, 1, sc_desc, tmp1, 1, 1, &
               sc_desc, CZERO, tmp2, 1, 1, sc_desc)
#else
   call zgemm("N","N", na, na, na, CONE, b, na, tmp1, na, CZERO, tmp2, na)
#endif

   ! compare tmp2 with bs (the saved copy of b); the difference should vanish
   tmp2(:,:) = tmp2(:,:) - bs(:,:)
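
   ! Measure the residual with the max ("M") matrix norm; tmp1 is passed as
   ! the work array, which is not referenced for this norm. With MPI the
   ! allreduce below makes the global maximum available on every rank.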
   tmp1(:,:) = CZERO
#ifdef WITH_MPI
   norm = pzlange("M",na, na, tmp2, 1, 1, sc_desc, tmp1)
#else
   norm = zlange("M",na, na, tmp2, na_rows, tmp1)
#endif
#ifdef WITH_MPI
   call mpi_allreduce(norm,normmax,1,MPI_REAL8,MPI_MAX,MPI_COMM_WORLD,mpierr)
#else
   normmax = norm
#endif
   if (myid .eq. 0) then
     print *," Maximum error of result: ", normmax
   endif

   if (normmax .gt. 5e-11_rk8) then
      status = 1
   endif

   deallocate(a)
   deallocate(b)
   deallocate(bs)
   deallocate(c)

   deallocate(as)

   deallocate(z)
   deallocate(tmp1)
   deallocate(tmp2)
   deallocate(ev)


#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("program")
   print *," "
   print *,"Timings program:"
   print *," "
   call timer%print("program")
   print *," "
   print *,"End timings program"
   print *," "
#endif

#ifdef WITH_MPI
   call blacs_gridexit(my_blacs_ctxt)
   call mpi_finalize(mpierr)
#endif

   call EXIT(STATUS)


end program test_invert_trm

!-------------------------------------------------------------------------------