!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!
!    More information can be found here:
!    http://elpa.rzg.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
#include "config-f90.h"
!>
!> Fortran test program demonstrating the use of the
!> ELPA 1 real case library.
!> If "HAVE_REDIRECT" was defined at build time
!> the stdout and stderr output of each MPI task
!> can be redirected to files if the environment
!> variable "REDIRECT_ELPA_TEST_OUTPUT" is set
!> to "true".
!>
!> By calling executable [arg1] [arg2] [arg3] [arg4]
!> one can define the matrix size (arg1), the number of
!> eigenvectors to compute (arg2), and the blocking (arg3).
!> If these values are not set, the default values
!> (4000, 1500, 16) are chosen.
!> If these values are set, the 4th argument can be
!> "output", which specifies that the eigenvalues are written to
!> an ASCII file.
!>
program test_real

!-------------------------------------------------------------------------------
! Standard eigenvalue problem - REAL version
!
! This program demonstrates the use of the ELPA module
! together with standard scalapack routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".
!
!-------------------------------------------------------------------------------

   use ELPA1
#ifdef WITH_OPENMP
   use test_util
#endif

#ifdef HAVE_ISO_FORTRAN_ENV
  use iso_fortran_env, only : error_unit
#endif
#ifdef HAVE_REDIRECT
  use redirect
#endif
#ifdef HAVE_DETAILED_TIMINGS
 use timings
#endif

   implicit none
   include 'mpif.h'

   !-------------------------------------------------------------------------------
   ! Please set system size parameters below!
   ! na:   System size
   ! nev:  Number of eigenvectors to be calculated
   ! nblk: Blocking factor in block cyclic distribution
   !-------------------------------------------------------------------------------
   integer :: nblk
   integer :: na, nev


   !-------------------------------------------------------------------------------
   !  Local Variables

   integer             :: np_rows, np_cols, na_rows, na_cols

   integer             :: myid, nprocs, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols
   integer             :: i, mpierr, my_blacs_ctxt, sc_desc(9), info, nprow, npcol

   integer, external   :: numroc

   real*8              :: err, errmax
   real*8, allocatable :: a(:,:), z(:,:), tmp1(:,:), tmp2(:,:), as(:,:), ev(:)

   integer             :: iseed(4096) ! Random seed, size should be sufficient for every generator


   integer             :: STATUS
#ifdef WITH_OPENMP
   integer             :: omp_get_max_threads,  required_mpi_thread_level, &
                          provided_mpi_thread_level
#endif
   logical             :: write_to_file
   !-------------------------------------------------------------------------------
   !  Parse command line arguments, if given
   character*16        :: arg1
   character*16        :: arg2
   character*16        :: arg3
   character*16        :: arg4

#ifndef HAVE_ISO_FORTRAN_ENV
  integer, parameter   :: error_unit = 6
#endif

   logical             :: success

   success = .true.

   write_to_file = .false.

   nblk = 16
   na = 4000
   nev = 1500

   if (COMMAND_ARGUMENT_COUNT() == 3) then
      call GET_COMMAND_ARGUMENT(1, arg1)
      call GET_COMMAND_ARGUMENT(2, arg2)
      call GET_COMMAND_ARGUMENT(3, arg3)

      read(arg1, *) na
      read(arg2, *) nev
      read(arg3, *) nblk
   endif

   if (COMMAND_ARGUMENT_COUNT() == 4) then
      call GET_COMMAND_ARGUMENT(1, arg1)
      call GET_COMMAND_ARGUMENT(2, arg2)
      call GET_COMMAND_ARGUMENT(3, arg3)
      call GET_COMMAND_ARGUMENT(4, arg4)
      read(arg1, *) na
      read(arg2, *) nev
      read(arg3, *) nblk

   endif

   !-------------------------------------------------------------------------------
   !  MPI Initialization
#ifndef WITH_OPENMP
   call mpi_init(mpierr)
#else
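   ! With OpenMP enabled, full MPI thread support (MPI_THREAD_MULTIPLE) is
   ! requested; the program aborts with exit code 77 if the MPI library
   ! cannot provide it (see the check below).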
   required_mpi_thread_level = MPI_THREAD_MULTIPLE

   call mpi_init_thread(required_mpi_thread_level,     &
                        provided_mpi_thread_level, mpierr)

   if (required_mpi_thread_level .ne. provided_mpi_thread_level) then
      write(error_unit,*) "MPI ERROR: MPI_THREAD_MULTIPLE is not provided on this system"
      write(error_unit,*) "           only ", mpi_thread_level_name(provided_mpi_thread_level), " is available"
      call exit(77)
   endif

#endif
   call mpi_comm_rank(mpi_comm_world,myid,mpierr)
   call mpi_comm_size(mpi_comm_world,nprocs,mpierr)

   if (COMMAND_ARGUMENT_COUNT() == 4) then
      if (arg4 .eq. "output") then
         write_to_file = .true.
         if (myid .eq. 0) print *,"Writing output files"
      endif
   endif

#ifdef HAVE_DETAILED_TIMINGS

   ! initialise the timing functionality

#ifdef HAVE_LIBPAPI
   call timer%measure_flops(.true.)
#endif

   call timer%measure_allocated_memory(.true.)
   call timer%measure_virtual_memory(.true.)
   call timer%measure_max_allocated_memory(.true.)

   call timer%set_print_options(&
#ifdef HAVE_LIBPAPI
                print_flop_count=.true., &
                print_flop_rate=.true., &
#endif
                print_allocated_memory = .true. , &
                print_virtual_memory=.true., &
                print_max_allocated_memory=.true.)


  call timer%enable()

  call timer%start("program")
#endif
   !-------------------------------------------------------------------------------
   ! Selection of number of processor rows/columns
   ! We try to set up the grid square-like, i.e. start the search for possible
   ! divisors of nprocs with a number next to the square root of nprocs
   ! and decrement it until a divisor is found.


   STATUS = 0
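   ! STATUS records the outcome of the accuracy checks below and is used
   ! as the exit code of the program (see the call to EXIT at the end).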
#ifdef WITH_OPENMP
   if (myid .eq. 0) then
      print *,"Threaded version of test program"
      print *,"Using ",omp_get_max_threads()," threads"
      print *," "
   endif
#endif
    call MPI_BARRIER(MPI_COMM_WORLD, mpierr)

#ifdef HAVE_REDIRECT
   if (check_redirect_environment_variable()) then
     if (myid .eq. 0) then
       print *," "
       print *,"Redirection of mpi processes is used"
       print *," "
       if (create_directories() .ne. 1) then
         write(error_unit,*) "Unable to create directory for stdout and stderr!"
         stop
       endif
      endif
      call MPI_BARRIER(MPI_COMM_WORLD, mpierr)
      call redirect_stdout(myid)
    endif
#endif

   do np_cols = NINT(SQRT(REAL(nprocs))),2,-1
      if(mod(nprocs,np_cols) == 0 ) exit
   enddo
   ! at the end of the above loop, nprocs is always divisible by np_cols

   np_rows = nprocs/np_cols

   if(myid==0) then
      print *
      print '(a)','Standard eigenvalue problem - REAL version'
      print *
      print '(3(a,i0))','Matrix size=',na,', Number of eigenvectors=',nev,', Block size=',nblk
      print '(3(a,i0))','Number of processor rows=',np_rows,', cols=',np_cols,', total=',nprocs
      print *
   endif

   !-------------------------------------------------------------------------------
   ! Set up BLACS context and MPI communicators
   !
   ! The BLACS context is only necessary for using Scalapack.
   !
   ! For ELPA, the MPI communicators along rows/cols are sufficient,
   ! and the grid setup may be done in an arbitrary way as long as it is
   ! consistent (i.e. 0<=my_prow<np_rows, 0<=my_pcol<np_cols and every
   ! process has a unique (my_prow,my_pcol) pair).
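   !
   ! Note: BLACS_Gridinit takes a system context on input (here simply
   ! mpi_comm_world) and overwrites my_blacs_ctxt with the handle of the
   ! newly created process grid.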

   my_blacs_ctxt = mpi_comm_world
   call BLACS_Gridinit( my_blacs_ctxt, 'C', np_rows, np_cols )
   call BLACS_Gridinfo( my_blacs_ctxt, nprow, npcol, my_prow, my_pcol )

   if (myid==0) then
     print '(a)','| Past BLACS_Gridinfo.'
   end if

   ! All ELPA routines need MPI communicators for communicating within
   ! rows or columns of processes, these are set in get_elpa_row_col_comms.

   mpierr = get_elpa_row_col_comms(mpi_comm_world, my_prow, my_pcol, &
                                   mpi_comm_rows, mpi_comm_cols)

   if (myid==0) then
     print '(a)','| Past split communicator setup for rows and columns.'
   end if

   ! Determine the necessary size of the distributed matrices,
   ! we use the Scalapack tools routine NUMROC for that.

   na_rows = numroc(na, nblk, my_prow, 0, np_rows)
   na_cols = numroc(na, nblk, my_pcol, 0, np_cols)

   ! Set up a scalapack descriptor for the checks below.
   ! For ELPA the following restrictions hold:
   ! - block sizes in both directions must be identical (args 4+5)
   ! - first row and column of the distributed matrix must be on row/col 0/0 (args 6+7)

   call descinit( sc_desc, na, na, nblk, nblk, 0, 0, my_blacs_ctxt, na_rows, info )
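   ! descinit signals an illegal argument through a non-zero info; a value
   ! of 0 means success (info is not checked here).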

   if (myid==0) then
     print '(a)','| Past scalapack descriptor setup.'
   end if

   !-------------------------------------------------------------------------------
   ! Allocate matrices and set up a test matrix for the eigenvalue problem
#ifdef HAVE_DETAILED_TIMINGS
   call timer%start("set up matrix")
#endif
   allocate(a (na_rows,na_cols))
   allocate(z (na_rows,na_cols))
   allocate(as(na_rows,na_cols))

   allocate(ev(na))

   ! For getting a symmetric test matrix A we get a random matrix Z
   ! and calculate A = Z + Z**T

   ! We want different random numbers on every process
   ! (otherwise A might become rank deficient):

   iseed(:) = myid
   call RANDOM_SEED(put=iseed)

   call RANDOM_NUMBER(z)

   a(:,:) = z(:,:)

   if (myid==0) then
     print '(a)','| Random matrix block has been set up. (only processor 0 confirms this step)'
   end if

   call pdtran(na, na,  1.d0, z, 1, 1, sc_desc, 1.d0, a, 1, 1, sc_desc) ! A = A + Z**T

   if (myid==0) then
     print '(a)','| Random matrix has been symmetrized.'
   end if

   ! Save original matrix A for later accuracy checks

   as = a
#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("set up matrix")
#endif

   !-------------------------------------------------------------------------------
   ! Calculate eigenvalues/eigenvectors

   if (myid==0) then
     print '(a)','| Entering one-step ELPA solver ... '
     print *
   end if

   call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
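   ! solve_evp_real returns the eigenvalues of the distributed symmetric
   ! matrix a in ev and the first nev eigenvectors in z; na_rows is the
   ! local leading dimension of both a and z.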
   success = solve_evp_real(na, nev, a, na_rows, ev, z, na_rows, nblk, &
                          mpi_comm_rows, mpi_comm_cols)

   if (.not.(success)) then
      write(error_unit,*) "solve_evp_real produced an error! Aborting..."
      call MPI_ABORT(mpi_comm_world, mpierr)
   endif


   if (myid==0) then
     print '(a)','| One-step ELPA solver complete.'
     print *
   end if
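
   ! time_evp_fwd, time_evp_solve and time_evp_back are module variables of
   ! ELPA1; solve_evp_real fills them with the run times of the three phases
   ! of the solver (tridiagonalization, tridiagonal solve, back transformation).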

   if(myid == 0) print *,'Time tridiag_real     :',time_evp_fwd
   if(myid == 0) print *,'Time solve_tridi      :',time_evp_solve
   if(myid == 0) print *,'Time trans_ev_real    :',time_evp_back
   if(myid == 0) print *,'Total time (sum above):',time_evp_back+time_evp_solve+time_evp_fwd
   if(write_to_file) then
      if (myid == 0) then
         open(17,file="EVs_real_out.txt",form='formatted',status='new')
         do i=1,na
            write(17,*) i,ev(i)
         enddo
         close(17)
      endif
   endif

   !-------------------------------------------------------------------------------
   ! Test correctness of result (using plain scalapack routines)

   deallocate(a)
   allocate(tmp1(na_rows,na_cols))

   ! 1. Residual (maximum of || A*Zi - Zi*EVi ||)

   ! tmp1 =  A * Z
   call pdgemm('N','N',na,nev,na,1.d0,as,1,1,sc_desc, &
           z,1,1,sc_desc,0.d0,tmp1,1,1,sc_desc)

   deallocate(as)
   allocate(tmp2(na_rows,na_cols))

   ! tmp2 = Zi*EVi
   tmp2(:,:) = z(:,:)
   do i=1,nev
      call pdscal(na,ev(i),tmp2,1,i,sc_desc,1)
   enddo

   !  tmp1 = A*Zi - Zi*EVi
   tmp1(:,:) =  tmp1(:,:) - tmp2(:,:)

   ! Get maximum norm of columns of tmp1
   errmax = 0
   do i=1,nev
      err = 0
      call pdnrm2(na,err,tmp1,1,i,sc_desc,1)
      errmax = max(errmax, err)
   enddo

   ! Get maximum error norm over all processors
   err = errmax
   call mpi_allreduce(err,errmax,1,MPI_REAL8,MPI_MAX,MPI_COMM_WORLD,mpierr)
   if(myid==0) print *
   if(myid==0) print *,'Error Residual     :',errmax


   if (errmax .gt. 5e-12) then
      status = 1
   endif


   ! 2. Eigenvector orthogonality

   ! tmp1 = Z**T * Z
   tmp1 = 0
   call pdgemm('T','N',nev,nev,na,1.d0,z,1,1,sc_desc, &
           z,1,1,sc_desc,0.d0,tmp1,1,1,sc_desc)
   ! Initialize tmp2 to unit matrix
   tmp2 = 0
   call pdlaset('A',nev,nev,0.d0,1.d0,tmp2,1,1,sc_desc)

   ! tmp1 = Z**T * Z - Unit Matrix
   tmp1(:,:) =  tmp1(:,:) - tmp2(:,:)

   ! Get maximum error (max abs value in tmp1)
   err = maxval(abs(tmp1))
   call mpi_allreduce(err,errmax,1,MPI_REAL8,MPI_MAX,MPI_COMM_WORLD,mpierr)
   if(myid==0) print *,'Error Orthogonality:',errmax

   if (errmax .gt. 5e-12) then
      status = 1
   endif

   deallocate(z)
   deallocate(tmp1)
   deallocate(tmp2)
   deallocate(ev)
#ifdef HAVE_DETAILED_TIMINGS
   call timer%stop("program")
   print *," "
   print *,"Timings program:"
   print *," "
   call timer%print("program")
   print *," "
   print *,"End timings program"
   print *," "
   print *,"End timings program"
#endif
   call blacs_gridexit(my_blacs_ctxt)
   call mpi_finalize(mpierr)

   call EXIT(STATUS)


end

!-------------------------------------------------------------------------------