!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
#include "config-f90.h"
!>

program test_transpose_multiply

   use elpa1
   use elpa_utilities, only : error_unit
   use test_util

   use test_read_input_parameters
   use test_check_correctness
   use test_setup_mpi
   use test_blacs_infrastructure
   use test_prepare_matrix

#ifdef HAVE_REDIRECT
   use test_redirect
#endif
   use test_output_type

   implicit none

   !-------------------------------------------------------------------------------
   ! System size parameters (read from the test input below):
   ! na:   System size
   ! nev:  Number of eigenvectors (read for compatibility with the other test
   !       programs; not used by the multiplication itself)
   ! nblk: Blocking factor in block cyclic distribution
   !-------------------------------------------------------------------------------
   integer(kind=ik)           :: nblk
   integer(kind=ik)           :: na, nev

   integer(kind=ik)           :: np_rows, np_cols, na_rows, na_cols

   integer(kind=ik)           :: myid, nprocs, my_prow, my_pcol, mpi_comm_rows, mpi_comm_cols
   integer(kind=ik)           :: i, mpierr, my_blacs_ctxt, sc_desc(9), info, nprow, npcol

   integer, external          :: numroc

   real(kind=rk8), allocatable :: a(:,:), b(:,:), c(:,:), z(:,:), tmp1(:,:), tmp2(:,:), as(:,:), ev(:)

   real(kind=rk8)              :: norm, normmax
#ifdef WITH_MPI
   real(kind=rk8)              :: pdlange
#else
   real(kind=rk8)              :: dlange
#endif
   integer(kind=ik)           :: STATUS
#ifdef WITH_OPENMP
   integer(kind=ik)           :: omp_get_max_threads,  required_mpi_thread_level, &
                                 provided_mpi_thread_level
#endif
   type(output_t)             :: write_to_file
   logical                    :: success
   character(len=8)           :: task_suffix
   integer(kind=ik)           :: j
   !-------------------------------------------------------------------------------

   success = .true.

   call read_input_parameters_traditional(na, nev, nblk, write_to_file)
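   ! read_input_parameters_traditional fills na, nev, nblk and the file-output
   ! flag from the test's command line (see the test_read_input_parameters module).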

   !-------------------------------------------------------------------------------
   !  MPI Initialization
   call setup_mpi(myid, nprocs)

   STATUS = 0

   do np_cols = NINT(SQRT(REAL(nprocs))),2,-1
      if(mod(nprocs,np_cols) == 0 ) exit
   enddo

   ! at the end of the above loop, nprocs is always divisible by np_cols

   np_rows = nprocs/np_cols
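   ! Example: nprocs = 6 yields np_cols = 2 and np_rows = 3; for a prime nprocs
   ! no divisor > 1 is found and the grid degenerates to nprocs x 1.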

   if(myid==0) then
      print '(3(a,i0))','Matrix size=',na,', Block size=',nblk
      print '(3(a,i0))','Number of processor rows=',np_rows,', cols=',np_cols,', total=',nprocs
      print *
   endif

   !-------------------------------------------------------------------------------
   ! Set up BLACS context and MPI communicators
   !
   ! The BLACS context is only necessary for using Scalapack.
   !
   ! For ELPA, the MPI communicators along rows/cols are sufficient,
   ! and the grid setup may be done in an arbitrary way as long as it is
   ! consistent (i.e. 0<=my_prow<np_rows, 0<=my_pcol<np_cols and every
   ! process has a unique (my_prow,my_pcol) pair).
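   ! The 'C' layout argument below requests column-major ordering of the process
   ! grid (presumably forwarded to blacs_gridinit by the set_up_blacsgrid helper).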

   call set_up_blacsgrid(mpi_comm_world, np_rows, np_cols, 'C', &
                         my_blacs_ctxt, my_prow, my_pcol)

   if (myid==0) then
     print '(a)','| Past BLACS_Gridinfo.'
   end if

   ! All ELPA routines need MPI communicators for communicating within
   ! rows or columns of processes; these are set in elpa_get_communicators.

   mpierr = elpa_get_communicators(mpi_comm_world, my_prow, my_pcol, &
                                   mpi_comm_rows, mpi_comm_cols)

   if (myid==0) then
     print '(a)','| Past split communicator setup for rows and columns.'
   end if

   call set_up_blacs_descriptor(na ,nblk, my_prow, my_pcol, np_rows, np_cols, &
                                na_rows, na_cols, sc_desc, my_blacs_ctxt, info)
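   ! na_rows/na_cols are the local dimensions of the block-cyclically distributed
   ! matrices and sc_desc is the ScaLAPACK descriptor used by pdtran, pdgemm and
   ! pdlange below (the helper presumably obtains them via numroc and descinit).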

   if (myid==0) then
     print '(a)','| Past scalapack descriptor setup.'
   end if

   !-------------------------------------------------------------------------------
   ! Allocate matrices and set up a test matrix for the eigenvalue problem
   allocate(a (na_rows,na_cols))
   allocate(b (na_rows,na_cols))
   allocate(c (na_rows,na_cols))

   allocate(z (na_rows,na_cols))
   allocate(as(na_rows,na_cols))

   allocate(ev(na))

   call prepare_matrix_random(na, myid, sc_desc, a, z, as)
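   ! The second factor is a scaled copy of a, so the expected result is
   ! c = a**T * (2*a); c is cleared before the multiplication.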

   b(:,:) = 2.0_rk8 * a(:,:)
   c(:,:) = 0.0_rk8


   !-------------------------------------------------------------------------------
   ! Multiply: c = a**T * b

   if (myid==0) then
     print '(a)','| Compute c = a**T * b ... '
     print *
   end if
#ifdef WITH_MPI
   call mpi_barrier(mpi_comm_world, mpierr) ! for correct timings only
#endif
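   ! Legacy ELPA interface for c = a**T * b: the first two flags choose which
   ! triangle of a is referenced and which part of c is computed ("F", i.e.
   ! anything other than 'U'/'L', means the full matrices). They are followed by
   ! the global dimensions, the local arrays with their local row/column counts,
   ! the block size and the row/column communicators obtained above.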
   success = elpa_mult_at_b_real_double("F","F", na, na, a, na_rows, na_cols, b, na_rows, &
                                        na_cols, nblk, mpi_comm_rows, mpi_comm_cols, c,   &
                                        na_rows, na_cols)

   if (.not.(success)) then
      write(error_unit,*) "elpa_mult_at_b_real produced an error! Aborting..."
#ifdef WITH_MPI
      call MPI_ABORT(mpi_comm_world, 1, mpierr)
#else
      call exit(1)
#endif
   endif


   if (myid==0) then
     print '(a)','| Multiplication c = a**T * b complete.'
     print *
   end if


   !-------------------------------------------------------------------------------
   ! Test correctness of result (using plain scalapack routines)
   allocate(tmp1(na_rows,na_cols))
   allocate(tmp2(na_rows,na_cols))

   tmp1(:,:) = 0.0_rk8

   ! tmp1 = a**T
#ifdef WITH_MPI
   call pdtran(na, na, 1.0_rk8, a, 1, 1, sc_desc, 0.0_rk8, tmp1, 1, 1, sc_desc)
#else
   tmp1 = transpose(a)
#endif
   ! tmp2 = tmp1 * b
#ifdef WITH_MPI
   call pdgemm("N","N", na, na, na, 1.0_rk8, tmp1, 1, 1, sc_desc, b, 1, 1, &
               sc_desc, 0.0_rk8, tmp2, 1, 1, sc_desc)
#else
   call dgemm("N","N", na, na, na, 1.0_rk8, tmp1, na, b, na, 0.0_rk8, tmp2, na)
#endif

   ! compare tmp2 with c
   tmp2(:,:) = tmp2(:,:) - c(:,:)
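   ! Largest absolute entry of the difference (norm "M" in dlange/pdlange); with
   ! MPI, the MPI_MAX reduction below leaves the global maximum on every rank.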

#ifdef WITH_MPI
   norm = pdlange("M", na, na, tmp2, 1, 1, sc_desc, tmp1)
#else
   norm = dlange("M", na, na, tmp2, na_rows, tmp1)
#endif

#ifdef WITH_MPI
   call mpi_allreduce(norm,normmax,1,MPI_REAL8,MPI_MAX,MPI_COMM_WORLD,mpierr)
#else
   normmax = norm
#endif
   if (myid .eq. 0) then
     print *," Maximum error of result: ", normmax
   endif
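   ! A deviation above the tolerance marks the run as failed; the nonzero STATUS
   ! is handed to EXIT at the end so callers see a failing exit code.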

   if (normmax .gt. 5e-11_rk8) then
        status = 1
   endif

   deallocate(a)
   deallocate(b)
   deallocate(c)

   deallocate(as)

   deallocate(z)
   deallocate(tmp1)
   deallocate(tmp2)
   deallocate(ev)

#ifdef WITH_MPI
   call blacs_gridexit(my_blacs_ctxt)
   call mpi_finalize(mpierr)
#endif

   call EXIT(STATUS)


end

!-------------------------------------------------------------------------------