!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!      enhancements authored by Intel Corporation, which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of the version 3 of the license of the
!    GNU Lesser General Public License as published by the Free
!    Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".
!
! Author: A. Marek, MPCDF


#include "../general/sanity.X90"

#ifdef HAVE_DETAILED_TIMINGS
      use timings
#else
      use timings_dummy
#endif
      use elpa1_compute
      use elpa_mpi
      use precision
      implicit none

      character*1                   :: uplo_a, uplo_c

      integer(kind=ik), intent(in)  :: na, lda, ldaCols, ldb, ldbCols, ldc, ldcCols, nblk
      integer(kind=ik)              :: ncb, mpi_comm_rows, mpi_comm_cols
#if REALCASE == 1
#ifdef USE_ASSUMED_SIZE
      real(kind=REAL_DATATYPE)                 :: a(lda,*), b(ldb,*), c(ldc,*)
#else
      real(kind=REAL_DATATYPE)                 :: a(lda,ldaCols), b(ldb,ldbCols), c(ldc,ldcCols)
#endif
#endif
#if COMPLEXCASE == 1
#ifdef USE_ASSUMED_SIZE
      complex(kind=COMPLEX_DATATYPE)           ::  a(lda,*), b(ldb,*), c(ldc,*)
#else
      complex(kind=COMPLEX_DATATYPE)           :: a(lda,ldaCols), b(ldb,ldbCols), c(ldc,ldcCols)
#endif
#endif
      integer(kind=ik)              :: my_prow, my_pcol, np_rows, np_cols, mpierr
      integer(kind=ik)              :: l_cols, l_rows, l_rows_np
      integer(kind=ik)              :: np, n, nb, nblk_mult, lrs, lre, lcs, lce
      integer(kind=ik)              :: gcol_min, gcol, goff
      integer(kind=ik)              :: nstor, nr_done, noff, np_bc, n_aux_bc, nvals
      integer(kind=ik), allocatable :: lrs_save(:), lre_save(:)

      logical                       :: a_lower, a_upper, c_lower, c_upper
#if REALCASE == 1
      real(kind=REAL_DATATYPE), allocatable    :: aux_mat(:,:), aux_bc(:), tmp1(:,:), tmp2(:,:)
#endif
#if COMPLEXCASE == 1
      complex(kind=COMPLEX_DATATYPE), allocatable :: aux_mat(:,:), aux_bc(:), tmp1(:,:), tmp2(:,:)
#endif
      integer(kind=ik)              :: istat
      character(200)                :: errorMessage
      logical                       :: success

      call timer%start("elpa_mult_at_b_&
      &MATH_DATATYPE&
      &_&
      &PRECISION&
      &")

      success = .true.

      call timer%start("mpi_communication")
      call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
      call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
      call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)
      call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)
      call timer%stop("mpi_communication")
      l_rows = local_index(na,  my_prow, np_rows, nblk, -1) ! Local rows of a and b
      l_cols = local_index(ncb, my_pcol, np_cols, nblk, -1) ! Local cols of b

      ! Block factor for matrix multiplications, must be a multiple of nblk

      if (na/np_rows<=256) then
         nblk_mult = (31/nblk+1)*nblk
      else
         nblk_mult = (63/nblk+1)*nblk
      endif
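      ! (31/nblk+1)*nblk resp. (63/nblk+1)*nblk is the smallest multiple of nblk
      ! that is at least 32 resp. 64, i.e. fewer columns are buffered per multiply
      ! when the local problem size na/np_rows is small.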

      allocate(aux_mat(l_rows,nblk_mult), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"elpa_mult_at_b_&
        &MATH_DATATYPE&
        &: error when allocating aux_mat "//errorMessage
        stop 1
      endif

      allocate(aux_bc(l_rows*nblk), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"elpa_mult_at_b_&
        &MATH_DATATYPE&
        &: error when allocating aux_bc "//errorMessage
        stop 1
      endif

      allocate(lrs_save(nblk), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"elpa_mult_at_b_&
        &MATH_DATATYPE&
        &: error when allocating lrs_save "//errorMessage
        stop 1
      endif

      allocate(lre_save(nblk), stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
        print *,"elpa_mult_at_b_&
        &MATH_DATATYPE&
        &: error when allocating lre_save "//errorMessage
        stop 1
      endif

      a_lower = .false.
      a_upper = .false.
      c_lower = .false.
      c_upper = .false.

      if (uplo_a=='u' .or. uplo_a=='U') a_upper = .true.
      if (uplo_a=='l' .or. uplo_a=='L') a_lower = .true.
      if (uplo_c=='u' .or. uplo_c=='U') c_upper = .true.
      if (uplo_c=='l' .or. uplo_c=='L') c_lower = .true.
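
      ! uplo_a restricts which triangle of a is referenced (the rest is treated
      ! as zero); uplo_c restricts which triangle of c is computed.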

      ! Build up the result matrix by processor rows

      do np = 0, np_rows-1

        ! In this turn, procs of row np assemble the result

        l_rows_np = local_index(na, np, np_rows, nblk, -1) ! local rows on receiving processors

        nr_done = 0 ! Number of rows done
        aux_mat = 0
        nstor = 0   ! Number of columns stored in aux_mat

        ! Loop over the blocks on row np

        do nb=0,(l_rows_np-1)/nblk

          goff  = nb*np_rows + np ! Global offset in blocks corresponding to nb

          ! Get the processor column which owns this block (A is transposed, so we need the column)
          ! and the offset in blocks within this column.
          ! The corresponding block column in A is then broadcast to all for multiplication with B

          np_bc = MOD(goff,np_cols)
          noff = goff/np_cols
          n_aux_bc = 0

          ! Gather up the complete block column of A on the owner

          do n = 1, min(l_rows_np-nb*nblk,nblk) ! Loop over columns to be broadcast

            gcol = goff*nblk + n ! global column corresponding to n
            if (nstor==0 .and. n==1) gcol_min = gcol
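            ! gcol_min is the first global column buffered in aux_mat; it is
            ! needed later to limit the multiply range in the triangular cases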

            lrs = 1       ! 1st local row number for broadcast
            lre = l_rows  ! last local row number for broadcast
            if (a_lower) lrs = local_index(gcol, my_prow, np_rows, nblk, +1)
            if (a_upper) lre = local_index(gcol, my_prow, np_rows, nblk, -1)

            if (lrs<=lre) then
              nvals = lre-lrs+1
              if (my_pcol == np_bc) aux_bc(n_aux_bc+1:n_aux_bc+nvals) = a(lrs:lre,noff*nblk+n)
              n_aux_bc = n_aux_bc + nvals
            endif

            lrs_save(n) = lrs
            lre_save(n) = lre

          enddo

          ! Broadcast block column
#ifdef WITH_MPI
          call timer%start("mpi_communication")
#if REALCASE == 1
          call MPI_Bcast(aux_bc, n_aux_bc,       &
                         MPI_REAL_PRECISION,     &
                         np_bc, mpi_comm_cols, mpierr)
#endif
#if COMPLEXCASE == 1
          call MPI_Bcast(aux_bc, n_aux_bc,       &
                         MPI_COMPLEX_PRECISION,  &
                         np_bc, mpi_comm_cols, mpierr)
#endif
          call timer%stop("mpi_communication")
#endif /* WITH_MPI */
          ! Insert what we got in aux_mat

          n_aux_bc = 0
          do n = 1, min(l_rows_np-nb*nblk,nblk)
            nstor = nstor+1
            lrs = lrs_save(n)
            lre = lre_save(n)
            if (lrs<=lre) then
              nvals = lre-lrs+1
              aux_mat(lrs:lre,nstor) = aux_bc(n_aux_bc+1:n_aux_bc+nvals)
              n_aux_bc = n_aux_bc + nvals
            endif
          enddo

          ! If we got nblk_mult columns in aux_mat or this is the last block
          ! do the matrix multiplication

          if (nstor==nblk_mult .or. nb*nblk+nblk >= l_rows_np) then

            lrs = 1       ! 1st local row number for multiply
            lre = l_rows  ! last local row number for multiply
            if (a_lower) lrs = local_index(gcol_min, my_prow, np_rows, nblk, +1)
            if (a_upper) lre = local_index(gcol, my_prow, np_rows, nblk, -1)

            lcs = 1       ! 1st local col number for multiply
            lce = l_cols  ! last local col number for multiply
            if (c_upper) lcs = local_index(gcol_min, my_pcol, np_cols, nblk, +1)
            if (c_lower) lce = MIN(local_index(gcol, my_pcol, np_cols, nblk, -1),l_cols)
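
            ! The buffered columns of a correspond to global rows gcol_min..gcol of c.
            ! For triangular a only the rows of a inside the triangle contribute, and
            ! for triangular c only the columns inside the requested triangle are computed.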

            if (lcs<=lce) then
              allocate(tmp1(nstor,lcs:lce),tmp2(nstor,lcs:lce), stat=istat, errmsg=errorMessage)
              if (istat .ne. 0) then
               print *,"elpa_mult_at_b_&
               &MATH_DATATYPE&
               &: error when allocating tmp1 "//errorMessage
               stop 1
              endif

              if (lrs<=lre) then
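                ! Local contribution:
                !   tmp1 = aux_mat(lrs:lre,1:nstor)**T * b(lrs:lre,lcs:lce)
                ! ('C', i.e. conjugate transpose, in the complex case)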
                call timer%start("blas")
#if REALCASE == 1
                call PRECISION_GEMM('T', 'N',          &
#endif
#if COMPLEXCASE == 1
                call PRECISION_GEMM('C', 'N',          &
#endif
                                    nstor, lce-lcs+1, lre-lrs+1,           &
#if REALCASE == 1
                                    CONST_1_0,                             &
#endif
#if COMPLEXCASE == 1
                                    CONST_COMPLEX_PAIR_1_0,                &
#endif
                                    aux_mat(lrs,1), ubound(aux_mat,dim=1), &
                                    b(lrs,lcs), ldb,                       &
#if REALCASE == 1
                                    CONST_0_0,                             &
#endif
#if COMPLEXCASE == 1
                                    CONST_COMPLEX_PAIR_0_0,                &
#endif
                                    tmp1, nstor)
                call timer%stop("blas")
              else
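                ! No local rows in this range: the local contribution is zero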
                tmp1 = 0
              endif

              ! Sum up the results and send to processor row np
#ifdef WITH_MPI
              call timer%start("mpi_communication")
              call mpi_reduce(tmp1, tmp2, nstor*(lce-lcs+1),     &
#if REALCASE == 1
                              MPI_REAL_PRECISION,                &
#endif
#if COMPLEXCASE == 1
                              MPI_COMPLEX_PRECISION,             &
#endif
                              MPI_SUM, np, mpi_comm_rows, mpierr)

              call timer%stop("mpi_communication")
              ! Put the result into C
              if (my_prow==np) c(nr_done+1:nr_done+nstor,lcs:lce) = tmp2(1:nstor,lcs:lce)

#else /* WITH_MPI */
!              tmp2 = tmp1
              ! Put the result into C
              if (my_prow==np) c(nr_done+1:nr_done+nstor,lcs:lce) = tmp1(1:nstor,lcs:lce)

#endif /* WITH_MPI */

              deallocate(tmp1,tmp2, stat=istat, errmsg=errorMessage)
              if (istat .ne. 0) then
               print *,"elpa_mult_at_b_&
               &MATH_DATATYPE&
               &: error when deallocating tmp1 "//errorMessage
               stop 1
              endif

            endif

            nr_done = nr_done+nstor
            nstor=0
            aux_mat(:,:)=0
          endif
        enddo
      enddo

      deallocate(aux_mat, aux_bc, lrs_save, lre_save, stat=istat, errmsg=errorMessage)
      if (istat .ne. 0) then
       print *,"elpa_mult_at_b_&
       &MATH_DATATYPE&
       &: error when deallocating aux_mat "//errorMessage
       stop 1
      endif

      call timer%stop("elpa_mult_at_b_&
      &MATH_DATATYPE&
      &_&
      &PRECISION&
      &")

#undef REALCASE
#undef COMPLEXCASE
#undef DOUBLE_PRECISION
#undef SINGLE_PRECISION