#if 0
!    This file is part of ELPA.
!
!    The ELPA library was originally created by the ELPA consortium,
!    consisting of the following organizations:
!
!    - Max Planck Computing and Data Facility (MPCDF), formerly known as
!      Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
!    - Bergische Universität Wuppertal, Lehrstuhl für angewandte
!      Informatik,
!    - Technische Universität München, Lehrstuhl für Informatik mit
!      Schwerpunkt Wissenschaftliches Rechnen,
!    - Fritz-Haber-Institut, Berlin, Abt. Theorie,
!    - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
!      Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
!      and
!    - IBM Deutschland GmbH
!
!    This particular source code file contains additions, changes and
!    enhancements authored by Intel Corporation, which is not part of
!    the ELPA consortium.
!
!    More information can be found here:
!    http://elpa.mpcdf.mpg.de/
!
!    ELPA is free software: you can redistribute it and/or modify
!    it under the terms of version 3 of the GNU Lesser General
!    Public License as published by the Free Software Foundation.
!
!    ELPA is distributed in the hope that it will be useful,
!    but WITHOUT ANY WARRANTY; without even the implied warranty of
!    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!    GNU Lesser General Public License for more details.
!
!    You should have received a copy of the GNU Lesser General Public License
!    along with ELPA.  If not, see <http://www.gnu.org/licenses/>
!
!    ELPA reflects a substantial effort on the part of the original
!    ELPA consortium, and we ask you to respect the spirit of the
!    license that we chose: i.e., please contribute any changes you
!    may have back to the original ELPA library distribution, and keep
!    any derivatives of ELPA under the same license that we chose for
!    the original distribution, the GNU Lesser General Public License.
!
!
! ELPA1 -- Faster replacements for ScaLAPACK symmetric eigenvalue routines
!
! Copyright of the original code rests with the authors inside the ELPA
! consortium. The copyright of any additional modifications shall rest
! with their original authors, but shall adhere to the licensing terms
! distributed along with the original code in the file "COPYING".
#endif

#include "../general/sanity.X90"

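! elpa_solve_evp_MATH_DATATYPE_1stage_PRECISION_impl: driver for the ELPA 1-stage
! solver. It reduces the (real symmetric or complex Hermitian) matrix a to
! tridiagonal form, solves the tridiagonal eigenproblem, and back-transforms the
! eigenvectors, returning the eigenvalues in ev and the eigenvectors in q.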
function elpa_solve_evp_&
         &MATH_DATATYPE&
         &_1stage_&
         &PRECISION&
         &_impl (obj, a, ev, q, time_evp_fwd,     &
                 time_evp_solve, time_evp_back) result(success)
   use precision
   use cuda_functions
   use mod_check_for_gpu
#ifdef HAVE_DETAILED_TIMINGS
   use timings
#else
   use timings_dummy
#endif
   use iso_c_binding
   use elpa_api
   use elpa_mpi
   use elpa1_compute
   use elpa1_utilities, only : gpu_usage_via_environment_variable
   implicit none

   class(elpa_t), intent(in)                       :: obj
   real(kind=REAL_DATATYPE), intent(out)           :: ev(obj%na)
#if REALCASE == 1
#ifdef USE_ASSUMED_SIZE
   real(kind=C_DATATYPE_KIND), intent(inout)       :: a(obj%local_nrows,*)
   real(kind=C_DATATYPE_KIND), intent(out)         :: q(obj%local_nrows,*)
#else
   real(kind=C_DATATYPE_KIND), intent(inout)       :: a(obj%local_nrows,matrixCols)
   real(kind=C_DATATYPE_KIND), intent(out)         :: q(obj%local_nrows,matrixCols)
#endif
   real(kind=C_DATATYPE_KIND), allocatable         :: tau(:)
#endif /* REALCASE */

#if COMPLEXCASE == 1
#ifdef USE_ASSUMED_SIZE
   complex(kind=C_DATATYPE_KIND), intent(inout)    :: a(obj%local_nrows,*)
   complex(kind=C_DATATYPE_KIND), intent(out)      :: q(obj%local_nrows,*)
#else
   complex(kind=C_DATATYPE_KIND), intent(inout)    :: a(obj%local_nrows,matrixCols)
   complex(kind=C_DATATYPE_KIND), intent(out)      :: q(obj%local_nrows,matrixCols)
#endif

   real(kind=REAL_DATATYPE), allocatable           :: q_real(:,:)
   complex(kind=C_DATATYPE_KIND), allocatable      :: tau(:)
   integer(kind=c_int)                             :: l_cols, l_rows, l_cols_nev, np_rows, np_cols
#endif /* COMPLEXCASE */

   logical                                         :: useGPU
   logical                                         :: success

   real(kind=c_double)                             :: time_evp_fwd,     &
                                                      time_evp_solve, time_evp_back
   logical                                         :: summary_timings
   logical                                         :: do_useGPU
   integer(kind=ik)                                :: numberOfGPUDevices

   integer(kind=c_int)                             :: my_pe, n_pes, my_prow, my_pcol, mpierr
   real(kind=C_DATATYPE_KIND), allocatable         :: e(:)
   real(kind=c_double)                             :: ttt0, ttt1 ! MPI_WTIME always needs double
   logical                                         :: wantDebug
   integer(kind=c_int)                             :: istat
   character(200)                                  :: errorMessage
   integer(kind=ik)                                :: na, nev, lda, ldq, nblk, matrixCols, &
                                                      mpi_comm_rows, mpi_comm_cols, mpi_comm_all

   call timer%start("elpa_solve_evp_&
   &MATH_DATATYPE&
   &_1stage_&
   &PRECISION&
   &")

   na         = obj%na
   nev        = obj%nev
   lda        = obj%local_nrows
   ldq        = obj%local_nrows
   nblk       = obj%nblk
   matrixCols = obj%local_ncols

   mpi_comm_rows = obj%get("mpi_comm_rows")
   mpi_comm_cols = obj%get("mpi_comm_cols")
   mpi_comm_all  = obj%get("mpi_comm_parent")

   if (obj%get("gpu") .eq. 1) then
     useGPU = .true.
   else
     useGPU = .false.
   endif
   if (obj%get("summary_timings") .eq. 1) then
     summary_timings = .true.
   else
     summary_timings = .false.
   endif

   call timer%start("mpi_communication")

   call mpi_comm_rank(mpi_comm_all,my_pe,mpierr)
   call mpi_comm_size(mpi_comm_all,n_pes,mpierr)

   call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
   call mpi_comm_rank(mpi_comm_cols,my_pcol,mpierr)

#if COMPLEXCASE == 1
   call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
   call mpi_comm_size(mpi_comm_cols,np_cols,mpierr)
#endif

   call timer%stop("mpi_communication")
   success = .true.

   wantDebug = obj%get("debug") == 1
   do_useGPU = .false.
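
   ! If GPU usage was requested via the "gpu" setting, make sure devices are
   ! actually available and set up the CUDA transfer constants; otherwise check
   ! whether GPU usage is enabled through an environment variable.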

   if (useGPU) then
     if (check_for_gpu(my_pe,numberOfGPUDevices, wantDebug=wantDebug)) then

       do_useGPU = .true.
       ! set the necessary parameters
       cudaMemcpyHostToDevice   = cuda_memcpyHostToDevice()
       cudaMemcpyDeviceToHost   = cuda_memcpyDeviceToHost()
       cudaMemcpyDeviceToDevice = cuda_memcpyDeviceToDevice()
       cudaHostRegisterPortable = cuda_hostRegisterPortable()
       cudaHostRegisterMapped   = cuda_hostRegisterMapped()
     else
       print *,"GPUs are requested but not detected! Aborting..."
       success = .false.
       return
     endif
   else
     ! check whether set by environment variable
     do_useGPU = gpu_usage_via_environment_variable()

     if (do_useGPU) then
       if (check_for_gpu(my_pe,numberOfGPUDevices, wantDebug=wantDebug)) then

         ! set the necessary parameters
         cudaMemcpyHostToDevice   = cuda_memcpyHostToDevice()
         cudaMemcpyDeviceToHost   = cuda_memcpyDeviceToHost()
         cudaMemcpyDeviceToDevice = cuda_memcpyDeviceToDevice()
         cudaHostRegisterPortable = cuda_hostRegisterPortable()
         cudaHostRegisterMapped   = cuda_hostRegisterMapped()
       else
         print *,"GPUs are requested but not detected! Aborting..."
         success = .false.
         return
       endif
     endif
   endif
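
   ! In the complex case the tridiagonal problem is real, so the eigenvectors
   ! are first computed in the real work array q_real and copied into q later.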
#if COMPLEXCASE == 1
   l_rows = local_index(na, my_prow, np_rows, nblk, -1) ! Local rows of a and q
   l_cols = local_index(na, my_pcol, np_cols, nblk, -1) ! Local columns of q

   l_cols_nev = local_index(nev, my_pcol, np_cols, nblk, -1) ! Local columns corresponding to nev

   allocate(q_real(l_rows,l_cols), stat=istat, errmsg=errorMessage)
   if (istat .ne. 0) then
     print *,"solve_evp_&
     &MATH_DATATYPE&
     &_1stage_&
     &PRECISION&
     &" // ": error when allocating q_real "//errorMessage
     stop 1
   endif
#endif
   allocate(e(na), tau(na), stat=istat, errmsg=errorMessage)
   if (istat .ne. 0) then
     print *,"solve_evp_&
     &MATH_DATATYPE&
     &_1stage_&
     &PRECISION&
     &" // ": error when allocating e, tau "//errorMessage
     stop 1
   endif
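
   ! Step 1: reduce the matrix a to tridiagonal form. The diagonal is returned
   ! in ev, the sub-diagonal in e, and the Householder factors in tau.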
   ttt0 = MPI_Wtime()
   call tridiag_&
   &MATH_DATATYPE&
   &_&
   &PRECISION&
   & (na, a, lda, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, ev, e, tau, do_useGPU)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. summary_timings) write(error_unit,*) 'Time tridiag_real :',ttt1-ttt0
   time_evp_fwd = ttt1-ttt0
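
   ! Step 2: solve the tridiagonal eigenvalue problem. On exit ev holds the
   ! eigenvalues; the eigenvectors go into q (real case) or q_real (complex case).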

   ttt0 = MPI_Wtime()
   call solve_tridi_&
   &PRECISION&
   & (na, nev, ev, e,  &
#if REALCASE == 1
      q, ldq,          &
#endif
#if COMPLEXCASE == 1
      q_real, l_rows,  &
#endif
      nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, wantDebug, success)
   if (.not.(success)) return

   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. summary_timings) write(error_unit,*) 'Time solve_tridi  :',ttt1-ttt0
   time_evp_solve = ttt1-ttt0
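
   ! Step 3: back-transform the eigenvectors of the tridiagonal matrix to those
   ! of the original matrix by applying the stored Householder transformations.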

   ttt0 = MPI_Wtime()
#if COMPLEXCASE == 1
   q(1:l_rows,1:l_cols_nev) = q_real(1:l_rows,1:l_cols_nev)
#endif
   call trans_ev_&
   &MATH_DATATYPE&
   &_&
   &PRECISION&
   & (na, nev, a, lda, tau, q, ldq, nblk, matrixCols, mpi_comm_rows, mpi_comm_cols, do_useGPU)
   ttt1 = MPI_Wtime()
   if(my_prow==0 .and. my_pcol==0 .and. summary_timings) write(error_unit,*) 'Time trans_ev_real:',ttt1-ttt0
   time_evp_back = ttt1-ttt0
#if COMPLEXCASE == 1
   deallocate(q_real, stat=istat, errmsg=errorMessage)
   if (istat .ne. 0) then
     print *,"solve_evp_&
     &MATH_DATATYPE&
     &_1stage_&
     &PRECISION&
     &" // ": error when deallocating q_real "//errorMessage
     stop 1
   endif
#endif
   deallocate(e, tau, stat=istat, errmsg=errorMessage)
   if (istat .ne. 0) then
     print *,"solve_evp_&
     &MATH_DATATYPE&
     &_1stage_&
     &PRECISION&
     &" // ": error when deallocating e, tau "//errorMessage
     stop 1
   endif

   call timer%stop("elpa_solve_evp_&
   &MATH_DATATYPE&
   &_1stage_&
   &PRECISION&
   &")
end function