Commit 2aaf56f6 authored by Andreas Marek

Rename of mpi_comm to communicator

parent 4169f747
@@ -75,7 +75,7 @@
subroutine band_band_real_&
&PRECISION &
- (na, nb, nbCol, nb2, nb2Col, ab, ab2, d, e, mpi_comm)
+ (na, nb, nbCol, nb2, nb2Col, ab, ab2, d, e, communicator)
!-------------------------------------------------------------------------------
! band_band_real:
! Reduces a real symmetric banded matrix to a real symmetric matrix with smaller bandwidth. Householder transformations are not stored.
@@ -101,7 +101,7 @@
!
! e(na) Subdiagonal of tridiagonal matrix, set only on PE 0, set only if ab2 = 1 (output)
!
- ! mpi_comm
+ ! communicator
! MPI-Communicator for the total processor set
!-------------------------------------------------------------------------------
#ifdef HAVE_DETAILED_TIMINGS
@@ -113,7 +113,7 @@
use precision
implicit none
- integer(kind=ik), intent(in) :: na, nb, nbCol, nb2, nb2Col, mpi_comm
+ integer(kind=ik), intent(in) :: na, nb, nbCol, nb2, nb2Col, communicator
real(kind=REAL_DATATYPE), intent(inout) :: ab(2*nb,nbCol) ! removed assumed size
real(kind=REAL_DATATYPE), intent(inout) :: ab2(2*nb2,nb2Col) ! removed assumed size
real(kind=REAL_DATATYPE), intent(out) :: d(na), e(na) ! set only on PE 0
@@ -143,8 +143,8 @@
call timer%start("band_band_real" // PRECISION_SUFFIX)
call timer%start("mpi_communication")
- call mpi_comm_rank(mpi_comm,my_pe,mpierr)
- call mpi_comm_size(mpi_comm,n_pes,mpierr)
+ call mpi_comm_rank(communicator,my_pe,mpierr)
+ call mpi_comm_size(communicator,n_pes,mpierr)
call timer%stop("mpi_communication")
! Total number of blocks in the band:
@@ -185,7 +185,7 @@
if (nb2>1) then
do i=0,nblocks2-1
- call mpi_irecv(ab2(1,i*nb2+1), 2*nb2*nb2, MPI_REAL_PRECISION, 0, 3, mpi_comm, ireq_ab2(i+1), mpierr)
+ call mpi_irecv(ab2(1,i*nb2+1), 2*nb2*nb2, MPI_REAL_PRECISION, 0, 3, communicator, ireq_ab2(i+1), mpierr)
enddo
endif
call timer%stop("mpi_communication")
@@ -221,7 +221,7 @@
#ifdef WITH_MPI
call timer%start("mpi_communication")
- call mpi_isend(ab_s, (nb+1)*nb2, MPI_REAL_PRECISION, my_pe-1, 1, mpi_comm, ireq_ab, mpierr)
+ call mpi_isend(ab_s, (nb+1)*nb2, MPI_REAL_PRECISION, my_pe-1, 1, communicator, ireq_ab, mpierr)
call timer%stop("mpi_communication")
#endif /* WITH_MPI */
endif
@@ -265,7 +265,7 @@
endif
#ifdef WITH_MPI
call timer%start("mpi_communication")
- call mpi_send(ab_s2, 2*nb2*nb2, MPI_REAL_PRECISION, dest, 3, mpi_comm, mpierr)
+ call mpi_send(ab_s2, 2*nb2*nb2, MPI_REAL_PRECISION, dest, 3, communicator, mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -284,7 +284,7 @@
! Receive Householder vectors from previous task, from PE owning subdiagonal
#ifdef WITH_MPI
call timer%start("mpi_communication")
- call mpi_recv(hv, nb*nb2, MPI_REAL_PRECISION, my_pe-1, 2, mpi_comm, MPI_STATUS_IGNORE, mpierr)
+ call mpi_recv(hv, nb*nb2, MPI_REAL_PRECISION, my_pe-1, 2, communicator, MPI_STATUS_IGNORE, mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -322,7 +322,7 @@
!request last nb2 columns
#ifdef WITH_MPI
call timer%start("mpi_communication")
- call mpi_recv(ab_r,(nb+1)*nb2, MPI_REAL_PRECISION, my_pe+1, 1, mpi_comm, MPI_STATUS_IGNORE, mpierr)
+ call mpi_recv(ab_r,(nb+1)*nb2, MPI_REAL_PRECISION, my_pe+1, 1, communicator, MPI_STATUS_IGNORE, mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -363,7 +363,7 @@
enddo
#ifdef WITH_MPI
call timer%start("mpi_communication")
- call mpi_isend(hv_s,nb*nb2, MPI_REAL_PRECISION, my_pe+1, 2, mpi_comm, ireq_hv, mpierr)
+ call mpi_isend(hv_s,nb*nb2, MPI_REAL_PRECISION, my_pe+1, 2, communicator, ireq_hv, mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -390,7 +390,7 @@
enddo
#ifdef WITH_MPI
call timer%start("mpi_communication")
- call mpi_isend(ab_s,(nb+1)*nb2, MPI_REAL_PRECISION, my_pe-1, 1, mpi_comm, ireq_ab, mpierr)
+ call mpi_isend(ab_s,(nb+1)*nb2, MPI_REAL_PRECISION, my_pe-1, 1, communicator, ireq_ab, mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -432,7 +432,7 @@
! stop
! endif
- call mpi_barrier(mpi_comm,mpierr)
+ call mpi_barrier(communicator,mpierr)
call timer%stop("mpi_communication")
#endif /* WITH_MPI */
@@ -63,7 +63,7 @@
#if COMPLEXCASE == 1
hh_trans_complex, &
#endif
- mpi_comm_rows, mpi_comm_cols, mpi_comm, useGPU)
+ mpi_comm_rows, mpi_comm_cols, communicator, useGPU)
!-------------------------------------------------------------------------------
! tridiag_band_real/complex:
! Reduces a real symmetric band matrix to tridiagonal form
@@ -88,7 +88,7 @@
! mpi_comm_rows
! mpi_comm_cols
! MPI-Communicators for rows/columns
- ! mpi_comm
+ ! communicator
! MPI-Communicator for the total processor set
!-------------------------------------------------------------------------------
#ifdef HAVE_DETAILED_TIMINGS
@@ -103,7 +103,7 @@
implicit none
logical, intent(in) :: useGPU
- integer(kind=ik), intent(in) :: na, nb, nblk, lda, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm
+ integer(kind=ik), intent(in) :: na, nb, nblk, lda, matrixCols, mpi_comm_rows, mpi_comm_cols, communicator
#if REALCASE == 1
#ifdef USE_ASSUMED_SIZE
real(kind=REAL_DATATYPE), intent(in) :: aMatrix(lda,*)
@@ -181,8 +181,8 @@
&PRECISION_SUFFIX &
)
call timer%start("mpi_communication")
- call mpi_comm_rank(mpi_comm,my_pe,mpierr)
- call mpi_comm_size(mpi_comm,n_pes,mpierr)
+ call mpi_comm_rank(communicator,my_pe,mpierr)
+ call mpi_comm_size(communicator,n_pes,mpierr)
call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
@@ -216,10 +216,10 @@
#ifdef WITH_MPI
call timer%start("mpi_communication")
#ifndef WITH_OPENMP
- call mpi_allreduce(mpi_in_place, global_id, np_rows*np_cols, mpi_integer, mpi_sum, mpi_comm, mpierr)
+ call mpi_allreduce(mpi_in_place, global_id, np_rows*np_cols, mpi_integer, mpi_sum, communicator, mpierr)
#else
global_id_tmp(:,:) = global_id(:,:)
- call mpi_allreduce(global_id_tmp, global_id, np_rows*np_cols, mpi_integer, mpi_sum, mpi_comm, mpierr)
+ call mpi_allreduce(global_id_tmp, global_id, np_rows*np_cols, mpi_integer, mpi_sum, communicator, mpierr)
deallocate(global_id_tmp, stat=istat, errmsg=errorMessage)
if (istat .ne. 0) then
print *,"tridiag_band_&
@@ -270,7 +270,7 @@
&MATH_DATATYPE&
&_&
&PRECISION&
- &(aMatrix, a_dev, lda, na, nblk, nb, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm, ab, useGPU)
+ &(aMatrix, a_dev, lda, na, nblk, nb, matrixCols, mpi_comm_rows, mpi_comm_cols, communicator, ab, useGPU)
! Calculate the workload for each sweep in the back transformation
! and the space requirements to hold the HH vectors
@@ -359,7 +359,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- nt, 10+n-block_limits(nt), mpi_comm, ireq_hhr(num_chunks), mpierr)
+ nt, 10+n-block_limits(nt), communicator, ireq_hhr(num_chunks), mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -501,7 +501,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe-1, 1, mpi_comm, ireq_ab, mpierr)
+ my_pe-1, 1, communicator, ireq_ab, mpierr)
call timer%stop("mpi_communication")
#endif /* WITH_MPI */
endif
@@ -600,7 +600,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe-1, 2, mpi_comm, MPI_STATUS_IGNORE, mpierr)
+ my_pe-1, 2, communicator, MPI_STATUS_IGNORE, mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -621,7 +621,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe-1, 2, mpi_comm, MPI_STATUS_IGNORE, mpierr)
+ my_pe-1, 2, communicator, MPI_STATUS_IGNORE, mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -879,7 +879,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe-1, 1, mpi_comm, ireq_ab, mpierr)
+ my_pe-1, 1, communicator, ireq_ab, mpierr)
call timer%stop("mpi_communication")
#endif /* WITH_MPI */
@@ -898,7 +898,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe+1, 1, mpi_comm, MPI_STATUS_IGNORE, mpierr)
+ my_pe+1, 1, communicator, MPI_STATUS_IGNORE, mpierr)
endif
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -929,7 +929,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe+1, 2, mpi_comm, ireq_hv, mpierr)
+ my_pe+1, 2, communicator, ireq_hv, mpierr)
call timer%stop("mpi_communication")
#endif /* WITH_MPI */
@@ -992,7 +992,7 @@
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
global_id(hh_dst(iblk), mod(iblk+block_limits(my_pe)-1,np_cols)), &
- 10+iblk, mpi_comm, ireq_hhs(iblk), mpierr)
+ 10+iblk, communicator, ireq_hhs(iblk), mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
! do the post-poned irecv here
@@ -1068,7 +1068,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe+1, 1, mpi_comm, MPI_STATUS_IGNORE, mpierr)
+ my_pe+1, 1, communicator, MPI_STATUS_IGNORE, mpierr)
#else /* WITH_OPENMP */
call mpi_recv(ab(1,ne), nb+1, &
#if REALCASE == 1
@@ -1077,7 +1077,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe+1, 1, mpi_comm, MPI_STATUS_IGNORE, mpierr)
+ my_pe+1, 1, communicator, MPI_STATUS_IGNORE, mpierr)
#endif /* WITH_OPENMP */
call timer%stop("mpi_communication")
#else /* WITH_MPI */
@@ -1188,7 +1188,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe+1, 2, mpi_comm, ireq_hv, mpierr)
+ my_pe+1, 2, communicator, ireq_hv, mpierr)
call timer%stop("mpi_communication")
#endif /* WITH_MPI */
@@ -1239,7 +1239,7 @@
#if COMPLEXCASE == 1
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
- my_pe-1, 1, mpi_comm, ireq_ab, mpierr)
+ my_pe-1, 1, communicator, ireq_ab, mpierr)
call timer%stop("mpi_communication")
#endif /* WITH_MPI */
@@ -1347,7 +1347,7 @@
MPI_COMPLEX_EXPLICIT_PRECISION, &
#endif
global_id(hh_dst(iblk), mod(iblk+block_limits(my_pe)-1, np_cols)), &
- 10+iblk, mpi_comm, ireq_hhs(iblk), mpierr)
+ 10+iblk, communicator, ireq_hhs(iblk), mpierr)
call timer%stop("mpi_communication")
#else /* WITH_MPI */
! do the post-poned irecv here
@@ -1415,7 +1415,7 @@
#ifdef WITH_MPI
call timer%start("mpi_communication")
- call mpi_barrier(mpi_comm,mpierr)
+ call mpi_barrier(communicator,mpierr)
call timer%stop("mpi_communication")
#endif
deallocate(ab, stat=istat, errmsg=errorMessage)
@@ -718,7 +718,7 @@ module elpa_pdgeqrf
! incx == 1: column major
! incx != 1: row major
- subroutine qr_pdlarfg_1dcomm_double(x,incx,tau,work,lwork,n,idx,nb,hgmode,rev,mpi_comm)
+ subroutine qr_pdlarfg_1dcomm_double(x,incx,tau,work,lwork,n,idx,nb,hgmode,rev,communicator)
use precision
use ELPA1
@@ -736,7 +736,7 @@ module elpa_pdgeqrf
real(kind=rk8) :: x(*),work(*)
! input variables (global)
- integer(kind=ik) :: mpi_comm,nb,idx,n,rev
+ integer(kind=ik) :: communicator,nb,idx,n,rev
! output variables (global)
real(kind=rk8) :: tau
@@ -768,8 +768,8 @@ module elpa_pdgeqrf
#endif
return
end if
- call MPI_Comm_rank(mpi_comm, mpirank, mpierr)
- call MPI_Comm_size(mpi_comm, mpiprocs, mpierr)
+ call MPI_Comm_rank(communicator, mpirank, mpierr)
+ call MPI_Comm_size(communicator, mpiprocs, mpierr)
! calculate expected work size and store in work(1)
if (hgmode .eq. ichar('s')) then
! allreduce (MPI_SUM)
@@ -841,11 +841,11 @@ module elpa_pdgeqrf
#ifdef DOUBLE_PRECISION_REAL
call mpi_allreduce(work(1),work(sendsize+1), &
sendsize,mpi_real8,mpi_sum, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#else
call mpi_allreduce(work(1),work(sendsize+1), &
sendsize,mpi_real4,mpi_sum, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#endif
#else
@@ -877,11 +877,11 @@ module elpa_pdgeqrf
#ifdef DOUBLE_PRECISION_REAL
call mpi_alltoall(work(1),2,mpi_real8, &
work(sendsize+1),2,mpi_real8, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#else
call mpi_alltoall(work(1),2,mpi_real4, &
work(sendsize+1),2,mpi_real4, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#endif
#else
@@ -917,11 +917,11 @@ module elpa_pdgeqrf
#ifdef DOUBLE_PRECISION_REAL
call mpi_allgather(work(1),sendsize,mpi_real8, &
work(sendsize+1),sendsize,mpi_real8, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#else
call mpi_allgather(work(1),sendsize,mpi_real4, &
work(sendsize+1),sendsize,mpi_real4, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#endif
#else
@@ -3464,7 +3464,7 @@ module elpa_pdgeqrf
! incx == 1: column major
! incx != 1: row major
- subroutine qr_pdlarfg_1dcomm_single(x,incx,tau,work,lwork,n,idx,nb,hgmode,rev,mpi_comm)
+ subroutine qr_pdlarfg_1dcomm_single(x,incx,tau,work,lwork,n,idx,nb,hgmode,rev,communicator)
use precision
use ELPA1
@@ -3482,7 +3482,7 @@ module elpa_pdgeqrf
real(kind=rk4) :: x(*),work(*)
! input variables (global)
- integer(kind=ik) :: mpi_comm,nb,idx,n,rev
+ integer(kind=ik) :: communicator,nb,idx,n,rev
! output variables (global)
real(kind=rk4) :: tau
@@ -3514,8 +3514,8 @@ module elpa_pdgeqrf
#endif
return
end if
- call MPI_Comm_rank(mpi_comm, mpirank, mpierr)
- call MPI_Comm_size(mpi_comm, mpiprocs, mpierr)
+ call MPI_Comm_rank(communicator, mpirank, mpierr)
+ call MPI_Comm_size(communicator, mpiprocs, mpierr)
! calculate expected work size and store in work(1)
if (hgmode .eq. ichar('s')) then
! allreduce (MPI_SUM)
@@ -3587,11 +3587,11 @@ module elpa_pdgeqrf
#ifdef DOUBLE_PRECISION_REAL
call mpi_allreduce(work(1),work(sendsize+1), &
sendsize,mpi_real8,mpi_sum, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#else
call mpi_allreduce(work(1),work(sendsize+1), &
sendsize,mpi_real4,mpi_sum, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#endif
#else
@@ -3623,11 +3623,11 @@ module elpa_pdgeqrf
#ifdef DOUBLE_PRECISION_REAL
call mpi_alltoall(work(1),2,mpi_real8, &
work(sendsize+1),2,mpi_real8, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#else
call mpi_alltoall(work(1),2,mpi_real4, &
work(sendsize+1),2,mpi_real4, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#endif
#else
@@ -3669,11 +3669,11 @@ module elpa_pdgeqrf
#ifdef DOUBLE_PRECISION_REAL
call mpi_allgather(work(1),sendsize,mpi_real8, &
work(sendsize+1),sendsize,mpi_real8, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#else
call mpi_allgather(work(1),sendsize,mpi_real4, &
work(sendsize+1),sendsize,mpi_real4, &
- mpi_comm,mpierr)
+ communicator,mpierr)
#endif
#else
@@ -58,7 +58,7 @@ subroutine redist_band_&
#if COMPLEXCASE == 1
c_a, &
#endif
- a_dev, lda, na, nblk, nbw, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_global, &
+ a_dev, lda, na, nblk, nbw, matrixCols, mpi_comm_rows, mpi_comm_cols, communicator, &
#if REALCASE == 1
r_ab, useGPU)
#endif
@@ -80,7 +80,7 @@ subroutine redist_band_&
implicit none
logical, intent(in) :: useGPU
- integer(kind=ik), intent(in) :: lda, na, nblk, nbw, matrixCols, mpi_comm_rows, mpi_comm_cols, mpi_comm_global
+ integer(kind=ik), intent(in) :: lda, na, nblk, nbw, matrixCols, mpi_comm_rows, mpi_comm_cols, communicator
#if REALCASE == 1
real(kind=REAL_DATATYPE), intent(in) :: r_a(lda, matrixCols)
#endif
@@ -152,9 +152,8 @@ subroutine redist_band_&
endif ! useGPU
call timer%start("mpi_communication")
- print *, mpi_comm_global
- call mpi_comm_rank(mpi_comm_global,my_pe,mpierr)
- call mpi_comm_size(mpi_comm_global,n_pes,mpierr)
+ call mpi_comm_rank(communicator,my_pe,mpierr)
+ call mpi_comm_size(communicator,n_pes,mpierr)
call mpi_comm_rank(mpi_comm_rows,my_prow,mpierr)
call mpi_comm_size(mpi_comm_rows,np_rows,mpierr)
@@ -174,10 +173,10 @@ subroutine redist_band_&
call timer%start("mpi_communication")
#ifdef WITH_OPENMP
global_id_tmp(:,:) = global_id(:,:)
- call mpi_allreduce(global_id_tmp, global_id, np_rows*np_cols, mpi_integer, mpi_sum, mpi_comm_global, mpierr)
+ call mpi_allreduce(global_id_tmp, global_id, np_rows*np_cols, mpi_integer, mpi_sum, communicator, mpierr)
deallocate(global_id_tmp)
#else
- call mpi_allreduce(mpi_in_place, global_id, np_rows*np_cols, mpi_integer, mpi_sum, mpi_comm_global, mpierr)
+ call mpi_allreduce(mpi_in_place, global_id, np_rows*np_cols, mpi_integer, mpi_sum, communicator, mpierr)
#endif
call timer%stop("mpi_communication")
#endif /* WITH_MPI */
@@ -303,9 +302,9 @@ subroutine redist_band_&
#if REALCASE==1
#ifdef DOUBLE_PRECISION_REAL
- call MPI_Alltoallv(r_sbuf, ncnt_s, nstart_s, MPI_REAL8, r_rbuf, ncnt_r, nstart_r, MPI_REAL8, mpi_comm_global, mpierr)
+ call MPI_Alltoallv(r_sbuf, ncnt_s, nstart_s, MPI_REAL8, r_rbuf, ncnt_r, nstart_r, MPI_REAL8, communicator, mpierr)
#else
- call MPI_Alltoallv(r_sbuf, ncnt_s, nstart_s, MPI_REAL4, r_rbuf, ncnt_r, nstart_r, MPI_REAL4, mpi_comm_global, mpierr)
+ call MPI_Alltoallv(r_sbuf, ncnt_s, nstart_s, MPI_REAL4, r_rbuf, ncnt_r, nstart_r, MPI_REAL4, communicator, mpierr)
#endif
#endif /* REALCASE==1 */
@@ -313,9 +312,9 @@ subroutine redist_band_&
#if COMPLEXCASE==1
#ifdef DOUBLE_PRECISION_COMPLEX
- call MPI_Alltoallv(c_sbuf, ncnt_s, nstart_s, MPI_COMPLEX16, c_rbuf, ncnt_r, nstart_r, MPI_COMPLEX16, mpi_comm_global, mpierr)
+ call MPI_Alltoallv(c_sbuf, ncnt_s, nstart_s, MPI_COMPLEX16, c_rbuf, ncnt_r, nstart_r, MPI_COMPLEX16, communicator, mpierr)
#else
- call MPI_Alltoallv(c_sbuf, ncnt_s, nstart_s, MPI_COMPLEX, c_rbuf, ncnt_r, nstart_r, MPI_COMPLEX, mpi_comm_global, mpierr)
+ call MPI_Alltoallv(c_sbuf, ncnt_s, nstart_s, MPI_COMPLEX, c_rbuf, ncnt_r, nstart_r, MPI_COMPLEX, communicator, mpierr)
#endif
#endif /* COMPLEXCASE==1 */