Commit 67be7503 authored by Lorenz Huedepohl's avatar Lorenz Huedepohl
Browse files

Fix name clash with reserved symbol 'mpi_status'

Apparently in some compiler/MPI combinations (gcc with impi 5.1.3) the
identifier 'mpi_status' is defined and exported in their MPI Fortran
module, and it is thus not allowed to name one of your local variables
'mpi_status' as well.

The confusing error message I got was

  ../src/elpa2_compute.F90:5780:37:

                  call mpi_wait(ireq_hv,mpi_status,mpierr)
                                       1
  Error: Invalid procedure argument at (1)

even though everything seemed to be defined correctly
parent e25a7554
...@@ -1617,7 +1617,7 @@ module ELPA1_compute ...@@ -1617,7 +1617,7 @@ module ELPA1_compute
integer(kind=ik) :: np_off, nprocs integer(kind=ik) :: np_off, nprocs
integer(kind=ik) :: np1, np2, noff, nlen, nmid, n integer(kind=ik) :: np1, np2, noff, nlen, nmid, n
#ifdef WITH_MPI #ifdef WITH_MPI
integer(kind=ik) :: mpi_status(mpi_status_size) integer(kind=ik) :: my_mpi_status(mpi_status_size)
#endif #endif
logical, intent(in) :: wantDebug logical, intent(in) :: wantDebug
logical, intent(out) :: success logical, intent(out) :: success
...@@ -1654,7 +1654,7 @@ module ELPA1_compute ...@@ -1654,7 +1654,7 @@ module ELPA1_compute
if (my_pcol>=np_off+np1 .and. my_pcol<np_off+nprocs) then if (my_pcol>=np_off+np1 .and. my_pcol<np_off+nprocs) then
#ifdef WITH_MPI #ifdef WITH_MPI
call mpi_recv(d(noff+1),nmid,MPI_REAL8,np_off,1,mpi_comm_cols,mpi_status,mpierr) call mpi_recv(d(noff+1),nmid,MPI_REAL8,np_off,1,mpi_comm_cols,my_mpi_status,mpierr)
#else #else
d(noff+1:noff+1+nmid-1) = d(noff+1:noff+1+nmid-1) d(noff+1:noff+1+nmid-1) = d(noff+1:noff+1+nmid-1)
#endif #endif
...@@ -1669,7 +1669,7 @@ module ELPA1_compute ...@@ -1669,7 +1669,7 @@ module ELPA1_compute
endif endif
if (my_pcol>=np_off .and. my_pcol<np_off+np1) then if (my_pcol>=np_off .and. my_pcol<np_off+np1) then
#ifdef WITH_MPI #ifdef WITH_MPI
call mpi_recv(d(noff+nmid+1),nlen-nmid,MPI_REAL8,np_off+np1,1,mpi_comm_cols,mpi_status,mpierr) call mpi_recv(d(noff+nmid+1),nlen-nmid,MPI_REAL8,np_off+np1,1,mpi_comm_cols,my_mpi_status,mpierr)
#else #else
d(noff+nmid+1:noff+nmid+1+nlen-nmid-1) = d(noff+nmid+1:noff+nmid+1+nlen-nmid-1) d(noff+nmid+1:noff+nmid+1+nlen-nmid-1) = d(noff+nmid+1:noff+nmid+1+nlen-nmid-1)
#endif #endif
...@@ -2066,7 +2066,7 @@ module ELPA1_compute ...@@ -2066,7 +2066,7 @@ module ELPA1_compute
integer(kind=ik) :: my_proc, n_procs, my_prow, my_pcol, np_rows, & integer(kind=ik) :: my_proc, n_procs, my_prow, my_pcol, np_rows, &
np_cols, mpierr np_cols, mpierr
#ifdef WITH_MPI #ifdef WITH_MPI
integer(kind=ik) :: mpi_status(mpi_status_size) integer(kind=ik) :: my_mpi_status(mpi_status_size)
#endif #endif
integer(kind=ik) :: np_next, np_prev, np_rem integer(kind=ik) :: np_next, np_prev, np_rem
integer(kind=ik) :: idx(na), idx1(na), idx2(na) integer(kind=ik) :: idx(na), idx1(na), idx2(na)
...@@ -2623,7 +2623,7 @@ module ELPA1_compute ...@@ -2623,7 +2623,7 @@ module ELPA1_compute
#ifdef WITH_MPI #ifdef WITH_MPI
call MPI_Sendrecv_replace(qtmp1, l_rows*max_local_cols, MPI_REAL8, & call MPI_Sendrecv_replace(qtmp1, l_rows*max_local_cols, MPI_REAL8, &
np_next, 1111, np_prev, 1111, & np_next, 1111, np_prev, 1111, &
mpi_comm_cols, mpi_status, mpierr) mpi_comm_cols, my_mpi_status, mpierr)
#endif #endif
endif endif
...@@ -2810,7 +2810,7 @@ module ELPA1_compute ...@@ -2810,7 +2810,7 @@ module ELPA1_compute
endif endif
else if (pc2==my_pcol) then else if (pc2==my_pcol) then
#ifdef WITH_MPI #ifdef WITH_MPI
call mpi_recv(qtmp(1,nc),l_rows,MPI_REAL8,pc1,mod(i,4096),mpi_comm_cols,mpi_status,mpierr) call mpi_recv(qtmp(1,nc),l_rows,MPI_REAL8,pc1,mod(i,4096),mpi_comm_cols,my_mpi_status,mpierr)
#else #else
qtmp(1:l_rows,nc) = q(l_rqs:l_rqe,nc) qtmp(1:l_rows,nc) = q(l_rqs:l_rqe,nc)
#endif #endif
...@@ -2863,7 +2863,7 @@ module ELPA1_compute ...@@ -2863,7 +2863,7 @@ module ELPA1_compute
#ifdef WITH_MPI #ifdef WITH_MPI
call mpi_sendrecv(q(l_rqs,lc1),l_rows,MPI_REAL8,pc2,1, & call mpi_sendrecv(q(l_rqs,lc1),l_rows,MPI_REAL8,pc2,1, &
tmp,l_rows,MPI_REAL8,pc2,1, & tmp,l_rows,MPI_REAL8,pc2,1, &
mpi_comm_cols,mpi_status,mpierr) mpi_comm_cols,my_mpi_status,mpierr)
#else #else
tmp(1:l_rows) = q(l_rqs:l_rqe,lc1) tmp(1:l_rows) = q(l_rqs:l_rqe,lc1)
#endif #endif
...@@ -2873,7 +2873,7 @@ module ELPA1_compute ...@@ -2873,7 +2873,7 @@ module ELPA1_compute
#ifdef WITH_MPI #ifdef WITH_MPI
call mpi_sendrecv(q(l_rqs,lc2),l_rows,MPI_REAL8,pc1,1, & call mpi_sendrecv(q(l_rqs,lc2),l_rows,MPI_REAL8,pc1,1, &
tmp,l_rows,MPI_REAL8,pc1,1, & tmp,l_rows,MPI_REAL8,pc1,1, &
mpi_comm_cols,mpi_status,mpierr) mpi_comm_cols,my_mpi_status,mpierr)
#else #else
tmp(1:l_rows) = q(l_rqs:l_rqe,lc2) tmp(1:l_rows) = q(l_rqs:l_rqe,lc2)
#endif #endif
...@@ -2925,7 +2925,7 @@ module ELPA1_compute ...@@ -2925,7 +2925,7 @@ module ELPA1_compute
z(:) = z(:) + tmp(:) z(:) = z(:) + tmp(:)
#ifdef WITH_MPI #ifdef WITH_MPI
call MPI_Sendrecv_replace(z, n, MPI_REAL8, np_next, 1111, np_prev, 1111, & call MPI_Sendrecv_replace(z, n, MPI_REAL8, np_next, 1111, np_prev, 1111, &
mpi_comm_cols, mpi_status, mpierr) mpi_comm_cols, my_mpi_status, mpierr)
#endif #endif
enddo enddo
...@@ -2973,7 +2973,7 @@ module ELPA1_compute ...@@ -2973,7 +2973,7 @@ module ELPA1_compute
z(1:n) = tmp(1:n) z(1:n) = tmp(1:n)
do np = npc_0+1, npc_0+npc_n-1 do np = npc_0+1, npc_0+npc_n-1
#ifdef WITH_MPI #ifdef WITH_MPI
call mpi_recv(tmp,n,MPI_REAL8,np,1111,mpi_comm_cols,mpi_status,mpierr) call mpi_recv(tmp,n,MPI_REAL8,np,1111,mpi_comm_cols,my_mpi_status,mpierr)
#else #else
tmp(1:n) = z(1:n) tmp(1:n) = z(1:n)
#endif #endif
...@@ -2987,7 +2987,7 @@ module ELPA1_compute ...@@ -2987,7 +2987,7 @@ module ELPA1_compute
else else
#ifdef WITH_MPI #ifdef WITH_MPI
call mpi_send(tmp,n,MPI_REAL8,npc_0,1111,mpi_comm_cols,mpierr) call mpi_send(tmp,n,MPI_REAL8,npc_0,1111,mpi_comm_cols,mpierr)
call mpi_recv(z ,n,MPI_REAL8,npc_0,1111,mpi_comm_cols,mpi_status,mpierr) call mpi_recv(z ,n,MPI_REAL8,npc_0,1111,mpi_comm_cols,my_mpi_status,mpierr)
#else #else
z(1:n) = tmp(1:n) z(1:n) = tmp(1:n)
#endif #endif
......
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment