Commit 4d6fa796 authored by Volker Springel

more MPI_Sendrecv -> myMPI_Sendrecv replacements to hedge against very large transfers

parent f5a0fa47
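
Why this guards against very large transfers: the count arguments of MPI_Sendrecv are plain int, so a single exchange of more than INT_MAX bytes (about 2 GiB with MPI_BYTE) overflows and fails. A wrapper that takes size_t counts and splits the exchange into int-sized chunks avoids this. The following is a minimal sketch of such a wrapper, assuming a size_t-count signature and contiguous datatypes; it is an illustration of the idea, not the actual implementation in the repository's mpi_utils code.

#include <mpi.h>
#include <algorithm>
#include <cstddef>

/* Sketch of a size-safe Sendrecv: splits a pairwise exchange into chunks whose
 * element counts fit into an int. Assumes contiguous datatypes (the call sites
 * in this commit use MPI_BYTE) and the pairwise pattern dest == source. */
int myMPI_Sendrecv(void *sendbuf, size_t sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                   void *recvbuf, size_t recvcount, MPI_Datatype recvtype, int source, int recvtag,
                   MPI_Comm comm, MPI_Status *status)
{
  const size_t maxchunk = ((size_t)1) << 30; /* elements per call, safely below INT_MAX */

  int send_elem, recv_elem;
  MPI_Type_size(sendtype, &send_elem);
  MPI_Type_size(recvtype, &recv_elem);

  char *sptr = (char *)sendbuf;
  char *rptr = (char *)recvbuf;

  size_t sent = 0, received = 0;

  /* Both partners iterate the same number of times, because my send count equals
   * the partner's receive count and vice versa; once one direction is finished,
   * the remaining iterations post matching zero-size messages for it. */
  while(sent < sendcount || received < recvcount)
    {
      int nsend = (int)std::min(maxchunk, sendcount - sent);
      int nrecv = (int)std::min(maxchunk, recvcount - received);

      MPI_Sendrecv(sptr + sent * (size_t)send_elem, nsend, sendtype, dest, sendtag,
                   rptr + received * (size_t)recv_elem, nrecv, recvtype, source, recvtag,
                   comm, MPI_STATUS_IGNORE);

      sent += nsend;
      received += nrecv;
    }

  (void)status; /* per-chunk statuses are discarded in this sketch */
  return MPI_SUCCESS;
}

A side effect of a size_t signature is that call sites like Send_count[target] * sizeof(sph_particle_data) evaluate in size_t (sizeof already has type size_t), so the byte count can no longer wrap around in int arithmetic before the call is even made.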
@@ -619,10 +619,10 @@ void domain<partset>::particle_exchange_based_on_PS(MPI_Comm Communicator)
     {
       if(Send_count[target] > 0 || Recv_count[target] > 0)
         {
-          MPI_Sendrecv(sphBuf + Send_offset[target], Send_count[target] * sizeof(sph_particle_data), MPI_BYTE,
-                       target, TAG_SPHDATA, Tp->SphP + Recv_offset[target] + nstay,
-                       Recv_count[target] * sizeof(sph_particle_data), MPI_BYTE, target, TAG_SPHDATA, Communicator,
-                       MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(sphBuf + Send_offset[target], Send_count[target] * sizeof(sph_particle_data), MPI_BYTE,
+                         target, TAG_SPHDATA, Tp->SphP + Recv_offset[target] + nstay,
+                         Recv_count[target] * sizeof(sph_particle_data), MPI_BYTE, target, TAG_SPHDATA,
+                         Communicator, MPI_STATUS_IGNORE);
         }
     }
 }

@@ -724,9 +724,9 @@ void domain<partset>::particle_exchange_based_on_PS(MPI_Comm Communicator)
     {
       if(Send_count[target] > 0 || Recv_count[target] > 0)
         {
-          MPI_Sendrecv(partBuf + Send_offset[target], Send_count[target] * sizeof(pdata), MPI_BYTE, target, TAG_PDATA,
-                       Tp->P + Recv_offset[target] + nlocal, Recv_count[target] * sizeof(pdata), MPI_BYTE, target,
-                       TAG_PDATA, Communicator, MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(partBuf + Send_offset[target], Send_count[target] * sizeof(pdata), MPI_BYTE, target,
+                         TAG_PDATA, Tp->P + Recv_offset[target] + nlocal, Recv_count[target] * sizeof(pdata), MPI_BYTE,
+                         target, TAG_PDATA, Communicator, MPI_STATUS_IGNORE);
         }
     }
 }

@@ -822,9 +822,9 @@ void domain<partset>::particle_exchange_based_on_PS(MPI_Comm Communicator)
     {
       if(Send_count[target] > 0 || Recv_count[target] > 0)
         {
-          MPI_Sendrecv(subBuf + Send_offset[target], Send_count[target] * sizeof(subfind_data), MPI_BYTE, target,
-                       TAG_KEY, Tp->PS + Recv_offset[target] + nlocal, Recv_count[target] * sizeof(subfind_data),
-                       MPI_BYTE, target, TAG_KEY, Communicator, MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(subBuf + Send_offset[target], Send_count[target] * sizeof(subfind_data), MPI_BYTE, target,
+                         TAG_KEY, Tp->PS + Recv_offset[target] + nlocal, Recv_count[target] * sizeof(subfind_data),
+                         MPI_BYTE, target, TAG_KEY, Communicator, MPI_STATUS_IGNORE);
         }
     }
 }

@@ -980,9 +980,10 @@ void lightcone::lightcone_massmap_binning(void)
       if(recvTask < NTask)
         if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
-          MPI_Sendrecv(&send_P[Send_offset[recvTask]], Send_count[recvTask] * sizeof(lightcone_massmap_data), MPI_BYTE, recvTask,
-                       TAG_DENS_A, &Mp->P[Mp->NumPart + Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(lightcone_massmap_data),
-                       MPI_BYTE, recvTask, TAG_DENS_A, Communicator, MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(&send_P[Send_offset[recvTask]], Send_count[recvTask] * sizeof(lightcone_massmap_data), MPI_BYTE, recvTask,
+                         TAG_DENS_A, &Mp->P[Mp->NumPart + Recv_offset[recvTask]],
+                         Recv_count[recvTask] * sizeof(lightcone_massmap_data), MPI_BYTE, recvTask, TAG_DENS_A, Communicator,
+                         MPI_STATUS_IGNORE);
     }

   Mp->NumPart += nimport;

@@ -116,9 +116,9 @@ void fof<simparticles>::subfind_match_ids_of_previously_most_bound_ids(simpartic
       int recvTask = ThisTask ^ ngrp;

       if(recvTask < NTask)
         if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
-          MPI_Sendrecv(&export_data[Send_offset[recvTask]], Send_count[recvTask] * sizeof(MyIDType), MPI_BYTE, recvTask, TAG_DENS_B,
-                       &import_data[Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(MyIDType), MPI_BYTE, recvTask, TAG_DENS_B,
-                       Communicator, MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(&export_data[Send_offset[recvTask]], Send_count[recvTask] * sizeof(MyIDType), MPI_BYTE, recvTask, TAG_DENS_B,
+                         &import_data[Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(MyIDType), MPI_BYTE, recvTask, TAG_DENS_B,
+                         Communicator, MPI_STATUS_IGNORE);
     }

   /* incoming data should already be sorted, so now do the match */