Commit 4d6fa796 authored by Volker Springel

more MPI_Sendrecv -> myMPI_Sendrecv replacements to hedge against very large transfers

parent f5a0fa47
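
The rationale behind the commit message: MPI's count arguments are declared as plain int, so a single MPI_Sendrecv cannot move more than INT_MAX (about 2^31) elements. Since these call sites pass byte counts like Send_count[target] * sizeof(sph_particle_data), an exchange of a few tens of millions of particles can already overflow the count. A size-limited wrapper sidesteps this by taking size_t counts and splitting the transfer into chunks. The sketch below is an assumption about what myMPI_Sendrecv does, inferred from the commit message and the unchanged call-site signatures; the chunk limit and internal details are illustrative, not the actual GADGET-4 implementation.

#include <mpi.h>

#include <algorithm>
#include <cstddef>

/* Hypothetical size-limited drop-in for MPI_Sendrecv: same argument order,
 * but the counts are size_t instead of int, so call sites only need the
 * function name changed. The exchange is split into chunks that each fit
 * the int count parameter of the underlying MPI_Sendrecv. Assumes both
 * ranks use types of equal size (MPI_BYTE in the diffs below), so their
 * chunk schedules stay in lockstep. */
int myMPI_Sendrecv(void *sendbuf, size_t sendcount, MPI_Datatype sendtype, int dest, int sendtag, void *recvbuf,
                   size_t recvcount, MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm, MPI_Status *status)
{
  const size_t maxcount = ((size_t)1) << 30; /* illustrative per-chunk element limit, safely below INT_MAX */

  int send_elsize, recv_elsize;
  MPI_Type_size(sendtype, &send_elsize);
  MPI_Type_size(recvtype, &recv_elsize);

  char *sp = (char *)sendbuf;
  char *rp = (char *)recvbuf;

  /* both directions advance together; a direction that finishes early
   * simply contributes zero-sized pieces to the remaining rounds, which
   * keeps the number of rounds identical on the two communicating ranks */
  while(sendcount > 0 || recvcount > 0)
    {
      size_t ns = std::min(sendcount, maxcount);
      size_t nr = std::min(recvcount, maxcount);

      MPI_Sendrecv(sp, (int)ns, sendtype, dest, sendtag, rp, (int)nr, recvtype, source, recvtag, comm, status);

      sp += ns * (size_t)send_elsize;
      rp += nr * (size_t)recv_elsize;
      sendcount -= ns;
      recvcount -= nr;
    }

  return MPI_SUCCESS;
}

For scale: with a particle structure on the order of a hundred bytes, an exchange of roughly 25 million particles already exceeds 2^31 bytes, which is presumably the failure mode the commit title calls "very large transfers". Keeping the argument order identical to MPI_Sendrecv means each diff below only changes the function name and re-wraps the continuation lines.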
@@ -619,10 +619,10 @@ void domain<partset>::particle_exchange_based_on_PS(MPI_Comm Communicator)
     {
       if(Send_count[target] > 0 || Recv_count[target] > 0)
         {
-          MPI_Sendrecv(sphBuf + Send_offset[target], Send_count[target] * sizeof(sph_particle_data), MPI_BYTE,
-                       target, TAG_SPHDATA, Tp->SphP + Recv_offset[target] + nstay,
-                       Recv_count[target] * sizeof(sph_particle_data), MPI_BYTE, target, TAG_SPHDATA, Communicator,
-                       MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(sphBuf + Send_offset[target], Send_count[target] * sizeof(sph_particle_data), MPI_BYTE,
+                         target, TAG_SPHDATA, Tp->SphP + Recv_offset[target] + nstay,
+                         Recv_count[target] * sizeof(sph_particle_data), MPI_BYTE, target, TAG_SPHDATA,
+                         Communicator, MPI_STATUS_IGNORE);
         }
     }
 }
@@ -724,9 +724,9 @@ void domain<partset>::particle_exchange_based_on_PS(MPI_Comm Communicator)
     {
       if(Send_count[target] > 0 || Recv_count[target] > 0)
         {
-          MPI_Sendrecv(partBuf + Send_offset[target], Send_count[target] * sizeof(pdata), MPI_BYTE, target, TAG_PDATA,
-                       Tp->P + Recv_offset[target] + nlocal, Recv_count[target] * sizeof(pdata), MPI_BYTE, target,
-                       TAG_PDATA, Communicator, MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(partBuf + Send_offset[target], Send_count[target] * sizeof(pdata), MPI_BYTE, target,
+                         TAG_PDATA, Tp->P + Recv_offset[target] + nlocal, Recv_count[target] * sizeof(pdata), MPI_BYTE,
+                         target, TAG_PDATA, Communicator, MPI_STATUS_IGNORE);
         }
     }
 }
@@ -822,9 +822,9 @@ void domain<partset>::particle_exchange_based_on_PS(MPI_Comm Communicator)
     {
       if(Send_count[target] > 0 || Recv_count[target] > 0)
         {
-          MPI_Sendrecv(subBuf + Send_offset[target], Send_count[target] * sizeof(subfind_data), MPI_BYTE, target,
-                       TAG_KEY, Tp->PS + Recv_offset[target] + nlocal, Recv_count[target] * sizeof(subfind_data),
-                       MPI_BYTE, target, TAG_KEY, Communicator, MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(subBuf + Send_offset[target], Send_count[target] * sizeof(subfind_data), MPI_BYTE, target,
+                         TAG_KEY, Tp->PS + Recv_offset[target] + nlocal, Recv_count[target] * sizeof(subfind_data),
+                         MPI_BYTE, target, TAG_KEY, Communicator, MPI_STATUS_IGNORE);
         }
     }
 }
@@ -980,9 +980,10 @@ void lightcone::lightcone_massmap_binning(void)
       if(recvTask < NTask)
         if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
-          MPI_Sendrecv(&send_P[Send_offset[recvTask]], Send_count[recvTask] * sizeof(lightcone_massmap_data), MPI_BYTE, recvTask,
-                       TAG_DENS_A, &Mp->P[Mp->NumPart + Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(lightcone_massmap_data),
-                       MPI_BYTE, recvTask, TAG_DENS_A, Communicator, MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(&send_P[Send_offset[recvTask]], Send_count[recvTask] * sizeof(lightcone_massmap_data), MPI_BYTE, recvTask,
+                         TAG_DENS_A, &Mp->P[Mp->NumPart + Recv_offset[recvTask]],
+                         Recv_count[recvTask] * sizeof(lightcone_massmap_data), MPI_BYTE, recvTask, TAG_DENS_A, Communicator,
+                         MPI_STATUS_IGNORE);
     }
 
   Mp->NumPart += nimport;
@@ -116,9 +116,9 @@ void fof<simparticles>::subfind_match_ids_of_previously_most_bound_ids(simpartic
       int recvTask = ThisTask ^ ngrp;
       if(recvTask < NTask)
         if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
-          MPI_Sendrecv(&export_data[Send_offset[recvTask]], Send_count[recvTask] * sizeof(MyIDType), MPI_BYTE, recvTask, TAG_DENS_B,
-                       &import_data[Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(MyIDType), MPI_BYTE, recvTask, TAG_DENS_B,
-                       Communicator, MPI_STATUS_IGNORE);
+          myMPI_Sendrecv(&export_data[Send_offset[recvTask]], Send_count[recvTask] * sizeof(MyIDType), MPI_BYTE, recvTask, TAG_DENS_B,
+                         &import_data[Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(MyIDType), MPI_BYTE, recvTask, TAG_DENS_B,
+                         Communicator, MPI_STATUS_IGNORE);
     }
 
   /* incoming data should already be sorted, so now do the match */