Skip to content
Snippets Groups Projects
Commit e226745a authored by mattijs's avatar mattijs
Browse files

nonblocking transfer of particles

parent d0e866f2
No related branches found
No related tags found
No related merge requests found
......@@ -105,20 +105,15 @@ void Foam::Cloud<ParticleType>::move(TrackingData& td)
const globalMeshData& pData = polyMesh_.globalData();
const labelList& processorPatches = pData.processorPatches();
const labelList& processorPatchIndices = pData.processorPatchIndices();
const labelList& processorPatchNeighbours =
pData.processorPatchNeighbours();
// Initialise the setpFraction moved for the particles
// Initialise the stepFraction moved for the particles
forAllIter(typename Cloud<ParticleType>, *this, pIter)
{
pIter().stepFraction() = 0;
}
// Assume there will be particles to transfer
bool transfered = true;
// While there are particles to transfer
while (transfered)
while (true)
{
// List of lists of particles to be transfered for all the processor
// patches
......@@ -158,105 +153,94 @@ void Foam::Cloud<ParticleType>::move(TrackingData& td)
}
}
if (Pstream::parRun())
if (!Pstream::parRun())
{
// List of the numbers of particles to be transfered across the
// processor patches
labelList nsTransPs(transferList.size());
forAll(transferList, i)
{
nsTransPs[i] = transferList[i].size();
}
break;
}
// List of the numbers of particles to be transfered across the
// processor patches for all the processors
labelListList allNTrans(Pstream::nProcs());
allNTrans[Pstream::myProcNo()] = nsTransPs;
combineReduce(allNTrans, UPstream::listEq());
transfered = false;
// Allocate transfer buffers
PstreamBuffers pBufs(Pstream::nonBlocking);
forAll(allNTrans, i)
// Stream into send buffers
forAll(transferList, i)
{
if (transferList[i].size())
{
forAll(allNTrans[i], j)
{
if (allNTrans[i][j])
{
transfered = true;
break;
}
}
}
UOPstream particleStream
(
refCast<const processorPolyPatch>
(
pMesh().boundaryMesh()[processorPatches[i]]
).neighbProcNo(),
pBufs
);
if (!transfered)
{
break;
particleStream << transferList[i];
}
}
// Set up transfers when in non-blocking mode. Returns sizes (in bytes)
// to be sent/received.
labelListList allNTrans(Pstream::nProcs());
pBufs.finishedSends(allNTrans);
forAll(transferList, i)
bool transfered = false;
forAll(allNTrans, i)
{
forAll(allNTrans[i], j)
{
if (transferList[i].size())
if (allNTrans[i][j])
{
OPstream particleStream
(
Pstream::blocking,
refCast<const processorPolyPatch>
(
pMesh().boundaryMesh()[processorPatches[i]]
).neighbProcNo()
);
particleStream << transferList[i];
transfered = true;
break;
}
}
}
forAll(processorPatches, i)
{
label patchi = processorPatches[i];
if (!transfered)
{
break;
}
const processorPolyPatch& procPatch =
refCast<const processorPolyPatch>
(pMesh().boundaryMesh()[patchi]);
label neighbProci =
procPatch.neighbProcNo() - Pstream::masterNo();
// Retrieve from receive buffers
forAll(processorPatches, i)
{
label patchi = processorPatches[i];
label neighbProcPatchi = processorPatchNeighbours[patchi];
const processorPolyPatch& procPatch =
refCast<const processorPolyPatch>
(pMesh().boundaryMesh()[patchi]);
label nRecPs = allNTrans[neighbProci][neighbProcPatchi];
label neighbProci = procPatch.neighbProcNo();
if (nRecPs)
{
IPstream particleStream
(
Pstream::blocking,
procPatch.neighbProcNo()
);
IDLList<ParticleType> newParticles
(
particleStream,
typename ParticleType::iNew(*this)
);
label nRecPs = allNTrans[neighbProci][Pstream::myProcNo()];
forAllIter
(
typename Cloud<ParticleType>,
newParticles,
newpIter
)
{
ParticleType& newp = newpIter();
newp.correctAfterParallelTransfer(patchi, td);
addParticle(newParticles.remove(&newp));
}
if (nRecPs)
{
UIPstream particleStream(neighbProci, pBufs);
IDLList<ParticleType> newParticles
(
particleStream,
typename ParticleType::iNew(*this)
);
forAllIter
(
typename Cloud<ParticleType>,
newParticles,
newpIter
)
{
ParticleType& newp = newpIter();
newp.correctAfterParallelTransfer(patchi, td);
addParticle(newParticles.remove(&newp));
}
}
}
else
{
transfered = false;
}
}
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment