From 62b980f2034655f372e120a851ef75035ac0d7fd Mon Sep 17 00:00:00 2001
From: graham <g.macpherson@opencfd.co.uk>
Date: Tue, 1 Jun 2010 16:09:08 +0100
Subject: [PATCH] BUG: Fixing multiple processor patch problems in parallel
 transfer of particles.

---
 .../ProcessorTopology/ProcessorTopology.C | 30 +++++--
 .../ProcessorTopology/ProcessorTopology.H |  3 +
 src/lagrangian/basic/Cloud/Cloud.C        | 89 +++++++++++++------
 3 files changed, 87 insertions(+), 35 deletions(-)

diff --git a/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C b/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C
index 85736f52e51..9aebf1d43d5 100644
--- a/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C
+++ b/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C
@@ -27,6 +27,7 @@ License
 #include "ListOps.H"
 #include "Pstream.H"
 #include "commSchedule.H"
+#include "boolList.H"
 
 // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
 
@@ -42,6 +43,8 @@ Foam::labelList Foam::ProcessorTopology<Patch, ProcPatch>::procNeighbours
 
     label maxNb = 0;
 
+    boolList isNeighbourProc(Pstream::nProcs(), false);
+
     forAll(patches, patchi)
     {
         const Patch& patch = patches[patchi];
@@ -51,19 +54,34 @@ Foam::labelList Foam::ProcessorTopology<Patch, ProcPatch>::procNeighbours
 
             const ProcPatch& procPatch = refCast<const ProcPatch>(patch);
 
-            nNeighbours++;
+            label pNeighbProcNo = procPatch.neighbProcNo();
+
+            if (!isNeighbourProc[pNeighbProcNo])
+            {
+                nNeighbours++;
+
+                maxNb = max(maxNb, procPatch.neighbProcNo());
 
-            maxNb = max(maxNb, procPatch.neighbProcNo());
+                isNeighbourProc[pNeighbProcNo] = true;
+            }
         }
     }
 
-    labelList neighbours(nNeighbours);
+    labelList neighbours(nNeighbours, -1);
+
+    nNeighbours = 0;
+
+    forAll(isNeighbourProc, procI)
+    {
+        if (isNeighbourProc[procI])
+        {
+            neighbours[nNeighbours++] = procI;
+        }
+    }
 
     procPatchMap_.setSize(maxNb + 1);
     procPatchMap_ = -1;
 
-    nNeighbours = 0;
-
     forAll(patches, patchi)
     {
         const Patch& patch = patches[patchi];
@@ -73,8 +91,6 @@ Foam::labelList Foam::ProcessorTopology<Patch, ProcPatch>::procNeighbours
 
             const ProcPatch& procPatch = refCast<const ProcPatch>(patch);
 
-            neighbours[nNeighbours++] = procPatch.neighbProcNo();
-
             // Construct reverse map
             procPatchMap_[procPatch.neighbProcNo()] = patchi;
         }
diff --git a/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.H b/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.H
index 2bb37934c66..d50ba659c45 100644
--- a/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.H
+++ b/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.H
@@ -30,6 +30,9 @@ Description
 
     *this[procI] gives the list of neighbouring processors.
 
+    TODO: This does not currently correctly support multiple processor
+    patches connecting two processors.
+
 SourceFiles
     ProcessorTopology.C
 
diff --git a/src/lagrangian/basic/Cloud/Cloud.C b/src/lagrangian/basic/Cloud/Cloud.C
index ceaf1c44d4f..95ae4d798a4 100644
--- a/src/lagrangian/basic/Cloud/Cloud.C
+++ b/src/lagrangian/basic/Cloud/Cloud.C
@@ -102,8 +102,18 @@ template<class TrackingData>
 void Foam::Cloud<ParticleType>::move(TrackingData& td)
 {
     const globalMeshData& pData = polyMesh_.globalData();
-    const labelList& processorPatches = pData.processorPatches();
-    const labelList& processorPatchIndices = pData.processorPatchIndices();
+    const labelList& neighbourProcs = pData[Pstream::myProcNo()];
+    const labelList& procPatches = pData.processorPatches();
+    const labelList& procPatchIndices = pData.processorPatchIndices();
+    const labelList& procPatchNeighbours = pData.processorPatchNeighbours();
+    const polyBoundaryMesh& pbm = pMesh().boundaryMesh();
+
+    labelList neighbourProcIndices(Pstream::nProcs(), -1);
+
+    forAll(neighbourProcs, i)
+    {
+        neighbourProcIndices[neighbourProcs[i]] = i;
+    }
 
     // Initialise the stepFraction moved for the particles
     forAllIter(typename Cloud<ParticleType>, *this, pIter)
@@ -114,9 +124,19 @@ void Foam::Cloud<ParticleType>::move(TrackingData& td)
     // While there are particles to transfer
     while (true)
     {
-        // List of lists of particles to be transfered for all the processor
-        // patches
-        List<IDLList<ParticleType> > transferList(processorPatches.size());
+        // List of lists of particles to be transfered for all of the
+        // neighbour processors
+        List<IDLList<ParticleType> > particleTransferLists
+        (
+            neighbourProcs.size()
+        );
+
+        // List of destination processorPatches indices for all of the
+        // neighbour processors
+        List<DynamicList<label> > patchIndexTransferLists
+        (
+            neighbourProcs.size()
+        );
 
         // Loop over all particles
         forAllIter(typename Cloud<ParticleType>, *this, pIter)
@@ -134,15 +154,28 @@ void Foam::Cloud<ParticleType>::move(TrackingData& td)
                 // boundary face
                 if (Pstream::parRun() && p.facei_ >= pMesh().nInternalFaces())
                 {
-                    label patchi = pMesh().boundaryMesh().whichPatch(p.facei_);
-                    label n = processorPatchIndices[patchi];
+                    label patchi = pbm.whichPatch(p.facei_);
 
                     // ... and the face is on a processor patch
                     // prepare it for transfer
-                    if (n != -1)
+                    if (procPatchIndices[patchi] != -1)
                     {
+                        label n = neighbourProcIndices
+                        [
+                            refCast<const processorPolyPatch>
+                            (
+                                pbm[patchi]
+                            ).neighbProcNo()
+                        ];
+
                         p.prepareForParallelTransfer(patchi, td);
-                        transferList[n].append(this->remove(&p));
+
+                        particleTransferLists[n].append(this->remove(&p));
+
+                        patchIndexTransferLists[n].append
+                        (
+                            procPatchNeighbours[patchi]
+                        );
                     }
                 }
             }
@@ -157,31 +190,30 @@ void Foam::Cloud<ParticleType>::move(TrackingData& td)
             break;
         }
 
-        // Allocate transfer buffers
         PstreamBuffers pBufs(Pstream::nonBlocking);
 
         // Stream into send buffers
-        forAll(transferList, i)
+        forAll(particleTransferLists, i)
         {
-            if (transferList[i].size())
+            if (particleTransferLists[i].size())
            {
                 UOPstream particleStream
                 (
-                    refCast<const processorPolyPatch>
-                    (
-                        pMesh().boundaryMesh()[processorPatches[i]]
-                    ).neighbProcNo(),
+                    neighbourProcs[i],
                     pBufs
                 );
 
-                particleStream << transferList[i];
+                particleStream
+                    << labelList(patchIndexTransferLists[i])
+                    << particleTransferLists[i];
             }
         }
 
         // Set up transfers when in non-blocking mode. Returns sizes (in bytes)
         // to be sent/received.
         labelListList allNTrans(Pstream::nProcs());
+
         pBufs.finishedSends(allNTrans);
 
         bool transfered = false;
 
@@ -203,34 +235,35 @@ void Foam::Cloud<ParticleType>::move(TrackingData& td)
             break;
         }
 
-        // Retrieve from receive buffers
-        forAll(processorPatches, i)
+        forAll(neighbourProcs, i)
         {
-            label patchi = processorPatches[i];
-
-            const processorPolyPatch& procPatch =
-                refCast<const processorPolyPatch>
-                (pMesh().boundaryMesh()[patchi]);
+            label neighbProci = neighbourProcs[i];
 
-            label neighbProci = procPatch.neighbProcNo();
+            label nRec = allNTrans[neighbProci][Pstream::myProcNo()];
 
-            label nRecPs = allNTrans[neighbProci][Pstream::myProcNo()];
-
-            if (nRecPs)
+            if (nRec)
             {
                 UIPstream particleStream(neighbProci, pBufs);
 
+                labelList receivePatchIndex(particleStream);
+
                 IDLList<ParticleType> newParticles
                 (
                     particleStream,
                     typename ParticleType::iNew(*this)
                 );
 
+                label pI = 0;
+
                 forAllIter(typename Cloud<ParticleType>, newParticles, newpIter)
                 {
                     ParticleType& newp = newpIter();
+
+                    label patchi = procPatches[receivePatchIndex[pI++]];
+
                     newp.correctAfterParallelTransfer(patchi, td);
+
                     addParticle(newParticles.remove(&newp));
                }
            }
        }
-- 
GitLab
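
For reference, the ProcessorTopology change amounts to building a duplicate-free, sorted list of neighbouring ranks instead of appending one entry per processor patch, so two patches that connect the same pair of processors no longer produce two neighbour entries. Below is a minimal standalone sketch of that flag-then-compact idea, using std::vector in place of OpenFOAM's boolList/labelList; the function and variable names here are illustrative only, not part of the library.

#include <cstdio>
#include <vector>

// Given one entry per processor patch (the neighbouring rank it connects to),
// return each neighbouring rank exactly once, in ascending order -- the same
// flag-then-compact scheme the patch adds to procNeighbours().
std::vector<int> uniqueNeighbours
(
    const std::vector<int>& patchNeighbProcNo,
    int nProcs
)
{
    std::vector<bool> isNeighbourProc(nProcs, false);

    for (int nbr : patchNeighbProcNo)
    {
        isNeighbourProc[nbr] = true;   // duplicates collapse onto one flag
    }

    std::vector<int> neighbours;

    for (int proci = 0; proci < nProcs; ++proci)
    {
        if (isNeighbourProc[proci])
        {
            neighbours.push_back(proci);
        }
    }

    return neighbours;
}

int main()
{
    // Two processor patches both connect this rank to rank 2.
    const std::vector<int> patchNeighbProcNo{2, 1, 2};

    for (int nbr : uniqueNeighbours(patchNeighbProcNo, 4))
    {
        std::printf("neighbour rank %d\n", nbr);   // prints 1, then 2
    }

    return 0;
}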
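
On the Cloud::move() side, the fix buckets departing particles by destination processor rather than by processor patch, and streams the index of the destination patch alongside each particle so the receiver can still hand correctAfterParallelTransfer() the correct patch. The following self-contained toy sketch shows that pairing, with plain structs and std::vector standing in for IDLList, PstreamBuffers and the UOPstream/UIPstream streams; all names here are illustrative, not OpenFOAM API.

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy stand-ins for a particle and for the per-rank "message" the patch
// builds: the particles plus, in the same order, the index of the processor
// patch each one left through, so the receiver can pair them back up.
struct Particle { double x; };

struct TransferBuffer
{
    std::vector<Particle> particles;
    std::vector<int>      patchIndices;   // destination-side patch per particle
};

int main()
{
    const int nNeighbourProcs = 2;
    std::vector<TransferBuffer> sendBuffers(nNeighbourProcs);

    // "Departing" particles: (neighbour proc slot, destination patch index).
    // Two different patches may lead to the same neighbour processor.
    const struct { int proc; int patch; double x; } departing[] =
    {
        {0, 3, 0.1}, {0, 5, 0.2}, {1, 4, 0.3}
    };

    for (const auto& d : departing)
    {
        sendBuffers[d.proc].particles.push_back({d.x});
        sendBuffers[d.proc].patchIndices.push_back(d.patch);
    }

    // Receive side: walk particles and patch indices in lock-step, in the
    // same spirit as the receivePatchIndex/pI++ pairing in Cloud::move().
    for (int proc = 0; proc < nNeighbourProcs; ++proc)
    {
        const TransferBuffer& buf = sendBuffers[proc];

        for (std::size_t i = 0; i < buf.particles.size(); ++i)
        {
            std::printf
            (
                "from neighbour %d: particle x=%g enters via patch %d\n",
                proc, buf.particles[i].x, buf.patchIndices[i]
            );
        }
    }

    return 0;
}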