From 2b54d86152c5ff9837a6b1b4dc8d253e982f7710 Mon Sep 17 00:00:00 2001
From: Mark Olesen <Mark.Olesen@esi-group.com>
Date: Wed, 2 Mar 2022 14:00:29 +0100
Subject: [PATCH] ENH: improve processor topology handling in zoneDistribute
 (#2371)

- processor topology handling is now largely encapsulated in
  PstreamBuffers methods, which makes it simpler to centralize and
  maintain (see the usage sketch below)

- avoid building intermediate structures when sending data,
  remove unused methods/data

TUT: parallel version of depthCharge2D

STYLE: minor update in ProcessorTopology
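
Illustrative usage (not part of the change itself): a minimal sketch of the
cached-topology exchange that zoneDistribute now delegates to PstreamBuffers.
The sendData/recvData payload containers are hypothetical placeholders; the
PstreamBuffers/UOPstream/UIPstream calls are the ones added or used by this
patch, and the connectivity objects correspond to the sendConnections_,
sendProcs_ and recvProcs_ members of zoneDistribute.

    // Cached connectivity; in zoneDistribute these persist as member data
    bitSet sendConnections;
    DynamicList<label> sendProcs;
    DynamicList<label> recvProcs;

    // Hypothetical per-rank payloads, for the sketch only
    List<labelList> sendData(UPstream::nProcs());
    List<labelList> recvData(UPstream::nProcs());

    PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);

    // Stream the per-rank send data into the buffers
    for (const int proci : UPstream::allProcs())
    {
        if (proci != UPstream::myProcNo() && !sendData[proci].empty())
        {
            UOPstream toProc(proci, pBufs);
            toProc << sendData[proci];
        }
    }

    // Single call: redo the all-to-all sizing only if the send/recv
    // connectivity changed, otherwise reuse the cached ranks
    pBufs.finishedSends(sendConnections, sendProcs, recvProcs);

    // Collect whatever arrived
    for (const int proci : pBufs.allProcs())
    {
        if (proci != UPstream::myProcNo() && pBufs.hasRecvData(proci))
        {
            UIPstream fromProc(proci, pBufs);
            fromProc >> recvData[proci];
        }
    }
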
---
 src/OpenFOAM/Make/files                       |   3 +-
 .../db/IOstreams/Pstreams/PstreamBuffers.C    |  69 ++++++++++
 .../db/IOstreams/Pstreams/PstreamBuffers.H    |  22 +++
 .../ProcessorTopology/ProcessorTopology.C     |  53 +++----
 .../ProcessorTopology/ProcessorTopology.H     |  33 +++--
 .../commSchedule}/commSchedule.C              |   9 +-
 .../commSchedule}/commSchedule.H              |   0
 .../fvMesh/zoneDistribute/zoneDistribute.C    | 129 ++----------------
 .../fvMesh/zoneDistribute/zoneDistribute.H    |  61 +++++----
 .../fvMesh/zoneDistribute/zoneDistributeI.H   |  92 ++++---------
 .../reconstructedDistanceFunction.C           |   6 +-
 .../plicSchemes/gradAlpha/gradAlpha.C         |  10 +-
 .../plicSchemes/gradAlpha/gradAlpha.H         |   4 +-
 .../plicSchemes/plicRDF/plicRDF.C             |   9 +-
 .../plicSchemes/plicRDF/plicRDF.H             |   1 -
 .../laminar/depthCharge2D/Allrun-parallel     |  18 +++
 .../depthCharge2D/system/decomposeParDict     |  27 ++++
 17 files changed, 262 insertions(+), 284 deletions(-)
 rename src/OpenFOAM/{meshes/ProcessorTopology => parallel/commSchedule}/commSchedule.C (97%)
 rename src/OpenFOAM/{meshes/ProcessorTopology => parallel/commSchedule}/commSchedule.H (100%)
 create mode 100755 tutorials/multiphase/compressibleInterIsoFoam/laminar/depthCharge2D/Allrun-parallel
 create mode 100644 tutorials/multiphase/compressibleInterIsoFoam/laminar/depthCharge2D/system/decomposeParDict

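Note (illustrative only, not applied by this patch): downstream, the
geometricVoF reconstruction schemes touched here use zoneDistribute in
roughly the following pattern. setUpCommforZone() establishes or reuses the
cached send/recv topology, and getDatafromOtherProc() then exchanges zone
values over that limited connectivity. The names exchangeFields, mesh_,
interfaceCell_ and alpha1_ are taken from the gradAlpha/plicRDF code in this
patch; mapAlpha is a hypothetical local.

    // Registered per-mesh instance
    zoneDistribute& exchangeFields = zoneDistribute::New(mesh_);

    // Set up (or update) the communication for the interface zone
    exchangeFields.setUpCommforZone(interfaceCell_, true);

    // Stencil values from other processors, keyed by global cell index
    Map<scalar> mapAlpha
    (
        exchangeFields.getDatafromOtherProc(interfaceCell_, alpha1_)
    );
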
diff --git a/src/OpenFOAM/Make/files b/src/OpenFOAM/Make/files
index 3dddd831904..6d5f7933164 100644
--- a/src/OpenFOAM/Make/files
+++ b/src/OpenFOAM/Make/files
@@ -592,8 +592,6 @@ polyBoundaryMesh = $(polyMesh)/polyBoundaryMesh
 $(polyBoundaryMesh)/polyBoundaryMesh.C
 $(polyBoundaryMesh)/polyBoundaryMeshEntries.C
 
-meshes/ProcessorTopology/commSchedule.C
-
 globalMeshData = $(polyMesh)/globalMeshData
 $(globalMeshData)/globalMeshData.C
 $(globalMeshData)/globalPoints.C
@@ -812,6 +810,7 @@ algorithms/indexedOctree/volumeType.C
 algorithms/dynamicIndexedOctree/dynamicIndexedOctreeName.C
 algorithms/dynamicIndexedOctree/dynamicTreeDataPoint.C
 
+parallel/commSchedule/commSchedule.C
 parallel/globalIndex/globalIndex.C
 
 meshes/data/data.C
diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBuffers.C b/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBuffers.C
index 07e7eccc49e..d7e4c3d87ed 100644
--- a/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBuffers.C
+++ b/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBuffers.C
@@ -27,6 +27,7 @@ License
 \*---------------------------------------------------------------------------*/
 
 #include "PstreamBuffers.H"
+#include "bitSet.H"
 
 // * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * * //
 
@@ -306,4 +307,72 @@ void Foam::PstreamBuffers::finishedSends
 }
 
 
+bool Foam::PstreamBuffers::finishedSends
+(
+    bitSet& sendConnections,
+    DynamicList<label>& sendProcs,
+    DynamicList<label>& recvProcs,
+    const bool wait
+)
+{
+    bool changed = (sendConnections.size() != nProcs());
+
+    if (changed)
+    {
+        sendConnections.resize(nProcs());
+    }
+
+    // Update send connections
+    // - reasonable to assume there are no self-sends on UPstream::myProcNo
+    forAll(sendBuf_, proci)
+    {
+        // ie, hasSendData(proci)
+        if (sendConnections.set(proci, !sendBuf_[proci].empty()))
+        {
+            // The state changed
+            changed = true;
+        }
+    }
+
+    reduce(changed, orOp<bool>());
+
+    if (changed)
+    {
+        // Create send/recv topology
+
+        // The send ranks
+        sendProcs.clear();
+        forAll(sendBuf_, proci)
+        {
+            // ie, hasSendData(proci)
+            if (!sendBuf_[proci].empty())
+            {
+                sendProcs.append(proci);
+            }
+        }
+
+        finishedSends(wait);  // All-to-all
+
+        // The recv ranks
+        recvProcs.clear();
+        forAll(recvBuf_, proci)
+        {
+            // ie, hasRecvData(proci)
+            if (!recvBuf_[proci].empty())
+            {
+                recvProcs.append(proci);
+            }
+        }
+    }
+    else
+    {
+        // Use existing send/recv ranks
+
+        finishedSends(sendProcs, recvProcs, wait);
+    }
+
+    return changed;
+}
+
+
 // ************************************************************************* //
diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBuffers.H b/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBuffers.H
index 2fec28c518f..81666658e6a 100644
--- a/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBuffers.H
+++ b/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBuffers.H
@@ -82,6 +82,9 @@ SourceFiles
 namespace Foam
 {
 
+// Forward Declarations
+class bitSet;
+
 /*---------------------------------------------------------------------------*\
                        Class PstreamBuffers Declaration
 \*---------------------------------------------------------------------------*/
@@ -300,6 +303,25 @@ public:
             const bool wait = true
         );
 
+        //- A caching version that uses a limited send/recv connectivity.
+        //
+        //  Non-blocking mode: populates receive buffers.
+        //  \param sendConnections on/off for sending ranks
+        //  \param sendProcs ranks used for sends
+        //  \param recvProcs ranks used for recvs
+        //  \param wait wait for requests to complete (in nonBlocking mode)
+        //
+        //  \return True if the send/recv connectivity changed
+        //
+        //  \warning currently only valid for nonBlocking comms.
+        bool finishedSends
+        (
+            bitSet& sendConnections,
+            DynamicList<label>& sendProcs,
+            DynamicList<label>& recvProcs,
+            const bool wait = true
+        );
+
         //- Mark sends as done using subset of send/recv ranks
         //- and recover the sizes (bytes) received.
         //
diff --git a/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C b/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C
index 2a5877d5142..1343bd05d41 100644
--- a/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C
+++ b/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C
@@ -29,7 +29,6 @@ License
 #include "ListOps.H"
 #include "Pstream.H"
 #include "commSchedule.H"
-#include "boolList.H"
 
 // * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * * //
 
@@ -50,22 +49,16 @@ Foam::labelList Foam::ProcessorTopology<Container, ProcPatch>::procNeighbours
 
     forAll(patches, patchi)
     {
-        const typename Container::const_reference patch = patches[patchi];
-
-        if (isA<ProcPatch>(patch))
+        const auto* cpp = isA<ProcPatch>(patches[patchi]);
+        if (cpp)
         {
-            const ProcPatch& procPatch =
-                refCast<const ProcPatch>(patch);
-
-            label pNeighbProcNo = procPatch.neighbProcNo();
+            const label nbrProci = cpp->neighbProcNo();
 
-            if (!isNeighbourProc[pNeighbProcNo])
+            if (!isNeighbourProc[nbrProci])
             {
-                nNeighbours++;
-
-                maxNb = max(maxNb, procPatch.neighbProcNo());
-
-                isNeighbourProc[pNeighbProcNo] = true;
+                isNeighbourProc[nbrProci] = true;
+                maxNb = max(maxNb, nbrProci);
+                ++nNeighbours;
             }
         }
     }
@@ -87,15 +80,13 @@ Foam::labelList Foam::ProcessorTopology<Container, ProcPatch>::procNeighbours
 
     forAll(patches, patchi)
     {
-        const typename Container::const_reference patch = patches[patchi];
-
-        if (isA<ProcPatch>(patch))
+        const auto* cpp = isA<ProcPatch>(patches[patchi]);
+        if (cpp)
         {
-            const ProcPatch& procPatch =
-                refCast<const ProcPatch>(patch);
+            const label nbrProci = cpp->neighbProcNo();
 
-            // Construct reverse map
-            procPatchMap_[procPatch.neighbProcNo()] = patchi;
+            // Reverse map
+            procPatchMap_[nbrProci] = patchi;
         }
     }
 
@@ -113,7 +104,7 @@ Foam::ProcessorTopology<Container, ProcPatch>::ProcessorTopology
 )
 :
     labelListList(Pstream::nProcs(comm)),
-    patchSchedule_(2*patches.size())
+    patchSchedule_()
 {
     if (Pstream::parRun())
     {
@@ -132,6 +123,8 @@ Foam::ProcessorTopology<Container, ProcPatch>::ProcessorTopology
      && Pstream::defaultCommsType == Pstream::commsTypes::scheduled
     )
     {
+        patchSchedule_.resize(2*patches.size());
+
         label patchEvali = 0;
 
         // 1. All non-processor patches
@@ -155,21 +148,21 @@ Foam::ProcessorTopology<Container, ProcPatch>::ProcessorTopology
         // to determine the schedule. Each processor pair stands for both
         // send and receive.
         label nComms = 0;
-        forAll(*this, proci)
+        for (const labelList& neighbours : *this)
         {
-            nComms += operator[](proci).size();
+            nComms += neighbours.size();
         }
         DynamicList<labelPair> comms(nComms);
 
         forAll(*this, proci)
         {
-            const labelList& nbrs = operator[](proci);
+            const labelList& neighbours = operator[](proci);
 
-            forAll(nbrs, i)
+            for (const label nbrProci : neighbours)
             {
-                if (proci < nbrs[i])
+                if (proci < nbrProci)
                 {
-                    comms.append(labelPair(proci, nbrs[i]));
+                    comms.append(labelPair(proci, nbrProci));
                 }
             }
         }
@@ -185,10 +178,8 @@ Foam::ProcessorTopology<Container, ProcPatch>::ProcessorTopology
             ).procSchedule()[Pstream::myProcNo(comm)]
         );
 
-        forAll(mySchedule, iter)
+        for (const label commI : mySchedule)
         {
-            label commI = mySchedule[iter];
-
             // Get the other processor
             label nb = comms[commI][0];
             if (nb == Pstream::myProcNo(comm))
diff --git a/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.H b/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.H
index 976d5dc8735..3dbcff1777c 100644
--- a/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.H
+++ b/src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.H
@@ -40,8 +40,8 @@ SourceFiles
 
 \*---------------------------------------------------------------------------*/
 
-#ifndef ProcessorTopology_H
-#define ProcessorTopology_H
+#ifndef Foam_ProcessorTopology_H
+#define Foam_ProcessorTopology_H
 
 #include "labelList.H"
 #include "lduSchedule.H"
@@ -60,10 +60,7 @@ class ProcessorTopology
 :
     public labelListList
 {
-
-private:
-
-    // Private data
+    // Private Data
 
         //- Local map from neighbour proc to patchi. Different per processor!
         //  -1 or patchi for connection to procID
@@ -76,8 +73,8 @@ private:
 
     // Private Member Functions
 
-        //- Return all neighbouring processors of this processor. Set
-        //  procPatchMap_.
+        //- Return all neighbouring processors of this processor.
+        //  Sets procPatchMap_.
         labelList procNeighbours(const label nProcs, const Container&);
 
 public:
@@ -88,25 +85,27 @@ public:
         ProcessorTopology(const Container& patches, const label comm);
 
 
+    // Static Functions
+
+        //- Calculate non-blocking (i.e. unscheduled) schedule
+        static lduSchedule nonBlockingSchedule(const Container& patches);
+
+
     // Member Functions
 
-        //- From neighbour processor to index in boundaryMesh. Local information
-        //  (so not same over all processors)
-        const labelList& procPatchMap() const
+        //- From neighbour processor to index in boundaryMesh.
+        //  Local information (so not same over all processors)
+        const labelList& procPatchMap() const noexcept
         {
             return procPatchMap_;
         }
 
         //- Order in which the patches should be initialised/evaluated
-        //  corresponding to the schedule
-        const lduSchedule& patchSchedule() const
+        //- corresponding to the schedule
+        const lduSchedule& patchSchedule() const noexcept
         {
             return patchSchedule_;
         }
-
-        //- Calculate non-blocking (i.e. unscheduled) schedule
-        static lduSchedule nonBlockingSchedule(const Container& patches);
-
 };
 
 
diff --git a/src/OpenFOAM/meshes/ProcessorTopology/commSchedule.C b/src/OpenFOAM/parallel/commSchedule/commSchedule.C
similarity index 97%
rename from src/OpenFOAM/meshes/ProcessorTopology/commSchedule.C
rename to src/OpenFOAM/parallel/commSchedule/commSchedule.C
index 0f2bb2e65d5..6bb6d5949bf 100644
--- a/src/OpenFOAM/meshes/ProcessorTopology/commSchedule.C
+++ b/src/OpenFOAM/parallel/commSchedule/commSchedule.C
@@ -45,7 +45,6 @@ namespace Foam
 
 namespace Foam
 {
-// Private Member Functions
 
 // Count the number of outstanding communications for a single processor
 static label outstandingComms
@@ -99,7 +98,7 @@ Foam::commSchedule::commSchedule
     }
     // Note: no need to shrink procToComms. Are small.
 
-    if (debug && Pstream::master())
+    if (debug && UPstream::master())
     {
         Pout<< "commSchedule : Wanted communication:" << endl;
 
@@ -198,7 +197,7 @@ Foam::commSchedule::commSchedule
             busy[comms[maxCommI][1]] = true;
         }
 
-        if (debug && Pstream::master())
+        if (debug && UPstream::master())
         {
             label nIterComms = nScheduled-oldNScheduled;
 
@@ -240,7 +239,7 @@ Foam::commSchedule::commSchedule
         iter++;
     }
 
-    if (debug && Pstream::master())
+    if (debug && UPstream::master())
     {
         Pout<< endl;
     }
@@ -282,7 +281,7 @@ Foam::commSchedule::commSchedule
         procSchedule_[proc1][nProcScheduled[proc1]++] = commI;
     }
 
-    if (debug && Pstream::master())
+    if (debug && UPstream::master())
     {
         Pout<< "commSchedule::commSchedule : Per processor:" << endl;
 
diff --git a/src/OpenFOAM/meshes/ProcessorTopology/commSchedule.H b/src/OpenFOAM/parallel/commSchedule/commSchedule.H
similarity index 100%
rename from src/OpenFOAM/meshes/ProcessorTopology/commSchedule.H
rename to src/OpenFOAM/parallel/commSchedule/commSchedule.H
diff --git a/src/finiteVolume/fvMesh/zoneDistribute/zoneDistribute.C b/src/finiteVolume/fvMesh/zoneDistribute/zoneDistribute.C
index bd600b1fb5b..2720b020484 100644
--- a/src/finiteVolume/fvMesh/zoneDistribute/zoneDistribute.C
+++ b/src/finiteVolume/fvMesh/zoneDistribute/zoneDistribute.C
@@ -27,13 +27,6 @@ License
 \*---------------------------------------------------------------------------*/
 
 #include "zoneDistribute.H"
-#include "dummyTransform.H"
-#include "emptyPolyPatch.H"
-#include "processorPolyPatch.H"
-#include "syncTools.H"
-#include "wedgePolyPatch.H"
-
-#include "globalPoints.H"
 
 // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
 
@@ -43,61 +36,14 @@ namespace Foam
 }
 
 
-// * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * * //
-
-Foam::autoPtr<Foam::indirectPrimitivePatch>
-Foam::zoneDistribute::coupledFacesPatch() const
-{
-    const polyBoundaryMesh& patches = mesh_.boundaryMesh();
-
-    label nCoupled = 0;
-
-    for (const polyPatch& pp : patches)
-    {
-        if (isA<processorPolyPatch>(pp))
-        {
-            nCoupled += pp.size();
-        }
-    }
-    labelList coupledFaces(nCoupled);
-    nCoupled = 0;
-
-    for (const polyPatch& pp : patches)
-    {
-        if (isA<processorPolyPatch>(pp))
-        {
-            label facei = pp.start();
-
-            forAll(pp, i)
-            {
-                coupledFaces[nCoupled++] = facei++;
-            }
-        }
-    }
-
-    return autoPtr<indirectPrimitivePatch>::New
-    (
-        IndirectList<face>
-        (
-            mesh_.faces(),
-            coupledFaces
-        ),
-        mesh_.points()
-    );
-}
-
-
 // * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //
 
 Foam::zoneDistribute::zoneDistribute(const fvMesh& mesh)
 :
     MeshObject<fvMesh, Foam::TopologicalMeshObject, zoneDistribute>(mesh),
-    coupledBoundaryPoints_(coupledFacesPatch()().meshPoints()),
-    send_(UPstream::nProcs()),
     stencil_(zoneCPCStencil::New(mesh)),
     globalNumbering_(stencil_.globalNumbering()),
-    sendTo_(),   // Initial zero-sized
-    recvFrom_()  // Initial zero-sized
+    send_(UPstream::nProcs())
 {}
 
 
@@ -140,20 +86,10 @@ void Foam::zoneDistribute::setUpCommforZone
 
     if (UPstream::parRun())
     {
-        if (sendTo_.empty())
-        {
-            // First time
-            sendTo_.resize(UPstream::nProcs());
-            recvFrom_.resize(UPstream::nProcs());
-            sendTo_ = false;
-            recvFrom_ = false;
-        }
-
-        const labelHashSet& comms = stencil.needsComm();
-
         List<labelHashSet> needed(UPstream::nProcs());
 
-        for (const label celli : comms)
+        // Bin according to originating (sending) processor
+        for (const label celli : stencil.needsComm())
         {
             if (zone[celli])
             {
@@ -169,73 +105,24 @@ void Foam::zoneDistribute::setUpCommforZone
             }
         }
 
+        // Stream the send data into PstreamBuffers,
+        // which we also use to track the current topology.
 
         PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);
 
-        // Stream data into buffer
         for (const int proci : UPstream::allProcs())
         {
             if (proci != UPstream::myProcNo() && !needed[proci].empty())
             {
-                // Put data into send buffer
+                // Serialize as List
                 UOPstream toProc(proci, pBufs);
-
                 toProc << needed[proci].sortedToc();
             }
         }
 
+        pBufs.finishedSends(sendConnections_, sendProcs_, recvProcs_);
 
-        // Need update, or use existing partial communication info?
-        bool fullUpdate = false;
-        for (const int proci : UPstream::allProcs())
-        {
-            if
-            (
-                proci != UPstream::myProcNo()
-             && (!sendTo_[proci] && !needed[proci].empty())
-            )
-            {
-                // Changed state sendTo_ from false -> true
-                sendTo_[proci] = true;
-                fullUpdate = true;
-            }
-        }
-
-
-        if (returnReduce(fullUpdate, orOp<bool>()))
-        {
-            pBufs.finishedSends();
-
-            // Update which ones receive
-            for (const int proci : UPstream::allProcs())
-            {
-                recvFrom_[proci] = pBufs.hasRecvData(proci);
-            }
-        }
-        else
-        {
-            // No change in senders...
-            // - can communicate with a subset of processors
-            DynamicList<label> sendProcs;
-            DynamicList<label> recvProcs;
-
-            for (const int proci : UPstream::allProcs())
-            {
-                if (sendTo_[proci])
-                {
-                    sendProcs.append(proci);
-                }
-                if (recvFrom_[proci])
-                {
-                    recvProcs.append(proci);
-                }
-            }
-
-            // Wait until everything is written
-            pBufs.finishedSends(sendProcs, recvProcs);
-        }
-
-        for (const int proci : UPstream::allProcs())
+        for (const int proci : pBufs.allProcs())
         {
             send_[proci].clear();
 
diff --git a/src/finiteVolume/fvMesh/zoneDistribute/zoneDistribute.H b/src/finiteVolume/fvMesh/zoneDistribute/zoneDistribute.H
index 0dbb7b2f3b3..4a58c8445f6 100644
--- a/src/finiteVolume/fvMesh/zoneDistribute/zoneDistribute.H
+++ b/src/finiteVolume/fvMesh/zoneDistribute/zoneDistribute.H
@@ -31,12 +31,28 @@ Description
     Class for parallel communication in a narrow band. It either provides a Map
     with the neighbouring values of the selected region or returns a Map of the
     required values in global addressing. Also holds a reference to the stencil
-    Before the data transfer the communation has to be set up:
+    Before the data transfer the communication has to be set up:
     exchangeFields_.setUpCommforZone(interfaceCell_);
     Is used in the plicRDF
 
     Original code supplied by Henning Scheufler, DLR (2019)
 
+    Additional optimization of processor communication
+    provided by Tetsuo AOYAGI, RIST (2022), to use a more compact
+    exchange of sizes with an updated version of PstreamBuffers.
+    This optimization caches the send/recv connectivity (the
+    sendConnections_/sendProcs_/recvProcs_ member data), acting like
+    an on-the-fly sub-communicator, and respects corner connectivity.
+
+    -# Initially topological connections are empty (or all false).
+    -# Scan the stencil global cellIds (active zones only) and split
+       into sub-lists according to the originating processor (the sender).
+    -# If an originating processor appears/disappears, need to update
+       the connectivity information (requires an all-to-all).
+    -# When possible, the topological send/recv is used in PstreamBuffers
+       finishedSends (minimizes communication).
+    .
+
 SourceFiles
     zoneDistributeI.H
     zoneDistribute.C
@@ -51,7 +67,6 @@ SourceFiles
 #include "volFields.H"
 
 #include "zoneCPCStencil.H"
-#include "IOobject.H"
 #include "MeshObject.H"
 
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@@ -69,47 +84,41 @@ class zoneDistribute
 {
     // Private Data
 
-        //- labels of the points on coupled patches
-        labelList coupledBoundaryPoints_;
-
-        //- Addressing for processor-to-processor comms
-        List<labelList> send_;
-
-        //- Return patch of all coupled faces.
-        autoPtr<indirectPrimitivePatch> coupledFacesPatch() const;
-
+        //- Reference to the zone stencil
         zoneCPCStencil& stencil_;
 
-        //- Global number into cells and faces
+        //- Global number into index of cells/faces
         const globalIndex& globalNumbering_;
 
-        //- Parallel: send data to processors (true/false)
-        //  updated in setUpCommforZone
-        boolList sendTo_;
+        //- Global cell/face index to send for processor-to-processor comms
+        List<labelList> send_;
 
-        //- Parallel: receive data from processors (true/false)
-        //  updated in setUpCommforZone
-        boolList recvFrom_;
+        //- Parallel [cache]: send connectivity (true/false)
+        bitSet sendConnections_;
+
+        //- Parallel [cache]: send data to these ranks
+        DynamicList<label> sendProcs_;
+
+        //- Parallel [cache]: recv data from these ranks
+        DynamicList<label> recvProcs_;
 
 
     // Private Member Functions
 
-        //- Gives patchNumber and patchFaceNumber for a given
-        //- Geometric volume field
+        //- Return local volField value at (cell or face) index
         template<typename Type>
         Type getLocalValue
         (
-            const GeometricField<Type, fvPatchField, volMesh>& phi,
+            const VolumeField<Type>& phi,
             const label localIdx
         ) const;
 
-
         //- Gives patchNumber and patchFaceNumber for a given
         //- Geometric volume field
         template<typename Type>
         Type faceValue
         (
-            const GeometricField<Type, fvPatchField, volMesh>& phi,
+            const VolumeField<Type>& phi,
             const label localIdx
         ) const;
 
@@ -158,7 +167,7 @@ public:
         template<typename Type>
         Type getValue
         (
-            const GeometricField<Type, fvPatchField, volMesh>& phi,
+            const VolumeField<Type>& phi,
             const Map<Type>& valuesFromOtherProc,
             const label gblIdx
         ) const;
@@ -168,7 +177,7 @@ public:
         Map<Field<Type>> getFields
         (
             const boolList& zone,
-            const GeometricField<Type, fvPatchField, volMesh>& phi
+            const VolumeField<Type>& phi
         );
 
         //- Returns stencil and provides a Map with globalNumbering
@@ -176,7 +185,7 @@ public:
         Map<Type> getDatafromOtherProc
         (
             const boolList& zone,
-            const GeometricField<Type, fvPatchField, volMesh>& phi
+            const VolumeField<Type>& phi
         );
 };
 
diff --git a/src/finiteVolume/fvMesh/zoneDistribute/zoneDistributeI.H b/src/finiteVolume/fvMesh/zoneDistribute/zoneDistributeI.H
index e709dde4bb2..91c70f31cf7 100644
--- a/src/finiteVolume/fvMesh/zoneDistribute/zoneDistributeI.H
+++ b/src/finiteVolume/fvMesh/zoneDistribute/zoneDistributeI.H
@@ -26,16 +26,14 @@ License
 
 \*---------------------------------------------------------------------------*/
 
-#include "zoneDistribute.H"
 #include "DynamicField.H"
-#include "syncTools.H"
 
 // * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * * //
 
 template<typename Type>
 Type Foam::zoneDistribute::getLocalValue
 (
-    const GeometricField<Type, fvPatchField, volMesh>& phi,
+    const VolumeField<Type>& phi,
     const label localIdx
 ) const
 {
@@ -51,7 +49,7 @@ Type Foam::zoneDistribute::getLocalValue
 template<typename Type>
 Type Foam::zoneDistribute::faceValue
 (
-    const GeometricField<Type, fvPatchField, volMesh>& phi,
+    const VolumeField<Type>& phi,
     const label localIdx
 ) const
 {
@@ -80,7 +78,7 @@ Type Foam::zoneDistribute::faceValue
 template<typename Type>
 Type Foam::zoneDistribute::getValue
 (
-    const GeometricField<Type, fvPatchField, volMesh>& phi,
+    const VolumeField<Type>& phi,
     const Map<Type>& valuesFromOtherProc,
     const label gblIdx
 ) const
@@ -90,8 +88,9 @@ Type Foam::zoneDistribute::getValue
         const label localIdx = globalNumbering_.toLocal(gblIdx);
         return getLocalValue(phi,localIdx);
     }
-    else // from other proc
+    else
     {
+        // From other proc
         return valuesFromOtherProc[gblIdx];
     }
 }
@@ -101,7 +100,7 @@ template<typename Type>
 Foam::Map<Foam::Field<Type>> Foam::zoneDistribute::getFields
 (
     const boolList& zone,
-    const GeometricField<Type, fvPatchField, volMesh>& phi
+    const VolumeField<Type>& phi
 )
 {
     if (zone.size() != phi.size())
@@ -144,7 +143,7 @@ template<typename Type>
 Foam::Map<Type> Foam::zoneDistribute::getDatafromOtherProc
 (
     const boolList& zone,
-    const GeometricField<Type, fvPatchField, volMesh>& phi
+    const VolumeField<Type>& phi
 )
 {
     if (zone.size() != phi.size())
@@ -162,86 +161,51 @@ Foam::Map<Type> Foam::zoneDistribute::getDatafromOtherProc
 
     if (UPstream::parRun())
     {
-        const bool commsPrepared = (sendTo_.size() == UPstream::nProcs());
-
-        if (!commsPrepared)
+        if (sendConnections_.empty())
         {
-            FatalErrorInFunction
-                << "The send/recv information not initialized - "
+            WarningInFunction
+                << "The send/recv connections not initialized - "
                 << "likely that setUpCommforZone() was not called"
                 << endl;
-
             // But don't exit/abort for now
         }
 
-
-        List<Map<Type>> sendValues(UPstream::nProcs());
-
-        forAll(send_, proci)
-        {
-            for (const label sendIdx : send_[proci])
-            {
-                sendValues[proci].insert
-                (
-                    sendIdx,
-                    getLocalValue(phi, globalNumbering_.toLocal(sendIdx))
-                );
-            }
-        }
-
+        // Stream the send data into PstreamBuffers,
+        // which we also use to track the current topology.
 
         PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);
 
-        // Stream data into buffer
         for (const int proci : UPstream::allProcs())
         {
-            if (proci != UPstream::myProcNo() && !sendValues[proci].empty())
+            if (proci != UPstream::myProcNo() && !send_[proci].empty())
             {
-                // Put data into send buffer
-                UOPstream toProc(proci, pBufs);
-                toProc << sendValues[proci];
-            }
-        }
-
+                // Serialize as Map
+                Map<Type> sendValues(2*send_[proci].size());
 
-        if (commsPrepared)
-        {
-            DynamicList<label> sendProcs;
-            DynamicList<label> recvProcs;
-
-            for (const int proci : UPstream::allProcs())
-            {
-                if (sendTo_[proci])
+                for (const label sendIdx : send_[proci])
                 {
-                    sendProcs.append(proci);
+                    sendValues.insert
+                    (
+                        sendIdx,
+                        getLocalValue(phi, globalNumbering_.toLocal(sendIdx))
+                    );
                 }
-                if (recvFrom_[proci])
-                {
-                    recvProcs.append(proci);
-                }
-            }
 
-            pBufs.finishedSends(sendProcs, recvProcs);
-        }
-        else
-        {
-            // Fallback to all-to-all communication
-
-            pBufs.finishedSends();
+                UOPstream toProc(proci, pBufs);
+                toProc << sendValues;
+            }
         }
 
+        pBufs.finishedSends(sendConnections_, sendProcs_, recvProcs_);
 
-        for (const int proci : UPstream::allProcs())
+        for (const int proci : pBufs.allProcs())
         {
-            // Do not use recvFrom_[proci] here
-            // - could be incorrect if comms are not prepared!
-
             if (proci != UPstream::myProcNo() && pBufs.hasRecvData(proci))
             {
                 UIPstream fromProc(proci, pBufs);
-                Map<Type> tmpValue(fromProc);
+                Map<Type> tmpValues(fromProc);
 
-                neiValues += tmpValue;
+                neiValues += tmpValues;
             }
         }
     }
diff --git a/src/transportModels/geometricVoF/reconstructedDistanceFunction/reconstructedDistanceFunction.C b/src/transportModels/geometricVoF/reconstructedDistanceFunction/reconstructedDistanceFunction.C
index fedd64a9090..da0c0a0b473 100644
--- a/src/transportModels/geometricVoF/reconstructedDistanceFunction/reconstructedDistanceFunction.C
+++ b/src/transportModels/geometricVoF/reconstructedDistanceFunction/reconstructedDistanceFunction.C
@@ -67,11 +67,7 @@ Foam::reconstructedDistanceFunction::coupledFacesPatch() const
 
     return autoPtr<indirectPrimitivePatch>::New
     (
-        IndirectList<face>
-        (
-            mesh_.faces(),
-            nCoupledFaces
-        ),
+        IndirectList<face>(mesh_.faces(), std::move(nCoupledFaces)),
         mesh_.points()
     );
 }
diff --git a/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/gradAlpha/gradAlpha.C b/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/gradAlpha/gradAlpha.C
index 14d899b4b31..7b9e7fe1105 100644
--- a/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/gradAlpha/gradAlpha.C
+++ b/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/gradAlpha/gradAlpha.C
@@ -28,8 +28,10 @@ License
 #include "gradAlpha.H"
 #include "fvc.H"
 #include "leastSquareGrad.H"
+#include "zoneDistribute.H"
 #include "addToRunTimeSelectionTable.H"
 #include "profiling.H"
+
 // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
 
 namespace Foam
@@ -50,8 +52,7 @@ void Foam::reconstruction::gradAlpha::gradSurf(const volScalarField& phi)
     leastSquareGrad<scalar> lsGrad("polyDegree1",mesh_.geometricD());
 
     zoneDistribute& exchangeFields = zoneDistribute::New(mesh_);
-
-    exchangeFields.setUpCommforZone(interfaceCell_,true);
+    exchangeFields.setUpCommforZone(interfaceCell_, true);
 
     Map<vector> mapCC
     (
@@ -135,10 +136,7 @@ void Foam::reconstruction::gradAlpha::reconstruct(bool forceUpdate)
     if (mesh_.topoChanging())
     {
         // Introduced resizing to cope with changing meshes
-        if (interfaceCell_.size() != mesh_.nCells())
-        {
-            interfaceCell_.resize(mesh_.nCells());
-        }
+        interfaceCell_.resize_nocopy(mesh_.nCells());
     }
     interfaceCell_ = false;
 
diff --git a/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/gradAlpha/gradAlpha.H b/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/gradAlpha/gradAlpha.H
index f895ff625c7..7666d348fe1 100644
--- a/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/gradAlpha/gradAlpha.H
+++ b/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/gradAlpha/gradAlpha.H
@@ -45,10 +45,9 @@ SourceFiles
 #include "typeInfo.H"
 #include "reconstructionSchemes.H"
 #include "volFields.H"
+#include "DynamicField.H"
 #include "dimensionedScalar.H"
-#include "autoPtr.H"
 #include "surfaceIteratorPLIC.H"
-#include "zoneDistribute.H"
 
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
 
@@ -86,6 +85,7 @@ class gradAlpha
         //- SurfaceIterator finds the plane centre for specified VOF value
         surfaceIteratorPLIC sIterPLIC_;
 
+
     // Private Member Functions
 
         //- Compute gradient at the surfaces
diff --git a/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/plicRDF/plicRDF.C b/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/plicRDF/plicRDF.C
index dbdcb9b21a6..10fe2c055a8 100644
--- a/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/plicRDF/plicRDF.C
+++ b/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/plicRDF/plicRDF.C
@@ -29,6 +29,7 @@ License
 #include "interpolationCellPoint.H"
 #include "fvc.H"
 #include "leastSquareGrad.H"
+#include "zoneDistribute.H"
 #include "addToRunTimeSelectionTable.H"
 #include "profiling.H"
 
@@ -50,11 +51,11 @@ void Foam::reconstruction::plicRDF::interpolateNormal()
 {
     addProfilingInFunction(geometricVoF);
     scalar dt = mesh_.time().deltaTValue();
-    zoneDistribute& exchangeFields = zoneDistribute::New(mesh_);
 
     leastSquareGrad<scalar> lsGrad("polyDegree1",mesh_.geometricD());
 
-    exchangeFields.setUpCommforZone(interfaceCell_,false);
+    zoneDistribute& exchangeFields = zoneDistribute::New(mesh_);
+    exchangeFields.setUpCommforZone(interfaceCell_, false);
 
     Map<vector> mapCentre
     (
@@ -74,8 +75,8 @@ void Foam::reconstruction::plicRDF::interpolateNormal()
         exchangeFields.getDatafromOtherProc(interfaceCell_, alpha1_)
     );
 
-    DynamicField<vector > cellCentre(100);
-    DynamicField<scalar > alphaValues(100);
+    DynamicField<vector> cellCentre(100);
+    DynamicField<scalar> alphaValues(100);
 
     DynamicList<vector> foundNormals(30);
 
diff --git a/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/plicRDF/plicRDF.H b/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/plicRDF/plicRDF.H
index 02a1bdf139d..146b35ccbd0 100644
--- a/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/plicRDF/plicRDF.H
+++ b/src/transportModels/geometricVoF/reconstructionSchemes/plicSchemes/plicRDF/plicRDF.H
@@ -59,7 +59,6 @@ SourceFiles
 #include "autoPtr.H"
 #include "surfaceIteratorPLIC.H"
 #include "reconstructedDistanceFunction.H"
-#include "zoneDistribute.H"
 
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
 
diff --git a/tutorials/multiphase/compressibleInterIsoFoam/laminar/depthCharge2D/Allrun-parallel b/tutorials/multiphase/compressibleInterIsoFoam/laminar/depthCharge2D/Allrun-parallel
new file mode 100755
index 00000000000..921e2b8fa24
--- /dev/null
+++ b/tutorials/multiphase/compressibleInterIsoFoam/laminar/depthCharge2D/Allrun-parallel
@@ -0,0 +1,18 @@
+#!/bin/sh
+cd "${0%/*}" || exit                                # Run from this directory
+. ${WM_PROJECT_DIR:?}/bin/tools/RunFunctions        # Tutorial run functions
+#------------------------------------------------------------------------------
+
+runApplication blockMesh
+
+restore0Dir
+
+runApplication setFields
+
+runApplication setAlphaField
+
+runApplication decomposePar
+
+runParallel $(getApplication)
+
+#------------------------------------------------------------------------------
diff --git a/tutorials/multiphase/compressibleInterIsoFoam/laminar/depthCharge2D/system/decomposeParDict b/tutorials/multiphase/compressibleInterIsoFoam/laminar/depthCharge2D/system/decomposeParDict
new file mode 100644
index 00000000000..d283b764dc5
--- /dev/null
+++ b/tutorials/multiphase/compressibleInterIsoFoam/laminar/depthCharge2D/system/decomposeParDict
@@ -0,0 +1,27 @@
+/*--------------------------------*- C++ -*----------------------------------*\
+| =========                 |                                                 |
+| \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox           |
+|  \\    /   O peration     | Version:  v2112                                 |
+|   \\  /    A nd           | Website:  www.openfoam.com                      |
+|    \\/     M anipulation  |                                                 |
+\*---------------------------------------------------------------------------*/
+FoamFile
+{
+    version     2.0;
+    format      ascii;
+    class       dictionary;
+    object      decomposeParDict;
+}
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
+
+numberOfSubdomains 8;
+
+method          hierarchical;
+
+coeffs
+{
+    n           (4 2 1);
+}
+
+
+// ************************************************************************* //
-- 
GitLab