diff --git a/applications/utilities/mesh/conversion/Optional/ccm26ToFoam/ccm26ToFoam.C b/applications/utilities/mesh/conversion/Optional/ccm26ToFoam/ccm26ToFoam.C
index 47ec7f913746b0c9582f5a286f1217788708b9be..f4bf559009dafed37de0c24168862e077ef86f0b 100644
--- a/applications/utilities/mesh/conversion/Optional/ccm26ToFoam/ccm26ToFoam.C
+++ b/applications/utilities/mesh/conversion/Optional/ccm26ToFoam/ccm26ToFoam.C
@@ -908,10 +908,10 @@ int main(int argc, char *argv[])
             runTime.constant(),
             runTime
         ),
-        foamPoints,
-        foamFaces,
-        foamOwner,
-        foamNeighbour
+        xferMove<pointField>(foamPoints),
+        xferMove<faceList>(foamFaces),
+        xferCopy<labelList>(foamOwner),
+        xferMove<labelList>(foamNeighbour)
     );
 
     // Create patches. Use patch types to determine what Foam types to generate.
diff --git a/src/OpenFOAM/meshes/primitiveMesh/primitiveMeshCheck/primitiveMeshCheck.C b/src/OpenFOAM/meshes/primitiveMesh/primitiveMeshCheck/primitiveMeshCheck.C
index 3534519ca7187dca572c9efd8eee8a7fc293370b..29841d3ec86892fd222c2dae5f6f0864e03a7cec 100644
--- a/src/OpenFOAM/meshes/primitiveMesh/primitiveMeshCheck/primitiveMeshCheck.C
+++ b/src/OpenFOAM/meshes/primitiveMesh/primitiveMeshCheck/primitiveMeshCheck.C
@@ -626,7 +626,8 @@ bool Foam::primitiveMesh::checkFaceSkewness
         vector d = cellCtrs[nei[faceI]] - cellCtrs[own[faceI]];
 
         // Skewness vector
-        vector sv = Cpf - ((fAreas[faceI] & Cpf)/(fAreas[faceI] & d))*d;
+        vector sv =
+            Cpf - ((fAreas[faceI] & Cpf)/((fAreas[faceI] & d) + SMALL))*d;
         vector svHat = sv/(mag(sv) + VSMALL);
 
         // Normalisation distance calculated as the approximate distance
diff --git a/src/decompositionMethods/decompositionMethods/decompositionMethod/decompositionMethod.C b/src/decompositionMethods/decompositionMethods/decompositionMethod/decompositionMethod.C
index 01b1a56d03f3fe14d6828654fc119b1bf7617f13..c38e0494ae280a4156a81a10b32c5e0b11c13a16 100644
--- a/src/decompositionMethods/decompositionMethods/decompositionMethod/decompositionMethod.C
+++ b/src/decompositionMethods/decompositionMethods/decompositionMethod/decompositionMethod.C
@@ -25,8 +25,6 @@ License
 InClass
     decompositionMethod
 
-Description
-
 \*---------------------------------------------------------------------------*/
 
 #include "decompositionMethod.H"
@@ -104,14 +102,26 @@ Foam::autoPtr<Foam::decompositionMethod> Foam::decompositionMethod::New
 }
 
 
+Foam::labelList Foam::decompositionMethod::decompose
+(
+    const pointField& points
+)
+{
+    scalarField weights(0);
+
+    return decompose(points, weights);
+}
+
+
 Foam::labelList Foam::decompositionMethod::decompose
 (
     const labelList& fineToCoarse,
-    const pointField& coarsePoints
+    const pointField& coarsePoints,
+    const scalarField& coarseWeights
 )
 {
     // Decompose based on agglomerated points
-    labelList coarseDistribution(decompose(coarsePoints));
+    labelList coarseDistribution(decompose(coarsePoints, coarseWeights));
 
     // Rework back into decomposition for original mesh_
     labelList fineDistribution(fineToCoarse.size());
@@ -125,6 +135,18 @@ Foam::labelList Foam::decompositionMethod::decompose
 }
 
 
+Foam::labelList Foam::decompositionMethod::decompose
+(
+    const labelList& fineToCoarse,
+    const pointField& coarsePoints
+)
+{
+    scalarField coarseWeights(0);
+
+    return decompose(fineToCoarse, coarsePoints, coarseWeights);
+}
+
+
 void Foam::decompositionMethod::calcCellCells
 (
     const polyMesh& mesh,
@@ -170,4 +192,16 @@ void Foam::decompositionMethod::calcCellCells
 }
 
 
+Foam::labelList Foam::decompositionMethod::decompose
+(
+    const labelListList& globalCellCells,
+    const pointField& cc
+)
+{
+    scalarField cWeights(0);
+
+    return decompose(globalCellCells, cc, cWeights);
+}
+
+
 // ************************************************************************* //
diff --git a/src/decompositionMethods/decompositionMethods/decompositionMethod/decompositionMethod.H b/src/decompositionMethods/decompositionMethods/decompositionMethod/decompositionMethod.H
index 7055f24d7b31803b81bf7c73d068954d6125cc36..7f66e9f50d0916319a4f22e0d90af2192625b168 100644
--- a/src/decompositionMethods/decompositionMethods/decompositionMethod/decompositionMethod.H
+++ b/src/decompositionMethods/decompositionMethods/decompositionMethod/decompositionMethod.H
@@ -150,20 +150,37 @@ public:
 
         //- Return for every coordinate the wanted processor number. Use the
         //  mesh connectivity (if needed)
-        virtual labelList decompose(const pointField&) = 0;
+        virtual labelList decompose
+        (
+            const pointField& points,
+            const scalarField& pointWeights
+        ) = 0;
+
+        //- Like decompose but with uniform weights on the points
+        virtual labelList decompose(const pointField&);
+
 
         //- Return for every coordinate the wanted processor number. Gets
         //  passed agglomeration map (from fine to coarse cells) and coarse cell
         //  location. Can be overridden by decomposers that provide this
         //  functionality natively. Coarse cells are local to the processor
         //  (if in parallel). If you want to have coarse cells spanning
-        //  processors use the next function below instead.
+        //  processors use the globalCellCells instead.
+        virtual labelList decompose
+        (
+            const labelList& cellToRegion,
+            const pointField& regionPoints,
+            const scalarField& regionWeights
+        );
+
+        //- Like decompose but with uniform weights on the regions
         virtual labelList decompose
         (
             const labelList& cellToRegion,
             const pointField& regionPoints
         );
 
+
         //- Return for every coordinate the wanted processor number. Explicitly
         //  provided connectivity - does not use mesh_.
         //  The connectivity is equal to mesh.cellCells() except for
@@ -174,9 +191,17 @@ public:
         virtual labelList decompose
         (
             const labelListList& globalCellCells,
-            const pointField& cc
+            const pointField& cc,
+            const scalarField& cWeights
         ) = 0;
 
+        //- Like decompose but with uniform weights on the cells
+        virtual labelList decompose
+        (
+            const labelListList& globalCellCells,
+            const pointField& cc
+        );
+
 };
 
 
diff --git a/src/decompositionMethods/decompositionMethods/hierarchGeomDecomp/hierarchGeomDecomp.C b/src/decompositionMethods/decompositionMethods/hierarchGeomDecomp/hierarchGeomDecomp.C
index 72d39d215cb3357c2a7f6a779b37cc2bcc11275e..bde1181c0911e823c9497b855cab96e5630f5f21 100644
--- a/src/decompositionMethods/decompositionMethods/hierarchGeomDecomp/hierarchGeomDecomp.C
+++ b/src/decompositionMethods/decompositionMethods/hierarchGeomDecomp/hierarchGeomDecomp.C
@@ -142,6 +142,38 @@ Foam::label Foam::hierarchGeomDecomp::findLower
 }
 
 
+// Create a mapping between the index and the weighted size.
+// For convenience, sortedWeightedSize is one size bigger than current. This
+// avoids extra tests.
+void Foam::hierarchGeomDecomp::calculateSortedWeightedSizes
+(
+    const labelList& current,
+    const labelList& indices,
+    const scalarField& weights,
+    const label globalCurrentSize,
+
+    scalarField& sortedWeightedSizes
+)
+{
+    // Evaluate cumulative weights.
+    sortedWeightedSizes[0] = 0;
+    forAll(current, i)
+    {
+        label pointI = current[indices[i]];
+        sortedWeightedSizes[i + 1] = sortedWeightedSizes[i] + weights[pointI];
+    }
+    // Non-dimensionalise and multiply by size.
+    scalar globalCurrentLength = returnReduce
+    (
+        sortedWeightedSizes[current.size()], 
+        sumOp<scalar>()
+    );
+    // Normalise weights by global sum of weights and multiply through
+    // by global size.
+    sortedWeightedSizes *= (globalCurrentSize/globalCurrentLength);
+}
+
+
 // Find position in values so between minIndex and this position there
 // are wantedSize elements.
 void Foam::hierarchGeomDecomp::findBinary
@@ -170,7 +202,6 @@ void Foam::hierarchGeomDecomp::findBinary
     label midPrev = -1;
     label highPrev = -1;
 
-    //while (low <= high)
     while (true)
     {
         label size = returnReduce(mid-minIndex, sumOp<label>());
@@ -204,7 +235,98 @@ void Foam::hierarchGeomDecomp::findBinary
         mid = findLower(values, midValue, low, high);
 
         // Safeguard if same as previous.
-        bool hasNotChanged = (mid == midPrev) && (low == lowPrev) && (high == highPrev);
+        bool hasNotChanged =
+            (mid == midPrev)
+         && (low == lowPrev)
+         && (high == highPrev);
+
+        if (returnReduce(hasNotChanged, andOp<bool>()))
+        {
+            WarningIn("hierarchGeomDecomp::findBinary(..)")
+                << "unable to find desired deomposition split, making do!" 
+                << endl;
+            break;
+        }
+
+        midPrev = mid;
+        lowPrev = low;
+        highPrev = high;
+    }
+}
+
+
+// Find position in values so between minIndex and this position there
+// are wantedSize elements.
+void Foam::hierarchGeomDecomp::findBinary
+(
+    const label sizeTol,
+    const List<scalar>& sortedWeightedSizes,
+    const List<scalar>& values,
+    const label minIndex,       // index of previous value
+    const scalar minValue,      // value at minIndex
+    const scalar maxValue,      // global max of values
+    const scalar wantedSize,    // wanted size
+
+    label& mid,                 // index where size of bin is
+                                // wantedSize (to within sizeTol)
+    scalar& midValue            // value at mid
+)
+{
+    label low = minIndex;
+    scalar lowValue = minValue;
+
+    scalar highValue = maxValue;
+    // (one beyond) index of highValue
+    label high = values.size();
+
+    // Safeguards to avoid infinite loop.
+    label lowPrev = -1;
+    label midPrev = -1;
+    label highPrev = -1;
+
+    while (true)
+    {
+        scalar weightedSize = returnReduce
+        (
+            sortedWeightedSizes[mid] - sortedWeightedSizes[minIndex],
+            sumOp<scalar>()
+        );
+
+        if (debug)
+        {
+            Pout<< "low:" << low << " lowValue:" << lowValue
+                << " high:" << high << " highValue:" << highValue
+                << " mid:" << mid << " midValue:" << midValue << nl
+                << "globalSize:" << weightedSize 
+                << " wantedSize:" << wantedSize
+                << " sizeTol:" << sizeTol << endl;
+        }
+
+        if (wantedSize < weightedSize - sizeTol)
+        {
+            high = mid;
+            highValue = midValue;
+        }
+        else if (wantedSize > weightedSize + sizeTol)
+        {
+            low = mid;
+            lowValue = midValue;
+        }
+        else
+        {
+            break;
+        }
+
+        // Update mid, midValue
+        midValue = 0.5*(lowValue+highValue);
+        mid = findLower(values, midValue, low, high);
+
+        // Safeguard if same as previous.
+        bool hasNotChanged =
+            (mid == midPrev)
+         && (low == lowPrev)
+         && (high == highPrev);
+
         if (returnReduce(hasNotChanged, andOp<bool>()))
         {
             WarningIn("hierarchGeomDecomp::findBinary(..)")
@@ -394,6 +516,190 @@ void Foam::hierarchGeomDecomp::sortComponent
 }
 
 
+// Sort points into bins according to one component. Recurses to next component.
+void Foam::hierarchGeomDecomp::sortComponent
+(
+    const label sizeTol,
+    const scalarField& weights,
+    const pointField& points,
+    const labelList& current,       // slice of points to decompose
+    const direction componentIndex, // index in decompOrder_
+    const label mult,               // multiplication factor for finalDecomp
+    labelList& finalDecomp
+)
+{
+    // Current component
+    label compI = decompOrder_[componentIndex];
+
+    if (debug)
+    {
+        Pout<< "sortComponent : Sorting slice of size " << current.size()
+            << " in component " << compI << endl;
+    }
+
+    // Storage for sorted component compI
+    SortableList<scalar> sortedCoord(current.size());
+
+    forAll(current, i)
+    {
+        label pointI = current[i];
+
+        sortedCoord[i] = points[pointI][compI];
+    }
+    sortedCoord.sort();
+
+    label globalCurrentSize = returnReduce(current.size(), sumOp<label>());
+
+    // Now evaluate local cumulative weights, based on the sorting.
+    // Make one bigger than the nodes.
+    scalarField sortedWeightedSizes(current.size()+1, 0);
+    calculateSortedWeightedSizes
+    (
+        current,
+        sortedCoord.indices(),
+        weights,
+        globalCurrentSize,
+        sortedWeightedSizes
+    );
+
+    scalar minCoord = returnReduce
+    (
+        (
+            sortedCoord.size()
+          ? sortedCoord[0]
+          : GREAT
+        ),
+        minOp<scalar>()
+    );
+
+    scalar maxCoord = returnReduce
+    (
+        (
+            sortedCoord.size()
+          ? sortedCoord[sortedCoord.size()-1]
+          : -GREAT
+        ),
+        maxOp<scalar>()
+    );
+
+    if (debug)
+    {   
+        Pout<< "sortComponent : minCoord:" << minCoord
+            << " maxCoord:" << maxCoord << endl;
+    }
+
+    // starting index (in sortedCoord) of bin (= local)
+    label leftIndex = 0;
+    // starting value of bin (= global since coordinate)
+    scalar leftCoord = minCoord;
+
+    // Sort bins of size n
+    for (label bin = 0; bin < n_[compI]; bin++)
+    {
+        // Now we need to determine the size of the bin (dx). This is
+        // determined by the 'pivot' values - everything to the left of this
+        // value goes in the current bin, everything to the right into the next
+        // bins.
+
+        // Local number of elements
+        label localSize = -1;     // offset from leftOffset
+
+        // Value at right of bin (leftIndex+localSize-1)
+        scalar rightCoord = -GREAT;
+        
+        if (bin == n_[compI]-1)
+        {
+            // Last bin. Copy all.
+            localSize = current.size()-leftIndex;
+            rightCoord = maxCoord;                  // note: not used anymore
+        }
+        else
+        {
+            // For the current bin (starting at leftCoord) we want a rightCoord
+            // such that the sum of all weighted sizes are 
+            // globalCurrentLength/n_[compI].
+            // We have to iterate to obtain this.
+
+            label rightIndex = current.size();
+            rightCoord = maxCoord;
+
+            // Calculate rightIndex/rightCoord to have wanted size
+            findBinary
+            (
+                sizeTol,
+                sortedWeightedSizes,
+                sortedCoord,
+                leftIndex,
+                leftCoord,
+                maxCoord,
+                globalCurrentSize/n_[compI],  // wanted size
+
+                rightIndex,
+                rightCoord
+            );
+            localSize = rightIndex - leftIndex;
+        }
+
+        if (debug)
+        {
+            Pout<< "For component " << compI << ", bin " << bin
+                << " copying" << nl
+                << "from " << leftCoord << " at local index "
+                << leftIndex << nl
+                << "to " << rightCoord << " localSize:"
+                << localSize << nl
+                << endl;
+        }
+
+
+        // Copy localSize elements starting from leftIndex.
+        labelList slice(localSize);
+
+        forAll(slice, i)
+        {
+            label pointI = current[sortedCoord.indices()[leftIndex+i]];
+
+            // Mark point into correct bin
+            finalDecomp[pointI] += bin*mult;
+
+            // And collect for next sorting action
+            slice[i] = pointI;
+        }
+
+        // Sort slice in next component
+        if (componentIndex < 2)
+        {
+            string oldPrefix;
+            if (debug)
+            {
+                oldPrefix = Pout.prefix();
+                Pout.prefix() = "  " + oldPrefix;
+            }
+
+            sortComponent
+            (
+                sizeTol,
+                weights,
+                points,
+                slice,
+                componentIndex+1,
+                mult*n_[compI],     // Multiplier to apply to decomposition.
+                finalDecomp
+            );
+
+            if (debug)
+            {
+                Pout.prefix() = oldPrefix;
+            }
+        }
+
+        // Step to next bin.
+        leftIndex += localSize;
+        leftCoord = rightCoord;
+    }
+}
+
+
 // * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //
 
 Foam::hierarchGeomDecomp::hierarchGeomDecomp
@@ -464,4 +770,47 @@ Foam::labelList Foam::hierarchGeomDecomp::decompose
 }
 
 
+Foam::labelList Foam::hierarchGeomDecomp::decompose
+(
+    const pointField& points,
+    const scalarField& weights
+)
+{
+    // construct a list for the final result
+    labelList finalDecomp(points.size(), 0);
+
+    // Start off with every point sorted onto itself.
+    labelList slice(points.size());
+    forAll(slice, i)
+    {
+        slice[i] = i;
+    }
+
+    pointField rotatedPoints = rotDelta_ & points;
+
+
+    // Calculate tolerance of cell distribution. For large cases finding
+    // distibution to the cell exact would cause too many iterations so allow
+    // some slack.
+    label allSize = points.size();
+    reduce(allSize, sumOp<label>());
+
+    const label sizeTol = max(1, label(1E-3*allSize/nProcessors_));
+
+    // Sort recursive
+    sortComponent
+    (
+        sizeTol,
+        weights,
+        rotatedPoints,
+        slice,
+        0,              // Sort first component in decompOrder.
+        1,              // Offset for different x bins.
+        finalDecomp
+    );
+
+    return finalDecomp;
+}
+
+
 // ************************************************************************* //
diff --git a/src/decompositionMethods/decompositionMethods/hierarchGeomDecomp/hierarchGeomDecomp.H b/src/decompositionMethods/decompositionMethods/hierarchGeomDecomp/hierarchGeomDecomp.H
index c2ed56521136a9157b1e0536703930c7345fe218..932ca5dcaa85ae1dffd278332889dca6eb57af49 100644
--- a/src/decompositionMethods/decompositionMethods/hierarchGeomDecomp/hierarchGeomDecomp.H
+++ b/src/decompositionMethods/decompositionMethods/hierarchGeomDecomp/hierarchGeomDecomp.H
@@ -80,6 +80,17 @@ class hierarchGeomDecomp
         //- Convert ordering string ("xyz") into list of components.
         void setDecompOrder();
 
+        //- Evaluates the weighted sizes for each sorted point.
+        static void calculateSortedWeightedSizes
+        (
+            const labelList& current,
+            const labelList& indices,
+            const scalarField& weights,
+            const label globalCurrentSize,
+
+            scalarField& sortedWeightedSizes
+        );
+
         //- Find index of t in list inbetween indices left and right
         static label findLower
         (
@@ -104,10 +115,39 @@ class hierarchGeomDecomp
             scalar& midValue            // value at mid
         );
 
+        //- Find midValue (at local index mid) such that the number of
+        //  elements between mid and leftIndex are (globally summed) the
+        //  wantedSize. Binary search.
+        static void findBinary
+        (
+            const label sizeTol,        // size difference considered acceptable
+            const List<scalar>& sortedWeightedSizes,
+            const List<scalar>&,
+            const label leftIndex,      // index of previous value
+            const scalar leftValue,     // value at leftIndex
+            const scalar maxValue,      // global max of values
+            const scalar wantedSize,    // wanted size
+            label& mid,                 // index where size of bin is wantedSize
+            scalar& midValue            // value at mid
+        );
+
+        //- Recursively sort in x,y,z (or rather acc. to decompOrder_)
+        void sortComponent
+        (
+            const label sizeTol,
+            const pointField&,
+            const labelList& slice,         // slice of points to decompose
+            const direction componentIndex, // index in decompOrder_
+            const label prevMult,           // multiplication factor
+            labelList& finalDecomp          // overall decomposition
+        );
+
         //- Recursively sort in x,y,z (or rather acc. to decompOrder_)
+        //- using weighted points.
         void sortComponent
         (
             const label sizeTol,
+            const scalarField& weights,
             const pointField&,
             const labelList& slice,         // slice of points to decompose
             const direction componentIndex, // index in decompOrder_
@@ -156,6 +196,14 @@ public:
 
         //- Return for every coordinate the wanted processor number. Use the
         //  mesh connectivity (if needed)
+        virtual labelList decompose
+        (
+            const pointField&,
+            const scalarField& weights
+        );
+
+        //- Without weights. Code for weighted decomposition is a bit complex
+        //  so kept separate for now.
         virtual labelList decompose(const pointField&);
 
         //- Return for every coordinate the wanted processor number. Explicitly
@@ -165,6 +213,16 @@ public:
         //    from 0 at processor0 and then incrementing all through the
         //    processors)
         //  - the connections are across coupled patches
+        virtual labelList decompose
+        (
+            const labelListList& globalCellCells,
+            const pointField& cc,
+            const scalarField& cWeights
+        )
+        {
+            return decompose(cc, cWeights);
+        }
+
         virtual labelList decompose
         (
             const labelListList& globalCellCells,
diff --git a/src/decompositionMethods/decompositionMethods/manualDecomp/manualDecomp.C b/src/decompositionMethods/decompositionMethods/manualDecomp/manualDecomp.C
index fd2ff9562d3afa1425ca6b7bdfb7fafb247db47d..b852e1cf8d058f302fb44b3ff43ea3272e978992 100644
--- a/src/decompositionMethods/decompositionMethods/manualDecomp/manualDecomp.C
+++ b/src/decompositionMethods/decompositionMethods/manualDecomp/manualDecomp.C
@@ -82,7 +82,11 @@ Foam::manualDecomp::manualDecomp
 
 // * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //
 
-Foam::labelList Foam::manualDecomp::decompose(const pointField& points)
+Foam::labelList Foam::manualDecomp::decompose
+(
+    const pointField& points,
+    const scalarField& pointWeights
+)
 {
     const polyMesh& mesh = *meshPtr_;
 
@@ -103,8 +107,10 @@ Foam::labelList Foam::manualDecomp::decompose(const pointField& points)
 
     if (finalDecomp.size() != points.size())
     {
-        FatalErrorIn("manualDecomp::decompose(const pointField&)")
-            << "Size of decomposition list does not correspond "
+        FatalErrorIn
+        (
+            "manualDecomp::decompose(const pointField&, const scalarField&)"
+        )   << "Size of decomposition list does not correspond "
             << "to the number of points.  Size: "
             << finalDecomp.size() << " Number of points: "
             << points.size()
@@ -115,8 +121,10 @@ Foam::labelList Foam::manualDecomp::decompose(const pointField& points)
 
     if (min(finalDecomp) < 0 || max(finalDecomp) > nProcessors_ - 1)
     {
-        FatalErrorIn("labelList manualDecomp::decompose(const pointField&)")
-            << "According to the decomposition, cells assigned to "
+        FatalErrorIn
+        (
+            "manualDecomp::decompose(const pointField&, const scalarField&)"
+        )   << "According to the decomposition, cells assigned to "
             << "impossible processor numbers.  Min processor = "
             << min(finalDecomp) << " Max processor = " << max(finalDecomp)
             << ".\n" << "Manual decomposition data read from file "
diff --git a/src/decompositionMethods/decompositionMethods/manualDecomp/manualDecomp.H b/src/decompositionMethods/decompositionMethods/manualDecomp/manualDecomp.H
index ed5ad4e8c02838178cb66143f4ac5f86c4544aab..0829f3e02382b69ac0d4812bff1101bbcf32658e 100644
--- a/src/decompositionMethods/decompositionMethods/manualDecomp/manualDecomp.H
+++ b/src/decompositionMethods/decompositionMethods/manualDecomp/manualDecomp.H
@@ -99,7 +99,11 @@ public:
 
         //- Return for every coordinate the wanted processor number. Use the
         //  mesh connectivity (if needed)
-        virtual labelList decompose(const pointField&);
+        virtual labelList decompose
+        (
+            const pointField& points,
+            const scalarField& pointWeights
+        );
 
         //- Return for every coordinate the wanted processor number. Explicitly
         //  provided connectivity - does not use mesh_.
@@ -111,10 +115,11 @@ public:
         virtual labelList decompose
         (
             const labelListList& globalCellCells,
-            const pointField& cc
+            const pointField& cc,
+            const scalarField& cWeights
         )
         {
-            return decompose(cc);
+            return decompose(cc, cWeights);
         }
 };
 
diff --git a/src/decompositionMethods/decompositionMethods/metisDecomp/metisDecomp.C b/src/decompositionMethods/decompositionMethods/metisDecomp/metisDecomp.C
index b4a643562f0f21bbd6a60c7b535b3ce5a80b1201..3b2c5fb58cedddaa6fe57f771dd0ebbf15b282e8 100644
--- a/src/decompositionMethods/decompositionMethods/metisDecomp/metisDecomp.C
+++ b/src/decompositionMethods/decompositionMethods/metisDecomp/metisDecomp.C
@@ -60,6 +60,7 @@ Foam::label Foam::metisDecomp::decompose
 (
     const List<int>& adjncy,
     const List<int>& xadj,
+    const scalarField& cWeights,
 
     List<int>& finalDecomp
 )
@@ -72,6 +73,8 @@ Foam::label Foam::metisDecomp::decompose
     // k-way: multi-level k-way
     word method("k-way");
 
+    int numCells = xadj.size()-1;
+
     // decomposition options. 0 = use defaults
     List<int> options(5, 0);
 
@@ -85,6 +88,30 @@ Foam::label Foam::metisDecomp::decompose
     // face weights (so on the edges of the dual)
     List<int> faceWeights;
 
+
+    // Check for externally provided cellweights and if so initialise weights
+    scalar maxWeights = gMax(cWeights);
+    if (cWeights.size() > 0)
+    {
+        if (cWeights.size() != numCells)
+        {
+            FatalErrorIn
+            (
+                "metisDecomp::decompose"
+                "(const pointField&, const scalarField&)"
+            )   << "Number of cell weights " << cWeights.size()
+                << " does not equal number of cells " << numCells
+                << exit(FatalError);
+        }
+        // Convert to integers.
+        cellWeights.setSize(cWeights.size());
+        forAll(cellWeights, i)
+        {
+            cellWeights[i] = int(1000000*cWeights[i]/maxWeights);
+        }
+    }
+
+
     // Check for user supplied weights and decomp options
     if (decompositionDict_.found("metisCoeffs"))
     {
@@ -161,52 +188,8 @@ Foam::label Foam::metisDecomp::decompose
                     << exit(FatalError);
             }
         }
-
-        //- faceWeights disabled. Only makes sense for cellCells from mesh.
-        //if (metisCoeffs.readIfPresent("faceWeightsFile", weightsFile))
-        //{
-        //    Info<< "metisDecomp : Using face-based weights." << endl;
-        //
-        //    IOList<int> weights
-        //    (
-        //        IOobject
-        //        (
-        //            weightsFile,
-        //            mesh_.time().timeName(),
-        //            mesh_,
-        //            IOobject::MUST_READ,
-        //            IOobject::AUTO_WRITE
-        //        )
-        //    );
-        //
-        //    if (weights.size() != adjncy.size()/2)
-        //    {
-        //        FatalErrorIn("metisDecomp::decompose(const pointField&)")
-        //            << "Number of face weights " << weights.size()
-        //            << " does not equal number of internal faces "
-        //            << adjncy.size()/2
-        //            << exit(FatalError);
-        //    }
-        //
-        //    // Assume symmetric weights. Keep same ordering as adjncy.
-        //    faceWeights.setSize(adjncy.size());
-        //
-        //    labelList nFacesPerCell(mesh_.nCells(), 0);
-        //
-        //    for (label faceI = 0; faceI < mesh_.nInternalFaces(); faceI++)
-        //    {
-        //        label w = weights[faceI];
-        //
-        //        label own = mesh_.faceOwner()[faceI];
-        //        label nei = mesh_.faceNeighbour()[faceI];
-        //
-        //        faceWeights[xadj[own] + nFacesPerCell[own]++] = w;
-        //        faceWeights[xadj[nei] + nFacesPerCell[nei]++] = w;
-        //    }
-        //}
     }
 
-    int numCells = xadj.size()-1;
     int nProcs = nProcessors_;
 
     // output: cell -> processor addressing
@@ -327,12 +310,18 @@ Foam::metisDecomp::metisDecomp
 
 // * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //
 
-Foam::labelList Foam::metisDecomp::decompose(const pointField& points)
+Foam::labelList Foam::metisDecomp::decompose
+(
+    const pointField& points,
+    const scalarField& pointWeights
+)
 {
     if (points.size() != mesh_.nCells())
     {
-        FatalErrorIn("metisDecomp::decompose(const pointField&)")
-            << "Can use this decomposition method only for the whole mesh"
+        FatalErrorIn
+        (
+            "metisDecomp::decompose(const pointField&,const scalarField&)"
+        )   << "Can use this decomposition method only for the whole mesh"
             << endl
             << "and supply one coordinate (cellCentre) for every cell." << endl
             << "The number of coordinates " << points.size() << endl
@@ -340,19 +329,49 @@ Foam::labelList Foam::metisDecomp::decompose(const pointField& points)
             << exit(FatalError);
     }
 
+    List<int> adjncy;
+    List<int> xadj;
+    calcMetisCSR
+    (
+        mesh_,
+        adjncy,
+        xadj
+    );
+
+    // Decompose using default weights
+    List<int> finalDecomp;
+    decompose(adjncy, xadj, pointWeights, finalDecomp);
+
+    // Copy back to labelList
+    labelList decomp(finalDecomp.size());
+    forAll(decomp, i)
+    {
+        decomp[i] = finalDecomp[i];
+    }
+    return decomp;
+}
+
+
+void Foam::metisDecomp::calcMetisCSR
+(
+    const polyMesh& mesh,
+    List<int>& adjncy,
+    List<int>& xadj
+)
+{
     // Make Metis CSR (Compressed Storage Format) storage
     //   adjncy      : contains neighbours (= edges in graph)
     //   xadj(celli) : start of information in adjncy for celli
 
-    List<int> xadj(mesh_.nCells()+1);
+    xadj.setSize(mesh.nCells()+1);
 
     // Initialise the number of internal faces of the cells to twice the
     // number of internal faces
-    label nInternalFaces = 2*mesh_.nInternalFaces();
+    label nInternalFaces = 2*mesh.nInternalFaces();
 
     // Check the boundary for coupled patches and add to the number of
     // internal faces
-    const polyBoundaryMesh& pbm = mesh_.boundaryMesh();
+    const polyBoundaryMesh& pbm = mesh.boundaryMesh();
 
     forAll(pbm, patchi)
     {
@@ -364,17 +383,17 @@ Foam::labelList Foam::metisDecomp::decompose(const pointField& points)
 
     // Create the adjncy array the size of the total number of internal and
     // coupled faces
-    List<int> adjncy(nInternalFaces);
+    adjncy.setSize(nInternalFaces);
 
     // Fill in xadj
     // ~~~~~~~~~~~~
     label freeAdj = 0;
 
-    for (label cellI = 0; cellI < mesh_.nCells(); cellI++)
+    for (label cellI = 0; cellI < mesh.nCells(); cellI++)
     {
         xadj[cellI] = freeAdj;
 
-        const labelList& cFaces = mesh_.cells()[cellI];
+        const labelList& cFaces = mesh.cells()[cellI];
 
         forAll(cFaces, i)
         {
@@ -382,7 +401,7 @@ Foam::labelList Foam::metisDecomp::decompose(const pointField& points)
 
             if
             (
-                mesh_.isInternalFace(faceI)
+                mesh.isInternalFace(faceI)
              || isA<cyclicPolyPatch>(pbm[pbm.whichPatch(faceI)])
             )
             {
@@ -390,19 +409,19 @@ Foam::labelList Foam::metisDecomp::decompose(const pointField& points)
             }
         }
     }
-    xadj[mesh_.nCells()] = freeAdj;
+    xadj[mesh.nCells()] = freeAdj;
 
 
     // Fill in adjncy
     // ~~~~~~~~~~~~~~
 
-    labelList nFacesPerCell(mesh_.nCells(), 0);
+    labelList nFacesPerCell(mesh.nCells(), 0);
 
     // Internal faces
-    for (label faceI = 0; faceI < mesh_.nInternalFaces(); faceI++)
+    for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
     {
-        label own = mesh_.faceOwner()[faceI];
-        label nei = mesh_.faceNeighbour()[faceI];
+        label own = mesh.faceOwner()[faceI];
+        label nei = mesh.faceNeighbour()[faceI];
 
         adjncy[xadj[own] + nFacesPerCell[own]++] = nei;
         adjncy[xadj[nei] + nFacesPerCell[nei]++] = own;
@@ -427,18 +446,6 @@ Foam::labelList Foam::metisDecomp::decompose(const pointField& points)
             }
         }
     }
-
-    // Decompose using default weights
-    List<int> finalDecomp;
-    decompose(adjncy, xadj, finalDecomp);
-
-    // Copy back to labelList
-    labelList decomp(finalDecomp.size());
-    forAll(decomp, i)
-    {
-        decomp[i] = finalDecomp[i];
-    }
-    return decomp;
 }
 
 
@@ -487,14 +494,16 @@ void Foam::metisDecomp::calcMetisCSR
 Foam::labelList Foam::metisDecomp::decompose
 (
     const labelList& agglom,
-    const pointField& agglomPoints
+    const pointField& agglomPoints,
+    const scalarField& agglomWeights
 )
 {
     if (agglom.size() != mesh_.nCells())
     {
         FatalErrorIn
         (
-            "parMetisDecomp::decompose(const labelList&, const pointField&)"
+            "metisDecomp::decompose"
+            "(const labelList&, const pointField&, const scalarField&)"
         )   << "Size of cell-to-coarse map " << agglom.size()
             << " differs from number of cells in mesh " << mesh_.nCells()
             << exit(FatalError);
@@ -521,7 +530,7 @@ Foam::labelList Foam::metisDecomp::decompose
 
     // Decompose using default weights
     List<int> finalDecomp;
-    decompose(adjncy, xadj, finalDecomp);
+    decompose(adjncy, xadj, agglomWeights, finalDecomp);
 
 
     // Rework back into decomposition for original mesh_
@@ -539,14 +548,16 @@ Foam::labelList Foam::metisDecomp::decompose
 Foam::labelList Foam::metisDecomp::decompose
 (
     const labelListList& globalCellCells,
-    const pointField& cellCentres
+    const pointField& cellCentres,
+    const scalarField& cellWeights
 )
 {
     if (cellCentres.size() != globalCellCells.size())
     {
         FatalErrorIn
         (
-            "metisDecomp::decompose(const pointField&, const labelListList&)"
+            "metisDecomp::decompose"
+            "(const pointField&, const labelListList&, const scalarField&)"
         )   << "Inconsistent number of cells (" << globalCellCells.size()
             << ") and number of cell centres (" << cellCentres.size()
             << ")." << exit(FatalError);
@@ -564,7 +575,7 @@ Foam::labelList Foam::metisDecomp::decompose
 
     // Decompose using default weights
     List<int> finalDecomp;
-    decompose(adjncy, xadj, finalDecomp);
+    decompose(adjncy, xadj, cellWeights, finalDecomp);
 
     // Copy back to labelList
     labelList decomp(finalDecomp.size());
diff --git a/src/decompositionMethods/decompositionMethods/metisDecomp/metisDecomp.H b/src/decompositionMethods/decompositionMethods/metisDecomp/metisDecomp.H
index a91427a8e9118f66a5f301017ab61327bfe0641e..6a38e25c8a83bd0fd12285f1bb1608dec5829df3 100644
--- a/src/decompositionMethods/decompositionMethods/metisDecomp/metisDecomp.H
+++ b/src/decompositionMethods/decompositionMethods/metisDecomp/metisDecomp.H
@@ -60,6 +60,7 @@ class metisDecomp
         (
             const List<int>& adjncy,
             const List<int>& xadj,
+            const scalarField& cellWeights,
             List<int>& finalDecomp
         );
 
@@ -100,7 +101,11 @@ public:
 
         //- Return for every coordinate the wanted processor number. Use the
         //  mesh connectivity (if needed)
-        virtual labelList decompose(const pointField&);
+        virtual labelList decompose
+        (
+            const pointField& points,
+            const scalarField& pointWeights
+        );
 
         //- Return for every coordinate the wanted processor number. Gets
         //  passed agglomeration map (from fine to coarse cells) and coarse cell
@@ -109,7 +114,8 @@ public:
         virtual labelList decompose
         (
             const labelList& agglom,
-            const pointField&
+            const pointField& regionPoints,
+            const scalarField& regionWeights
         );
 
         //- Return for every coordinate the wanted processor number. Explicitly
@@ -122,10 +128,21 @@ public:
         virtual labelList decompose
         (
             const labelListList& globalCellCells,
-            const pointField& cc
+            const pointField& cc,
+            const scalarField& cWeights
+        );
+
+        //- Helper to convert connectivity (supplied as owner,neighbour) into
+        //  Metis storage
+        static void calcMetisCSR
+        (
+            const polyMesh& mesh,
+            List<int>& adjncy,
+            List<int>& xadj
         );
 
-        //- Helper to convert cellcells into Metis storage
+        //- Helper to convert connectivity (supplied as cellcells) into
+        //  Metis storage
         static void calcMetisCSR
         (
             const labelListList& globalCellCells,
diff --git a/src/decompositionMethods/decompositionMethods/scotchDecomp/scotchDecomp.C b/src/decompositionMethods/decompositionMethods/scotchDecomp/scotchDecomp.C
index 994a82419285a24da2695f3e3850ee4580d1f8ac..61a9483837daef150a8aabfe8da01acfddb142b4 100644
--- a/src/decompositionMethods/decompositionMethods/scotchDecomp/scotchDecomp.C
+++ b/src/decompositionMethods/decompositionMethods/scotchDecomp/scotchDecomp.C
@@ -65,6 +65,7 @@ License
 #include "Time.H"
 #include "cyclicPolyPatch.H"
 #include "OFstream.H"
+#include "metisDecomp.H"
 
 extern "C"
 {
@@ -125,6 +126,7 @@ Foam::label Foam::scotchDecomp::decompose
 (
     const List<int>& adjncy,
     const List<int>& xadj,
+    const scalarField& cWeights,
 
     List<int>& finalDecomp
 )
@@ -160,6 +162,33 @@ Foam::label Foam::scotchDecomp::decompose
     // Graph
     // ~~~~~
 
+    List<int> velotab;
+
+
+    // Check for externally provided cellweights and if so initialise weights
+    const scalar maxWeights = cWeights.size() ? gMax(cWeights) : 1.0;
+    if (cWeights.size() > 0)
+    {
+        if (cWeights.size() != xadj.size()-1)
+        {
+            FatalErrorIn
+            (
+                "scotchDecomp::decompose"
+                "(const List<int>&, const List<int>&, const scalarField&)"
+            )   << "Number of cell weights " << cWeights.size()
+                << " does not equal number of cells " << xadj.size()-1
+                << exit(FatalError);
+        }
+        // Convert to integers.
+        velotab.setSize(cWeights.size());
+        forAll(velotab, i)
+        {
+            velotab[i] = int(1000000*cWeights[i]/maxWeights);
+        }
+    }
+
+
+
     SCOTCH_Graph grafdat;
     check(SCOTCH_graphInit(&grafdat), "SCOTCH_graphInit");
     check
@@ -171,7 +200,7 @@ Foam::label Foam::scotchDecomp::decompose
             xadj.size()-1,          // vertnbr, nCells
             xadj.begin(),           // verttab, start index per cell into adjncy
             &xadj[1],               // vendtab, end index  ,,
-            NULL,                   // velotab, vertex weights
+            velotab.begin(),        // velotab, vertex weights
             NULL,                   // vlbltab
             adjncy.size(),          // edgenbr, number of arcs
             adjncy.begin(),         // edgetab
@@ -340,7 +369,11 @@ Foam::scotchDecomp::scotchDecomp
 
 // * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //
 
-Foam::labelList Foam::scotchDecomp::decompose(const pointField& points)
+Foam::labelList Foam::scotchDecomp::decompose
+(
+    const pointField& points,
+    const scalarField& pointWeights
+)
 {
     if (points.size() != mesh_.nCells())
     {
@@ -356,94 +389,18 @@ Foam::labelList Foam::scotchDecomp::decompose(const pointField& points)
     // Make Metis CSR (Compressed Storage Format) storage
     //   adjncy      : contains neighbours (= edges in graph)
     //   xadj(celli) : start of information in adjncy for celli
-
-    List<int> xadj(mesh_.nCells()+1);
-
-    // Initialise the number of internal faces of the cells to twice the
-    // number of internal faces
-    label nInternalFaces = 2*mesh_.nInternalFaces();
-
-    // Check the boundary for coupled patches and add to the number of
-    // internal faces
-    const polyBoundaryMesh& pbm = mesh_.boundaryMesh();
-
-    forAll(pbm, patchi)
-    {
-        if (isA<cyclicPolyPatch>(pbm[patchi]))
-        {
-            nInternalFaces += pbm[patchi].size();
-        }
-    }
-
-    // Create the adjncy array the size of the total number of internal and
-    // coupled faces
-    List<int> adjncy(nInternalFaces);
-
-    // Fill in xadj
-    // ~~~~~~~~~~~~
-    label freeAdj = 0;
-
-    for (label cellI = 0; cellI < mesh_.nCells(); cellI++)
-    {
-        xadj[cellI] = freeAdj;
-
-        const labelList& cFaces = mesh_.cells()[cellI];
-
-        forAll(cFaces, i)
-        {
-            label faceI = cFaces[i];
-
-            if
-            (
-                mesh_.isInternalFace(faceI)
-             || isA<cyclicPolyPatch>(pbm[pbm.whichPatch(faceI)])
-            )
-            {
-                freeAdj++;
-            }
-        }
-    }
-    xadj[mesh_.nCells()] = freeAdj;
-
-
-    // Fill in adjncy
-    // ~~~~~~~~~~~~~~
-
-    labelList nFacesPerCell(mesh_.nCells(), 0);
-
-    // Internal faces
-    for (label faceI = 0; faceI < mesh_.nInternalFaces(); faceI++)
-    {
-        label own = mesh_.faceOwner()[faceI];
-        label nei = mesh_.faceNeighbour()[faceI];
-
-        adjncy[xadj[own] + nFacesPerCell[own]++] = nei;
-        adjncy[xadj[nei] + nFacesPerCell[nei]++] = own;
-    }
-
-    // Coupled faces. Only cyclics done.
-    forAll(pbm, patchi)
-    {
-        if (isA<cyclicPolyPatch>(pbm[patchi]))
-        {
-            const unallocLabelList& faceCells = pbm[patchi].faceCells();
-
-            label sizeby2 = faceCells.size()/2;
-
-            for (label facei=0; facei<sizeby2; facei++)
-            {
-                label own = faceCells[facei];
-                label nei = faceCells[facei + sizeby2];
-
-                adjncy[xadj[own] + nFacesPerCell[own]++] = nei;
-                adjncy[xadj[nei] + nFacesPerCell[nei]++] = own;
-            }
-        }
-    }
+    List<int> adjncy;
+    List<int> xadj;
+    metisDecomp::calcMetisCSR
+    (
+        mesh_,
+        adjncy,
+        xadj
+    );
 
     // Decompose using default weights
     List<int> finalDecomp;
-    decompose(adjncy, xadj, finalDecomp);
+    decompose(adjncy, xadj, pointWeights, finalDecomp);
 
     // Copy back to labelList
     labelList decomp(finalDecomp.size());
@@ -455,52 +412,11 @@ Foam::labelList Foam::scotchDecomp::decompose(const pointField& points)
 }
 
 
-// From cell-cell connections to Metis format (like CompactListList)
-void Foam::scotchDecomp::calcMetisCSR
-(
-    const labelListList& cellCells,
-    List<int>& adjncy,
-    List<int>& xadj
-)
-{
-    // Count number of internal faces
-    label nConnections = 0;
-
-    forAll(cellCells, coarseI)
-    {
-        nConnections += cellCells[coarseI].size();
-    }
-
-    // Create the adjncy array as twice the size of the total number of
-    // internal faces
-    adjncy.setSize(nConnections);
-
-    xadj.setSize(cellCells.size()+1);
-
-
-    // Fill in xadj
-    // ~~~~~~~~~~~~
-    label freeAdj = 0;
-
-    forAll(cellCells, coarseI)
-    {
-        xadj[coarseI] = freeAdj;
-
-        const labelList& cCells = cellCells[coarseI];
-
-        forAll(cCells, i)
-        {
-            adjncy[freeAdj++] = cCells[i];
-        }
-    }
-    xadj[cellCells.size()] = freeAdj;
-}
-
-
 Foam::labelList Foam::scotchDecomp::decompose
 (
     const labelList& agglom,
-    const pointField& agglomPoints
+    const pointField& agglomPoints,
+    const scalarField& pointWeights
 )
 {
     if (agglom.size() != mesh_.nCells())
@@ -529,13 +445,12 @@ Foam::labelList Foam::scotchDecomp::decompose
             cellCells
         );
 
-        calcMetisCSR(cellCells, adjncy, xadj);
+        metisDecomp::calcMetisCSR(cellCells, adjncy, xadj);
     }
 
-    // Decompose using default weights
+    // Decompose using weights
     List<int> finalDecomp;
-    decompose(adjncy, xadj, finalDecomp);
-
+    decompose(adjncy, xadj, pointWeights, finalDecomp);
 
     // Rework back into decomposition for original mesh_
     labelList fineDistribution(agglom.size());
@@ -552,7 +467,8 @@ Foam::labelList Foam::scotchDecomp::decompose
 Foam::labelList Foam::scotchDecomp::decompose
 (
     const labelListList& globalCellCells,
-    const pointField& cellCentres
+    const pointField& cellCentres,
+    const scalarField& cWeights
 )
 {
     if (cellCentres.size() != globalCellCells.size())
@@ -572,12 +488,11 @@ Foam::labelList Foam::scotchDecomp::decompose
 
     List<int> adjncy;
     List<int> xadj;
-    calcMetisCSR(globalCellCells, adjncy, xadj);
+    metisDecomp::calcMetisCSR(globalCellCells, adjncy, xadj);
 
-
-    // Decompose using default weights
+    // Decompose using weights
     List<int> finalDecomp;
-    decompose(adjncy, xadj, finalDecomp);
+    decompose(adjncy, xadj, cWeights, finalDecomp);
 
     // Copy back to labelList
     labelList decomp(finalDecomp.size());
diff --git a/src/decompositionMethods/decompositionMethods/scotchDecomp/scotchDecomp.H b/src/decompositionMethods/decompositionMethods/scotchDecomp/scotchDecomp.H
index e470f84f98c58170ee7bcc0f8e9298a1ba8f3a9e..54efb1bafa283d3aa9cb1ad84c87670b2894d74e 100644
--- a/src/decompositionMethods/decompositionMethods/scotchDecomp/scotchDecomp.H
+++ b/src/decompositionMethods/decompositionMethods/scotchDecomp/scotchDecomp.H
@@ -63,6 +63,7 @@ class scotchDecomp
         (
             const List<int>& adjncy,
             const List<int>& xadj,
+            const scalarField& cWeights,
             List<int>& finalDecomp
         );
 
@@ -103,7 +104,11 @@ public:
 
         //- Return for every coordinate the wanted processor number. Use the
         //  mesh connectivity (if needed)
-        virtual labelList decompose(const pointField&);
+        virtual labelList decompose
+        (
+            const pointField& points,
+            const scalarField& pointWeights
+        );
 
         //- Return for every coordinate the wanted processor number. Gets
         //  passed agglomeration map (from fine to coarse cells) and coarse cell
@@ -112,7 +117,8 @@ public:
         virtual labelList decompose
         (
             const labelList& agglom,
-            const pointField&
+            const pointField& regionPoints,
+            const scalarField& regionWeights
         );
 
         //- Return for every coordinate the wanted processor number. Explicitly
@@ -125,15 +131,8 @@ public:
         virtual labelList decompose
         (
             const labelListList& globalCellCells,
-            const pointField& cc
-        );
-
-        //- Helper to convert cellcells into compact row storage
-        static void calcMetisCSR
-        (
-            const labelListList& globalCellCells,
-            List<int>& adjncy,
-            List<int>& xadj
+            const pointField& cc,
+            const scalarField& cWeights
         );
 
 };
diff --git a/src/decompositionMethods/decompositionMethods/simpleGeomDecomp/simpleGeomDecomp.C b/src/decompositionMethods/decompositionMethods/simpleGeomDecomp/simpleGeomDecomp.C
index 57cf17e55fdd56164234586db48ddbeede68bd77..50d5842fb8a60683e067d61fa96f310f966b525c 100644
--- a/src/decompositionMethods/decompositionMethods/simpleGeomDecomp/simpleGeomDecomp.C
+++ b/src/decompositionMethods/decompositionMethods/simpleGeomDecomp/simpleGeomDecomp.C
@@ -22,8 +22,6 @@ License
     along with OpenFOAM; if not, write to the Free Software Foundation,
     Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 
-Description
-
 \*---------------------------------------------------------------------------*/
 
 #include "simpleGeomDecomp.H"
@@ -93,6 +91,49 @@ void Foam::simpleGeomDecomp::assignToProcessorGroup
 }
 
 
+void Foam::simpleGeomDecomp::assignToProcessorGroup
+(
+    labelList& processorGroup,
+    const label nProcGroup,
+    const labelList& indices,
+    const scalarField& weights,
+    const scalar summedWeights
+)
+{
+    // This routine gets the sorted points.
+    // Easiest to explain with an example.
+    // E.g. 400 points, sum of weights : 513.
+    // Now with number of divisions in this direction (nProcGroup) : 4
+    // gives the split at 513/4 = 128
+    // So summed weight from 0..128 goes into bin 0,
+    //     ,,              128..256 goes into bin 1
+    //   etc.
+    // Finally any remaining ones go into the last bin (3).
+
+    const scalar jump = summedWeights/nProcGroup;
+    const label nProcGroupM1 = nProcGroup - 1;
+    scalar sumWeights = 0;
+    label ind = 0;
+    label j = 0;
+
+    // assign cells to all except last group.
+    for (j=0; j<nProcGroupM1; j++)
+    {
+        const scalar limit = jump*scalar(j + 1);
+        while (ind < processorGroup.size() && sumWeights < limit)
+        {
+            sumWeights += weights[indices[ind]];
+            processorGroup[ind++] = j;
+        }
+    }
+    // Ensure last included.
+    while (ind < processorGroup.size())
+    {
+       processorGroup[ind++] = nProcGroupM1;
+    }
+}
+
+
 // * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //
 
 Foam::simpleGeomDecomp::simpleGeomDecomp(const dictionary& decompositionDict)
@@ -121,7 +162,10 @@ Foam::labelList Foam::simpleGeomDecomp::decompose(const pointField& points)
     labelList processorGroups(points.size());
 
     labelList pointIndices(points.size());
-    forAll(pointIndices, i) {pointIndices[i] = i;}
+    forAll(pointIndices, i)
+    {
+        pointIndices[i] = i;
+    }
 
     pointField rotatedPoints = rotDelta_ & points;
 
@@ -175,9 +219,101 @@ Foam::labelList Foam::simpleGeomDecomp::decompose(const pointField& points)
         finalDecomp[pointIndices[i]] += n_.x()*n_.y()*processorGroups[i];
     }
 
-
     return finalDecomp;
 }
 
 
+Foam::labelList Foam::simpleGeomDecomp::decompose
+(
+    const pointField& points,
+    const scalarField& weights
+)
+{
+    // construct a list for the final result
+    labelList finalDecomp(points.size());
+
+    labelList processorGroups(points.size());
+
+    labelList pointIndices(points.size());
+    forAll(pointIndices, i)
+    {
+        pointIndices[i] = i;
+    }
+
+    pointField rotatedPoints = rotDelta_ & points;
+
+    // and one to take the processor group id's. For each direction.
+    // we assign the processors to groups of processors labelled
+    // 0..nX to give a banded structure on the mesh. Then we
+    // construct the actual processor number by treating this as
+    // the units part of the processor number.
+    sort
+    (
+        pointIndices,
+        UList<scalar>::less(rotatedPoints.component(vector::X))
+    );
+    
+    const scalar summedWeights = sum(weights);
+    assignToProcessorGroup
+    (
+        processorGroups, 
+        n_.x(), 
+        pointIndices, 
+        weights, 
+        summedWeights
+    );
+
+    forAll(points, i)
+    {
+        finalDecomp[pointIndices[i]] = processorGroups[i];
+    }
+
+
+    // now do the same thing in the Y direction. These processor group
+    // numbers add multiples of nX to the proc. number (columns)
+    sort
+    (
+        pointIndices,
+        UList<scalar>::less(rotatedPoints.component(vector::Y))
+    );
+
+    assignToProcessorGroup
+    (
+        processorGroups,
+        n_.y(), 
+        pointIndices, 
+        weights, 
+        summedWeights
+    );
+
+    forAll(points, i)
+    {
+        finalDecomp[pointIndices[i]] += n_.x()*processorGroups[i];
+    }
+
+
+    // finally in the Z direction. Now we add multiples of nX*nY to give
+    // layers
+    sort
+    (
+        pointIndices,
+        UList<scalar>::less(rotatedPoints.component(vector::Z))
+    );
+
+    assignToProcessorGroup
+    (
+        processorGroups, 
+        n_.z(), 
+        pointIndices, 
+        weights, 
+        summedWeights
+    );
+
+    forAll(points, i)
+    {
+        finalDecomp[pointIndices[i]] += n_.x()*n_.y()*processorGroups[i];
+    }
+
+    return finalDecomp;
+} 
 // ************************************************************************* //
diff --git a/src/decompositionMethods/decompositionMethods/simpleGeomDecomp/simpleGeomDecomp.H b/src/decompositionMethods/decompositionMethods/simpleGeomDecomp/simpleGeomDecomp.H
index e1f312b5f38129930878cdec63e60a0611e3933e..595db50c2ce7877029f0d862a1de3f79c911a79b 100644
--- a/src/decompositionMethods/decompositionMethods/simpleGeomDecomp/simpleGeomDecomp.H
+++ b/src/decompositionMethods/decompositionMethods/simpleGeomDecomp/simpleGeomDecomp.H
@@ -52,6 +52,15 @@ class simpleGeomDecomp
 
         void assignToProcessorGroup(labelList& processorGroup, const label);
 
+        void assignToProcessorGroup
+        (
+            labelList& processorGroup, 
+            const label nProcGroup,
+            const labelList& indices,
+            const scalarField& weights,
+            const scalar summedWeights
+        );
+
         //- Disallow default bitwise copy construct and assignment
         void operator=(const simpleGeomDecomp&);
         simpleGeomDecomp(const simpleGeomDecomp&);
@@ -86,21 +95,31 @@ public:
 
         virtual bool parallelAware() const
         {
-            // simple decomp does not attempt to do anything across proc
+            // simpleDecomp does not attempt to do anything across proc
             // boundaries
             return false;
         }
 
-        virtual labelList decompose(const pointField&);
+        virtual labelList decompose
+        (
+            const pointField& points
+        );
+
+        virtual labelList decompose
+        (
+            const pointField& points,
+            const scalarField& pointWeights
+        );
 
         //- Explicitly provided connectivity
         virtual labelList decompose
         (
             const labelListList& globalCellCells,
-            const pointField& cc
+            const pointField& cc,
+            const scalarField& cWeights
         )
         {
-            return decompose(cc);
+            return decompose(cc, cWeights);
         }
 };
 
diff --git a/src/decompositionMethods/parMetisDecomp/parMetisDecomp.C b/src/decompositionMethods/parMetisDecomp/parMetisDecomp.C
index c6c6e061e92b63f28e20523b2dd5c2f0a1f902fc..c4dde21d65799a895d1fd5f2d913acfc08d6c5d7 100644
--- a/src/decompositionMethods/parMetisDecomp/parMetisDecomp.C
+++ b/src/decompositionMethods/parMetisDecomp/parMetisDecomp.C
@@ -24,9 +24,9 @@ License
 
 \*---------------------------------------------------------------------------*/
 
-#include "syncTools.H"
 #include "parMetisDecomp.H"
 #include "metisDecomp.H"
+#include "syncTools.H"
 #include "addToRunTimeSelectionTable.H"
 #include "floatScalar.H"
 #include "polyMesh.H"
@@ -370,15 +370,22 @@ Foam::parMetisDecomp::parMetisDecomp
 
 // * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //
 
-Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
+Foam::labelList Foam::parMetisDecomp::decompose
+(
+    const pointField& cc,
+    const scalarField& cWeights
+)
 {
-    if (points.size() != mesh_.nCells())
+    if (cc.size() != mesh_.nCells())
     {
-        FatalErrorIn("parMetisDecomp::decompose(const pointField&)")
-            << "Can use this decomposition method only for the whole mesh"
+        FatalErrorIn
+        (
+            "parMetisDecomp::decompose"
+            "(const pointField&, const scalarField&)"
+        )   << "Can use this decomposition method only for the whole mesh"
             << endl
             << "and supply one coordinate (cellCentre) for every cell." << endl
-            << "The number of coordinates " << points.size() << endl
+            << "The number of coordinates " << cc.size() << endl
             << "The number of cells in the mesh " << mesh_.nCells()
             << exit(FatalError);
     }
@@ -386,156 +393,24 @@ Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
     // For running sequential ...
     if (Pstream::nProcs() <= 1)
     {
-        return metisDecomp(decompositionDict_, mesh_).decompose(points);
-    }
-
-    // Create global cell numbers
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    // Get number of cells on all processors
-    List<int> nLocalCells(Pstream::nProcs());
-    nLocalCells[Pstream::myProcNo()] = mesh_.nCells();
-    Pstream::gatherList(nLocalCells);
-    Pstream::scatterList(nLocalCells);
-
-    // Get cell offsets.
-    List<int> cellOffsets(Pstream::nProcs()+1);
-    int nGlobalCells = 0;
-    forAll(nLocalCells, procI)
-    {
-        cellOffsets[procI] = nGlobalCells;
-        nGlobalCells += nLocalCells[procI];
-    }
-    cellOffsets[Pstream::nProcs()] = nGlobalCells;
-
-    int myOffset = cellOffsets[Pstream::myProcNo()];
-
-
-    //
-    // Make Metis Distributed CSR (Compressed Storage Format) storage
-    //   adjncy      : contains cellCells (= edges in graph)
-    //   xadj(celli) : start of information in adjncy for celli
-    //
-
-
-
-    const labelList& faceOwner = mesh_.faceOwner();
-    const labelList& faceNeighbour = mesh_.faceNeighbour();
-    const polyBoundaryMesh& patches = mesh_.boundaryMesh();
-
-
-    // Get renumbered owner on other side of coupled faces
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    List<int> globalNeighbour(mesh_.nFaces()-mesh_.nInternalFaces());
-
-    forAll(patches, patchI)
-    {
-        const polyPatch& pp = patches[patchI];
-
-        if (pp.coupled())
-        {
-            label faceI = pp.start();
-            label bFaceI = pp.start() - mesh_.nInternalFaces();
-
-            forAll(pp, i)
-            {
-                globalNeighbour[bFaceI++] = faceOwner[faceI++] + myOffset;
-            }
-        }
-    }
-
-    // Get the cell on the other side of coupled patches
-    syncTools::swapBoundaryFaceList(mesh_, globalNeighbour, false);
-
-
-    // Count number of faces (internal + coupled)
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    // Number of faces per cell
-    List<int> nFacesPerCell(mesh_.nCells(), 0);
-
-    // Number of coupled faces
-    label nCoupledFaces = 0;
-
-    for (label faceI = 0; faceI < mesh_.nInternalFaces(); faceI++)
-    {
-        nFacesPerCell[faceOwner[faceI]]++;
-        nFacesPerCell[faceNeighbour[faceI]]++;
-    }
-    // Handle coupled faces
-    forAll(patches, patchI)
-    {
-        const polyPatch& pp = patches[patchI];
-
-        if (pp.coupled())
-        {
-            label faceI = pp.start();
-
-            forAll(pp, i)
-            {
-                nCoupledFaces++;
-                nFacesPerCell[faceOwner[faceI++]]++;
-            }
-        }
-    }
-
-
-    // Fill in xadj
-    // ~~~~~~~~~~~~
-
-    Field<int> xadj(mesh_.nCells()+1, -1);
-
-    int freeAdj = 0;
-
-    for (label cellI = 0; cellI < mesh_.nCells(); cellI++)
-    {
-        xadj[cellI] = freeAdj;
-
-        freeAdj += nFacesPerCell[cellI];
+        return metisDecomp(decompositionDict_, mesh_).decompose
+        (
+            cc,
+            cWeights
+        );
     }
-    xadj[mesh_.nCells()] = freeAdj;
-
 
 
-    // Fill in adjncy
-    // ~~~~~~~~~~~~~~
-
-    Field<int> adjncy(2*mesh_.nInternalFaces() + nCoupledFaces, -1);
-
-    nFacesPerCell = 0;
-
-    // For internal faces is just offsetted owner and neighbour
-    for (label faceI = 0; faceI < mesh_.nInternalFaces(); faceI++)
-    {
-        label own = faceOwner[faceI];
-        label nei = faceNeighbour[faceI];
-
-        adjncy[xadj[own] + nFacesPerCell[own]++] = nei + myOffset;
-        adjncy[xadj[nei] + nFacesPerCell[nei]++] = own + myOffset;
-    }
-    // For boundary faces is offsetted coupled neighbour
-    forAll(patches, patchI)
-    {
-        const polyPatch& pp = patches[patchI];
-
-        if (pp.coupled())
-        {
-            label faceI = pp.start();
-            label bFaceI = pp.start()-mesh_.nInternalFaces();
-
-            forAll(pp, i)
-            {
-                label own = faceOwner[faceI];
-                adjncy[xadj[own] + nFacesPerCell[own]++] =
-                    globalNeighbour[bFaceI];
-
-                faceI++;
-                bFaceI++;
-            }
-        }
-    }
-
+    // Connections
+    Field<int> adjncy;
+    // Offsets into adjncy
+    Field<int> xadj;
+    calcMetisDistributedCSR
+    (
+        mesh_,
+        adjncy,
+        xadj
+    );
 
 
     // decomposition options. 0 = use defaults
@@ -550,6 +425,30 @@ Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
     // face weights (so on the edges of the dual)
     Field<int> faceWeights;
 
+
+    // Check for externally provided cellweights and if so initialise weights
+    const scalar maxWeights = cWeights.size() ? gMax(cWeights) : 1.0;
+    if (cWeights.size() > 0)
+    {
+        if (cWeights.size() != mesh_.nCells())
+        {
+            FatalErrorIn
+            (
+                "parMetisDecomp::decompose"
+                "(const pointField&, const scalarField&)"
+            )   << "Number of cell weights " << cWeights.size()
+                << " does not equal number of cells " << mesh_.nCells()
+                << exit(FatalError);
+        }
+        // Convert to integers.
+        cellWeights.setSize(cWeights.size());
+        forAll(cellWeights, i)
+        {
+            cellWeights[i] = int(1000000*cWeights[i]/maxWeights);
+        }
+    }
+
+
     // Check for user supplied weights and decomp options
     if (decompositionDict_.found("metisCoeffs"))
     {
@@ -577,8 +476,11 @@ Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
 
             if (cellWeights.size() != mesh_.nCells())
             {
-                FatalErrorIn("parMetisDecomp::decompose(const pointField&)")
-                    << "Number of cell weights " << cellWeights.size()
+                FatalErrorIn
+                (
+                    "parMetisDecomp::decompose"
+                    "(const pointField&, const scalarField&)"
+                )   << "Number of cell weights " << cellWeights.size()
                     << " read from " << cellIOWeights.objectPath()
                     << " does not equal number of cells " << mesh_.nCells()
                     << exit(FatalError);
@@ -604,30 +506,35 @@ Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
 
             if (weights.size() != mesh_.nFaces())
             {
-                FatalErrorIn("parMetisDecomp::decompose(const pointField&)")
-                    << "Number of face weights " << weights.size()
+                FatalErrorIn
+                (
+                    "parMetisDecomp::decompose"
+                    "(const pointField&, const scalarField&)"
+                )   << "Number of face weights " << weights.size()
                     << " does not equal number of internal and boundary faces "
                     << mesh_.nFaces()
                     << exit(FatalError);
             }
 
-            faceWeights.setSize(2*mesh_.nInternalFaces()+nCoupledFaces);
+            faceWeights.setSize(adjncy.size());
 
             // Assume symmetric weights. Keep same ordering as adjncy.
-            nFacesPerCell = 0;
+            List<int> nFacesPerCell(mesh_.nCells(), 0);
 
             // Handle internal faces
             for (label faceI = 0; faceI < mesh_.nInternalFaces(); faceI++)
             {
                 label w = weights[faceI];
 
-                label own = faceOwner[faceI];
-                label nei = faceNeighbour[faceI];
+                label own = mesh_.faceOwner()[faceI];
+                label nei = mesh_.faceNeighbour()[faceI];
 
                 faceWeights[xadj[own] + nFacesPerCell[own]++] = w;
                 faceWeights[xadj[nei] + nFacesPerCell[nei]++] = w;
             }
             // Coupled boundary faces
+            const polyBoundaryMesh& patches = mesh_.boundaryMesh();
+
             forAll(patches, patchI)
             {
                 const polyPatch& pp = patches[patchI];
@@ -639,8 +546,8 @@ Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
                     forAll(pp, i)
                     {
                         label w = weights[faceI];
-                        label own = faceOwner[faceI];
-                        adjncy[xadj[own] + nFacesPerCell[own]++] = w;
+                        label own = mesh_.faceOwner()[faceI];
+                        faceWeights[xadj[own] + nFacesPerCell[own]++] = w;
                         faceI++;
                     }
                 }
@@ -654,8 +561,11 @@ Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
 
             if (options.size() != 3)
             {
-                FatalErrorIn("parMetisDecomp::decompose(const pointField&)")
-                    << "Number of options " << options.size()
+                FatalErrorIn
+                (
+                    "parMetisDecomp::decompose"
+                    "(const pointField&, const scalarField&)"
+                )   << "Number of options " << options.size()
                     << " should be three." << exit(FatalError);
             }
         }
@@ -668,7 +578,7 @@ Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
     (
         xadj,
         adjncy,
-        points,
+        cc,
         cellWeights,
         faceWeights,
         options,
@@ -689,7 +599,8 @@ Foam::labelList Foam::parMetisDecomp::decompose(const pointField& points)
 Foam::labelList Foam::parMetisDecomp::decompose
 (
     const labelList& cellToRegion,
-    const pointField& regionPoints
+    const pointField& regionPoints,
+    const scalarField& regionWeights
 )
 {
     const labelList& faceOwner = mesh_.faceOwner();
@@ -798,7 +709,15 @@ Foam::labelList Foam::parMetisDecomp::decompose
         }
     }
 
-    labelList regionDecomp(decompose(globalRegionRegions, regionPoints));
+    labelList regionDecomp
+    (
+        decompose
+        (
+            globalRegionRegions,
+            regionPoints,
+            regionWeights
+        )
+    );
 
     // Rework back into decomposition for original mesh_
     labelList cellDistribution(cellToRegion.size());
@@ -814,24 +733,26 @@ Foam::labelList Foam::parMetisDecomp::decompose
 Foam::labelList Foam::parMetisDecomp::decompose
 (
     const labelListList& globalCellCells,
-    const pointField& cellCentres
+    const pointField& cellCentres,
+    const scalarField& cWeights
 )
 {
     if (cellCentres.size() != globalCellCells.size())
     {
         FatalErrorIn
         (
-            "parMetisDecomp::decompose(const labelListList&, const pointField&)"
+            "parMetisDecomp::decompose(const labelListList&"
+            ", const pointField&, const scalarField&)"
         )   << "Inconsistent number of cells (" << globalCellCells.size()
             << ") and number of cell centres (" << cellCentres.size()
-            << ")." << exit(FatalError);
+            << ") or weights (" << cWeights.size() << ")." << exit(FatalError);
     }
 
     // For running sequential ...
     if (Pstream::nProcs() <= 1)
     {
         return metisDecomp(decompositionDict_, mesh_)
-            .decompose(globalCellCells, cellCentres);
+            .decompose(globalCellCells, cellCentres, cWeights);
     }
 
 
@@ -855,106 +776,35 @@ Foam::labelList Foam::parMetisDecomp::decompose
     // face weights (so on the edges of the dual)
     Field<int> faceWeights;
 
-    // Check for user supplied weights and decomp options
-    if (decompositionDict_.found("metisCoeffs"))
-    {
-        const dictionary& metisCoeffs =
-            decompositionDict_.subDict("metisCoeffs");
-        word weightsFile;
 
-        if (metisCoeffs.readIfPresent("cellWeightsFile", weightsFile))
+    // Check for externally provided cell weights and, if so, initialise weights
+    scalar maxWeights = gMax(cWeights);
+    if (cWeights.size() > 0)
+    {
+        if (cWeights.size() != globalCellCells.size())
         {
-            Info<< "parMetisDecomp : Using cell-based weights read from "
-                << weightsFile << endl;
-
-            labelIOField cellIOWeights
+            FatalErrorIn
             (
-                IOobject
-                (
-                    weightsFile,
-                    mesh_.time().timeName(),
-                    mesh_,
-                    IOobject::MUST_READ,
-                    IOobject::AUTO_WRITE
-                )
-            );
-            cellWeights.transfer(cellIOWeights);
-
-            if (cellWeights.size() != cellCentres.size())
-            {
-                FatalErrorIn
-                (
-                    "parMetisDecomp::decompose"
-                    "(const labelListList&, const pointField&)"
-                )   << "Number of cell weights " << cellWeights.size()
-                    << " read from " << cellIOWeights.objectPath()
-                    << " does not equal number of cells " << cellCentres.size()
-                    << exit(FatalError);
-            }
+                "parMetisDecomp::decompose(const labelListList&"
+                ", const pointField&, const scalarField&)"
+            )   << "Number of cell weights " << cWeights.size()
+                << " does not equal number of cells " << globalCellCells.size()
+                << exit(FatalError);
         }
+        // Convert to integers.
+        cellWeights.setSize(cWeights.size());
+        forAll(cellWeights, i)
+        {
+            cellWeights[i] = int(1000000*cWeights[i]/maxWeights);
+        }
+    }
 
-        //- faceWeights disabled. Only makes sense for cellCells from mesh.
-        //if (metisCoeffs.readIfPresent("faceWeightsFile", weightsFile))
-        //{
-        //    Info<< "parMetisDecomp : Using face-based weights read from "
-        //        << weightsFile << endl;
-        //
-        //    labelIOField weights
-        //    (
-        //        IOobject
-        //        (
-        //            weightsFile,
-        //            mesh_.time().timeName(),
-        //            mesh_,
-        //            IOobject::MUST_READ,
-        //            IOobject::AUTO_WRITE
-        //        )
-        //    );
-        //
-        //    if (weights.size() != mesh_.nFaces())
-        //    {
-        //        FatalErrorIn("parMetisDecomp::decompose(const pointField&)")
-        //            << "Number of face weights " << weights.size()
-        //            << " does not equal number of internal and boundary faces "
-        //            << mesh_.nFaces()
-        //            << exit(FatalError);
-        //    }
-        //
-        //    faceWeights.setSize(2*mesh_.nInternalFaces()+nCoupledFaces);
-        //
-        //    // Assume symmetric weights. Keep same ordering as adjncy.
-        //    nFacesPerCell = 0;
-        //
-        //    // Handle internal faces
-        //    for (label faceI = 0; faceI < mesh_.nInternalFaces(); faceI++)
-        //    {
-        //        label w = weights[faceI];
-        //
-        //        label own = faceOwner[faceI];
-        //        label nei = faceNeighbour[faceI];
-        //
-        //        faceWeights[xadj[own] + nFacesPerCell[own]++] = w;
-        //        faceWeights[xadj[nei] + nFacesPerCell[nei]++] = w;
-        //    }
-        //    // Coupled boundary faces
-        //    forAll(patches, patchI)
-        //    {
-        //        const polyPatch& pp = patches[patchI];
-        //
-        //        if (pp.coupled())
-        //        {
-        //            label faceI = pp.start();
-        //
-        //            forAll(pp, i)
-        //            {
-        //                label w = weights[faceI];
-        //                label own = faceOwner[faceI];
-        //                adjncy[xadj[own] + nFacesPerCell[own]++] = w;
-        //                faceI++;
-        //            }
-        //        }
-        //    }
-        //}
+
+    // Check for user supplied weights and decomp options
+    if (decompositionDict_.found("metisCoeffs"))
+    {
+        const dictionary& metisCoeffs =
+            decompositionDict_.subDict("metisCoeffs");
 
         if (metisCoeffs.readIfPresent("options", options))
         {
@@ -965,8 +815,8 @@ Foam::labelList Foam::parMetisDecomp::decompose
             {
                 FatalErrorIn
                 (
-                    "parMetisDecomp::decompose"
-                    "(const labelListList&, const pointField&)"
+                    "parMetisDecomp::decompose(const labelListList&"
+                    ", const pointField&, const scalarField&)"
                 )   << "Number of options " << options.size()
                     << " should be three." << exit(FatalError);
             }
@@ -998,4 +848,146 @@ Foam::labelList Foam::parMetisDecomp::decompose
 }
 
 
+void Foam::parMetisDecomp::calcMetisDistributedCSR
+(
+    const polyMesh& mesh,
+    List<int>& adjncy,
+    List<int>& xadj
+)
+{
+    // Create global cell numbers
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    globalIndex globalCells(mesh.nCells());
+
+
+    //
+    // Make Metis Distributed CSR (Compressed Storage Format) storage
+    //   adjncy      : contains cellCells (= edges in graph)
+    //   xadj(celli) : start of information in adjncy for celli
+    //
+
+
+    const labelList& faceOwner = mesh.faceOwner();
+    const labelList& faceNeighbour = mesh.faceNeighbour();
+    const polyBoundaryMesh& patches = mesh.boundaryMesh();
+
+
+    // Get renumbered owner on other side of coupled faces
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    List<int> globalNeighbour(mesh.nFaces()-mesh.nInternalFaces());
+
+    forAll(patches, patchI)
+    {
+        const polyPatch& pp = patches[patchI];
+
+        if (pp.coupled())
+        {
+            label faceI = pp.start();
+            label bFaceI = pp.start() - mesh.nInternalFaces();
+
+            forAll(pp, i)
+            {
+                globalNeighbour[bFaceI++] = globalCells.toGlobal
+                (
+                    faceOwner[faceI++]
+                );
+            }
+        }
+    }
+
+    // Get the cell on the other side of coupled patches
+    syncTools::swapBoundaryFaceList(mesh, globalNeighbour, false);
+
+
+    // Count number of faces (internal + coupled)
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    // Number of faces per cell
+    List<int> nFacesPerCell(mesh.nCells(), 0);
+
+    // Number of coupled faces
+    label nCoupledFaces = 0;
+
+    for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
+    {
+        nFacesPerCell[faceOwner[faceI]]++;
+        nFacesPerCell[faceNeighbour[faceI]]++;
+    }
+    // Handle coupled faces
+    forAll(patches, patchI)
+    {
+        const polyPatch& pp = patches[patchI];
+
+        if (pp.coupled())
+        {
+            label faceI = pp.start();
+
+            forAll(pp, i)
+            {
+                nCoupledFaces++;
+                nFacesPerCell[faceOwner[faceI++]]++;
+            }
+        }
+    }
+
+
+    // Fill in xadj
+    // ~~~~~~~~~~~~
+
+    xadj.setSize(mesh.nCells()+1);
+
+    int freeAdj = 0;
+
+    for (label cellI = 0; cellI < mesh.nCells(); cellI++)
+    {
+        xadj[cellI] = freeAdj;
+
+        freeAdj += nFacesPerCell[cellI];
+    }
+    xadj[mesh.nCells()] = freeAdj;
+
+
+
+    // Fill in adjncy
+    // ~~~~~~~~~~~~~~
+
+    adjncy.setSize(2*mesh.nInternalFaces() + nCoupledFaces);
+
+    nFacesPerCell = 0;
+
+    // For internal faces is just offsetted owner and neighbour
+    for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
+    {
+        label own = faceOwner[faceI];
+        label nei = faceNeighbour[faceI];
+
+        adjncy[xadj[own] + nFacesPerCell[own]++] = globalCells.toGlobal(nei);
+        adjncy[xadj[nei] + nFacesPerCell[nei]++] = globalCells.toGlobal(own);
+    }
+    // For boundary faces is offsetted coupled neighbour
+    forAll(patches, patchI)
+    {
+        const polyPatch& pp = patches[patchI];
+
+        if (pp.coupled())
+        {
+            label faceI = pp.start();
+            label bFaceI = pp.start()-mesh.nInternalFaces();
+
+            forAll(pp, i)
+            {
+                label own = faceOwner[faceI];
+                adjncy[xadj[own] + nFacesPerCell[own]++] =
+                    globalNeighbour[bFaceI];
+
+                faceI++;
+                bFaceI++;
+            }
+        }
+    }
+}
+
+
 // ************************************************************************* //
diff --git a/src/decompositionMethods/parMetisDecomp/parMetisDecomp.H b/src/decompositionMethods/parMetisDecomp/parMetisDecomp.H
index 13aa57277c488ca93f1eef39d4358a10f2d13d77..dda817355739bf0afe3b12f6497951b3ba28d392 100644
--- a/src/decompositionMethods/parMetisDecomp/parMetisDecomp.H
+++ b/src/decompositionMethods/parMetisDecomp/parMetisDecomp.H
@@ -112,7 +112,11 @@ public:
 
         //- Return for every coordinate the wanted processor number. Use the
         //  mesh connectivity (if needed)
-        virtual labelList decompose(const pointField&);
+        virtual labelList decompose
+        (
+            const pointField& points,
+            const scalarField& pointWeights
+        );
 
         //- Return for every coordinate the wanted processor number. Gets
         //  passed agglomeration map (from fine to coarse cells) and coarse cell
@@ -120,8 +124,9 @@ public:
         //  functionality natively.
         virtual labelList decompose
         (
-            const labelList& agglom,
-            const pointField&
+            const labelList& cellToRegion,
+            const pointField& regionPoints,
+            const scalarField& regionWeights
         );
 
         //- Return for every coordinate the wanted processor number. Explicitly
@@ -134,7 +139,16 @@ public:
         virtual labelList decompose
         (
             const labelListList& globalCellCells,
-            const pointField& cc
+            const pointField& cc,
+            const scalarField& cWeights
+        );
+
+        //- Helper to convert mesh connectivity into distributed CSR
+        static void calcMetisDistributedCSR
+        (
+            const polyMesh&,
+            List<int>& adjncy,
+            List<int>& xadj
         );
 };
 
diff --git a/src/postProcessing/functionObjects/Allwmake b/src/postProcessing/functionObjects/Allwmake
index 6c642dfe7b6333d4e71ab8c22f4458da73294503..b068fe51cb80c3abf2a00263f01ea7ff3237042a 100755
--- a/src/postProcessing/functionObjects/Allwmake
+++ b/src/postProcessing/functionObjects/Allwmake
@@ -7,5 +7,6 @@ wmake libso forces
 wmake libso IO
 wmake libso utilities
 wmake libso systemCall
+wmake libso zones
 
 # ----------------------------------------------------------------- end-of-file
diff --git a/src/postProcessing/functionObjects/field/fieldMinMax/fieldMinMax.C b/src/postProcessing/functionObjects/field/fieldMinMax/fieldMinMax.C
index ec77782df4e7100291150b701ecb5bafb280bc0b..31e511377ee2152c32861ad1e011feae4ed0868e 100644
--- a/src/postProcessing/functionObjects/field/fieldMinMax/fieldMinMax.C
+++ b/src/postProcessing/functionObjects/field/fieldMinMax/fieldMinMax.C
@@ -197,13 +197,13 @@ void Foam::fieldMinMax::calcMinMaxFields<Foam::scalar>
 {
     if (obr_.foundObject<volScalarField>(fieldName))
     {
+        const volScalarField& field =
+            obr_.lookupObject<volScalarField>(fieldName);
+        scalar minValue = min(field).value();
+        scalar maxValue = max(field).value();
+
         if (Pstream::master())
         {
-            const volScalarField& field =
-                obr_.lookupObject<volScalarField>(fieldName);
-            scalar minValue = min(field).value();
-            scalar maxValue = max(field).value();
-
             fieldMinMaxFilePtr_() << obr_.time().value() << tab
                 << fieldName << tab << minValue << tab << maxValue << endl;
 
diff --git a/src/postProcessing/functionObjects/field/fieldMinMax/fieldMinMaxTemplates.C b/src/postProcessing/functionObjects/field/fieldMinMax/fieldMinMaxTemplates.C
index 25daf62dd6ee73ecaacf41d0a340eb7d30332327..66f212f4c36d32f1178bd7473f6d51e6c923bf77 100644
--- a/src/postProcessing/functionObjects/field/fieldMinMax/fieldMinMaxTemplates.C
+++ b/src/postProcessing/functionObjects/field/fieldMinMax/fieldMinMaxTemplates.C
@@ -38,16 +38,16 @@ void Foam::fieldMinMax::calcMinMaxFields(const word& fieldName)
 
     if (obr_.foundObject<fieldType>(fieldName))
     {
-        if (Pstream::master())
+        const fieldType& field = obr_.lookupObject<fieldType>(fieldName);
+        switch (mode_)
         {
-            const fieldType& field = obr_.lookupObject<fieldType>(fieldName);
-            switch (mode_)
+            case mdMag:
             {
-                case mdMag:
-                {
-                    scalar minValue = min(mag(field)).value();
-                    scalar maxValue = max(mag(field)).value();
+                scalar minValue = min(mag(field)).value();
+                scalar maxValue = max(mag(field)).value();
 
+                if (Pstream::master())
+                {
                     fieldMinMaxFilePtr_() << obr_.time().value() << tab
                         << fieldName << tab << minValue << tab << maxValue
                         << endl;
@@ -61,13 +61,16 @@ void Foam::fieldMinMax::calcMinMaxFields(const word& fieldName)
                             << maxValue << nl
                             << endl;
                     }
-                    break;
                 }
-                case mdCmpt:
-                {
-                    Type minValue = min(field).value();
-                    Type maxValue = max(field).value();
+                break;
+            }
+            case mdCmpt:
+            {
+                Type minValue = min(field).value();
+                Type maxValue = max(field).value();
 
+                if (Pstream::master())
+                {
                     fieldMinMaxFilePtr_() << obr_.time().value() << tab
                         << fieldName << tab << minValue << tab << maxValue
                         << endl;
@@ -81,17 +84,17 @@ void Foam::fieldMinMax::calcMinMaxFields(const word& fieldName)
                             << maxValue << nl
                             << endl;
                     }
-                    break;
-                }
-                default:
-                {
-                    FatalErrorIn
-                    (
-                        "Foam::fieldMinMax::calcMinMaxFields"
-                        "(const word& fieldName)"
-                    )<< "Unknown min/max mode: " << modeTypeNames_[mode_]
-                     << exit(FatalError);
                 }
+                break;
+            }
+            default:
+            {
+                FatalErrorIn
+                (
+                    "Foam::fieldMinMax::calcMinMaxFields"
+                    "(const word& fieldName)"
+                )<< "Unknown min/max mode: " << modeTypeNames_[mode_]
+                 << exit(FatalError);
             }
         }
     }
diff --git a/src/postProcessing/functionObjects/zones/faceZoneIntegration/faceZonesIntegration.C b/src/postProcessing/functionObjects/zones/faceZoneIntegration/faceZonesIntegration.C
index 0b8314667c728839bc11e72eddc2149e94026fdd..e0489208991d047a207359280ded08d5df2ce5ef 100644
--- a/src/postProcessing/functionObjects/zones/faceZoneIntegration/faceZonesIntegration.C
+++ b/src/postProcessing/functionObjects/zones/faceZoneIntegration/faceZonesIntegration.C
@@ -191,7 +191,7 @@ void Foam::faceZonesIntegration::write()
     if (active_)
     {
         makeFile();
-        scalar dm = 0.0;
+
         forAll(fItems_, fieldI)
         {
             const word& fieldName = fItems_[fieldI];
@@ -201,8 +201,30 @@ void Foam::faceZonesIntegration::write()
 
             const fvMesh& mesh = fD.mesh();
 
+            // 1. integrate over all face zones
+
+            scalarField integralVals(faceZonesSet_.size());
+
+            forAll(faceZonesSet_, setI)
+            {
+                const word name = faceZonesSet_[setI];
+
+                label zoneID = mesh.faceZones().findZoneID(name);
+
+                const faceZone& fz = mesh.faceZones()[zoneID];
+
+                integralVals[setI] = returnReduce
+                (
+                    calcFaceZonesIntegral(fD, fz),
+                    sumOp<scalar>()
+                );
+            }
+
+
             unsigned int w = IOstream::defaultPrecision() + 7;
 
+            // 2. Write only on master
+
             if
             (
                 Pstream::master()
@@ -213,26 +235,14 @@ void Foam::faceZonesIntegration::write()
 
                 os  << obr_.time().value();
 
-                const faceZoneMesh& faceZoneList =  mesh.faceZones();
-
-                forAll(faceZonesSet_, zoneI)
+                forAll(integralVals, setI)
                 {
-                    const word name = faceZonesSet_[zoneI];
-
-                    label zoneID = faceZoneList.findZoneID(name);
-
-                    const faceZone& fz = mesh.faceZones()[zoneID];
-
-                    dm = calcFaceZonesIntegral(fD, fz);
-
-                    reduce(dm, sumOp<scalar>());
-
-                    os  << ' ' << setw(w) << dm;
+                    os  << ' ' << setw(w) << integralVals[setI];
 
                     if (log_)
                     {
                         Info<< "faceZonesIntegration output:" << nl
-                            << "    Integration" << dm << endl;
+                            << "    Integration" << integralVals[setI] << endl;
                     }
                 }
 
@@ -258,7 +268,7 @@ Foam::scalar Foam::faceZonesIntegration::calcFaceZonesIntegral
 
         if (mesh.isInternalFace(faceI))
         {
-            if (fz.flipMap()[faceI])
+            if (fz.flipMap()[i])
             {
                 dm -= fD[faceI];
             }
@@ -275,7 +285,7 @@ Foam::scalar Foam::faceZonesIntegration::calcFaceZonesIntegral
             {
                 if (refCast<const processorPolyPatch>(pp).owner())
                 {
-                    if (fz.flipMap()[faceI])
+                    if (fz.flipMap()[i])
                     {
                         dm -= fD.boundaryField()[patchI][pp.whichFace(faceI)];
                     }
@@ -290,7 +300,7 @@ Foam::scalar Foam::faceZonesIntegration::calcFaceZonesIntegral
                 label patchFaceI = faceI - pp.start();
                 if (patchFaceI < pp.size()/2)
                 {
-                    if (fz.flipMap()[patchFaceI])
+                    if (fz.flipMap()[i])
                     {
                         dm -= fD.boundaryField()[patchI][patchFaceI];
                     }
@@ -303,7 +313,7 @@ Foam::scalar Foam::faceZonesIntegration::calcFaceZonesIntegral
             else if (!isA<emptyPolyPatch>(pp))
             {
                 label patchFaceI = faceI - pp.start();
-                if (fz.flipMap()[patchFaceI])
+                if (fz.flipMap()[i])
                 {
                     dm -= fD.boundaryField()[patchI][patchFaceI];
                 }
diff --git a/src/postProcessing/functionObjects/zones/faceZoneIntegration/faceZonesIntegration.H b/src/postProcessing/functionObjects/zones/faceZoneIntegration/faceZonesIntegration.H
index 5fc957bba739954dcf3181cd03d5a7f654e6609f..87659ac3406dc03dec4864001a88819d708729a6 100644
--- a/src/postProcessing/functionObjects/zones/faceZoneIntegration/faceZonesIntegration.H
+++ b/src/postProcessing/functionObjects/zones/faceZoneIntegration/faceZonesIntegration.H
@@ -73,16 +73,13 @@ protected:
 
         const objectRegistry& obr_;
 
-        //- on/off switch
-        bool active_;
+        // Read from dictionary
 
-        //- Switch to send output to Info as well as to file
-        Switch log_;
+            //- on/off switch
+            bool active_;
 
-        //- Current open files
-        HashPtrTable<OFstream> faceZonesIntegrationFilePtr_;
-
-        // Read from dictionary
+            //- Switch to send output to Info as well as to file
+            Switch log_;
 
             //- faceZones to integrate over
             wordList faceZonesSet_;
@@ -91,6 +88,11 @@ protected:
             wordList fItems_;
 
 
+        //- Current open files
+        HashPtrTable<OFstream> faceZonesIntegrationFilePtr_;
+
+
+
     // Private Member Functions
 
         //- If the integration file has not been created create it