diff --git a/applications/utilities/mesh/manipulation/splitMeshRegions/splitMeshRegions.C b/applications/utilities/mesh/manipulation/splitMeshRegions/splitMeshRegions.C
index 4bce1fdd9bdbce1bd81f155daae7930543b846fc..eddfe5189845f71f601a8c39aa49880184d942fd 100644
--- a/applications/utilities/mesh/manipulation/splitMeshRegions/splitMeshRegions.C
+++ b/applications/utilities/mesh/manipulation/splitMeshRegions/splitMeshRegions.C
@@ -200,6 +200,7 @@ void subsetSurfaceFields
 (
     const fvMesh& mesh,
     const fvMesh& subMesh,
+    const labelList& cellMap,
     const labelList& faceMap,
     const labelHashSet& addedPatches
 )
@@ -223,6 +224,7 @@ void subsetSurfaceFields
                 fld,
                 subMesh,
                 patchMap,
+                cellMap,
                 faceMap
             )
         );
@@ -828,6 +830,7 @@ void createAndWriteRegion
     (
         mesh,
         newMesh(),
+        map().cellMap(),
         map().faceMap(),
         addedPatches
     );
@@ -835,6 +838,7 @@ void createAndWriteRegion
     (
         mesh,
         newMesh(),
+        map().cellMap(),
         map().faceMap(),
         addedPatches
     );
@@ -842,6 +846,7 @@ void createAndWriteRegion
     (
         mesh,
         newMesh(),
+        map().cellMap(),
         map().faceMap(),
         addedPatches
     );
@@ -849,6 +854,7 @@ void createAndWriteRegion
     (
         mesh,
         newMesh(),
+        map().cellMap(),
         map().faceMap(),
         addedPatches
     );
@@ -856,6 +862,7 @@ void createAndWriteRegion
     (
         mesh,
         newMesh(),
+        map().cellMap(),
         map().faceMap(),
         addedPatches
     );
diff --git a/applications/utilities/mesh/manipulation/subsetMesh/Make/options b/applications/utilities/mesh/manipulation/subsetMesh/Make/options
index 969020c4afaf5d784299462b9e1af282040ba6b4..759535d95f96feac249c1cbc64c22285689caa8e 100644
--- a/applications/utilities/mesh/manipulation/subsetMesh/Make/options
+++ b/applications/utilities/mesh/manipulation/subsetMesh/Make/options
@@ -1,8 +1,8 @@
 EXE_INC = \
     -I$(LIB_SRC)/finiteVolume/lnInclude \
+    -I$(LIB_SRC)/dynamicMesh/lnInclude \
     -I$(LIB_SRC)/meshTools/lnInclude
 
 EXE_LIBS = \
-    -lfiniteVolume \
-    -lmeshTools \
+    -ldynamicMesh \
     -lgenericPatchFields
diff --git a/applications/utilities/mesh/manipulation/subsetMesh/subsetMesh.C b/applications/utilities/mesh/manipulation/subsetMesh/subsetMesh.C
index 896ee84bddfb3f0f5b193cb3a4efa493a2be93b9..adebc93d38f86122c3b806d62ffb6fc9520e5841 100644
--- a/applications/utilities/mesh/manipulation/subsetMesh/subsetMesh.C
+++ b/applications/utilities/mesh/manipulation/subsetMesh/subsetMesh.C
@@ -150,6 +150,40 @@ void subsetPointFields
 }
 
 
+template<class Type>
+void subsetDimensionedFields
+(
+    const fvMeshSubset& subsetter,
+    const wordList& fieldNames,
+    PtrList<DimensionedField<Type, volMesh> >& subFields
+)
+{
+    const fvMesh& baseMesh = subsetter.baseMesh();
+
+    forAll(fieldNames, i)
+    {
+        const word& fieldName = fieldNames[i];
+
+        Info<< "Subsetting field " << fieldName << endl;
+
+        DimensionedField<Type, volMesh> fld
+        (
+            IOobject
+            (
+                fieldName,
+                baseMesh.time().timeName(),
+                baseMesh,
+                IOobject::MUST_READ,
+                IOobject::NO_WRITE
+            ),
+            baseMesh
+        );
+
+        subFields.set(i, subsetter.interpolate(fld));
+    }
+}
+
+
 
 int main(int argc, char *argv[])
 {
@@ -361,6 +395,42 @@ int main(int argc, char *argv[])
     subsetPointFields(subsetter, pMesh, pointTensorNames, pointTensorFlds);
 
+    // Read dimensioned fields and subset
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    typedef volScalarField::Internal dimScalType;
+    wordList scalarDimNames(objects.names(dimScalType::typeName));
+    PtrList<dimScalType> scalarDimFlds(scalarDimNames.size());
+    subsetDimensionedFields(subsetter, scalarDimNames, scalarDimFlds);
+
+    typedef volVectorField::Internal dimVecType;
+    wordList vectorDimNames(objects.names(dimVecType::typeName));
+    PtrList<dimVecType>
vectorDimFlds(vectorDimNames.size()); + subsetDimensionedFields(subsetter, vectorDimNames, vectorDimFlds); + + typedef volSphericalTensorField::Internal dimSphereType; + wordList sphericalTensorDimNames(objects.names(dimSphereType::typeName)); + PtrList<dimSphereType> sphericalTensorDimFlds + ( + sphericalTensorDimNames.size() + ); + subsetDimensionedFields + ( + subsetter, + sphericalTensorDimNames, + sphericalTensorDimFlds + ); + + typedef volSymmTensorField::Internal dimSymmTensorType; + wordList symmTensorDimNames(objects.names(dimSymmTensorType::typeName)); + PtrList<dimSymmTensorType> symmTensorDimFlds(symmTensorDimNames.size()); + subsetDimensionedFields(subsetter, symmTensorDimNames, symmTensorDimFlds); + + typedef volTensorField::Internal dimTensorType; + wordList tensorDimNames(objects.names(dimTensorType::typeName)); + PtrList<dimTensorType> tensorDimFlds(tensorDimNames.size()); + subsetDimensionedFields(subsetter, tensorDimNames, tensorDimFlds); + // Write mesh and fields to new time // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -461,6 +531,33 @@ int main(int argc, char *argv[]) pointTensorFlds[i].write(); } + // DimensionedFields + forAll(scalarDimFlds, i) + { + scalarDimFlds[i].rename(scalarDimNames[i]); + scalarDimFlds[i].write(); + } + forAll(vectorDimFlds, i) + { + vectorDimFlds[i].rename(vectorDimNames[i]); + vectorDimFlds[i].write(); + } + forAll(sphericalTensorDimFlds, i) + { + sphericalTensorDimFlds[i].rename(sphericalTensorDimNames[i]); + sphericalTensorDimFlds[i].write(); + } + forAll(symmTensorDimFlds, i) + { + symmTensorDimFlds[i].rename(symmTensorDimNames[i]); + symmTensorDimFlds[i].write(); + } + forAll(tensorDimFlds, i) + { + tensorDimFlds[i].rename(tensorDimNames[i]); + tensorDimFlds[i].write(); + } + Info<< "End\n" << endl; diff --git a/applications/utilities/parallelProcessing/decomposePar/Make/options b/applications/utilities/parallelProcessing/decomposePar/Make/options index 5d5d67f98932b2c57203c73786eaf24c3c0ab41b..3fa418594271344d4f159aca89907d26a127b0da 100644 --- a/applications/utilities/parallelProcessing/decomposePar/Make/options +++ b/applications/utilities/parallelProcessing/decomposePar/Make/options @@ -2,15 +2,15 @@ EXE_INC = \ -I$(LIB_SRC)/parallel/decompose/decompose/lnInclude \ -I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \ + -I$(LIB_SRC)/dynamicMesh/lnInclude \ -I$(LIB_SRC)/lagrangian/basic/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \ -I$(LIB_SRC)/regionModels/regionModel/lnInclude EXE_LIBS = \ - -lfiniteVolume \ + -ldynamicMesh \ -ldecompose \ -lgenericPatchFields \ -ldecompositionMethods -L$(FOAM_LIBBIN)/dummy -lmetisDecomp -lscotchDecomp \ -llagrangian \ - -lmeshTools \ -lregionModels diff --git a/applications/utilities/parallelProcessing/decomposePar/decomposePar.C b/applications/utilities/parallelProcessing/decomposePar/decomposePar.C index bb3c80ab7782d15313114308f8316ad160c866dc..4958b521e20eff6ac93dc3d9a74f331d50c95106 100644 --- a/applications/utilities/parallelProcessing/decomposePar/decomposePar.C +++ b/applications/utilities/parallelProcessing/decomposePar/decomposePar.C @@ -96,6 +96,7 @@ Usage #include "fvFieldDecomposer.H" #include "pointFieldDecomposer.H" #include "lagrangianFieldDecomposer.H" +#include "decompositionModel.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // @@ -260,7 +261,8 @@ int main(int argc, char *argv[]) ++nProcs; } - // get requested numberOfSubdomains + // get requested numberOfSubdomains. 
Note: have no mesh yet so + // cannot use decompositionModel::New const label nDomains = readLabel ( IOdictionary @@ -819,16 +821,6 @@ int main(int argc, char *argv[]) processorDb.setTime(runTime); - // remove files remnants that can cause horrible problems - // - mut and nut are used to mark the new turbulence models, - // their existence prevents old models from being upgraded - { - fileName timeDir(processorDb.path()/processorDb.timeName()); - - rm(timeDir/"mut"); - rm(timeDir/"nut"); - } - // read the mesh if (!procMeshList.set(proci)) { diff --git a/applications/utilities/parallelProcessing/decomposePar/decomposeParDict b/applications/utilities/parallelProcessing/decomposePar/decomposeParDict index 39483a01e3eddb5f08b4cdd9dcf91438baf93d8f..0beef4f7621cd734d27b43f31e8d44ee12443b42 100644 --- a/applications/utilities/parallelProcessing/decomposePar/decomposeParDict +++ b/applications/utilities/parallelProcessing/decomposePar/decomposeParDict @@ -17,11 +17,61 @@ FoamFile numberOfSubdomains 2; + +// Optional decomposition constraints +//constraints +//{ +// preserveBaffles +// { +// //- Keep owner and neighbour of baffles on same processor (i.e. +// // keep it detectable as a baffle). Baffles are two boundary face +// // sharing the same points +// type preserveBaffles; +// } +// preserveFaceZones +// { +// //- Keep owner and neighbour on same processor for faces in zones +// type preserveFaceZones; +// zones (".*"); +// } +// preservePatches +// { +// //- Keep owner and neighbour on same processor for faces in patches +// // (only makes sense for cyclic patches. Not suitable for e.g. +// // cyclicAMI since these are not coupled on the patch level. Use +// // singleProcessorFaceSets for those) +// type preservePatches; +// patches (".*"); +// } +// singleProcessorFaceSets +// { +// //- Keep all of faceSet on a single processor. This puts all cells +// // connected with a point, edge or face on the same processor. +// // (just having face connected cells might not guarantee a balanced +// // decomposition) +// // The processor can be -1 (the decompositionMethod chooses the +// // processor for a good load balance) or explicitly provided (upsets +// // balance) +// type singleProcessorFaceSets; +// singleProcessorFaceSets ((f1 -1)); +// } +// refinementHistory +// { +// //- Decompose cells such that all cell originating from single cell +// // end up on same processor +// type refinementHistory; +// } +//} + + +// Deprecated form of specifying decomposition constraints: //- Keep owner and neighbour on same processor for faces in zones: // preserveFaceZones (heater solid1 solid3); //- Keep owner and neighbour on same processor for faces in patches: -// (makes sense only for cyclic patches) +// (makes sense only for cyclic patches. Not suitable for e.g. cyclicAMI +// since these are not coupled on the patch level. Use +// singleProcessorFaceSets for those) //preservePatches (cyclic_half0 cyclic_half1); //- Keep all of faceSet on a single processor. This puts all cells @@ -32,12 +82,13 @@ numberOfSubdomains 2; // for a good load balance) or explicitly provided (upsets balance). //singleProcessorFaceSets ((f0 -1)); - //- Keep owner and neighbour of baffles on same processor (i.e. keep it // detectable as a baffle). Baffles are two boundary face sharing the // same points. //preserveBaffles true; + + //- Use the volScalarField named here as a weight for each cell in the // decomposition. 
For example, use a particle population field to decompose // for a balanced number of particles in a lagrangian simulation. diff --git a/applications/utilities/parallelProcessing/decomposePar/domainDecomposition.C b/applications/utilities/parallelProcessing/decomposePar/domainDecomposition.C index 0010965f848afcb61ef93facba1ff5067e50bb52..d54d25b3819fe4178e7d8aa540610084aa18e801 100644 --- a/applications/utilities/parallelProcessing/decomposePar/domainDecomposition.C +++ b/applications/utilities/parallelProcessing/decomposePar/domainDecomposition.C @@ -31,14 +31,14 @@ License #include "fvMesh.H" #include "OSspecific.H" #include "Map.H" -#include "globalMeshData.H" #include "DynamicList.H" #include "fvFieldDecomposer.H" #include "IOobjectList.H" #include "cellSet.H" #include "faceSet.H" #include "pointSet.H" -#include "uniformDimensionedFields.H" +#include "decompositionModel.H" +#include "hexRef8Data.H" // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // @@ -90,18 +90,16 @@ Foam::domainDecomposition::domainDecomposition(const IOobject& io) ) : NULL ), - decompositionDict_ + nProcs_ ( - IOobject + readInt ( - "decomposeParDict", - time().system(), - *this, - IOobject::MUST_READ_IF_MODIFIED, - IOobject::NO_WRITE + decompositionModel::New + ( + *this + ).lookup("numberOfSubdomains") ) ), - nProcs_(readInt(decompositionDict_.lookup("numberOfSubdomains"))), distributed_(false), cellToProc_(nCells()), procPointAddressing_(nProcs_), @@ -115,7 +113,10 @@ Foam::domainDecomposition::domainDecomposition(const IOobject& io) procProcessorPatchSubPatchIDs_(nProcs_), procProcessorPatchSubPatchStarts_(nProcs_) { - decompositionDict_.readIfPresent("distributed", distributed_); + decompositionModel::New + ( + *this + ).readIfPresent("distributed", distributed_); } @@ -195,57 +196,20 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets) } - autoPtr<labelIOList> cellLevelPtr; - { - IOobject io - ( - "cellLevel", - facesInstance(), - polyMesh::meshSubDir, - *this, - IOobject::MUST_READ, - IOobject::NO_WRITE - ); - if (io.headerOk()) - { - Info<< "Reading hexRef8 data : " << io.name() << endl; - cellLevelPtr.reset(new labelIOList(io)); - } - } - autoPtr<labelIOList> pointLevelPtr; - { - IOobject io - ( - "pointLevel", - facesInstance(), - polyMesh::meshSubDir, - *this, - IOobject::MUST_READ, - IOobject::NO_WRITE - ); - if (io.headerOk()) - { - Info<< "Reading hexRef8 data : " << io.name() << endl; - pointLevelPtr.reset(new labelIOList(io)); - } - } - autoPtr<uniformDimensionedScalarField> level0EdgePtr; - { - IOobject io + // Load refinement data (if any) + hexRef8Data baseMeshData + ( + IOobject ( - "level0Edge", + "dummy", facesInstance(), polyMesh::meshSubDir, *this, - IOobject::MUST_READ, - IOobject::NO_WRITE - ); - if (io.headerOk()) - { - Info<< "Reading hexRef8 data : " << io.name() << endl; - level0EdgePtr.reset(new uniformDimensionedScalarField(io)); - } - } + IOobject::READ_IF_PRESENT, + IOobject::NO_WRITE, + false + ) + ); @@ -771,8 +735,8 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets) } } - // Set the precision of the points data to 10 - IOstream::defaultPrecision(10); + // Set the precision of the points data to be min 10 + IOstream::defaultPrecision(max(10u, IOstream::defaultPrecision())); procMesh.write(); @@ -842,64 +806,23 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets) } - // hexRef8 data - if (cellLevelPtr.valid()) - { - labelIOList - ( - IOobject - ( - cellLevelPtr().name(), 
- facesInstance(), - polyMesh::meshSubDir, - procMesh, - IOobject::NO_READ, - IOobject::AUTO_WRITE - ), - UIndirectList<label> - ( - cellLevelPtr(), - procCellAddressing_[proci] - )() - ).write(); - } - if (pointLevelPtr.valid()) - { - labelIOList - ( - IOobject - ( - pointLevelPtr().name(), - facesInstance(), - polyMesh::meshSubDir, - procMesh, - IOobject::NO_READ, - IOobject::AUTO_WRITE - ), - UIndirectList<label> - ( - pointLevelPtr(), - procPointAddressing_[proci] - )() - ).write(); - } - if (level0EdgePtr.valid()) - { - uniformDimensionedScalarField + // Optional hexRef8 data + hexRef8Data + ( + IOobject ( - IOobject - ( - level0EdgePtr().name(), - facesInstance(), - polyMesh::meshSubDir, - procMesh, - IOobject::NO_READ, - IOobject::AUTO_WRITE - ), - level0EdgePtr() - ).write(); - } - + "dummy", + facesInstance(), + polyMesh::meshSubDir, + procMesh, + IOobject::NO_READ, + IOobject::NO_WRITE, + false + ), + baseMeshData, + procCellAddressing_[proci], + procPointAddressing_[proci] + ).write(); // Statistics diff --git a/applications/utilities/parallelProcessing/decomposePar/domainDecomposition.H b/applications/utilities/parallelProcessing/decomposePar/domainDecomposition.H index 2951b10d23cece0da982ffea39e150618e198bff..c183b2df68c2217abc22d27304e608104c30f4da 100644 --- a/applications/utilities/parallelProcessing/decomposePar/domainDecomposition.H +++ b/applications/utilities/parallelProcessing/decomposePar/domainDecomposition.H @@ -61,9 +61,6 @@ class domainDecomposition //- Optional: points at the facesInstance autoPtr<pointIOField> facesInstancePointsPtr_; - //- Mesh decomposition control dictionary - IOdictionary decompositionDict_; - //- Number of processors in decomposition label nProcs_; diff --git a/applications/utilities/parallelProcessing/decomposePar/domainDecompositionDistribute.C b/applications/utilities/parallelProcessing/decomposePar/domainDecompositionDistribute.C index 41cbbe9a548d7c3f71d3a755521b186cffdb58a4..c6df1cfa7b5b959b74821bae10f807d346dd6462 100644 --- a/applications/utilities/parallelProcessing/decomposePar/domainDecompositionDistribute.C +++ b/applications/utilities/parallelProcessing/decomposePar/domainDecompositionDistribute.C @@ -30,6 +30,7 @@ License #include "regionSplit.H" #include "Tuple2.H" #include "faceSet.H" +#include "decompositionModel.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // @@ -39,15 +40,12 @@ void Foam::domainDecomposition::distributeCells() cpuTime decompositionTime; - autoPtr<decompositionMethod> decomposePtr = decompositionMethod::New - ( - decompositionDict_ - ); + const decompositionModel& method = decompositionModel::New(*this); scalarField cellWeights; - if (decompositionDict_.found("weightField")) + if (method.found("weightField")) { - word weightName = decompositionDict_.lookup("weightField"); + word weightName = method.lookup("weightField"); volScalarField weights ( @@ -64,7 +62,7 @@ void Foam::domainDecomposition::distributeCells() cellWeights = weights.primitiveField(); } - cellToProc_ = decomposePtr().decompose(*this, cellWeights); + cellToProc_ = method.decomposer().decompose(*this, cellWeights); Info<< "\nFinished decomposition in " << decompositionTime.elapsedCpuTime() diff --git a/applications/utilities/parallelProcessing/decomposePar/readFields.C b/applications/utilities/parallelProcessing/decomposePar/readFields.C index 7faa5a66c4e0504c61df2c863e5a3ad9468755fa..66dddc8af3d155ca368cbf6d31b5efda4daee87f 100644 --- a/applications/utilities/parallelProcessing/decomposePar/readFields.C 
+++ b/applications/utilities/parallelProcessing/decomposePar/readFields.C @@ -23,18 +23,22 @@ License \*---------------------------------------------------------------------------*/ +#include "GeometricField.H" #include "readFields.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // -template<class Mesh, class GeoField> +template<class Type, template<class> class PatchField, class GeoMesh> void Foam::readFields ( - const Mesh& mesh, + const typename GeoMesh::Mesh& mesh, const IOobjectList& objects, - PtrList<GeoField>& fields + PtrList<GeometricField<Type, PatchField, GeoMesh> >& fields, + const bool readOldTime ) { + typedef GeometricField<Type, PatchField, GeoMesh> GeoField; + // Search list of objects for fields of type GeomField IOobjectList fieldObjects(objects.lookupClass(GeoField::typeName)); @@ -45,17 +49,48 @@ void Foam::readFields fieldObjects.erase(celDistIter); } + // Get sorted set of names (different processors might read objects in + // different order) + const wordList masterNames(fieldObjects.sortedNames()); + + // Construct the fields + fields.setSize(masterNames.size()); + + forAll(masterNames, i) + { + const IOobject& io = *fieldObjects[masterNames[i]]; + + fields.set(i, new GeoField(io, mesh, readOldTime)); + } +} + + +template<class Mesh, class GeoField> +void Foam::readFields +( + const Mesh& mesh, + const IOobjectList& objects, + PtrList<GeoField>& fields +) +{ + // Search list of objects for fields of type GeomField + IOobjectList fieldObjects(objects.lookupClass(GeoField::typeName)); + // Construct the fields fields.setSize(fieldObjects.size()); - label fieldi = 0; - forAllIter(IOobjectList, fieldObjects, iter) + // Get sorted set of names (different processors might read objects in + // different order) + const wordList masterNames(fieldObjects.sortedNames()); + + // Construct the fields + fields.setSize(masterNames.size()); + + forAll(masterNames, i) { - fields.set - ( - fieldi++, - new GeoField(*iter(), mesh) - ); + const IOobject& io = *fieldObjects[masterNames[i]]; + + fields.set(i, new GeoField(io, mesh)); } } diff --git a/applications/utilities/parallelProcessing/decomposePar/readFields.H b/applications/utilities/parallelProcessing/decomposePar/readFields.H index 0ff4c1f92b001ea123c5e827f0eba8e7c3ad46b3..4f19c3a08ebdf59f2af4eb517f2e2bdf14f205b6 100644 --- a/applications/utilities/parallelProcessing/decomposePar/readFields.H +++ b/applications/utilities/parallelProcessing/decomposePar/readFields.H @@ -2,7 +2,7 @@ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | - \\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation + \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License @@ -41,6 +41,16 @@ SourceFiles namespace Foam { + // Read the fields and hold on the pointer list + template<class Type, template<class> class PatchField, class GeoMesh> + void readFields + ( + const typename GeoMesh::Mesh& mesh, + const IOobjectList& objects, + PtrList<GeometricField<Type, PatchField, GeoMesh> >& fields, + const bool readOldTime + ); + // Read the fields and hold on the pointer list template<class Mesh, class GeoField> void readFields diff --git a/applications/utilities/parallelProcessing/reconstructPar/Make/options b/applications/utilities/parallelProcessing/reconstructPar/Make/options index ece7c8b030bee4fab556a964b9da1eec4c51a64d..1ebb8c02d314fb6230eef50e44957be3befdfdda 100644 --- 
a/applications/utilities/parallelProcessing/reconstructPar/Make/options +++ b/applications/utilities/parallelProcessing/reconstructPar/Make/options @@ -1,6 +1,7 @@ EXE_INC = \ -I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/lagrangian/basic/lnInclude \ + -I$(LIB_SRC)/dynamicMesh/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \ -I$(LIB_SRC)/parallel/reconstruct/reconstruct/lnInclude \ -I$(LIB_SRC)/regionModels/regionModel/lnInclude @@ -9,6 +10,7 @@ EXE_LIBS = \ -lfiniteVolume \ -lgenericPatchFields \ -llagrangian \ + -ldynamicMesh \ -lmeshTools \ -lreconstruct \ -lregionModels diff --git a/applications/utilities/parallelProcessing/reconstructPar/reconstructPar.C b/applications/utilities/parallelProcessing/reconstructPar/reconstructPar.C index 95f8b038cd601d0d6736e659e0d576c4189cb94a..9e2d7aebe559fe948a4f6bc4b51b9ad1528de032 100644 --- a/applications/utilities/parallelProcessing/reconstructPar/reconstructPar.C +++ b/applications/utilities/parallelProcessing/reconstructPar/reconstructPar.C @@ -45,6 +45,8 @@ Description #include "faceSet.H" #include "pointSet.H" +#include "hexRef8Data.H" + // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // bool haveAllTimes @@ -868,6 +870,78 @@ int main(int argc, char *argv[]) pointSets[i].write(); } } + + + // Reconstruct refinement data + { + PtrList<hexRef8Data> procData(procMeshes.meshes().size()); + + forAll(procMeshes.meshes(), procI) + { + const fvMesh& procMesh = procMeshes.meshes()[procI]; + + procData.set + ( + procI, + new hexRef8Data + ( + IOobject + ( + "dummy", + procMesh.time().timeName(), + polyMesh::meshSubDir, + procMesh, + IOobject::READ_IF_PRESENT, + IOobject::NO_WRITE, + false + ) + ) + ); + } + + // Combine individual parts + + const PtrList<labelIOList>& cellAddr = + procMeshes.cellProcAddressing(); + + UPtrList<const labelList> cellMaps(cellAddr.size()); + forAll(cellAddr, i) + { + cellMaps.set(i, &cellAddr[i]); + } + + const PtrList<labelIOList>& pointAddr = + procMeshes.pointProcAddressing(); + + UPtrList<const labelList> pointMaps(pointAddr.size()); + forAll(pointAddr, i) + { + pointMaps.set(i, &pointAddr[i]); + } + + UPtrList<const hexRef8Data> procRefs(procData.size()); + forAll(procData, i) + { + procRefs.set(i, &procData[i]); + } + + hexRef8Data + ( + IOobject + ( + "dummy", + mesh.time().timeName(), + polyMesh::meshSubDir, + mesh, + IOobject::NO_READ, + IOobject::NO_WRITE, + false + ), + cellMaps, + pointMaps, + procRefs + ).write(); + } } } @@ -885,7 +959,7 @@ int main(int argc, char *argv[]) } } - Info<< "End.\n" << endl; + Info<< "\nEnd\n" << endl; return 0; } diff --git a/applications/utilities/parallelProcessing/reconstructParMesh/reconstructParMesh.C b/applications/utilities/parallelProcessing/reconstructParMesh/reconstructParMesh.C index eb2ddf8746d7ebb6d8fe56c3ac7b30f3ba96d807..1c57cd3274c2440cc13c1d5bc09f8143d6db38b9 100644 --- a/applications/utilities/parallelProcessing/reconstructParMesh/reconstructParMesh.C +++ b/applications/utilities/parallelProcessing/reconstructParMesh/reconstructParMesh.C @@ -49,7 +49,7 @@ Description #include "faceCoupleInfo.H" #include "fvMeshAdder.H" #include "polyTopoChange.H" -#include "zeroGradientFvPatchFields.H" +#include "extrapolatedCalculatedFvPatchFields.H" using namespace Foam; @@ -401,13 +401,14 @@ void writeCellDistance ), masterMesh, dimensionedScalar("cellDist", dimless, 0), - zeroGradientFvPatchScalarField::typeName + extrapolatedCalculatedFvPatchScalarField::typeName ); forAll(cellDecomposition, celli) { cellDist[celli] = cellDecomposition[celli]; 
     }
+    cellDist.correctBoundaryConditions();
 
     cellDist.write();
 
diff --git a/applications/utilities/parallelProcessing/redistributePar/redistributePar.C b/applications/utilities/parallelProcessing/redistributePar/redistributePar.C
index ef73afa2a5976b73738a343e95fb34d5ce62d1b4..89580aa9af2b3c5d9115cbb76ce50fd24b171ed5 100644
--- a/applications/utilities/parallelProcessing/redistributePar/redistributePar.C
+++ b/applications/utilities/parallelProcessing/redistributePar/redistributePar.C
@@ -236,13 +236,15 @@ void writeDecomposition
             false               // do not register
         ),
         mesh,
-        dimensionedScalar(name, dimless, -1)
+        dimensionedScalar(name, dimless, -1),
+        extrapolatedCalculatedFvPatchScalarField::typeName
     );
 
     forAll(procCells, cI)
     {
         procCells[cI] = decomp[cI];
     }
+    procCells.correctBoundaryConditions();
 
     procCells.write();
 }
diff --git a/applications/utilities/postProcessing/dataConversion/foamToEnsight/Make/options b/applications/utilities/postProcessing/dataConversion/foamToEnsight/Make/options
index 7799242880217734105bd4f630e73042539721b0..18bbc7a510c93601b5f81d684ee51dd5ade4df87 100644
--- a/applications/utilities/postProcessing/dataConversion/foamToEnsight/Make/options
+++ b/applications/utilities/postProcessing/dataConversion/foamToEnsight/Make/options
@@ -2,12 +2,13 @@ EXE_INC = \
     /* -DFULLDEBUG -g -O0 */ \
     -I$(LIB_SRC)/finiteVolume/lnInclude \
     -I$(LIB_SRC)/meshTools/lnInclude \
+    -I$(LIB_SRC)/dynamicMesh/lnInclude \
     -I$(LIB_SRC)/sampling/lnInclude \
+    -I$(LIB_SRC)/dynamicMesh/lnInclude \
     -I$(LIB_SRC)/lagrangian/basic/lnInclude
 
 EXE_LIBS = \
-    -lfiniteVolume \
-    -lmeshTools \
+    -ldynamicMesh \
     -lsampling \
     -lgenericPatchFields \
     -llagrangian
diff --git a/applications/utilities/postProcessing/dataConversion/foamToTecplot360/Make/options b/applications/utilities/postProcessing/dataConversion/foamToTecplot360/Make/options
index 97a2251f7bb9bf8d90bb23bd4ddbccf3c74a04a9..50981e77e1c3b689364b1db7660ae0b8c5a86368 100644
--- a/applications/utilities/postProcessing/dataConversion/foamToTecplot360/Make/options
+++ b/applications/utilities/postProcessing/dataConversion/foamToTecplot360/Make/options
@@ -2,12 +2,12 @@ EXE_INC = \
     -I$(WM_THIRD_PARTY_DIR)/tecio/tecsrc/lnInclude \
     -I$(LIB_SRC)/lagrangian/basic/lnInclude \
     -I$(LIB_SRC)/finiteVolume/lnInclude \
+    -I$(LIB_SRC)/dynamicMesh/lnInclude \
     -I$(LIB_SRC)/meshTools/lnInclude
 
 
 EXE_LIBS = \
     -llagrangian \
-    -lfiniteVolume \
+    -ldynamicMesh \
     -lgenericPatchFields \
-    -lmeshTools \
     -L$(FOAM_EXT_LIBBIN) -ltecio
diff --git a/applications/utilities/postProcessing/graphics/PVReaders/PVFoamReader/vtkPVFoam/Make/options b/applications/utilities/postProcessing/graphics/PVReaders/PVFoamReader/vtkPVFoam/Make/options
index cb6127c483dec72116db3f39f0f893989f2a4803..df8b71d2864b8f18d2b767bb2676e30667467aa9 100644
--- a/applications/utilities/postProcessing/graphics/PVReaders/PVFoamReader/vtkPVFoam/Make/options
+++ b/applications/utilities/postProcessing/graphics/PVReaders/PVFoamReader/vtkPVFoam/Make/options
@@ -3,6 +3,7 @@ EXE_INC = \
     -I$(LIB_SRC)/meshTools/lnInclude \
     -I$(LIB_SRC)/finiteVolume/lnInclude \
+    -I$(LIB_SRC)/dynamicMesh/lnInclude \
     -I$(LIB_SRC)/lagrangian/basic/lnInclude \
     -I../../vtkPVReaders/lnInclude \
     -I../PVFoamReader \
@@ -14,8 +15,7 @@ EXE_INC = \
     )
 
 LIB_LIBS = \
-    -lmeshTools \
-    -lfiniteVolume \
+    -ldynamicMesh \
     -lgenericPatchFields \
     -llagrangian \
     -L$(FOAM_LIBBIN) -lvtkPVReaders \
diff --git a/src/Allwmake b/src/Allwmake
index 395557d19c521c7f757914f9eb7ef63cdfc65e68..eb70924aeb6499f0f2d6a5f9bce3a2abba191627 100755
--- a/src/Allwmake
+++ b/src/Allwmake
@@
-49,9 +49,9 @@ wmake $targetType lagrangian/distributionModels wmake $targetType genericPatchFields wmake $targetType conversion -wmake $targetType sampling wmake $targetType mesh/extrudeModel wmake $targetType dynamicMesh +wmake $targetType sampling wmake $targetType dynamicFvMesh wmake $targetType topoChangerFvMesh diff --git a/src/OpenFOAM/Make/files b/src/OpenFOAM/Make/files index cb62163050663108291fd993c1d201bba9242f5d..3bf63435bc5577a0c3b9985978734aca5d580eb6 100644 --- a/src/OpenFOAM/Make/files +++ b/src/OpenFOAM/Make/files @@ -94,6 +94,9 @@ $(strings)/wordRe/wordRe.C $(strings)/lists/hashedWordList.C $(strings)/stringOps/stringOps.C +ops = primitives/ops +$(ops)/flipOp.C + primitives/hashes/Hasher/Hasher.C sha1 = primitives/hashes/SHA1 @@ -216,8 +219,8 @@ $(dll)/codedBase/codedBase.C db/functionObjects/functionObject/functionObject.C db/functionObjects/functionObjectList/functionObjectList.C -db/functionObjects/functionObjectFile/functionObjectFile.C -db/functionObjects/functionObjectFiles/functionObjectFiles.C +db/functionObjects/writeFile/writeFile.C +db/functionObjects/writeFiles/writeFiles.C db/functionObjects/timeControl/timeControl.C db/functionObjects/timeControl/timeControlFunctionObject.C @@ -513,6 +516,7 @@ $(mapPolyMesh)/mapPolyMesh.C $(mapPolyMesh)/faceMapper/faceMapper.C $(mapPolyMesh)/cellMapper/cellMapper.C $(mapPolyMesh)/mapDistribute/mapDistribute.C +$(mapPolyMesh)/mapDistribute/mapDistributeBase.C $(mapPolyMesh)/mapDistribute/mapDistributePolyMesh.C $(mapPolyMesh)/mapDistribute/IOmapDistribute.C $(mapPolyMesh)/mapAddedPolyMesh.C diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/UPstream.C b/src/OpenFOAM/db/IOstreams/Pstreams/UPstream.C index af61a05dec36e80eee4b61577d46c79a25a8e3b0..9dad08b9a6511ec02bb9d346669782f9fd7d20d0 100644 --- a/src/OpenFOAM/db/IOstreams/Pstreams/UPstream.C +++ b/src/OpenFOAM/db/IOstreams/Pstreams/UPstream.C @@ -57,22 +57,41 @@ const Foam::NamedEnum<Foam::UPstream::commsTypes, 3> void Foam::UPstream::setParRun(const label nProcs) { - parRun_ = true; - - // Redo worldComm communicator (this has been created at static - // initialisation time) - freeCommunicator(UPstream::worldComm); - label comm = allocateCommunicator(-1, identity(nProcs), true); - if (comm != UPstream::worldComm) + if (nProcs == 0) { - FatalErrorInFunction - << "problem : comm:" << comm - << " UPstream::worldComm:" << UPstream::worldComm - << Foam::exit(FatalError); + parRun_ = false; + freeCommunicator(UPstream::worldComm); + label comm = allocateCommunicator(-1, labelList(1, label(0)), false); + if (comm != UPstream::worldComm) + { + FatalErrorIn("UPstream::setParRun(const label)") + << "problem : comm:" << comm + << " UPstream::worldComm:" << UPstream::worldComm + << Foam::exit(FatalError); + } + + Pout.prefix() = ""; + Perr.prefix() = ""; } + else + { + parRun_ = true; - Pout.prefix() = '[' + name(myProcNo(Pstream::worldComm)) + "] "; - Perr.prefix() = '[' + name(myProcNo(Pstream::worldComm)) + "] "; + // Redo worldComm communicator (this has been created at static + // initialisation time) + freeCommunicator(UPstream::worldComm); + label comm = allocateCommunicator(-1, identity(nProcs), true); + if (comm != UPstream::worldComm) + { + FatalErrorInFunction + << "problem : comm:" << comm + << " UPstream::worldComm:" << UPstream::worldComm + << Foam::exit(FatalError); + } + + Pout.prefix() = '[' + name(myProcNo(Pstream::worldComm)) + "] "; + Perr.prefix() = '[' + name(myProcNo(Pstream::worldComm)) + "] "; + } } diff --git 
a/src/OpenFOAM/db/IOstreams/Pstreams/combineGatherScatter.C b/src/OpenFOAM/db/IOstreams/Pstreams/combineGatherScatter.C index d1ac549922520d8066ae0fe6cfe4100529f3d517..6403905ad65fb71eeb53c271b3b0ae94d261526c 100644 --- a/src/OpenFOAM/db/IOstreams/Pstreams/combineGatherScatter.C +++ b/src/OpenFOAM/db/IOstreams/Pstreams/combineGatherScatter.C @@ -50,7 +50,7 @@ void Foam::Pstream::combineGather const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { // Get my communication order const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; @@ -177,7 +177,7 @@ void Foam::Pstream::combineScatter const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { // Get my communication order const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)]; @@ -278,7 +278,7 @@ void Foam::Pstream::listCombineGather const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { // Get my communication order const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; @@ -412,7 +412,7 @@ void Foam::Pstream::listCombineScatter const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { // Get my communication order const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)]; @@ -525,7 +525,7 @@ void Foam::Pstream::mapCombineGather const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { // Get my communication order const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; @@ -625,7 +625,7 @@ void Foam::Pstream::mapCombineScatter const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { // Get my communication order const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)]; diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/exchange.C b/src/OpenFOAM/db/IOstreams/Pstreams/exchange.C index 749527333d73f13bb373307ee88d24bc18d6fc98..dc514f572ae81f078fe159c82dac030c2eea2a2b 100644 --- a/src/OpenFOAM/db/IOstreams/Pstreams/exchange.C +++ b/src/OpenFOAM/db/IOstreams/Pstreams/exchange.C @@ -61,7 +61,9 @@ void Foam::Pstream::exchange recvBufs.setSize(sendBufs.size()); - if (UPstream::nProcs(comm) > 1) + recvBufs.setSize(sendBufs.size()); + + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { label startOfRequests = Pstream::nRequests(); diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/gatherScatter.C b/src/OpenFOAM/db/IOstreams/Pstreams/gatherScatter.C index a270bea29eb4206e8ff9921049fb4b785a314dca..270c08fee57580256aaf48587cc4df6d6739ae2a 100644 --- a/src/OpenFOAM/db/IOstreams/Pstreams/gatherScatter.C +++ b/src/OpenFOAM/db/IOstreams/Pstreams/gatherScatter.C @@ -52,7 +52,7 @@ void Pstream::gather const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { // Get my communication order const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; @@ -151,7 +151,7 @@ void Pstream::scatter const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { // Get my communication order const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/gatherScatterList.C b/src/OpenFOAM/db/IOstreams/Pstreams/gatherScatterList.C index abd4dba8f956c4f1c7c9521936e54f48a1a1d557..393d9d1b008c13d3a3e3e48d2fd10d209280443d 100644 --- 
a/src/OpenFOAM/db/IOstreams/Pstreams/gatherScatterList.C +++ b/src/OpenFOAM/db/IOstreams/Pstreams/gatherScatterList.C @@ -53,7 +53,7 @@ void Pstream::gatherList const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { if (Values.size() != UPstream::nProcs(comm)) { @@ -209,7 +209,7 @@ void Pstream::scatterList const label comm ) { - if (UPstream::nProcs(comm) > 1) + if (UPstream::parRun() && UPstream::nProcs(comm) > 1) { if (Values.size() != UPstream::nProcs(comm)) { diff --git a/src/OpenFOAM/db/objectRegistry/objectRegistry.C b/src/OpenFOAM/db/objectRegistry/objectRegistry.C index 94e76b6fd7f34c18ce8ac67a6ee9b90cb07d89fa..e7957eb3ee5c348f1e594b3656e84d8439805da3 100644 --- a/src/OpenFOAM/db/objectRegistry/objectRegistry.C +++ b/src/OpenFOAM/db/objectRegistry/objectRegistry.C @@ -222,6 +222,7 @@ bool Foam::objectRegistry::checkIn(regIOobject& io) const { Pout<< "objectRegistry::checkIn(regIOobject&) : " << name() << " : checking in " << io.name() + << " of type " << io.type() << endl; } @@ -354,6 +355,7 @@ bool Foam::objectRegistry::writeObject Pout<< "objectRegistry::write() : " << name() << " : Considering writing object " << iter.key() + << " of type " << iter()->type() << " with writeOpt " << iter()->writeOpt() << " to file " << iter()->objectPath() << endl; diff --git a/src/OpenFOAM/fields/DimensionedFields/DimensionedField/DimensionedField.H b/src/OpenFOAM/fields/DimensionedFields/DimensionedField/DimensionedField.H index d76f31c05805f5e694788dadf5d8462539d637bd..4886e14a9fdcef9a4ad823f3c5f071f2e25d1039 100644 --- a/src/OpenFOAM/fields/DimensionedFields/DimensionedField/DimensionedField.H +++ b/src/OpenFOAM/fields/DimensionedFields/DimensionedField/DimensionedField.H @@ -155,6 +155,15 @@ public: const word& fieldDictEntry="value" ); + //- Construct from dictionary + DimensionedField + ( + const IOobject&, + const Mesh& mesh, + const dictionary& fieldDict, + const word& fieldDictEntry="value" + ); + //- Construct as copy DimensionedField ( diff --git a/src/OpenFOAM/fields/DimensionedFields/DimensionedField/DimensionedFieldIO.C b/src/OpenFOAM/fields/DimensionedFields/DimensionedField/DimensionedFieldIO.C index 0a7b21514763053631f5916b54bf151e57d64fa7..74da71025c3122c36fe03cdb041c232f742a9b6f 100644 --- a/src/OpenFOAM/fields/DimensionedFields/DimensionedField/DimensionedFieldIO.C +++ b/src/OpenFOAM/fields/DimensionedFields/DimensionedField/DimensionedFieldIO.C @@ -80,6 +80,24 @@ Foam::DimensionedField<Type, GeoMesh>::DimensionedField } +template<class Type, class GeoMesh> +Foam::DimensionedField<Type, GeoMesh>::DimensionedField +( + const IOobject& io, + const Mesh& mesh, + const dictionary& fieldDict, + const word& fieldDictEntry +) +: + regIOobject(io), + Field<Type>(0), + mesh_(mesh), + dimensions_(dimless) +{ + readField(fieldDict, fieldDictEntry); +} + + // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // template<class Type, class GeoMesh> diff --git a/src/OpenFOAM/fields/Fields/Field/Field.C b/src/OpenFOAM/fields/Fields/Field/Field.C index f590912822fb74eb83d98722ce98246b350c8594..ea27071fddebafba494f023c8cacceafd480b104 100644 --- a/src/OpenFOAM/fields/Fields/Field/Field.C +++ b/src/OpenFOAM/fields/Fields/Field/Field.C @@ -27,6 +27,8 @@ License #include "FieldM.H" #include "dictionary.H" #include "contiguous.H" +#include "mapDistributeBase.H" +#include "flipOp.H" // * * * * * * * * * * * * * * * Static Members * * * * * * * * * * * * * * // @@ -122,12 +124,13 @@ template<class Type> 
Foam::Field<Type>::Field ( const UList<Type>& mapF, - const FieldMapper& mapper + const FieldMapper& mapper, + const bool applyFlip ) : List<Type>(mapper.size()) { - map(mapF, mapper); + map(mapF, mapper, applyFlip); } @@ -136,12 +139,13 @@ Foam::Field<Type>::Field ( const UList<Type>& mapF, const FieldMapper& mapper, - const Type& defaultValue + const Type& defaultValue, + const bool applyFlip ) : List<Type>(mapper.size(), defaultValue) { - map(mapF, mapper); + map(mapF, mapper, applyFlip); } @@ -150,12 +154,13 @@ Foam::Field<Type>::Field ( const UList<Type>& mapF, const FieldMapper& mapper, - const UList<Type>& defaultValues + const UList<Type>& defaultValues, + const bool applyFlip ) : List<Type>(defaultValues) { - map(mapF, mapper); + map(mapF, mapper, applyFlip); } @@ -163,12 +168,13 @@ template<class Type> Foam::Field<Type>::Field ( const tmp<Field<Type>>& tmapF, - const FieldMapper& mapper + const FieldMapper& mapper, + const bool applyFlip ) : List<Type>(mapper.size()) { - map(tmapF, mapper); + map(tmapF, mapper, applyFlip); } @@ -177,12 +183,13 @@ Foam::Field<Type>::Field ( const tmp<Field<Type>>& tmapF, const FieldMapper& mapper, - const Type& defaultValue + const Type& defaultValue, + const bool applyFlip ) : List<Type>(mapper.size(), defaultValue) { - map(tmapF, mapper); + map(tmapF, mapper, applyFlip); } @@ -191,12 +198,13 @@ Foam::Field<Type>::Field ( const tmp<Field<Type>>& tmapF, const FieldMapper& mapper, - const UList<Type>& defaultValues + const UList<Type>& defaultValues, + const bool applyFlip ) : List<Type>(defaultValues) { - map(tmapF, mapper); + map(tmapF, mapper, applyFlip); } @@ -433,21 +441,57 @@ template<class Type> void Foam::Field<Type>::map ( const UList<Type>& mapF, - const FieldMapper& mapper + const FieldMapper& mapper, + const bool applyFlip ) { - if - ( - mapper.direct() - && notNull(mapper.directAddressing()) - && mapper.directAddressing().size() - ) + if (mapper.distributed()) { - map(mapF, mapper.directAddressing()); + // Fetch remote parts of mapF + const mapDistributeBase& distMap = mapper.distributeMap(); + Field<Type> newMapF(mapF); + + if (applyFlip) + { + distMap.distribute(newMapF); + } + else + { + distMap.distribute(newMapF, noOp()); + } + + if (mapper.direct() && notNull(mapper.directAddressing())) + { + map(newMapF, mapper.directAddressing()); + } + else if (!mapper.direct()) + { + map(newMapF, mapper.addressing(), mapper.weights()); + } + else if (mapper.direct() && isNull(mapper.directAddressing())) + { + // Special case, no local mapper. Assume ordering already correct + // from distribution. Note: this behaviour is different compared + // to local mapper. 
+ this->transfer(newMapF); + this->setSize(mapper.size()); + } } - else if (!mapper.direct() && mapper.addressing().size()) + else { - map(mapF, mapper.addressing(), mapper.weights()); + if + ( + mapper.direct() + && notNull(mapper.directAddressing()) + && mapper.directAddressing().size() + ) + { + map(mapF, mapper.directAddressing()); + } + else if (!mapper.direct() && mapper.addressing().size()) + { + map(mapF, mapper.addressing(), mapper.weights()); + } } } @@ -456,10 +500,11 @@ template<class Type> void Foam::Field<Type>::map ( const tmp<Field<Type>>& tmapF, - const FieldMapper& mapper + const FieldMapper& mapper, + const bool applyFlip ) { - map(tmapF(), mapper); + map(tmapF(), mapper, applyFlip); tmapF.clear(); } @@ -467,25 +512,62 @@ void Foam::Field<Type>::map template<class Type> void Foam::Field<Type>::autoMap ( - const FieldMapper& mapper + const FieldMapper& mapper, + const bool applyFlip ) { - if - ( - ( - mapper.direct() - && notNull(mapper.directAddressing()) - && mapper.directAddressing().size() - ) - || (!mapper.direct() && mapper.addressing().size()) - ) + if (mapper.distributed()) { + // Fetch remote parts of *this + const mapDistributeBase& distMap = mapper.distributeMap(); Field<Type> fCpy(*this); - map(fCpy, mapper); + + if (applyFlip) + { + distMap.distribute(fCpy); + } + else + { + distMap.distribute(fCpy, noOp()); + } + + if + ( + (mapper.direct() + && notNull(mapper.directAddressing())) + || !mapper.direct() + ) + { + this->map(fCpy, mapper); + } + else if (mapper.direct() && isNull(mapper.directAddressing())) + { + // Special case, no local mapper. Assume ordering already correct + // from distribution. Note: this behaviour is different compared + // to local mapper. + this->transfer(fCpy); + this->setSize(mapper.size()); + } } else { - this->setSize(mapper.size()); + if + ( + ( + mapper.direct() + && notNull(mapper.directAddressing()) + && mapper.directAddressing().size() + ) + || (!mapper.direct() && mapper.addressing().size()) + ) + { + Field<Type> fCpy(*this); + map(fCpy, mapper); + } + else + { + this->setSize(mapper.size()); + } } } diff --git a/src/OpenFOAM/fields/Fields/Field/Field.H b/src/OpenFOAM/fields/Fields/Field/Field.H index 2e531dfc80864dfc4940c499f47bdd2cea972b76..87715a241619f072aa9de76acc2d43c6899282ae 100644 --- a/src/OpenFOAM/fields/Fields/Field/Field.H +++ b/src/OpenFOAM/fields/Fields/Field/Field.H @@ -160,7 +160,8 @@ public: Field ( const UList<Type>& mapF, - const FieldMapper& map + const FieldMapper& map, + const bool applyFlip = true ); //- Construct by mapping from the given field @@ -168,7 +169,8 @@ public: ( const UList<Type>& mapF, const FieldMapper& map, - const Type& defaultValue + const Type& defaultValue, + const bool applyFlip = true ); //- Construct by mapping from the given field @@ -176,14 +178,16 @@ public: ( const UList<Type>& mapF, const FieldMapper& map, - const UList<Type>& defaultValues + const UList<Type>& defaultValues, + const bool applyFlip = true ); //- Construct by mapping from the given tmp field Field ( const tmp<Field<Type>>& tmapF, - const FieldMapper& map + const FieldMapper& map, + const bool applyFlip = true ); //- Construct by mapping from the given tmp field. Supplied uniform @@ -192,7 +196,8 @@ public: ( const tmp<Field<Type>>& tmapF, const FieldMapper& map, - const Type& defaultValue + const Type& defaultValue, + const bool applyFlip = true ); //- Construct by mapping from the given tmp field. 
Supplied values @@ -201,7 +206,8 @@ public: ( const tmp<Field<Type>>& tmapF, const FieldMapper& map, - const UList<Type>& defaultValues + const UList<Type>& defaultValues, + const bool applyFlip = true ); //- Construct as copy @@ -272,20 +278,23 @@ public: void map ( const UList<Type>& mapF, - const FieldMapper& map + const FieldMapper& map, + const bool applyFlip = true ); //- Map from the given tmp field void map ( const tmp<Field<Type>>& tmapF, - const FieldMapper& map + const FieldMapper& map, + const bool applyFlip = true ); //- Map from self void autoMap ( - const FieldMapper& map + const FieldMapper& map, + const bool applyFlip = true ); //- 1 to 1 reverse-map from the given field diff --git a/src/OpenFOAM/fields/Fields/Field/FieldMapper.H b/src/OpenFOAM/fields/Fields/Field/FieldMapper.H index b15b45b295b90d015db28bffb3e1c7ba716a423f..b40e2fd03b609841c0c4014ee42c811fae10c4b2 100644 --- a/src/OpenFOAM/fields/Fields/Field/FieldMapper.H +++ b/src/OpenFOAM/fields/Fields/Field/FieldMapper.H @@ -37,6 +37,8 @@ Description namespace Foam { +class mapDistributeBase; + /*---------------------------------------------------------------------------*\ Class FieldMapper Declaration \*---------------------------------------------------------------------------*/ @@ -64,6 +66,19 @@ public: virtual bool direct() const = 0; + virtual bool distributed() const + { + return false; + } + + virtual const mapDistributeBase& distributeMap() const + { + FatalErrorInFunction + << "attempt to access null distributeMap" + << abort(FatalError); + return *reinterpret_cast<mapDistributeBase*>(NULL); + } + //- Are there unmapped values? I.e. do all size() elements get // get value virtual bool hasUnmapped() const = 0; diff --git a/src/OpenFOAM/global/argList/argList.C b/src/OpenFOAM/global/argList/argList.C index 5434b4037798c394feae0b22fb55527cfc7478e1..d9cf7f2065ad32696971f7eb2658f54893cc8d3e 100644 --- a/src/OpenFOAM/global/argList/argList.C +++ b/src/OpenFOAM/global/argList/argList.C @@ -784,9 +784,6 @@ void Foam::argList::parse if (Pstream::master()) { slaveProcs.setSize(Pstream::nProcs() - 1); - string slaveMachine; - label slavePid; - label proci = 0; for ( @@ -796,15 +793,30 @@ void Foam::argList::parse ) { IPstream fromSlave(Pstream::scheduled, slave); - fromSlave >> slaveMachine >> slavePid; + + string slaveBuild; + string slaveMachine; + label slavePid; + fromSlave >> slaveBuild >> slaveMachine >> slavePid; slaveProcs[proci++] = slaveMachine + "." 
+ name(slavePid); + + // Check build string to make sure all processors are running + // the same build + if (slaveBuild != Foam::FOAMbuild) + { + FatalErrorIn(executable()) + << "Master is running version " << Foam::FOAMbuild + << "; slave " << proci << " is running version " + << slaveBuild + << exit(FatalError); + } } } else { OPstream toMaster(Pstream::scheduled, Pstream::masterNo()); - toMaster << hostName() << pid(); + toMaster << string(Foam::FOAMbuild) << hostName() << pid(); } } diff --git a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.C b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.C index 9a9b20810cbb497f97a4eb5bb0d4aca6851c2b34..05323ace5462ebaa5258bccb00a4e3fc75d2d313 100644 --- a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.C +++ b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.C @@ -24,12 +24,8 @@ License \*---------------------------------------------------------------------------*/ #include "mapDistribute.H" -#include "commSchedule.H" -#include "HashSet.H" -#include "globalIndex.H" #include "globalIndexAndTransform.H" #include "transformField.H" -#include "ListOps.H" // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // @@ -38,7 +34,6 @@ namespace Foam defineTypeNameAndDebug(mapDistribute, 0); } - // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // template<> @@ -134,218 +129,10 @@ void Foam::mapDistribute::transform::operator() {} -Foam::List<Foam::labelPair> Foam::mapDistribute::schedule -( - const labelListList& subMap, - const labelListList& constructMap, - const int tag -) -{ - // Communications: send and receive processor - List<labelPair> allComms; - - { - HashSet<labelPair, labelPair::Hash<>> commsSet(Pstream::nProcs()); - - // Find what communication is required - forAll(subMap, proci) - { - if (proci != Pstream::myProcNo()) - { - if (subMap[proci].size()) - { - // I need to send to proci - commsSet.insert(labelPair(Pstream::myProcNo(), proci)); - } - if (constructMap[proci].size()) - { - // I need to receive from proci - commsSet.insert(labelPair(proci, Pstream::myProcNo())); - } - } - } - allComms = commsSet.toc(); - } - - - // Reduce - if (Pstream::master()) - { - // Receive and merge - for - ( - int slave=Pstream::firstSlave(); - slave<=Pstream::lastSlave(); - slave++ - ) - { - IPstream fromSlave(Pstream::scheduled, slave, 0, tag); - List<labelPair> nbrData(fromSlave); - - forAll(nbrData, i) - { - if (findIndex(allComms, nbrData[i]) == -1) - { - label sz = allComms.size(); - allComms.setSize(sz+1); - allComms[sz] = nbrData[i]; - } - } - } - // Send back - for - ( - int slave=Pstream::firstSlave(); - slave<=Pstream::lastSlave(); - slave++ - ) - { - OPstream toSlave(Pstream::scheduled, slave, 0, tag); - toSlave << allComms; - } - } - else - { - { - OPstream toMaster(Pstream::scheduled, Pstream::masterNo(), 0, tag); - toMaster << allComms; - } - { - IPstream fromMaster - ( - Pstream::scheduled, - Pstream::masterNo(), - 0, - tag - ); - fromMaster >> allComms; - } - } - - - // Determine my schedule. 
- labelList mySchedule - ( - commSchedule - ( - Pstream::nProcs(), - allComms - ).procSchedule()[Pstream::myProcNo()] - ); - - // Processors involved in my schedule - return List<labelPair>(UIndirectList<labelPair>(allComms, mySchedule)); - - - //if (debug) - //{ - // Pout<< "I need to:" << endl; - // const List<labelPair>& comms = schedule(); - // forAll(comms, i) - // { - // const labelPair& twoProcs = comms[i]; - // label sendProc = twoProcs[0]; - // label recvProc = twoProcs[1]; - // - // if (recvProc == Pstream::myProcNo()) - // { - // Pout<< " receive from " << sendProc << endl; - // } - // else - // { - // Pout<< " send to " << recvProc << endl; - // } - // } - //} -} - - -const Foam::List<Foam::labelPair>& Foam::mapDistribute::schedule() const -{ - if (schedulePtr_.empty()) - { - schedulePtr_.reset - ( - new List<labelPair> - ( - schedule(subMap_, constructMap_, Pstream::msgType()) - ) - ); - } - return schedulePtr_(); -} - - -void Foam::mapDistribute::checkReceivedSize -( - const label proci, - const label expectedSize, - const label receivedSize -) -{ - if (receivedSize != expectedSize) - { - FatalErrorInFunction - << "Expected from processor " << proci - << " " << expectedSize << " but received " - << receivedSize << " elements." - << abort(FatalError); - } -} - - void Foam::mapDistribute::printLayout(Ostream& os) const { - // Determine offsets of remote data. - labelList minIndex(Pstream::nProcs(), labelMax); - labelList maxIndex(Pstream::nProcs(), labelMin); - forAll(constructMap_, proci) - { - const labelList& construct = constructMap_[proci]; - minIndex[proci] = min(minIndex[proci], min(construct)); - maxIndex[proci] = max(maxIndex[proci], max(construct)); - } - - label localSize; - if (maxIndex[Pstream::myProcNo()] == labelMin) - { - localSize = 0; - } - else - { - localSize = maxIndex[Pstream::myProcNo()]+1; - } - - os << "Layout: (constructSize:" << constructSize_ << ")" << endl - << "local (processor " << Pstream::myProcNo() << "):" << endl - << " start : 0" << endl - << " size : " << localSize << endl; + mapDistributeBase::printLayout(os); - label offset = localSize; - forAll(minIndex, proci) - { - if (proci != Pstream::myProcNo()) - { - if (constructMap_[proci].size() > 0) - { - if (minIndex[proci] != offset) - { - FatalErrorInFunction - << "offset:" << offset - << " proci:" << proci - << " minIndex:" << minIndex[proci] - << abort(FatalError); - } - - label size = maxIndex[proci]-minIndex[proci]+1; - os << "processor " << proci << ':' << endl - << " start : " << offset << endl - << " size : " << size << endl; - - offset += size; - } - } - } forAll(transformElements_, trafoI) { if (transformElements_[trafoI].size() > 0) @@ -358,281 +145,11 @@ void Foam::mapDistribute::printLayout(Ostream& os) const } -void Foam::mapDistribute::calcCompactAddressing -( - const globalIndex& globalNumbering, - const labelList& elements, - List<Map<label>>& compactMap -) const -{ - compactMap.setSize(Pstream::nProcs()); - - // Count all (non-local) elements needed. Just for presizing map. - labelList nNonLocal(Pstream::nProcs(), 0); - - forAll(elements, i) - { - label globalIndex = elements[i]; - - if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex)) - { - label proci = globalNumbering.whichProcID(globalIndex); - nNonLocal[proci]++; - } - } - - forAll(compactMap, proci) - { - compactMap[proci].clear(); - if (proci != Pstream::myProcNo()) - { - compactMap[proci].resize(2*nNonLocal[proci]); - } - } - - - // Collect all (non-local) elements needed. 
- forAll(elements, i) - { - label globalIndex = elements[i]; - - if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex)) - { - label proci = globalNumbering.whichProcID(globalIndex); - label index = globalNumbering.toLocal(proci, globalIndex); - label nCompact = compactMap[proci].size(); - compactMap[proci].insert(index, nCompact); - } - } -} - - -void Foam::mapDistribute::calcCompactAddressing -( - const globalIndex& globalNumbering, - const labelListList& cellCells, - List<Map<label>>& compactMap -) const -{ - compactMap.setSize(Pstream::nProcs()); - - // Count all (non-local) elements needed. Just for presizing map. - labelList nNonLocal(Pstream::nProcs(), 0); - - forAll(cellCells, celli) - { - const labelList& cCells = cellCells[celli]; - - forAll(cCells, i) - { - label globalIndex = cCells[i]; - - if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex)) - { - label proci = globalNumbering.whichProcID(globalIndex); - nNonLocal[proci]++; - } - } - } - - forAll(compactMap, proci) - { - compactMap[proci].clear(); - if (proci != Pstream::myProcNo()) - { - compactMap[proci].resize(2*nNonLocal[proci]); - } - } - - - // Collect all (non-local) elements needed. - forAll(cellCells, celli) - { - const labelList& cCells = cellCells[celli]; - - forAll(cCells, i) - { - label globalIndex = cCells[i]; - - if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex)) - { - label proci = globalNumbering.whichProcID(globalIndex); - label index = globalNumbering.toLocal(proci, globalIndex); - label nCompact = compactMap[proci].size(); - compactMap[proci].insert(index, nCompact); - } - } - } -} - - -void Foam::mapDistribute::exchangeAddressing -( - const int tag, - const globalIndex& globalNumbering, - labelList& elements, - List<Map<label>>& compactMap, - labelList& compactStart -) -{ - // The overall compact addressing is - // - myProcNo data first (uncompacted) - // - all other processors consecutively - - compactStart.setSize(Pstream::nProcs()); - compactStart[Pstream::myProcNo()] = 0; - constructSize_ = globalNumbering.localSize(); - forAll(compactStart, proci) - { - if (proci != Pstream::myProcNo()) - { - compactStart[proci] = constructSize_; - constructSize_ += compactMap[proci].size(); - } - } - - - - // Find out what to receive/send in compact addressing. 
- - // What I want to receive is what others have to send - labelListList wantedRemoteElements(Pstream::nProcs()); - // Compact addressing for received data - constructMap_.setSize(Pstream::nProcs()); - forAll(compactMap, proci) - { - if (proci == Pstream::myProcNo()) - { - // All my own elements are used - label nLocal = globalNumbering.localSize(); - wantedRemoteElements[proci] = identity(nLocal); - constructMap_[proci] = identity(nLocal); - } - else - { - // Remote elements wanted from processor proci - labelList& remoteElem = wantedRemoteElements[proci]; - labelList& localElem = constructMap_[proci]; - remoteElem.setSize(compactMap[proci].size()); - localElem.setSize(compactMap[proci].size()); - label i = 0; - forAllIter(Map<label>, compactMap[proci], iter) - { - const label compactI = compactStart[proci] + iter(); - remoteElem[i] = iter.key(); - localElem[i] = compactI; - iter() = compactI; - i++; - } - } - } - - subMap_.setSize(Pstream::nProcs()); - Pstream::exchange<labelList, label> - ( - wantedRemoteElements, - subMap_, - tag, - Pstream::worldComm //TBD - ); - - // Renumber elements - forAll(elements, i) - { - elements[i] = renumber(globalNumbering, compactMap, elements[i]); - } -} - - -void Foam::mapDistribute::exchangeAddressing -( - const int tag, - const globalIndex& globalNumbering, - labelListList& cellCells, - List<Map<label>>& compactMap, - labelList& compactStart -) -{ - // The overall compact addressing is - // - myProcNo data first (uncompacted) - // - all other processors consecutively - - compactStart.setSize(Pstream::nProcs()); - compactStart[Pstream::myProcNo()] = 0; - constructSize_ = globalNumbering.localSize(); - forAll(compactStart, proci) - { - if (proci != Pstream::myProcNo()) - { - compactStart[proci] = constructSize_; - constructSize_ += compactMap[proci].size(); - } - } - - - - // Find out what to receive/send in compact addressing. 
- - // What I want to receive is what others have to send - labelListList wantedRemoteElements(Pstream::nProcs()); - // Compact addressing for received data - constructMap_.setSize(Pstream::nProcs()); - forAll(compactMap, proci) - { - if (proci == Pstream::myProcNo()) - { - // All my own elements are used - label nLocal = globalNumbering.localSize(); - wantedRemoteElements[proci] = identity(nLocal); - constructMap_[proci] = identity(nLocal); - } - else - { - // Remote elements wanted from processor proci - labelList& remoteElem = wantedRemoteElements[proci]; - labelList& localElem = constructMap_[proci]; - remoteElem.setSize(compactMap[proci].size()); - localElem.setSize(compactMap[proci].size()); - label i = 0; - forAllIter(Map<label>, compactMap[proci], iter) - { - const label compactI = compactStart[proci] + iter(); - remoteElem[i] = iter.key(); - localElem[i] = compactI; - iter() = compactI; - i++; - } - } - } - - subMap_.setSize(Pstream::nProcs()); - Pstream::exchange<labelList, label> - ( - wantedRemoteElements, - subMap_, - tag, - Pstream::worldComm //TBD - ); - - // Renumber elements - forAll(cellCells, celli) - { - labelList& cCells = cellCells[celli]; - - forAll(cCells, i) - { - cCells[i] = renumber(globalNumbering, compactMap, cCells[i]); - } - } -} - - // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // Foam::mapDistribute::mapDistribute() : - constructSize_(0), - schedulePtr_() + mapDistributeBase() {} @@ -640,13 +157,19 @@ Foam::mapDistribute::mapDistribute ( const label constructSize, const Xfer<labelListList>& subMap, - const Xfer<labelListList>& constructMap + const Xfer<labelListList>& constructMap, + const bool subHasFlip, + const bool constructHasFlip ) : - constructSize_(constructSize), - subMap_(subMap), - constructMap_(constructMap), - schedulePtr_() + mapDistributeBase + ( + constructSize, + subMap, + constructMap, + subHasFlip, + constructHasFlip + ) {} @@ -656,15 +179,21 @@ Foam::mapDistribute::mapDistribute const Xfer<labelListList>& subMap, const Xfer<labelListList>& constructMap, const Xfer<labelListList>& transformElements, - const Xfer<labelList>& transformStart + const Xfer<labelList>& transformStart, + const bool subHasFlip, + const bool constructHasFlip ) : - constructSize_(constructSize), - subMap_(subMap), - constructMap_(constructMap), + mapDistributeBase + ( + constructSize, + subMap, + constructMap, + subHasFlip, + constructHasFlip + ), transformElements_(transformElements), - transformStart_(transformStart), - schedulePtr_() + transformStart_(transformStart) {} @@ -674,70 +203,8 @@ Foam::mapDistribute::mapDistribute const labelList& recvProcs ) : - constructSize_(0), - schedulePtr_() -{ - if (sendProcs.size() != recvProcs.size()) - { - FatalErrorInFunction - << "The send and receive data is not the same length. sendProcs:" - << sendProcs.size() << " recvProcs:" << recvProcs.size() - << abort(FatalError); - } - - // Per processor the number of samples we have to send/receive. - labelList nSend(Pstream::nProcs(), 0); - labelList nRecv(Pstream::nProcs(), 0); - - forAll(sendProcs, sampleI) - { - label sendProc = sendProcs[sampleI]; - label recvProc = recvProcs[sampleI]; - - // Note that also need to include local communication (both - // RecvProc and sendProc on local processor) - - if (Pstream::myProcNo() == sendProc) - { - // I am the sender. Count destination processor. - nSend[recvProc]++; - } - if (Pstream::myProcNo() == recvProc) - { - // I am the receiver. 
- nRecv[sendProc]++; - } - } - - subMap_.setSize(Pstream::nProcs()); - constructMap_.setSize(Pstream::nProcs()); - forAll(nSend, proci) - { - subMap_[proci].setSize(nSend[proci]); - constructMap_[proci].setSize(nRecv[proci]); - } - nSend = 0; - nRecv = 0; - - forAll(sendProcs, sampleI) - { - label sendProc = sendProcs[sampleI]; - label recvProc = recvProcs[sampleI]; - - if (Pstream::myProcNo() == sendProc) - { - // I am the sender. Store index I need to send. - subMap_[recvProc][nSend[recvProc]++] = sampleI; - } - if (Pstream::myProcNo() == recvProc) - { - // I am the receiver. - constructMap_[sendProc][nRecv[sendProc]++] = sampleI; - // Largest entry inside constructMap - constructSize_ = sampleI+1; - } - } -} + mapDistributeBase(sendProcs, recvProcs) +{} Foam::mapDistribute::mapDistribute @@ -748,54 +215,14 @@ Foam::mapDistribute::mapDistribute const int tag ) : - constructSize_(0), - schedulePtr_() -{ - // Construct per processor compact addressing of the global elements - // needed. The ones from the local processor are not included since - // these are always all needed. - calcCompactAddressing - ( - globalNumbering, - elements, - compactMap - ); - - //// Sort remote elements needed (not really necessary) - //forAll(compactMap, proci) - //{ - // if (proci != Pstream::myProcNo()) - // { - // Map<label>& globalMap = compactMap[proci]; - // - // SortableList<label> sorted(globalMap.toc().xfer()); - // - // forAll(sorted, i) - // { - // Map<label>::iterator iter = globalMap.find(sorted[i]); - // iter() = i; - // } - // } - //} - - - // Exchange what I need with processor that supplies it. Renumber elements - // into compact numbering - labelList compactStart; - exchangeAddressing + mapDistributeBase ( - tag, globalNumbering, elements, compactMap, - compactStart - ); - - if (debug) - { - printLayout(Pout); - } -} + tag + ) +{} Foam::mapDistribute::mapDistribute @@ -806,54 +233,14 @@ Foam::mapDistribute::mapDistribute const int tag ) : - constructSize_(0), - schedulePtr_() -{ - // Construct per processor compact addressing of the global elements - // needed. The ones from the local processor are not included since - // these are always all needed. - calcCompactAddressing - ( - globalNumbering, - cellCells, - compactMap - ); - - //// Sort remote elements needed (not really necessary) - //forAll(compactMap, proci) - //{ - // if (proci != Pstream::myProcNo()) - // { - // Map<label>& globalMap = compactMap[proci]; - // - // SortableList<label> sorted(globalMap.toc().xfer()); - // - // forAll(sorted, i) - // { - // Map<label>::iterator iter = globalMap.find(sorted[i]); - // iter() = i; - // } - // } - //} - - - // Exchange what I need with processor that supplies it. Renumber elements - // into compact numbering - labelList compactStart; - exchangeAddressing + mapDistributeBase ( - tag, globalNumbering, cellCells, compactMap, - compactStart - ); - - if (debug) - { - printLayout(Pout); - } -} + tag + ) +{} Foam::mapDistribute::mapDistribute @@ -867,8 +254,7 @@ Foam::mapDistribute::mapDistribute const int tag ) : - constructSize_(0), - schedulePtr_() + mapDistributeBase() { // Construct per processor compact addressing of the global elements // needed. The ones from the local processor are not included since @@ -973,8 +359,7 @@ Foam::mapDistribute::mapDistribute const int tag ) : - constructSize_(0), - schedulePtr_() + mapDistributeBase() { // Construct per processor compact addressing of the global elements // needed. 
The ones from the local processor are not included since @@ -1083,216 +468,59 @@ Foam::mapDistribute::mapDistribute Foam::mapDistribute::mapDistribute(const mapDistribute& map) : - constructSize_(map.constructSize_), - subMap_(map.subMap_), - constructMap_(map.constructMap_), + mapDistributeBase(map), transformElements_(map.transformElements_), - transformStart_(map.transformStart_), - schedulePtr_() + transformStart_(map.transformStart_) {} Foam::mapDistribute::mapDistribute(const Xfer<mapDistribute>& map) : - constructSize_(map().constructSize_), - subMap_(map().subMap_.xfer()), - constructMap_(map().constructMap_.xfer()), + mapDistributeBase + ( + map().constructSize_, + map().subMap_.xfer(), + map().constructMap_.xfer(), + map().subHasFlip(), + map().constructHasFlip() + ), transformElements_(map().transformElements_.xfer()), - transformStart_(map().transformStart_.xfer()), - schedulePtr_() + transformStart_(map().transformStart_.xfer()) {} -// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // - -Foam::label Foam::mapDistribute::whichTransform(const label index) const +Foam::mapDistribute::mapDistribute(Istream& is) { - return findLower(transformStart_, index+1); + is >> *this; } -void Foam::mapDistribute::transfer(mapDistribute& rhs) +Foam::autoPtr<Foam::mapDistribute> Foam::mapDistribute::clone() const { - constructSize_ = rhs.constructSize_; - subMap_.transfer(rhs.subMap_); - constructMap_.transfer(rhs.constructMap_); - transformElements_.transfer(rhs.transformElements_); - transformStart_.transfer(rhs.transformStart_); - schedulePtr_.clear(); + return autoPtr<mapDistribute>(new mapDistribute(*this)); } -Foam::Xfer<Foam::mapDistribute> Foam::mapDistribute::xfer() +// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // + +Foam::label Foam::mapDistribute::whichTransform(const label index) +const { - return xferMove(*this); + return findLower(transformStart_, index+1); } -Foam::label Foam::mapDistribute::renumber -( - const globalIndex& globalNumbering, - const List<Map<label>>& compactMap, - const label globalI -) +void Foam::mapDistribute::transfer(mapDistribute& rhs) { - if (globalI == -1) - { - return globalI; - } - if (globalNumbering.isLocal(globalI)) - { - return globalNumbering.toLocal(globalI); - } - else - { - label proci = globalNumbering.whichProcID(globalI); - label index = globalNumbering.toLocal(proci, globalI); - return compactMap[proci][index]; - } + mapDistributeBase::transfer(rhs); + transformElements_.transfer(rhs.transformElements_); + transformStart_.transfer(rhs.transformStart_); } -void Foam::mapDistribute::compact(const boolList& elemIsUsed, const int tag) +Foam::Xfer<Foam::mapDistribute> Foam::mapDistribute::xfer() { - // 1. send back to sender. Have sender delete the corresponding element - // from the submap and do the same to the constructMap locally - // (and in same order). - - // Send elemIsUsed field to neighbour. Use nonblocking code from - // mapDistribute but in reverse order. 
- if (Pstream::parRun()) - { - label startOfRequests = Pstream::nRequests(); - - // Set up receives from neighbours - - List<boolList> recvFields(Pstream::nProcs()); - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap_[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - recvFields[domain].setSize(map.size()); - IPstream::read - ( - Pstream::nonBlocking, - domain, - reinterpret_cast<char*>(recvFields[domain].begin()), - recvFields[domain].size()*sizeof(bool), - tag - ); - } - } - - - List<boolList> sendFields(Pstream::nProcs()); - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap_[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - boolList& subField = sendFields[domain]; - subField.setSize(map.size()); - forAll(map, i) - { - subField[i] = elemIsUsed[map[i]]; - } - - OPstream::write - ( - Pstream::nonBlocking, - domain, - reinterpret_cast<const char*>(subField.begin()), - subField.size()*sizeof(bool), - tag - ); - } - } - - - - // Set up 'send' to myself - write directly into recvFields - - { - const labelList& map = constructMap_[Pstream::myProcNo()]; - - recvFields[Pstream::myProcNo()].setSize(map.size()); - forAll(map, i) - { - recvFields[Pstream::myProcNo()][i] = elemIsUsed[map[i]]; - } - } - - - // Wait for all to finish - - Pstream::waitRequests(startOfRequests); - - - // Compact out all submap entries that are referring to unused elements - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap_[domain]; - - labelList newMap(map.size()); - label newI = 0; - - forAll(map, i) - { - if (recvFields[domain][i]) - { - // So element is used on destination side - newMap[newI++] = map[i]; - } - } - if (newI < map.size()) - { - newMap.setSize(newI); - subMap_[domain].transfer(newMap); - } - } - } - - - // 2. remove from construct map - since end-result (element in elemIsUsed) - // not used. 
- - label maxConstructIndex = -1; - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap_[domain]; - - labelList newMap(map.size()); - label newI = 0; - - forAll(map, i) - { - label destinationI = map[i]; - - // Is element is used on destination side - if (elemIsUsed[destinationI]) - { - maxConstructIndex = max(maxConstructIndex, destinationI); - - newMap[newI++] = destinationI; - } - } - if (newI < map.size()) - { - newMap.setSize(newI); - constructMap_[domain].transfer(newMap); - } - } - - constructSize_ = maxConstructIndex+1; - - // Clear the schedule (note:not necessary if nothing changed) - schedulePtr_.clear(); + return xferMove(*this); } @@ -1307,12 +535,9 @@ void Foam::mapDistribute::operator=(const mapDistribute& rhs) << "Attempted assignment to self" << abort(FatalError); } - constructSize_ = rhs.constructSize_; - subMap_ = rhs.subMap_; - constructMap_ = rhs.constructMap_; + mapDistributeBase::operator=(rhs); transformElements_ = rhs.transformElements_; transformStart_ = rhs.transformStart_; - schedulePtr_.clear(); } @@ -1322,7 +547,7 @@ Foam::Istream& Foam::operator>>(Istream& is, mapDistribute& map) { is.fatalCheck("operator>>(Istream&, mapDistribute&)"); - is >> map.constructSize_ >> map.subMap_ >> map.constructMap_ + is >> static_cast<mapDistributeBase&>(map) >> map.transformElements_ >> map.transformStart_; return is; @@ -1333,11 +558,9 @@ Foam::Istream& Foam::operator>>(Istream& is, mapDistribute& map) Foam::Ostream& Foam::operator<<(Ostream& os, const mapDistribute& map) { - os << map.constructSize_ << token::NL - << map.subMap_ << token::NL - << map.constructMap_ << token::NL + os << static_cast<const mapDistributeBase&>(map) << token::NL << map.transformElements_ << token::NL - << map.transformStart_ << token::NL; + << map.transformStart_; return os; } diff --git a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.H b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.H index b7b2299490b7011ebcdbc9b82695b2e802efe878..50e3095002e01ca8787af07c4ec3ddd2d6c2030b 100644 --- a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.H +++ b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.H @@ -118,6 +118,12 @@ Note: +------+ 0 + When constructing from components optionally a 'flip' on + the maps can be specified. This will interpret the map + values as index+flip, similar to e.g. faceProcAddressing. The flip + will only be applied to fieldTypes (scalar, vector, .. 
triad) + + SourceFiles mapDistribute.C mapDistributeTemplates.C @@ -127,12 +133,8 @@ SourceFiles #ifndef mapDistribute_H #define mapDistribute_H +#include "mapDistributeBase.H" #include "transformList.H" -#include "labelList.H" -#include "labelPair.H" -#include "Pstream.H" -#include "boolList.H" -#include "Map.H" #include "vectorTensorTransform.H" #include "coupledPolyPatch.H" @@ -141,9 +143,6 @@ SourceFiles namespace Foam { -class mapPolyMesh; -class globalIndex; -class PstreamBuffers; class globalIndexAndTransform; /*---------------------------------------------------------------------------*\ @@ -151,75 +150,19 @@ class globalIndexAndTransform; \*---------------------------------------------------------------------------*/ class mapDistribute +: + public mapDistributeBase { // Private data - //- Size of reconstructed data - label constructSize_; - - //- Maps from subsetted data back to original data - labelListList subMap_; - - //- Maps from subsetted data to new reconstructed data - labelListList constructMap_; - - // Optional transformation - - //- For every globalIndexAndTransform::transformPermutations - // gives the elements that need to be transformed - labelListList transformElements_; - - //- Destination in constructMap for transformed elements - labelList transformStart_; - - - //- Schedule - mutable autoPtr<List<labelPair>> schedulePtr_; - - - // Private Member Functions + //- For every globalIndexAndTransform::transformPermutations + // gives the elements that need to be transformed + labelListList transformElements_; - static void checkReceivedSize - ( - const label proci, - const label expectedSize, - const label receivedSize - ); - - //- Construct per processor compact addressing of the global elements - // needed. The ones from the local processor are not included since - // these are always all needed. 
- void calcCompactAddressing - ( - const globalIndex& globalNumbering, - const labelList& elements, - List<Map<label>>& compactMap - ) const; - - void calcCompactAddressing - ( - const globalIndex& globalNumbering, - const labelListList& elements, - List<Map<label>>& compactMap - ) const; - - void exchangeAddressing - ( - const int tag, - const globalIndex& globalNumbering, - labelList& elements, - List<Map<label>>& compactMap, - labelList& compactStart - ); - void exchangeAddressing - ( - const int tag, - const globalIndex& globalNumbering, - labelListList& elements, - List<Map<label>>& compactMap, - labelList& compactStart - ); + //- Destination in constructMap for transformed elements + labelList transformStart_; + // Private Member Functions //- Helper function: copy transformElements without transformation template<class T> @@ -245,7 +188,6 @@ class mapDistribute const TransformOp& top ) const; - public: // Public classes @@ -362,7 +304,6 @@ public: }; - // Declare name of the class and its debug switch ClassName("mapDistribute"); @@ -377,7 +318,9 @@ public: ( const label constructSize, const Xfer<labelListList>& subMap, - const Xfer<labelListList>& constructMap + const Xfer<labelListList>& constructMap, + const bool subHasFlip = false, + const bool constructHasFlip = false ); //- Construct from components @@ -387,7 +330,9 @@ public: const Xfer<labelListList>& subMap, const Xfer<labelListList>& constructMap, const Xfer<labelListList>& transformElements, - const Xfer<labelList>& transformStart + const Xfer<labelList>& transformStart, + const bool subHasFlip = false, + const bool constructHasFlip = false ); //- Construct from reverse addressing: per data item the send @@ -457,46 +402,21 @@ public: //- Construct copy mapDistribute(const mapDistribute&); + //- Construct from Istream + mapDistribute(Istream&); - // Member Functions + //- Clone + autoPtr<mapDistribute> clone() const; - // Access - //- Constructed data size - label constructSize() const - { - return constructSize_; - } + //- Destructor + virtual ~mapDistribute() + {} - //- Constructed data size - label& constructSize() - { - return constructSize_; - } - //- From subsetted data back to original data - const labelListList& subMap() const - { - return subMap_; - } - - //- From subsetted data back to original data - labelListList& subMap() - { - return subMap_; - } - - //- From subsetted data to new reconstructed data - const labelListList& constructMap() const - { - return constructMap_; - } + // Member Functions - //- From subsetted data to new reconstructed data - labelListList& constructMap() - { - return constructMap_; - } + // Access //- For every globalIndexAndTransform::transformPermutations // gives the elements that need to be transformed @@ -514,17 +434,6 @@ public: //- Find transform from transformElements label whichTransform(const label index) const; - //- Calculate a schedule. See above. - static List<labelPair> schedule - ( - const labelListList& subMap, - const labelListList& constructMap, - const int tag - ); - - //- Return a schedule. Demand driven. See above. - const List<labelPair>& schedule() const; - // Other @@ -534,59 +443,22 @@ public: //- Transfer contents to the Xfer container Xfer<mapDistribute> xfer(); - //- Helper for construct from globalIndex. Renumbers element - // (in globalIndex numbering) into compact indices. - static label renumber - ( - const globalIndex&, - const List<Map<label>>& compactMap, - const label globalElement - ); - - //- Compact maps. 
Gets per field a bool whether it is used (locally) - // and works out itself what this side and sender side can remove - // from maps. - void compact - ( - const boolList& elemIsUsed, - const int tag = UPstream::msgType() - ); - //- Distribute data. Note:schedule only used for Pstream::scheduled - // for now, all others just use send-to-all, receive-from-all. + //- Distribute data using default commsType. template<class T> - static void distribute - ( - const Pstream::commsTypes commsType, - const List<labelPair>& schedule, - const label constructSize, - const labelListList& subMap, - const labelListList& constructMap, - List<T>&, - const int tag = UPstream::msgType() - ); - - //- Distribute data. If multiple processors writing to same - // position adds contributions using cop. - template<class T, class CombineOp> - static void distribute + void distribute ( - const Pstream::commsTypes commsType, - const List<labelPair>& schedule, - const label constructSize, - const labelListList& subMap, - const labelListList& constructMap, - List<T>&, - const CombineOp& cop, - const T& nullValue, + List<T>& fld, + const bool dummyTransform = true, const int tag = UPstream::msgType() - ); + ) const; //- Distribute data using default commsType. - template<class T> + template<class T, class negateOp> void distribute ( - DynamicList<T>& fld, + List<T>& fld, + const negateOp& negOp, const bool dummyTransform = true, const int tag = UPstream::msgType() ) const; @@ -595,56 +467,56 @@ public: template<class T> void distribute ( - List<T>& fld, + DynamicList<T>& fld, const bool dummyTransform = true, const int tag = UPstream::msgType() ) const; - //- Same but with transforms - template<class T, class TransformOp> - void distribute + //- Reverse distribute data using default commsType. + template<class T> + void reverseDistribute ( - const globalIndexAndTransform&, - List<T>& fld, - const TransformOp& top, + const label constructSize, + List<T>&, + const bool dummyTransform = true, const int tag = UPstream::msgType() ) const; //- Reverse distribute data using default commsType. + // Since constructSize might be larger than supplied size supply + // a nullValue template<class T> void reverseDistribute ( const label constructSize, - List<T>&, + const T& nullValue, + List<T>& fld, const bool dummyTransform = true, const int tag = UPstream::msgType() ) const; - //- Same but with transforms + //- Distribute with transforms template<class T, class TransformOp> - void reverseDistribute + void distribute ( const globalIndexAndTransform&, - const label constructSize, List<T>& fld, const TransformOp& top, const int tag = UPstream::msgType() ) const; - //- Reverse distribute data using default commsType. 
- // Since constructSize might be larger than supplied size supply - // a nullValue - template<class T> + //- Reverse distribute with transforms + template<class T, class TransformOp> void reverseDistribute ( + const globalIndexAndTransform&, const label constructSize, - const T& nullValue, List<T>& fld, - const bool dummyTransform = true, + const TransformOp& top, const int tag = UPstream::msgType() ) const; - //- Same but with transforms + //- Reverse distribute with transforms template<class T, class TransformOp> void reverseDistribute ( @@ -656,13 +528,6 @@ public: const int tag = UPstream::msgType() ) const; - //- Do all sends using PstreamBuffers - template<class T> - void send(PstreamBuffers&, const List<T>&) const; - //- Do all receives using PstreamBuffers - template<class T> - void receive(PstreamBuffers&, List<T>&) const; - //- Debug: print layout. Can only be used on maps with sorted // storage (local data first, then non-local data) void printLayout(Ostream& os) const; @@ -670,14 +535,16 @@ public: //- Correct for topo change. void updateMesh(const mapPolyMesh&) { - NotImplemented; + notImplemented + ( + "mapDistribute::updateMesh(const mapPolyMesh&)" + ); } // Member Operators void operator=(const mapDistribute&); - // IOstream operators //- Read dictionary from Istream @@ -689,6 +556,7 @@ public: }; +// Template specialisation for primitives that do not need transform template<> void mapDistribute::transform::operator() ( diff --git a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBase.C b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBase.C new file mode 100644 index 0000000000000000000000000000000000000000..e5a7879e24c9686ffbea92ae37498b09b656fbae --- /dev/null +++ b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBase.C @@ -0,0 +1,1268 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
+ +\*---------------------------------------------------------------------------*/ + +#include "mapDistributeBase.H" +#include "commSchedule.H" +#include "HashSet.H" +#include "globalIndex.H" +#include "ListOps.H" + +// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // + +namespace Foam +{ +defineTypeNameAndDebug(mapDistributeBase, 0); +} + + +// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // + +Foam::List<Foam::labelPair> Foam::mapDistributeBase::schedule +( + const labelListList& subMap, + const labelListList& constructMap, + const int tag +) +{ + // Communications: send and receive processor + List<labelPair> allComms; + + { + HashSet<labelPair, labelPair::Hash<>> commsSet(Pstream::nProcs()); + + // Find what communication is required + forAll(subMap, proci) + { + if (proci != Pstream::myProcNo()) + { + if (subMap[proci].size()) + { + // I need to send to proci + commsSet.insert(labelPair(Pstream::myProcNo(), proci)); + } + if (constructMap[proci].size()) + { + // I need to receive from proci + commsSet.insert(labelPair(proci, Pstream::myProcNo())); + } + } + } + allComms = commsSet.toc(); + } + + + // Reduce + if (Pstream::master()) + { + // Receive and merge + for + ( + int slave=Pstream::firstSlave(); + slave<=Pstream::lastSlave(); + slave++ + ) + { + IPstream fromSlave(Pstream::scheduled, slave, 0, tag); + List<labelPair> nbrData(fromSlave); + + forAll(nbrData, i) + { + if (findIndex(allComms, nbrData[i]) == -1) + { + label sz = allComms.size(); + allComms.setSize(sz+1); + allComms[sz] = nbrData[i]; + } + } + } + // Send back + for + ( + int slave=Pstream::firstSlave(); + slave<=Pstream::lastSlave(); + slave++ + ) + { + OPstream toSlave(Pstream::scheduled, slave, 0, tag); + toSlave << allComms; + } + } + else + { + { + OPstream toMaster(Pstream::scheduled, Pstream::masterNo(), 0, tag); + toMaster << allComms; + } + { + IPstream fromMaster + ( + Pstream::scheduled, + Pstream::masterNo(), + 0, + tag + ); + fromMaster >> allComms; + } + } + + + // Determine my schedule. + labelList mySchedule + ( + commSchedule + ( + Pstream::nProcs(), + allComms + ).procSchedule()[Pstream::myProcNo()] + ); + + // Processors involved in my schedule + return List<labelPair>(UIndirectList<labelPair>(allComms, mySchedule)); + + + //if (debug) + //{ + // Pout<< "I need to:" << endl; + // const List<labelPair>& comms = schedule(); + // forAll(comms, i) + // { + // const labelPair& twoProcs = comms[i]; + // label sendProc = twoProcs[0]; + // label recvProc = twoProcs[1]; + // + // if (recvProc == Pstream::myProcNo()) + // { + // Pout<< " receive from " << sendProc << endl; + // } + // else + // { + // Pout<< " send to " << recvProc << endl; + // } + // } + //} +} + + +const Foam::List<Foam::labelPair>& Foam::mapDistributeBase::schedule() const +{ + if (schedulePtr_.empty()) + { + schedulePtr_.reset + ( + new List<labelPair> + ( + schedule(subMap_, constructMap_, Pstream::msgType()) + ) + ); + } + return schedulePtr_(); +} + + +void Foam::mapDistributeBase::checkReceivedSize +( + const label proci, + const label expectedSize, + const label receivedSize +) +{ + if (receivedSize != expectedSize) + { + FatalErrorInFunction + << "Expected from processor " << proci + << " " << expectedSize << " but received " + << receivedSize << " elements." + << abort(FatalError); + } +} + + +void Foam::mapDistributeBase::printLayout(Ostream& os) const +{ + // Determine offsets of remote data. 
+ labelList minIndex(Pstream::nProcs(), labelMax); + labelList maxIndex(Pstream::nProcs(), labelMin); + forAll(constructMap_, proci) + { + const labelList& construct = constructMap_[proci]; + if (constructHasFlip_) + { + forAll(construct, i) + { + label index = mag(construct[i])-1; + minIndex[proci] = min(minIndex[proci], index); + maxIndex[proci] = max(maxIndex[proci], index); + } + } + else + { + forAll(construct, i) + { + label index = construct[i]; + minIndex[proci] = min(minIndex[proci], index); + maxIndex[proci] = max(maxIndex[proci], index); + } + } + } + + label localSize; + if (maxIndex[Pstream::myProcNo()] == labelMin) + { + localSize = 0; + } + else + { + localSize = maxIndex[Pstream::myProcNo()]+1; + } + + os << "Layout: (constructSize:" << constructSize_ + << " subHasFlip:" << subHasFlip_ + << " constructHasFlip:" << constructHasFlip_ + << ")" << endl + << "local (processor " << Pstream::myProcNo() << "):" << endl + << " start : 0" << endl + << " size : " << localSize << endl; + + label offset = localSize; + forAll(minIndex, proci) + { + if (proci != Pstream::myProcNo()) + { + if (constructMap_[proci].size() > 0) + { + if (minIndex[proci] != offset) + { + FatalErrorInFunction + << "offset:" << offset + << " proci:" << proci + << " minIndex:" << minIndex[proci] + << abort(FatalError); + } + + label size = maxIndex[proci]-minIndex[proci]+1; + os << "processor " << proci << ':' << endl + << " start : " << offset << endl + << " size : " << size << endl; + + offset += size; + } + } + } +} + + +void Foam::mapDistributeBase::calcCompactAddressing +( + const globalIndex& globalNumbering, + const labelList& elements, + List<Map<label>>& compactMap +) const +{ + compactMap.setSize(Pstream::nProcs()); + + // Count all (non-local) elements needed. Just for presizing map. + labelList nNonLocal(Pstream::nProcs(), 0); + + forAll(elements, i) + { + label globalIndex = elements[i]; + + if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex)) + { + label proci = globalNumbering.whichProcID(globalIndex); + nNonLocal[proci]++; + } + } + + forAll(compactMap, proci) + { + compactMap[proci].clear(); + if (proci != Pstream::myProcNo()) + { + compactMap[proci].resize(2*nNonLocal[proci]); + } + } + + + // Collect all (non-local) elements needed. + forAll(elements, i) + { + label globalIndex = elements[i]; + + if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex)) + { + label proci = globalNumbering.whichProcID(globalIndex); + label index = globalNumbering.toLocal(proci, globalIndex); + label nCompact = compactMap[proci].size(); + compactMap[proci].insert(index, nCompact); + } + } +} + + +void Foam::mapDistributeBase::calcCompactAddressing +( + const globalIndex& globalNumbering, + const labelListList& cellCells, + List<Map<label>>& compactMap +) const +{ + compactMap.setSize(Pstream::nProcs()); + + // Count all (non-local) elements needed. Just for presizing map. + labelList nNonLocal(Pstream::nProcs(), 0); + + forAll(cellCells, cellI) + { + const labelList& cCells = cellCells[cellI]; + + forAll(cCells, i) + { + label globalIndex = cCells[i]; + + if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex)) + { + label proci = globalNumbering.whichProcID(globalIndex); + nNonLocal[proci]++; + } + } + } + + forAll(compactMap, proci) + { + compactMap[proci].clear(); + if (proci != Pstream::myProcNo()) + { + compactMap[proci].resize(2*nNonLocal[proci]); + } + } + + + // Collect all (non-local) elements needed. 
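What the second pass below builds is, per remote processor, a map keyed on that processor's local index whose value is initially just the insertion order. A minimal illustration with assumed values, for a processor that needs remote local indices 3 and 7 from processor 1:

    Map<label>& procMap = compactMap[1];    // assumed remote processor 1
    procMap.insert(3, 0);                   // first needed element
    procMap.insert(7, 1);                   // second needed element
    // exchangeAddressing later overwrites these values with the final
    // compact positions (compactStart[1] + 0, compactStart[1] + 1)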
+ forAll(cellCells, cellI) + { + const labelList& cCells = cellCells[cellI]; + + forAll(cCells, i) + { + label globalIndex = cCells[i]; + + if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex)) + { + label proci = globalNumbering.whichProcID(globalIndex); + label index = globalNumbering.toLocal(proci, globalIndex); + label nCompact = compactMap[proci].size(); + compactMap[proci].insert(index, nCompact); + } + } + } +} + + +void Foam::mapDistributeBase::exchangeAddressing +( + const int tag, + const globalIndex& globalNumbering, + labelList& elements, + List<Map<label>>& compactMap, + labelList& compactStart +) +{ + // The overall compact addressing is + // - myProcNo data first (uncompacted) + // - all other processors consecutively + + compactStart.setSize(Pstream::nProcs()); + compactStart[Pstream::myProcNo()] = 0; + constructSize_ = globalNumbering.localSize(); + forAll(compactStart, proci) + { + if (proci != Pstream::myProcNo()) + { + compactStart[proci] = constructSize_; + constructSize_ += compactMap[proci].size(); + } + } + + + + // Find out what to receive/send in compact addressing. + + // What I want to receive is what others have to send + labelListList wantedRemoteElements(Pstream::nProcs()); + // Compact addressing for received data + constructMap_.setSize(Pstream::nProcs()); + forAll(compactMap, proci) + { + if (proci == Pstream::myProcNo()) + { + // All my own elements are used + label nLocal = globalNumbering.localSize(); + wantedRemoteElements[proci] = identity(nLocal); + constructMap_[proci] = identity(nLocal); + } + else + { + // Remote elements wanted from processor proci + labelList& remoteElem = wantedRemoteElements[proci]; + labelList& localElem = constructMap_[proci]; + remoteElem.setSize(compactMap[proci].size()); + localElem.setSize(compactMap[proci].size()); + label i = 0; + forAllIter(Map<label>, compactMap[proci], iter) + { + const label compactI = compactStart[proci] + iter(); + remoteElem[i] = iter.key(); + localElem[i] = compactI; + iter() = compactI; + i++; + } + } + } + + subMap_.setSize(Pstream::nProcs()); + Pstream::exchange<labelList, label> + ( + wantedRemoteElements, + subMap_, + tag, + Pstream::worldComm //TBD + ); + + // Renumber elements + forAll(elements, i) + { + elements[i] = renumber(globalNumbering, compactMap, elements[i]); + } +} + + +void Foam::mapDistributeBase::exchangeAddressing +( + const int tag, + const globalIndex& globalNumbering, + labelListList& cellCells, + List<Map<label>>& compactMap, + labelList& compactStart +) +{ + // The overall compact addressing is + // - myProcNo data first (uncompacted) + // - all other processors consecutively + + compactStart.setSize(Pstream::nProcs()); + compactStart[Pstream::myProcNo()] = 0; + constructSize_ = globalNumbering.localSize(); + forAll(compactStart, proci) + { + if (proci != Pstream::myProcNo()) + { + compactStart[proci] = constructSize_; + constructSize_ += compactMap[proci].size(); + } + } + + + + // Find out what to receive/send in compact addressing. 
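At this point compactStart[proci] holds the offset at which data received from proci will sit in the compact list, with local data occupying [0, localSize). The static renumber() helper maps a global label into that layout; a minimal sketch, assuming globalNumbering and compactMap as in this function and an arbitrary global label globalI:

    // -1 stays -1, local labels map to their local index, remote labels
    // map to compactStart[proci] + their slot in compactMap[proci]
    label compactI =
        mapDistributeBase::renumber(globalNumbering, compactMap, globalI);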
+ + // What I want to receive is what others have to send + labelListList wantedRemoteElements(Pstream::nProcs()); + // Compact addressing for received data + constructMap_.setSize(Pstream::nProcs()); + forAll(compactMap, proci) + { + if (proci == Pstream::myProcNo()) + { + // All my own elements are used + label nLocal = globalNumbering.localSize(); + wantedRemoteElements[proci] = identity(nLocal); + constructMap_[proci] = identity(nLocal); + } + else + { + // Remote elements wanted from processor proci + labelList& remoteElem = wantedRemoteElements[proci]; + labelList& localElem = constructMap_[proci]; + remoteElem.setSize(compactMap[proci].size()); + localElem.setSize(compactMap[proci].size()); + label i = 0; + forAllIter(Map<label>, compactMap[proci], iter) + { + const label compactI = compactStart[proci] + iter(); + remoteElem[i] = iter.key(); + localElem[i] = compactI; + iter() = compactI; + i++; + } + } + } + + subMap_.setSize(Pstream::nProcs()); + Pstream::exchange<labelList, label> + ( + wantedRemoteElements, + subMap_, + tag, + Pstream::worldComm //TBD + ); + + // Renumber elements + forAll(cellCells, cellI) + { + labelList& cCells = cellCells[cellI]; + + forAll(cCells, i) + { + cCells[i] = renumber(globalNumbering, compactMap, cCells[i]); + } + } +} + + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::mapDistributeBase::mapDistributeBase() +: + constructSize_(0), + subHasFlip_(false), + constructHasFlip_(false), + schedulePtr_() +{} + + +Foam::mapDistributeBase::mapDistributeBase +( + const label constructSize, + const Xfer<labelListList>& subMap, + const Xfer<labelListList>& constructMap, + const bool subHasFlip, + const bool constructHasFlip +) +: + constructSize_(constructSize), + subMap_(subMap), + constructMap_(constructMap), + subHasFlip_(subHasFlip), + constructHasFlip_(constructHasFlip), + schedulePtr_() +{} + + +Foam::mapDistributeBase::mapDistributeBase +( + const labelList& sendProcs, + const labelList& recvProcs +) +: + constructSize_(0), + subHasFlip_(false), + constructHasFlip_(false), + schedulePtr_() +{ + if (sendProcs.size() != recvProcs.size()) + { + FatalErrorInFunction + << "The send and receive data is not the same length. sendProcs:" + << sendProcs.size() << " recvProcs:" << recvProcs.size() + << abort(FatalError); + } + + // Per processor the number of samples we have to send/receive. + labelList nSend(Pstream::nProcs(), 0); + labelList nRecv(Pstream::nProcs(), 0); + + forAll(sendProcs, sampleI) + { + label sendProc = sendProcs[sampleI]; + label recvProc = recvProcs[sampleI]; + + // Note that also need to include local communication (both + // RecvProc and sendProc on local processor) + + if (Pstream::myProcNo() == sendProc) + { + // I am the sender. Count destination processor. + nSend[recvProc]++; + } + if (Pstream::myProcNo() == recvProc) + { + // I am the receiver. + nRecv[sendProc]++; + } + } + + subMap_.setSize(Pstream::nProcs()); + constructMap_.setSize(Pstream::nProcs()); + forAll(nSend, proci) + { + subMap_[proci].setSize(nSend[proci]); + constructMap_[proci].setSize(nRecv[proci]); + } + nSend = 0; + nRecv = 0; + + forAll(sendProcs, sampleI) + { + label sendProc = sendProcs[sampleI]; + label recvProc = recvProcs[sampleI]; + + if (Pstream::myProcNo() == sendProc) + { + // I am the sender. Store index I need to send. + subMap_[recvProc][nSend[recvProc]++] = sampleI; + } + if (Pstream::myProcNo() == recvProc) + { + // I am the receiver. 
+ constructMap_[sendProc][nRecv[sendProc]++] = sampleI; + // Largest entry inside constructMap + constructSize_ = sampleI+1; + } + } +} + + +Foam::mapDistributeBase::mapDistributeBase +( + const globalIndex& globalNumbering, + labelList& elements, + List<Map<label>>& compactMap, + const int tag +) +: + constructSize_(0), + subHasFlip_(false), + constructHasFlip_(false), + schedulePtr_() +{ + // Construct per processor compact addressing of the global elements + // needed. The ones from the local processor are not included since + // these are always all needed. + calcCompactAddressing + ( + globalNumbering, + elements, + compactMap + ); + + //// Sort remote elements needed (not really necessary) + //forAll(compactMap, proci) + //{ + // if (proci != Pstream::myProcNo()) + // { + // Map<label>& globalMap = compactMap[proci]; + // + // SortableList<label> sorted(globalMap.toc().xfer()); + // + // forAll(sorted, i) + // { + // Map<label>::iterator iter = globalMap.find(sorted[i]); + // iter() = i; + // } + // } + //} + + + // Exchange what I need with processor that supplies it. Renumber elements + // into compact numbering + labelList compactStart; + exchangeAddressing + ( + tag, + globalNumbering, + elements, + compactMap, + compactStart + ); + + if (debug) + { + printLayout(Pout); + } +} + + +Foam::mapDistributeBase::mapDistributeBase +( + const globalIndex& globalNumbering, + labelListList& cellCells, + List<Map<label>>& compactMap, + const int tag +) +: + constructSize_(0), + subHasFlip_(false), + constructHasFlip_(false), + schedulePtr_() +{ + // Construct per processor compact addressing of the global elements + // needed. The ones from the local processor are not included since + // these are always all needed. + calcCompactAddressing + ( + globalNumbering, + cellCells, + compactMap + ); + + //// Sort remote elements needed (not really necessary) + //forAll(compactMap, proci) + //{ + // if (proci != Pstream::myProcNo()) + // { + // Map<label>& globalMap = compactMap[proci]; + // + // SortableList<label> sorted(globalMap.toc().xfer()); + // + // forAll(sorted, i) + // { + // Map<label>::iterator iter = globalMap.find(sorted[i]); + // iter() = i; + // } + // } + //} + + + // Exchange what I need with processor that supplies it. 
Renumber elements + // into compact numbering + labelList compactStart; + exchangeAddressing + ( + tag, + globalNumbering, + cellCells, + compactMap, + compactStart + ); + + if (debug) + { + printLayout(Pout); + } +} + + +Foam::mapDistributeBase::mapDistributeBase(const mapDistributeBase& map) +: + constructSize_(map.constructSize_), + subMap_(map.subMap_), + constructMap_(map.constructMap_), + subHasFlip_(map.subHasFlip_), + constructHasFlip_(map.constructHasFlip_), + schedulePtr_() +{} + + +Foam::mapDistributeBase::mapDistributeBase(const Xfer<mapDistributeBase>& map) +: + constructSize_(map().constructSize_), + subMap_(map().subMap_.xfer()), + constructMap_(map().constructMap_.xfer()), + subHasFlip_(map().subHasFlip_), + constructHasFlip_(map().constructHasFlip_), + schedulePtr_() +{} + + +Foam::mapDistributeBase::mapDistributeBase(Istream& is) +{ + is >> *this; +} + + +// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // + +void Foam::mapDistributeBase::transfer(mapDistributeBase& rhs) +{ + constructSize_ = rhs.constructSize_; + subMap_.transfer(rhs.subMap_); + constructMap_.transfer(rhs.constructMap_); + subHasFlip_ = rhs.subHasFlip_; + constructHasFlip_ = rhs.constructHasFlip_; + schedulePtr_.clear(); +} + + +Foam::Xfer<Foam::mapDistributeBase> Foam::mapDistributeBase::xfer() +{ + return xferMove(*this); +} + + +Foam::label Foam::mapDistributeBase::renumber +( + const globalIndex& globalNumbering, + const List<Map<label>>& compactMap, + const label globalI +) +{ + if (globalI == -1) + { + return globalI; + } + if (globalNumbering.isLocal(globalI)) + { + return globalNumbering.toLocal(globalI); + } + else + { + label proci = globalNumbering.whichProcID(globalI); + label index = globalNumbering.toLocal(proci, globalI); + return compactMap[proci][index]; + } +} + + +void Foam::mapDistributeBase::compact(const boolList& elemIsUsed, const int tag) +{ + // 1. send back to sender. Have sender delete the corresponding element + // from the submap and do the same to the constructMap locally + // (and in same order). + + // Send elemIsUsed field to neighbour. Use nonblocking code from + // mapDistributeBase but in reverse order. 
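From the caller's side the effect of the reverse exchange below is simple: flag which received (construct) slots are actually referenced, and the map prunes itself accordingly. A minimal usage sketch, assuming a previously built map:

    boolList isUsed(map.constructSize(), false);
    isUsed[0] = true;          // ... mark the slots that are really used ...
    map.compact(isUsed);       // prunes subMap/constructMap and shrinks
                               // constructSize() to the highest used slot + 1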
+ if (Pstream::parRun()) + { + label startOfRequests = Pstream::nRequests(); + + // Set up receives from neighbours + + List<boolList> recvFields(Pstream::nProcs()); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap_[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + recvFields[domain].setSize(map.size()); + IPstream::read + ( + Pstream::nonBlocking, + domain, + reinterpret_cast<char*>(recvFields[domain].begin()), + recvFields[domain].size()*sizeof(bool), + tag + ); + } + } + + + List<boolList> sendFields(Pstream::nProcs()); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap_[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + boolList& subField = sendFields[domain]; + subField.setSize(map.size()); + forAll(map, i) + { + subField[i] = accessAndFlip + ( + elemIsUsed, + map[i], + constructHasFlip_, + noOp() // do not flip elemIsUsed value + ); + } + + OPstream::write + ( + Pstream::nonBlocking, + domain, + reinterpret_cast<const char*>(subField.begin()), + subField.size()*sizeof(bool), + tag + ); + } + } + + + + // Set up 'send' to myself - write directly into recvFields + + { + const labelList& map = constructMap_[Pstream::myProcNo()]; + + recvFields[Pstream::myProcNo()].setSize(map.size()); + forAll(map, i) + { + recvFields[Pstream::myProcNo()][i] = accessAndFlip + ( + elemIsUsed, + map[i], + constructHasFlip_, + noOp() // do not flip elemIsUsed value + ); + } + } + + + // Wait for all to finish + + Pstream::waitRequests(startOfRequests); + + + // Compact out all submap entries that are referring to unused elements + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap_[domain]; + + labelList newMap(map.size()); + label newI = 0; + + forAll(map, i) + { + if (recvFields[domain][i]) + { + // So element is used on destination side + newMap[newI++] = map[i]; + } + } + if (newI < map.size()) + { + newMap.setSize(newI); + subMap_[domain].transfer(newMap); + } + } + } + + + // 2. remove from construct map - since end-result (element in elemIsUsed) + // not used. + + label maxConstructIndex = -1; + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap_[domain]; + + labelList newMap(map.size()); + label newI = 0; + + forAll(map, i) + { + label destinationI = map[i]; + if (constructHasFlip_) + { + destinationI = mag(destinationI)-1; + } + + // Is element is used on destination side + if (elemIsUsed[destinationI]) + { + maxConstructIndex = max(maxConstructIndex, destinationI); + + newMap[newI++] = map[i]; + } + } + if (newI < map.size()) + { + newMap.setSize(newI); + constructMap_[domain].transfer(newMap); + } + } + + constructSize_ = maxConstructIndex+1; + + // Clear the schedule (note:not necessary if nothing changed) + schedulePtr_.clear(); +} + + +void Foam::mapDistributeBase::compact +( + const boolList& elemIsUsed, + const label localSize, // max index for subMap + labelList& oldToNewSub, + labelList& oldToNewConstruct, + const int tag +) +{ + // 1. send back to sender. Have sender delete the corresponding element + // from the submap and do the same to the constructMap locally + // (and in same order). + + // Send elemIsUsed field to neighbour. Use nonblocking code from + // mapDistributeBase but in reverse order. 
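Unlike the overload above, this variant also compacts the local layouts and returns the renumbering it applied, so callers can remap their own addressing. A minimal sketch of the intended call pattern, assuming localData is the send-side data, isUsed flags the construct-side slots, and constructSlotLabels are labels into the old received layout:

    labelList oldToNewSub;
    labelList oldToNewConstruct;
    map.compact(isUsed, localData.size(), oldToNewSub, oldToNewConstruct);
    // labels that referenced the old construct (received) layout can be
    // renumbered with the ListOps helper:
    inplaceRenumber(oldToNewConstruct, constructSlotLabels);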
+ if (Pstream::parRun()) + { + label startOfRequests = Pstream::nRequests(); + + // Set up receives from neighbours + + List<boolList> recvFields(Pstream::nProcs()); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap_[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + recvFields[domain].setSize(map.size()); + IPstream::read + ( + Pstream::nonBlocking, + domain, + reinterpret_cast<char*>(recvFields[domain].begin()), + recvFields[domain].size()*sizeof(bool), + tag + ); + } + } + + + List<boolList> sendFields(Pstream::nProcs()); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap_[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + boolList& subField = sendFields[domain]; + subField.setSize(map.size()); + forAll(map, i) + { + label index = map[i]; + if (constructHasFlip_) + { + index = mag(index)-1; + } + subField[i] = elemIsUsed[index]; + } + + OPstream::write + ( + Pstream::nonBlocking, + domain, + reinterpret_cast<const char*>(subField.begin()), + subField.size()*sizeof(bool), + tag + ); + } + } + + + + // Set up 'send' to myself - write directly into recvFields + + { + const labelList& map = constructMap_[Pstream::myProcNo()]; + + recvFields[Pstream::myProcNo()].setSize(map.size()); + forAll(map, i) + { + label index = map[i]; + if (constructHasFlip_) + { + index = mag(index)-1; + } + recvFields[Pstream::myProcNo()][i] = elemIsUsed[index]; + } + } + + + // Wait for all to finish + + Pstream::waitRequests(startOfRequests); + + + + + // Work out which elements on the sending side are needed + { + oldToNewSub.setSize(localSize, -1); + + boolList sendElemIsUsed(localSize, false); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap_[domain]; + forAll(map, i) + { + if (recvFields[domain][i]) + { + label index = map[i]; + if (subHasFlip_) + { + index = mag(index)-1; + } + sendElemIsUsed[index] = true; + } + } + } + + label newI = 0; + forAll(sendElemIsUsed, i) + { + if (sendElemIsUsed[i]) + { + oldToNewSub[i] = newI++; + } + } + } + + + // Compact out all submap entries that are referring to unused elements + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap_[domain]; + + labelList newMap(map.size()); + label newI = 0; + + forAll(map, i) + { + if (recvFields[domain][i]) + { + // So element is used on destination side + label index = map[i]; + label sign = 1; + if (subHasFlip_) + { + if (index < 0) + { + sign = -1; + } + index = mag(index)-1; + } + label newIndex = oldToNewSub[index]; + if (subHasFlip_) + { + newIndex = sign*(newIndex+1); + } + newMap[newI++] = newIndex; + } + } + newMap.setSize(newI); + subMap_[domain].transfer(newMap); + } + } + + + // 2. remove from construct map - since end-result (element in elemIsUsed) + // not used. 
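The sign handling below follows the convention described in the class documentation: with constructHasFlip_ (or subHasFlip_) set, a map entry stores index+1 carrying a sign, like faceProcAddressing. A minimal decode sketch for one assumed raw entry:

    label entry = constructMap_[proci][i];   // assumed raw map entry
    bool flip = (entry < 0);                 // negative: negate the field value
    label index = mag(entry) - 1;            // entries are stored as index + 1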
+ + + oldToNewConstruct.setSize(elemIsUsed.size(), -1); + constructSize_ = 0; + forAll(elemIsUsed, i) + { + if (elemIsUsed[i]) + { + oldToNewConstruct[i] = constructSize_++; + } + } + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap_[domain]; + + labelList newMap(map.size()); + label newI = 0; + + forAll(map, i) + { + label destinationI = map[i]; + label sign = 1; + if (constructHasFlip_) + { + if (destinationI < 0) + { + sign = -1; + } + destinationI = mag(destinationI)-1; + } + + // Is element is used on destination side + if (elemIsUsed[destinationI]) + { + label newIndex = oldToNewConstruct[destinationI]; + if (constructHasFlip_) + { + newIndex = sign*(newIndex+1); + } + newMap[newI++] = newIndex; + } + } + newMap.setSize(newI); + constructMap_[domain].transfer(newMap); + } +} + + +// * * * * * * * * * * * * * * * Member Operators * * * * * * * * * * * * * // + +void Foam::mapDistributeBase::operator=(const mapDistributeBase& rhs) +{ + // Check for assignment to self + if (this == &rhs) + { + FatalErrorInFunction + << "Attempted assignment to self" + << abort(FatalError); + } + constructSize_ = rhs.constructSize_; + subMap_ = rhs.subMap_; + constructMap_ = rhs.constructMap_; + subHasFlip_ = rhs.subHasFlip_; + constructHasFlip_ = rhs.constructHasFlip_; + schedulePtr_.clear(); +} + + +// * * * * * * * * * * * * * * Istream Operator * * * * * * * * * * * * * * // + +Foam::Istream& Foam::operator>>(Istream& is, mapDistributeBase& map) +{ + is.fatalCheck("operator>>(Istream&, mapDistributeBase&)"); + + is >> map.constructSize_ >> map.subMap_ >> map.constructMap_ + >> map.subHasFlip_ >> map.constructHasFlip_; + + return is; +} + + +// * * * * * * * * * * * * * * Ostream Operator * * * * * * * * * * * * * * // + +Foam::Ostream& Foam::operator<<(Ostream& os, const mapDistributeBase& map) +{ + os << map.constructSize_ << token::NL + << map.subMap_ << token::NL + << map.constructMap_ << token::NL + << map.subHasFlip_ << token::SPACE << map.constructHasFlip_ + << token::NL; + + return os; +} + + +// ************************************************************************* // diff --git a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBase.H b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBase.H new file mode 100644 index 0000000000000000000000000000000000000000..fe9a9be6159809b9f6661349d76c4f1905f2dba4 --- /dev/null +++ b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBase.H @@ -0,0 +1,486 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
+ +Class + Foam::mapDistributeBase + +Description + Class containing processor-to-processor mapping information. + + We store mapping from the bits-to-send to the complete starting list + (subXXXMap) and from the received bits to their location in the new + list (constructXXXMap). + +Note: + Schedule is a list of processor pairs (one send, one receive. One of + them will be myself) which forms a scheduled (i.e. non-buffered) exchange. + See distribute on how to use it. + Note2: number of items sent on one processor have to equal the number + of items received on the other processor. + + To aid constructing these maps there are the constructors from global + numbering, either with or without transforms. + + Constructors using compact numbering: layout is + - all my own elements first (whether used or not) + - followed by used-only remote elements sorted by remote processor. + So e.g 4 procs and on proc 1 the compact + table will first have all globalIndex.localSize() elements from proc1 + followed by used-only elements of proc0, proc2, proc3. + The constructed mapDistributeBase sends the local elements from and + receives the remote elements into their compact position. + compactMap[proci] is the position of elements from proci in the compact + map. compactMap[myProcNo()] is empty since trivial addressing. + + It rewrites the input global indices into indices into the constructed + data. + + When constructing from components optionally a 'flip' on + the maps can be specified. This will interpret the map + values as index+flip, similar to e.g. faceProcAddressing. The flip + will only be applied to fieldTypes (scalar, vector, .. triad) + + +SourceFiles + mapDistributeBase.C + mapDistributeBaseTemplates.C + +\*---------------------------------------------------------------------------*/ + +#ifndef mapDistributeBase_H +#define mapDistributeBase_H + +#include "labelList.H" +#include "labelPair.H" +#include "Pstream.H" +#include "boolList.H" +#include "Map.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +class mapPolyMesh; +class globalIndex; +class PstreamBuffers; + +/*---------------------------------------------------------------------------*\ + Class mapDistributeBase Declaration +\*---------------------------------------------------------------------------*/ + +class mapDistributeBase +{ +protected: + + // Protected data + + //- Size of reconstructed data + label constructSize_; + + //- Maps from subsetted data back to original data + labelListList subMap_; + + //- Maps from subsetted data to new reconstructed data + labelListList constructMap_; + + //- Whether subMap includes flip or not + bool subHasFlip_; + + //- Whether constructMap includes flip or not + bool constructHasFlip_; + + + //- Schedule + mutable autoPtr<List<labelPair>> schedulePtr_; + + + // Private Member Functions + + static void checkReceivedSize + ( + const label proci, + const label expectedSize, + const label receivedSize + ); + + //- Construct per processor compact addressing of the global elements + // needed. The ones from the local processor are not included since + // these are always all needed. 
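The compact layout described above is easiest to see from the caller's side; a minimal usage sketch, assuming a mesh and a labelList wantedGlobalCells of global cell labels needed locally:

    globalIndex globalCells(mesh.nCells());
    labelList wanted(wantedGlobalCells);      // renumbered in place below
    List<Map<label>> compactMap;
    mapDistributeBase map(globalCells, wanted, compactMap);

    scalarList cellData(mesh.nCells(), 0.0);  // local values to send
    map.distribute(cellData);                 // resized to constructSize()
    // wanted[i] now indexes directly into the distributed cellData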
+ void calcCompactAddressing + ( + const globalIndex& globalNumbering, + const labelList& elements, + List<Map<label>>& compactMap + ) const; + + void calcCompactAddressing + ( + const globalIndex& globalNumbering, + const labelListList& elements, + List<Map<label>>& compactMap + ) const; + + void exchangeAddressing + ( + const int tag, + const globalIndex& globalNumbering, + labelList& elements, + List<Map<label>>& compactMap, + labelList& compactStart + ); + void exchangeAddressing + ( + const int tag, + const globalIndex& globalNumbering, + labelListList& elements, + List<Map<label>>& compactMap, + labelList& compactStart + ); + + template<class T, class CombineOp, class negateOp> + static void flipAndCombine + ( + const UList<label>& map, + const bool hasFlip, + const UList<T>& rhs, + const CombineOp& cop, + const negateOp& negOp, + List<T>& lhs + ); + + template<class T, class negateOp> + static T accessAndFlip + ( + const UList<T>& fld, + const label index, + const bool hasFlip, + const negateOp& negOp + ); + +public: + + // Declare name of the class and its debug switch + ClassName("mapDistributeBase"); + + + // Constructors + + //- Construct null + mapDistributeBase(); + + //- Construct from components + mapDistributeBase + ( + const label constructSize, + const Xfer<labelListList>& subMap, + const Xfer<labelListList>& constructMap, + const bool subHasFlip = false, + const bool constructHasFlip = false + ); + + //- Construct from reverse addressing: per data item the send + // processor and the receive processor. (note: data is not stored + // sorted per processor so cannot use printLayout). + mapDistributeBase + ( + const labelList& sendProcs, + const labelList& recvProcs + ); + + //- Construct from list of (possibly) remote elements in globalIndex + // numbering (or -1). Determines compact numbering (see above) and + // distribute map to get data into this ordering and renumbers the + // elements to be in compact numbering. + mapDistributeBase + ( + const globalIndex&, + labelList& elements, + List<Map<label>>& compactMap, + const int tag = Pstream::msgType() + ); + + //- Special variant that works with the info sorted into bins + // according to local indices. E.g. 
think cellCells where + // cellCells[localCellI] is a list of global cells + mapDistributeBase + ( + const globalIndex&, + labelListList& cellCells, + List<Map<label>>& compactMap, + const int tag = Pstream::msgType() + ); + + //- Construct by transferring parameter content + mapDistributeBase(const Xfer<mapDistributeBase>&); + + //- Construct copy + mapDistributeBase(const mapDistributeBase&); + + //- Construct from Istream + mapDistributeBase(Istream&); + + + // Member Functions + + // Access + + //- Constructed data size + label constructSize() const + { + return constructSize_; + } + + //- Constructed data size + label& constructSize() + { + return constructSize_; + } + + //- From subsetted data back to original data + const labelListList& subMap() const + { + return subMap_; + } + + //- From subsetted data back to original data + labelListList& subMap() + { + return subMap_; + } + + //- From subsetted data to new reconstructed data + const labelListList& constructMap() const + { + return constructMap_; + } + + //- From subsetted data to new reconstructed data + labelListList& constructMap() + { + return constructMap_; + } + + //- Does subMap include a sign + bool subHasFlip() const + { + return subHasFlip_; + } + + //- Does subMap include a sign + bool& subHasFlip() + { + return subHasFlip_; + } + + //- Does constructMap include a sign + bool constructHasFlip() const + { + return constructHasFlip_; + } + + //- Does constructMap include a sign + bool& constructHasFlip() + { + return constructHasFlip_; + } + + //- Calculate a schedule. See above. + static List<labelPair> schedule + ( + const labelListList& subMap, + const labelListList& constructMap, + const int tag + ); + + //- Return a schedule. Demand driven. See above. + const List<labelPair>& schedule() const; + + + // Other + + //- Transfer the contents of the argument and annul the argument. + void transfer(mapDistributeBase&); + + //- Transfer contents to the Xfer container + Xfer<mapDistributeBase> xfer(); + + //- Helper for construct from globalIndex. Renumbers element + // (in globalIndex numbering) into compact indices. + static label renumber + ( + const globalIndex&, + const List<Map<label>>& compactMap, + const label globalElement + ); + + //- Compact maps. Gets per field a bool whether it is used (locally) + // and works out itself what this side and sender side can remove + // from maps. Only compacts non-local elements (i.e. the stuff + // that gets sent over), does not change the local layout + void compact + ( + const boolList& elemIsUsed, + const int tag = UPstream::msgType() + ); + + //- Compact all maps and layout. Returns compaction maps for + // subMap and constructMap + void compact + ( + const boolList& elemIsUsed, + const label localSize, // max index for subMap + labelList& oldToNewSub, + labelList& oldToNewConstruct, + const int tag = UPstream::msgType() + ); + + //- Distribute data. Note:schedule only used for Pstream::scheduled + // for now, all others just use send-to-all, receive-from-all. + template<class T, class negateOp> + static void distribute + ( + const Pstream::commsTypes commsType, + const List<labelPair>& schedule, + const label constructSize, + const labelListList& subMap, + const bool subHasFlip, + const labelListList& constructMap, + const bool constructHasFlip, + List<T>&, + const negateOp& negOp, + const int tag = UPstream::msgType() + ); + + //- Distribute data. If multiple processors writing to same + // position adds contributions using cop. 
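+            //  Positions receiving no data keep nullValue;
+            //  e.g. cop = plusEqOp<T>() sums all contributions to a slot.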
+ template<class T, class CombineOp, class negateOp> + static void distribute + ( + const Pstream::commsTypes commsType, + const List<labelPair>& schedule, + const label constructSize, + const labelListList& subMap, + const bool subHasFlip, + const labelListList& constructMap, + const bool constructHasFlip, + List<T>&, + const CombineOp& cop, + const negateOp& negOp, + const T& nullValue, + const int tag = UPstream::msgType() + ); + + //- Distribute data using default commsType. + template<class T> + void distribute + ( + List<T>& fld, + const int tag = UPstream::msgType() + ) const; + + //- Distribute data using default commsType. + template<class T, class negateOp> + void distribute + ( + List<T>& fld, + const negateOp& negOp, + const int tag = UPstream::msgType() + ) const; + + //- Distribute data using default commsType. + template<class T> + void distribute + ( + DynamicList<T>& fld, + const int tag = UPstream::msgType() + ) const; + + //- Reverse distribute data using default commsType. + template<class T> + void reverseDistribute + ( + const label constructSize, + List<T>&, + const int tag = UPstream::msgType() + ) const; + + //- Reverse distribute data using default commsType. + // Since constructSize might be larger than supplied size supply + // a nullValue + template<class T> + void reverseDistribute + ( + const label constructSize, + const T& nullValue, + List<T>& fld, + const int tag = UPstream::msgType() + ) const; + + //- Do all sends using PstreamBuffers + template<class T> + void send(PstreamBuffers&, const List<T>&) const; + //- Do all receives using PstreamBuffers + template<class T> + void receive(PstreamBuffers&, List<T>&) const; + + //- Debug: print layout. Can only be used on maps with sorted + // storage (local data first, then non-local data) + void printLayout(Ostream& os) const; + + //- Correct for topo change. + void updateMesh(const mapPolyMesh&) + { + NotImplemented; + } + + // Member Operators + + void operator=(const mapDistributeBase&); + + // IOstream operators + + //- Read dictionary from Istream + friend Istream& operator>>(Istream&, mapDistributeBase&); + + //- Write dictionary to Ostream + friend Ostream& operator<<(Ostream&, const mapDistributeBase&); + +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#ifdef NoRepository + #include "mapDistributeBaseTemplates.C" +#endif + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBaseTemplates.C b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBaseTemplates.C new file mode 100644 index 0000000000000000000000000000000000000000..a445d79f2a6d73a1e8c85dfc48d3c4bc24f77543 --- /dev/null +++ b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeBaseTemplates.C @@ -0,0 +1,1368 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. 
+ + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +\*---------------------------------------------------------------------------*/ + +#include "Pstream.H" +#include "PstreamBuffers.H" +#include "PstreamCombineReduceOps.H" +#include "flipOp.H" + +// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // + +template<class T, class CombineOp, class negateOp> +void Foam::mapDistributeBase::flipAndCombine +( + const UList<label>& map, + const bool hasFlip, + const UList<T>& rhs, + const CombineOp& cop, + const negateOp& negOp, + List<T>& lhs +) +{ + if (hasFlip) + { + forAll(map, i) + { + if (map[i] > 0) + { + label index = map[i]-1; + cop(lhs[index], rhs[i]); + } + else if (map[i] < 0) + { + label index = -map[i]-1; + cop(lhs[index], negOp(rhs[i])); + } + else + { + FatalErrorInFunction + << "At index " << i << " out of " << map.size() + << " have illegal index " << map[i] + << " for field " << rhs.size() << " with flipMap" + << exit(FatalError); + } + } + } + else + { + forAll(map, i) + { + cop(lhs[map[i]], rhs[i]); + } + } +} + + +template<class T, class negateOp> +T Foam::mapDistributeBase::accessAndFlip +( + const UList<T>& fld, + const label index, + const bool hasFlip, + const negateOp& negOp +) +{ + T t; + if (hasFlip) + { + if (index > 0) + { + t = fld[index-1]; + } + else if (index < 0) + { + t = negOp(fld[-index-1]); + } + else + { + FatalErrorInFunction + << "Illegal index " << index + << " into field of size " << fld.size() + << " with face-flipping" + << exit(FatalError); + t = fld[index]; + } + } + else + { + t = fld[index]; + } + return t; +} + + +// Distribute list. +template<class T, class negateOp> +void Foam::mapDistributeBase::distribute +( + const Pstream::commsTypes commsType, + const List<labelPair>& schedule, + const label constructSize, + const labelListList& subMap, + const bool subHasFlip, + const labelListList& constructMap, + const bool constructHasFlip, + List<T>& field, + const negateOp& negOp, + const int tag +) +{ + if (!Pstream::parRun()) + { + // Do only me to me. + + const labelList& mySubMap = subMap[Pstream::myProcNo()]; + + List<T> subField(mySubMap.size()); + forAll(mySubMap, i) + { + subField[i] = accessAndFlip(field, mySubMap[i], subHasFlip, negOp); + } + + // Receive sub field from myself (subField) + const labelList& map = constructMap[Pstream::myProcNo()]; + + field.setSize(constructSize); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + eqOp<T>(), + negOp, + field + ); + + return; + } + + if (commsType == Pstream::blocking) + { + // Since buffered sending can reuse the field to collect the + // received data. 
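+        // (all outgoing values are copied into subField lists before field
+        // is resized below, so sends and receives cannot interfere)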
+ + // Send sub field to neighbour + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + OPstream toNbr(Pstream::blocking, domain, 0, tag); + + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + toNbr << subField; + } + } + + // Subset myself + const labelList& mySubMap = subMap[Pstream::myProcNo()]; + + List<T> subField(mySubMap.size()); + forAll(mySubMap, i) + { + subField[i] = accessAndFlip(field, mySubMap[i], subHasFlip, negOp); + } + + // Receive sub field from myself (subField) + const labelList& map = constructMap[Pstream::myProcNo()]; + + field.setSize(constructSize); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + eqOp<T>(), + negOp, + field + ); + + // Receive sub field from neighbour + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + IPstream fromNbr(Pstream::blocking, domain, 0, tag); + List<T> subField(fromNbr); + + checkReceivedSize(domain, map.size(), subField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + eqOp<T>(), + negOp, + field + ); + } + } + } + else if (commsType == Pstream::scheduled) + { + // Need to make sure I don't overwrite field with received data + // since the data might need to be sent to another processor. So + // allocate a new field for the results. + List<T> newField(constructSize); + + // Receive sub field from myself + { + const labelList& mySubMap = subMap[Pstream::myProcNo()]; + + List<T> subField(mySubMap.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + mySubMap[i], + subHasFlip, + negOp + ); + } + + // Receive sub field from myself (subField) + flipAndCombine + ( + constructMap[Pstream::myProcNo()], + constructHasFlip, + subField, + eqOp<T>(), + negOp, + newField + ); + } + + // Schedule will already have pruned 0-sized comms + forAll(schedule, i) + { + const labelPair& twoProcs = schedule[i]; + // twoProcs is a swap pair of processors. The first one is the + // one that needs to send first and then receive. 
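+            // The other one does the reverse, so the exchange cannot deadlock.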
+ + label sendProc = twoProcs[0]; + label recvProc = twoProcs[1]; + + if (Pstream::myProcNo() == sendProc) + { + // I am send first, receive next + { + OPstream toNbr(Pstream::scheduled, recvProc, 0, tag); + + const labelList& map = subMap[recvProc]; + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + toNbr << subField; + } + { + IPstream fromNbr(Pstream::scheduled, recvProc, 0, tag); + List<T> subField(fromNbr); + + const labelList& map = constructMap[recvProc]; + + checkReceivedSize(recvProc, map.size(), subField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + eqOp<T>(), + negOp, + newField + ); + } + } + else + { + // I am receive first, send next + { + IPstream fromNbr(Pstream::scheduled, sendProc, 0, tag); + List<T> subField(fromNbr); + + const labelList& map = constructMap[sendProc]; + + checkReceivedSize(sendProc, map.size(), subField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + eqOp<T>(), + negOp, + newField + ); + } + { + OPstream toNbr(Pstream::scheduled, sendProc, 0, tag); + + const labelList& map = subMap[sendProc]; + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + toNbr << subField; + } + } + } + field.transfer(newField); + } + else if (commsType == Pstream::nonBlocking) + { + label nOutstanding = Pstream::nRequests(); + + if (!contiguous<T>()) + { + PstreamBuffers pBufs(Pstream::nonBlocking, tag); + + // Stream data into buffer + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + // Put data into send buffer + UOPstream toDomain(domain, pBufs); + + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + toDomain << subField; + } + } + + // Start receiving. Do not block. + pBufs.finishedSends(false); + + { + // Set up 'send' to myself + const labelList& mySub = subMap[Pstream::myProcNo()]; + List<T> mySubField(mySub.size()); + forAll(mySub, i) + { + mySubField[i] = accessAndFlip + ( + field, + mySub[i], + subHasFlip, + negOp + ); + } + // Combine bits. 
Note that can reuse field storage + field.setSize(constructSize); + // Receive sub field from myself + { + const labelList& map = constructMap[Pstream::myProcNo()]; + + flipAndCombine + ( + map, + constructHasFlip, + mySubField, + eqOp<T>(), + negOp, + field + ); + } + } + + // Block ourselves, waiting only for the current comms + Pstream::waitRequests(nOutstanding); + + // Consume + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + UIPstream str(domain, pBufs); + List<T> recvField(str); + + checkReceivedSize(domain, map.size(), recvField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + recvField, + eqOp<T>(), + negOp, + field + ); + } + } + } + else + { + // Set up sends to neighbours + + List<List<T > > sendFields(Pstream::nProcs()); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + List<T>& subField = sendFields[domain]; + subField.setSize(map.size()); + forAll(map, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + + OPstream::write + ( + Pstream::nonBlocking, + domain, + reinterpret_cast<const char*>(subField.begin()), + subField.byteSize(), + tag + ); + } + } + + // Set up receives from neighbours + + List<List<T > > recvFields(Pstream::nProcs()); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + recvFields[domain].setSize(map.size()); + IPstream::read + ( + Pstream::nonBlocking, + domain, + reinterpret_cast<char*>(recvFields[domain].begin()), + recvFields[domain].byteSize(), + tag + ); + } + } + + + // Set up 'send' to myself + + { + const labelList& map = subMap[Pstream::myProcNo()]; + + List<T>& subField = sendFields[Pstream::myProcNo()]; + subField.setSize(map.size()); + forAll(map, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + } + + + // Combine bits. Note that can reuse field storage + + field.setSize(constructSize); + + + // Receive sub field from myself (sendFields[Pstream::myProcNo()]) + { + const labelList& map = constructMap[Pstream::myProcNo()]; + const List<T>& subField = sendFields[Pstream::myProcNo()]; + + flipAndCombine + ( + map, + constructHasFlip, + subField, + eqOp<T>(), + negOp, + field + ); + } + + + // Wait for all to finish + + Pstream::waitRequests(nOutstanding); + + + // Collect neighbour fields + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + const List<T>& subField = recvFields[domain]; + + checkReceivedSize(domain, map.size(), subField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + eqOp<T>(), + negOp, + field + ); + } + } + } + } + else + { + FatalErrorInFunction + << "Unknown communication schedule " << commsType + << abort(FatalError); + } +} + + +// Distribute list. 
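+// This variant combines multiple contributions to the same position with
+// cop; positions receiving no data keep nullValue.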
+template<class T, class CombineOp, class negateOp> +void Foam::mapDistributeBase::distribute +( + const Pstream::commsTypes commsType, + const List<labelPair>& schedule, + const label constructSize, + const labelListList& subMap, + const bool subHasFlip, + const labelListList& constructMap, + const bool constructHasFlip, + List<T>& field, + const CombineOp& cop, + const negateOp& negOp, + const T& nullValue, + const int tag +) +{ + if (!Pstream::parRun()) + { + // Do only me to me. + + const labelList& mySubMap = subMap[Pstream::myProcNo()]; + + List<T> subField(mySubMap.size()); + forAll(mySubMap, i) + { + subField[i] = accessAndFlip(field, mySubMap[i], subHasFlip, negOp); + } + + // Receive sub field from myself (subField) + const labelList& map = constructMap[Pstream::myProcNo()]; + + field.setSize(constructSize); + field = nullValue; + + flipAndCombine(map, constructHasFlip, subField, cop, negOp, field); + + return; + } + + if (commsType == Pstream::blocking) + { + // Since buffered sending can reuse the field to collect the + // received data. + + // Send sub field to neighbour + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + OPstream toNbr(Pstream::blocking, domain, 0, tag); + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + toNbr << subField; + } + } + + // Subset myself + const labelList& mySubMap = subMap[Pstream::myProcNo()]; + + List<T> subField(mySubMap.size()); + forAll(mySubMap, i) + { + subField[i] = accessAndFlip(field, mySubMap[i], subHasFlip, negOp); + } + + // Receive sub field from myself (subField) + const labelList& map = constructMap[Pstream::myProcNo()]; + + field.setSize(constructSize); + field = nullValue; + + flipAndCombine(map, constructHasFlip, subField, cop, negOp, field); + + // Receive sub field from neighbour + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + IPstream fromNbr(Pstream::blocking, domain, 0, tag); + List<T> subField(fromNbr); + + checkReceivedSize(domain, map.size(), subField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + cop, + negOp, + field + ); + } + } + } + else if (commsType == Pstream::scheduled) + { + // Need to make sure I don't overwrite field with received data + // since the data might need to be sent to another processor. So + // allocate a new field for the results. + List<T> newField(constructSize, nullValue); + + { + const labelList& mySubMap = subMap[Pstream::myProcNo()]; + + // Subset myself + List<T> subField(mySubMap.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + mySubMap[i], + subHasFlip, + negOp + ); + } + + // Receive sub field from myself (subField) + const labelList& map = constructMap[Pstream::myProcNo()]; + + flipAndCombine + ( + map, + constructHasFlip, + subField, + cop, + negOp, + newField + ); + } + + + // Schedule will already have pruned 0-sized comms + forAll(schedule, i) + { + const labelPair& twoProcs = schedule[i]; + // twoProcs is a swap pair of processors. The first one is the + // one that needs to send first and then receive. 
+ + label sendProc = twoProcs[0]; + label recvProc = twoProcs[1]; + + if (Pstream::myProcNo() == sendProc) + { + // I am send first, receive next + { + OPstream toNbr(Pstream::scheduled, recvProc, 0, tag); + + const labelList& map = subMap[recvProc]; + + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + toNbr << subField; + } + { + IPstream fromNbr(Pstream::scheduled, recvProc, 0, tag); + List<T> subField(fromNbr); + const labelList& map = constructMap[recvProc]; + + checkReceivedSize(recvProc, map.size(), subField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + cop, + negOp, + newField + ); + } + } + else + { + // I am receive first, send next + { + IPstream fromNbr(Pstream::scheduled, sendProc, 0, tag); + List<T> subField(fromNbr); + const labelList& map = constructMap[sendProc]; + + checkReceivedSize(sendProc, map.size(), subField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + cop, + negOp, + newField + ); + } + { + OPstream toNbr(Pstream::scheduled, sendProc, 0, tag); + + const labelList& map = subMap[sendProc]; + + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + toNbr << subField; + } + } + } + field.transfer(newField); + } + else if (commsType == Pstream::nonBlocking) + { + label nOutstanding = Pstream::nRequests(); + + if (!contiguous<T>()) + { + PstreamBuffers pBufs(Pstream::nonBlocking, tag); + + // Stream data into buffer + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + // Put data into send buffer + UOPstream toDomain(domain, pBufs); + + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + toDomain << subField; + } + } + + // Start receiving. Do not block. + pBufs.finishedSends(false); + + { + // Set up 'send' to myself + const labelList& myMap = subMap[Pstream::myProcNo()]; + + List<T> mySubField(myMap.size()); + forAll(myMap, i) + { + mySubField[i] = accessAndFlip + ( + field, + myMap[i], + subHasFlip, + negOp + ); + } + + // Combine bits. 
Note that can reuse field storage + field.setSize(constructSize); + field = nullValue; + // Receive sub field from myself + { + const labelList& map = constructMap[Pstream::myProcNo()]; + + flipAndCombine + ( + map, + constructHasFlip, + mySubField, + cop, + negOp, + field + ); + } + } + + // Block ourselves, waiting only for the current comms + Pstream::waitRequests(nOutstanding); + + // Consume + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + UIPstream str(domain, pBufs); + List<T> recvField(str); + + checkReceivedSize(domain, map.size(), recvField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + recvField, + cop, + negOp, + field + ); + } + } + } + else + { + // Set up sends to neighbours + + List<List<T > > sendFields(Pstream::nProcs()); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + List<T>& subField = sendFields[domain]; + subField.setSize(map.size()); + forAll(map, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + + OPstream::write + ( + Pstream::nonBlocking, + domain, + reinterpret_cast<const char*>(subField.begin()), + subField.size()*sizeof(T), + tag + ); + } + } + + // Set up receives from neighbours + + List<List<T > > recvFields(Pstream::nProcs()); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + recvFields[domain].setSize(map.size()); + UIPstream::read + ( + Pstream::nonBlocking, + domain, + reinterpret_cast<char*>(recvFields[domain].begin()), + recvFields[domain].size()*sizeof(T), + tag + ); + } + } + + // Set up 'send' to myself + + { + const labelList& map = subMap[Pstream::myProcNo()]; + + List<T>& subField = sendFields[Pstream::myProcNo()]; + subField.setSize(map.size()); + forAll(map, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip, + negOp + ); + } + } + + + // Combine bits. 
Note that can reuse field storage + + field.setSize(constructSize); + field = nullValue; + + // Receive sub field from myself (subField) + { + const labelList& map = constructMap[Pstream::myProcNo()]; + const List<T>& subField = sendFields[Pstream::myProcNo()]; + + flipAndCombine + ( + map, + constructHasFlip, + subField, + cop, + negOp, + field + ); + } + + + // Wait for all to finish + + Pstream::waitRequests(nOutstanding); + + + // Collect neighbour fields + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap[domain]; + + if (domain != Pstream::myProcNo() && map.size()) + { + const List<T>& subField = recvFields[domain]; + + checkReceivedSize(domain, map.size(), subField.size()); + + flipAndCombine + ( + map, + constructHasFlip, + subField, + cop, + negOp, + field + ); + } + } + } + } + else + { + FatalErrorInFunction + << "Unknown communication schedule " << commsType + << abort(FatalError); + } +} + + +template<class T> +void Foam::mapDistributeBase::send(PstreamBuffers& pBufs, const List<T>& field) +const +{ + // Stream data into buffer + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = subMap_[domain]; + + if (map.size()) + { + // Put data into send buffer + UOPstream toDomain(domain, pBufs); + + List<T> subField(map.size()); + forAll(subField, i) + { + subField[i] = accessAndFlip + ( + field, + map[i], + subHasFlip_, + flipOp() + ); + } + toDomain << subField; + } + } + + // Start sending and receiving but do not block. + pBufs.finishedSends(false); +} + + +template<class T> +void Foam::mapDistributeBase::receive(PstreamBuffers& pBufs, List<T>& field) +const +{ + // Consume + field.setSize(constructSize_); + + for (label domain = 0; domain < Pstream::nProcs(); domain++) + { + const labelList& map = constructMap_[domain]; + + if (map.size()) + { + UIPstream str(domain, pBufs); + List<T> recvField(str); + + if (recvField.size() != map.size()) + { + FatalErrorInFunction + << "Expected from processor " << domain + << " " << map.size() << " but received " + << recvField.size() << " elements." + << abort(FatalError); + } + + flipAndCombine + ( + map, + constructHasFlip_, + recvField, + eqOp<T>(), + flipOp(), + field + ); + } + } +} + + +//- Distribute data using default commsType. +template<class T, class negateOp> +void Foam::mapDistributeBase::distribute +( + List<T>& fld, + const negateOp& negOp, + const int tag +) const +{ + if (Pstream::defaultCommsType == Pstream::nonBlocking) + { + distribute + ( + Pstream::nonBlocking, + List<labelPair>(), + constructSize_, + subMap_, + subHasFlip_, + constructMap_, + constructHasFlip_, + fld, + negOp, + tag + ); + } + else if (Pstream::defaultCommsType == Pstream::scheduled) + { + distribute + ( + Pstream::scheduled, + schedule(), + constructSize_, + subMap_, + subHasFlip_, + constructMap_, + constructHasFlip_, + fld, + negOp, + tag + ); + } + else + { + distribute + ( + Pstream::blocking, + List<labelPair>(), + constructSize_, + subMap_, + subHasFlip_, + constructMap_, + constructHasFlip_, + fld, + negOp, + tag + ); + } +} + + +//- Distribute data using default commsType. +template<class T> +void Foam::mapDistributeBase::distribute +( + List<T>& fld, + const int tag +) const +{ + distribute(fld, flipOp(), tag); +} + + +//- Distribute data using default commsType. 
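+//  The DynamicList overload shrinks the list before distributing and resets
+//  the capacity afterwards. A minimal usage sketch (names hypothetical):
+//
+//      DynamicList<label> fld(myCompactData);
+//      map.distribute(fld);    // fld.size() becomes map.constructSize()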
+template<class T> +void Foam::mapDistributeBase::distribute +( + DynamicList<T>& fld, + const int tag +) const +{ + fld.shrink(); + + List<T>& fldList = static_cast<List<T>& >(fld); + + distribute(fldList, tag); + + fld.setCapacity(fldList.size()); +} + + +//- Reverse distribute data using default commsType. +template<class T> +void Foam::mapDistributeBase::reverseDistribute +( + const label constructSize, + List<T>& fld, + const int tag +) const +{ + if (Pstream::defaultCommsType == Pstream::nonBlocking) + { + distribute + ( + Pstream::nonBlocking, + List<labelPair>(), + constructSize, + constructMap_, + constructHasFlip_, + subMap_, + subHasFlip_, + fld, + flipOp(), + tag + ); + } + else if (Pstream::defaultCommsType == Pstream::scheduled) + { + distribute + ( + Pstream::scheduled, + schedule(), + constructSize, + constructMap_, + constructHasFlip_, + subMap_, + subHasFlip_, + fld, + flipOp(), + tag + ); + } + else + { + distribute + ( + Pstream::blocking, + List<labelPair>(), + constructSize, + constructMap_, + constructHasFlip_, + subMap_, + subHasFlip_, + fld, + flipOp(), + tag + ); + } +} + + +//- Reverse distribute data using default commsType. +// Since constructSize might be larger than supplied size supply +// a nullValue +template<class T> +void Foam::mapDistributeBase::reverseDistribute +( + const label constructSize, + const T& nullValue, + List<T>& fld, + const int tag +) const +{ + if (Pstream::defaultCommsType == Pstream::nonBlocking) + { + distribute + ( + Pstream::nonBlocking, + List<labelPair>(), + constructSize, + constructMap_, + constructHasFlip_, + subMap_, + subHasFlip_, + fld, + eqOp<T>(), + flipOp(), + nullValue, + tag + ); + } + else if (Pstream::defaultCommsType == Pstream::scheduled) + { + distribute + ( + Pstream::scheduled, + schedule(), + constructSize, + constructMap_, + constructHasFlip_, + subMap_, + subHasFlip_, + fld, + eqOp<T>(), + flipOp(), + nullValue, + tag + ); + } + else + { + distribute + ( + Pstream::blocking, + List<labelPair>(), + constructSize, + constructMap_, + constructHasFlip_, + subMap_, + subHasFlip_, + fld, + eqOp<T>(), + flipOp(), + nullValue, + tag + ); + } +} + + +// ************************************************************************* // diff --git a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributePolyMesh.C b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributePolyMesh.C index a21509fdea40ef41150df427a3e64974818b3dd2..044ebbd11b6bf4d85578de23df02f1be83ad72bd 100644 --- a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributePolyMesh.C +++ b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributePolyMesh.C @@ -33,29 +33,47 @@ void Foam::mapDistributePolyMesh::calcPatchSizes() { oldPatchSizes_.setSize(oldPatchStarts_.size()); - // Calculate old patch sizes - for (label patchi = 0; patchi < oldPatchStarts_.size() - 1; patchi++) + if (oldPatchStarts_.size()) { - oldPatchSizes_[patchi] = - oldPatchStarts_[patchi + 1] - oldPatchStarts_[patchi]; - } - - // Set the last one by hand - const label lastPatchID = oldPatchStarts_.size() - 1; - - oldPatchSizes_[lastPatchID] = nOldFaces_ - oldPatchStarts_[lastPatchID]; - - if (min(oldPatchSizes_) < 0) - { - FatalErrorInFunction - << "Calculated negative old patch size:" << oldPatchSizes_ << nl - << "Error in mapping data" << abort(FatalError); + // Calculate old patch sizes + for (label patchi = 0; patchi < oldPatchStarts_.size() - 1; patchi++) + { + oldPatchSizes_[patchi] = + oldPatchStarts_[patchi + 1] - 
oldPatchStarts_[patchi]; + } + + // Set the last one by hand + const label lastPatchID = oldPatchStarts_.size() - 1; + + oldPatchSizes_[lastPatchID] = nOldFaces_ - oldPatchStarts_[lastPatchID]; + + if (min(oldPatchSizes_) < 0) + { + FatalErrorInFunction + << "Calculated negative old patch size:" << oldPatchSizes_ << nl + << "Error in mapping data" << abort(FatalError); + } } } // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // +Foam::mapDistributePolyMesh::mapDistributePolyMesh() +: + nOldPoints_(0), + nOldFaces_(0), + nOldCells_(0), + oldPatchSizes_(0), + oldPatchStarts_(0), + oldPatchNMeshPoints_(0), + pointMap_(), + faceMap_(), + cellMap_(), + patchMap_() +{} + + Foam::mapDistributePolyMesh::mapDistributePolyMesh ( const polyMesh& mesh, @@ -77,10 +95,12 @@ Foam::mapDistributePolyMesh::mapDistributePolyMesh const Xfer<labelListList>& constructPointMap, const Xfer<labelListList>& constructFaceMap, const Xfer<labelListList>& constructCellMap, - const Xfer<labelListList>& constructPatchMap + const Xfer<labelListList>& constructPatchMap, + + const bool subFaceHasFlip, + const bool constructFaceHasFlip ) : - mesh_(mesh), nOldPoints_(nOldPoints), nOldFaces_(nOldFaces), nOldCells_(nOldCells), @@ -88,7 +108,14 @@ Foam::mapDistributePolyMesh::mapDistributePolyMesh oldPatchStarts_(oldPatchStarts), oldPatchNMeshPoints_(oldPatchNMeshPoints), pointMap_(mesh.nPoints(), subPointMap, constructPointMap), - faceMap_(mesh.nFaces(), subFaceMap, constructFaceMap), + faceMap_ + ( + mesh.nFaces(), + subFaceMap, + constructFaceMap, + subFaceHasFlip, + constructFaceHasFlip + ), cellMap_(mesh.nCells(), subCellMap, constructCellMap), patchMap_(mesh.boundaryMesh().size(), subPatchMap, constructPatchMap) { @@ -96,8 +123,84 @@ Foam::mapDistributePolyMesh::mapDistributePolyMesh } +Foam::mapDistributePolyMesh::mapDistributePolyMesh +( + // mesh before changes + const label nOldPoints, + const label nOldFaces, + const label nOldCells, + const Xfer<labelList>& oldPatchStarts, + const Xfer<labelList>& oldPatchNMeshPoints, + + // how to transfer pieces of mesh + const Xfer<mapDistribute>& pointMap, + const Xfer<mapDistribute>& faceMap, + const Xfer<mapDistribute>& cellMap, + const Xfer<mapDistribute>& patchMap +) +: + nOldPoints_(nOldPoints), + nOldFaces_(nOldFaces), + nOldCells_(nOldCells), + oldPatchSizes_(oldPatchStarts().size()), + oldPatchStarts_(oldPatchStarts), + oldPatchNMeshPoints_(oldPatchNMeshPoints), + pointMap_(pointMap), + faceMap_(faceMap), + cellMap_(cellMap), + patchMap_(patchMap) +{ + calcPatchSizes(); +} + + +Foam::mapDistributePolyMesh::mapDistributePolyMesh +( + const Xfer<mapDistributePolyMesh>& map +) +: + nOldPoints_(map().nOldPoints_), + nOldFaces_(map().nOldFaces_), + nOldCells_(map().nOldCells_), + oldPatchSizes_(map().oldPatchSizes_.xfer()), + oldPatchStarts_(map().oldPatchStarts_.xfer()), + oldPatchNMeshPoints_(map().oldPatchNMeshPoints_.xfer()), + pointMap_(map().pointMap_.xfer()), + faceMap_(map().faceMap_.xfer()), + cellMap_(map().cellMap_.xfer()), + patchMap_(map().patchMap_.xfer()) +{} + + +Foam::mapDistributePolyMesh::mapDistributePolyMesh(Istream& is) +{ + is >> *this; +} + + // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // +void Foam::mapDistributePolyMesh::transfer(mapDistributePolyMesh& rhs) +{ + nOldPoints_ = rhs.nOldPoints_; + nOldFaces_ = rhs.nOldFaces_; + nOldCells_ = rhs.nOldCells_; + oldPatchSizes_.transfer(rhs.oldPatchSizes_); + oldPatchStarts_.transfer(rhs.oldPatchStarts_); + 
oldPatchNMeshPoints_.transfer(rhs.oldPatchNMeshPoints_); + pointMap_.transfer(rhs.pointMap_); + faceMap_.transfer(rhs.faceMap_); + cellMap_.transfer(rhs.cellMap_); + patchMap_.transfer(rhs.patchMap_); +} + + +Foam::Xfer<Foam::mapDistributePolyMesh> Foam::mapDistributePolyMesh::xfer() +{ + return xferMove(*this); +} + + void Foam::mapDistributePolyMesh::distributePointIndices(labelList& lst) const { // Construct boolList from selected elements @@ -186,10 +289,61 @@ void Foam::mapDistributePolyMesh::distributePatchIndices(labelList& lst) const } -// * * * * * * * * * * * * * * * Friend Functions * * * * * * * * * * * * * // +// * * * * * * * * * * * * * * * Member Operators * * * * * * * * * * * * * // + +void Foam::mapDistributePolyMesh::operator=(const mapDistributePolyMesh& rhs) +{ + nOldPoints_ = rhs.nOldPoints_; + nOldFaces_ = rhs.nOldFaces_; + nOldCells_ = rhs.nOldCells_; + oldPatchSizes_ = rhs.oldPatchSizes_; + oldPatchStarts_ = rhs.oldPatchStarts_; + oldPatchNMeshPoints_ = rhs.oldPatchNMeshPoints_; + pointMap_ = rhs.pointMap_; + faceMap_ = rhs.faceMap_; + cellMap_ = rhs.cellMap_; + patchMap_ = rhs.patchMap_; +} + + +// * * * * * * * * * * * * * * Istream Operator * * * * * * * * * * * * * * // + +Foam::Istream& Foam::operator>>(Istream& is, mapDistributePolyMesh& map) +{ + is.fatalCheck("operator>>(Istream&, mapDistributePolyMesh&)"); + + is >> map.nOldPoints_ + >> map.nOldFaces_ + >> map.nOldCells_ + >> map.oldPatchSizes_ + >> map.oldPatchStarts_ + >> map.oldPatchNMeshPoints_ + >> map.pointMap_ + >> map.faceMap_ + >> map.cellMap_ + >> map.patchMap_; + + return is; +} -// * * * * * * * * * * * * * * * Friend Operators * * * * * * * * * * * * * // +// * * * * * * * * * * * * * * Ostream Operator * * * * * * * * * * * * * * // + +Foam::Ostream& Foam::operator<<(Ostream& os, const mapDistributePolyMesh& map) +{ + os << map.nOldPoints_ + << token::SPACE << map.nOldFaces_ + << token::SPACE << map.nOldCells_ << token::NL + << map.oldPatchSizes_ << token::NL + << map.oldPatchStarts_ << token::NL + << map.oldPatchNMeshPoints_ << token::NL + << map.pointMap_ << token::NL + << map.faceMap_ << token::NL + << map.cellMap_ << token::NL + << map.patchMap_; + + return os; +} // ************************************************************************* // diff --git a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributePolyMesh.H b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributePolyMesh.H index d4ad4c2fa677d275f343d30f17887417bc882b5f..ed827fe6c38d92b28db057e15eb92d8a3cf6987c 100644 --- a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributePolyMesh.H +++ b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributePolyMesh.H @@ -2,7 +2,7 @@ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | - \\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation + \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License @@ -52,46 +52,43 @@ class mapPolyMesh; class polyMesh; /*---------------------------------------------------------------------------*\ - Class mapDistributePolyMesh Declaration + Class mapDistributePolyMesh Declaration \*---------------------------------------------------------------------------*/ class mapDistributePolyMesh { // Private data - const polyMesh& mesh_; - //- Number of old live points - const label nOldPoints_; + label nOldPoints_; //- Number of old live faces - const label nOldFaces_; + 
label nOldFaces_; //- Number of old live cells - const label nOldCells_; + label nOldCells_; //- List of the old patch sizes labelList oldPatchSizes_; //- List of the old patch start labels - const labelList oldPatchStarts_; + labelList oldPatchStarts_; //- List of numbers of mesh points per old patch - const labelList oldPatchNMeshPoints_; + labelList oldPatchNMeshPoints_; //- Point distribute map - const mapDistribute pointMap_; + mapDistribute pointMap_; //- Face distribute map - const mapDistribute faceMap_; + mapDistribute faceMap_; //- Cell distribute map - const mapDistribute cellMap_; + mapDistribute cellMap_; //- Patch distribute map - const mapDistribute patchMap_; - + mapDistribute patchMap_; // Private Member Functions @@ -101,14 +98,14 @@ class mapDistributePolyMesh //- Disallow default bitwise copy construct mapDistributePolyMesh(const mapDistributePolyMesh&); - //- Disallow default bitwise assignment - void operator=(const mapDistributePolyMesh&); - public: // Constructors + //- Construct null + mapDistributePolyMesh(); + //- Construct from components. Note that mesh has to be changed already // since uses mesh.nPoints etc as the new size. mapDistributePolyMesh @@ -132,19 +129,40 @@ public: const Xfer<labelListList>& constructPointMap, const Xfer<labelListList>& constructFaceMap, const Xfer<labelListList>& constructCellMap, - const Xfer<labelListList>& constructPatchMap + const Xfer<labelListList>& constructPatchMap, + + const bool subFaceHasFlip = false, + const bool constructFaceHasFlip = false + ); + + //- Construct from components + mapDistributePolyMesh + ( + // mesh before changes + const label nOldPoints, + const label nOldFaces, + const label nOldCells, + const Xfer<labelList>& oldPatchStarts, + const Xfer<labelList>& oldPatchNMeshPoints, + + // how to subset pieces of mesh to send across + const Xfer<mapDistribute>& pointMap, + const Xfer<mapDistribute>& faceMap, + const Xfer<mapDistribute>& cellMap, + const Xfer<mapDistribute>& patchMap ); + //- Construct by transferring parameter content + mapDistributePolyMesh(const Xfer<mapDistributePolyMesh>&); + + //- Construct from Istream + mapDistributePolyMesh(Istream&); + // Member Functions // Access - const polyMesh& mesh() const - { - return mesh_; - } - //- Number of points in mesh before distribution label nOldPoints() const { @@ -206,7 +224,13 @@ public: } - // Edit + // Other + + //- Transfer the contents of the argument and annul the argument. 
+ void transfer(mapDistributePolyMesh&); + + //- Transfer contents to the Xfer container + Xfer<mapDistributePolyMesh> xfer(); //- Distribute list of point data template<class T> @@ -251,6 +275,19 @@ public: { NotImplemented; } + + // Member operators + + void operator=(const mapDistributePolyMesh&); + + + // IOstream operators + + //- Read dictionary from Istream + friend Istream& operator>>(Istream&, mapDistributePolyMesh&); + + //- Write dictionary to Ostream + friend Ostream& operator<<(Ostream&, const mapDistributePolyMesh&); }; diff --git a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeTemplates.C b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeTemplates.C index 35b13d6cda90e7229d08e203c364447667c0f081..4b261a67e5becbf260265f6c220340dffc4ee020 100644 --- a/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeTemplates.C +++ b/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistributeTemplates.C @@ -28,759 +28,10 @@ License #include "PstreamCombineReduceOps.H" #include "globalIndexAndTransform.H" #include "transformField.H" +#include "flipOp.H" // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // -template<class T> -void Foam::mapDistribute::distribute -( - const Pstream::commsTypes commsType, - const List<labelPair>& schedule, - const label constructSize, - const labelListList& subMap, - const labelListList& constructMap, - List<T>& field, - const int tag -) -{ - if (!Pstream::parRun()) - { - // Do only me to me. - - const labelList& mySubMap = subMap[Pstream::myProcNo()]; - - List<T> subField(mySubMap.size()); - forAll(mySubMap, i) - { - subField[i] = field[mySubMap[i]]; - } - - // Receive sub field from myself (subField) - const labelList& map = constructMap[Pstream::myProcNo()]; - - field.setSize(constructSize); - - forAll(map, i) - { - field[map[i]] = subField[i]; - } - return; - } - - if (commsType == Pstream::blocking) - { - // Since buffered sending can reuse the field to collect the - // received data. - - // Send sub field to neighbour - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - OPstream toNbr(Pstream::blocking, domain, 0, tag); - toNbr << UIndirectList<T>(field, map); - } - } - - // Subset myself - const labelList& mySubMap = subMap[Pstream::myProcNo()]; - - List<T> subField(mySubMap.size()); - forAll(mySubMap, i) - { - subField[i] = field[mySubMap[i]]; - } - - // Receive sub field from myself (subField) - const labelList& map = constructMap[Pstream::myProcNo()]; - - field.setSize(constructSize); - - forAll(map, i) - { - field[map[i]] = subField[i]; - } - - // Receive sub field from neighbour - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - IPstream fromNbr(Pstream::blocking, domain, 0, tag); - List<T> subField(fromNbr); - - checkReceivedSize(domain, map.size(), subField.size()); - - forAll(map, i) - { - field[map[i]] = subField[i]; - } - } - } - } - else if (commsType == Pstream::scheduled) - { - // Need to make sure I don't overwrite field with received data - // since the data might need to be sent to another processor. So - // allocate a new field for the results. 
- List<T> newField(constructSize); - - // Subset myself - UIndirectList<T> subField(field, subMap[Pstream::myProcNo()]); - - // Receive sub field from myself (subField) - const labelList& map = constructMap[Pstream::myProcNo()]; - - forAll(map, i) - { - newField[map[i]] = subField[i]; - } - - // Schedule will already have pruned 0-sized comms - forAll(schedule, i) - { - const labelPair& twoProcs = schedule[i]; - // twoProcs is a swap pair of processors. The first one is the - // one that needs to send first and then receive. - - label sendProc = twoProcs[0]; - label recvProc = twoProcs[1]; - - if (Pstream::myProcNo() == sendProc) - { - // I am send first, receive next - { - OPstream toNbr(Pstream::scheduled, recvProc, 0, tag); - toNbr << UIndirectList<T>(field, subMap[recvProc]); - } - { - IPstream fromNbr(Pstream::scheduled, recvProc, 0, tag); - List<T> subField(fromNbr); - - const labelList& map = constructMap[recvProc]; - - checkReceivedSize(recvProc, map.size(), subField.size()); - - forAll(map, i) - { - newField[map[i]] = subField[i]; - } - } - } - else - { - // I am receive first, send next - { - IPstream fromNbr(Pstream::scheduled, sendProc, 0, tag); - List<T> subField(fromNbr); - - const labelList& map = constructMap[sendProc]; - - checkReceivedSize(sendProc, map.size(), subField.size()); - - forAll(map, i) - { - newField[map[i]] = subField[i]; - } - } - { - OPstream toNbr(Pstream::scheduled, sendProc, 0, tag); - toNbr << UIndirectList<T>(field, subMap[sendProc]); - } - } - } - field.transfer(newField); - } - else if (commsType == Pstream::nonBlocking) - { - label nOutstanding = Pstream::nRequests(); - - if (!contiguous<T>()) - { - PstreamBuffers pBufs(Pstream::nonBlocking, tag); - - // Stream data into buffer - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - // Put data into send buffer - UOPstream toDomain(domain, pBufs); - toDomain << UIndirectList<T>(field, map); - } - } - - // Start receiving. Do not block. - pBufs.finishedSends(false); - - { - // Set up 'send' to myself - const labelList& mySubMap = subMap[Pstream::myProcNo()]; - List<T> mySubField(mySubMap.size()); - forAll(mySubMap, i) - { - mySubField[i] = field[mySubMap[i]]; - } - // Combine bits. 
Note that can reuse field storage - field.setSize(constructSize); - // Receive sub field from myself - { - const labelList& map = constructMap[Pstream::myProcNo()]; - - forAll(map, i) - { - field[map[i]] = mySubField[i]; - } - } - } - - // Block ourselves, waiting only for the current comms - Pstream::waitRequests(nOutstanding); - - // Consume - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - UIPstream str(domain, pBufs); - List<T> recvField(str); - - checkReceivedSize(domain, map.size(), recvField.size()); - - forAll(map, i) - { - field[map[i]] = recvField[i]; - } - } - } - } - else - { - // Set up sends to neighbours - - List<List<T >> sendFields(Pstream::nProcs()); - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - List<T>& subField = sendFields[domain]; - subField.setSize(map.size()); - forAll(map, i) - { - subField[i] = field[map[i]]; - } - - OPstream::write - ( - Pstream::nonBlocking, - domain, - reinterpret_cast<const char*>(subField.begin()), - subField.byteSize(), - tag - ); - } - } - - // Set up receives from neighbours - - List<List<T >> recvFields(Pstream::nProcs()); - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - recvFields[domain].setSize(map.size()); - IPstream::read - ( - Pstream::nonBlocking, - domain, - reinterpret_cast<char*>(recvFields[domain].begin()), - recvFields[domain].byteSize(), - tag - ); - } - } - - - // Set up 'send' to myself - - { - const labelList& map = subMap[Pstream::myProcNo()]; - - List<T>& subField = sendFields[Pstream::myProcNo()]; - subField.setSize(map.size()); - forAll(map, i) - { - subField[i] = field[map[i]]; - } - } - - - // Combine bits. Note that can reuse field storage - - field.setSize(constructSize); - - - // Receive sub field from myself (sendFields[Pstream::myProcNo()]) - { - const labelList& map = constructMap[Pstream::myProcNo()]; - const List<T>& subField = sendFields[Pstream::myProcNo()]; - - forAll(map, i) - { - field[map[i]] = subField[i]; - } - } - - - // Wait for all to finish - - Pstream::waitRequests(nOutstanding); - - - // Collect neighbour fields - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - const List<T>& subField = recvFields[domain]; - - checkReceivedSize(domain, map.size(), subField.size()); - - forAll(map, i) - { - field[map[i]] = subField[i]; - } - } - } - } - } - else - { - FatalErrorInFunction - << "Unknown communication schedule " << commsType - << abort(FatalError); - } -} - - -template<class T, class CombineOp> -void Foam::mapDistribute::distribute -( - const Pstream::commsTypes commsType, - const List<labelPair>& schedule, - const label constructSize, - const labelListList& subMap, - const labelListList& constructMap, - List<T>& field, - const CombineOp& cop, - const T& nullValue, - const int tag -) -{ - if (!Pstream::parRun()) - { - // Do only me to me. 
- - const labelList& mySubMap = subMap[Pstream::myProcNo()]; - - List<T> subField(mySubMap.size()); - forAll(mySubMap, i) - { - subField[i] = field[mySubMap[i]]; - } - - // Receive sub field from myself (subField) - const labelList& map = constructMap[Pstream::myProcNo()]; - - field.setSize(constructSize); - field = nullValue; - - forAll(map, i) - { - cop(field[map[i]], subField[i]); - } - return; - } - - if (commsType == Pstream::blocking) - { - // Since buffered sending can reuse the field to collect the - // received data. - - // Send sub field to neighbour - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - OPstream toNbr(Pstream::blocking, domain, 0, tag); - toNbr << UIndirectList<T>(field, map); - } - } - - // Subset myself - const labelList& mySubMap = subMap[Pstream::myProcNo()]; - - List<T> subField(mySubMap.size()); - forAll(mySubMap, i) - { - subField[i] = field[mySubMap[i]]; - } - - // Receive sub field from myself (subField) - const labelList& map = constructMap[Pstream::myProcNo()]; - - field.setSize(constructSize); - field = nullValue; - - forAll(map, i) - { - cop(field[map[i]], subField[i]); - } - - // Receive sub field from neighbour - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - IPstream fromNbr(Pstream::blocking, domain, 0, tag); - List<T> subField(fromNbr); - - checkReceivedSize(domain, map.size(), subField.size()); - - forAll(map, i) - { - cop(field[map[i]], subField[i]); - } - } - } - } - else if (commsType == Pstream::scheduled) - { - // Need to make sure I don't overwrite field with received data - // since the data might need to be sent to another processor. So - // allocate a new field for the results. - List<T> newField(constructSize, nullValue); - - // Subset myself - UIndirectList<T> subField(field, subMap[Pstream::myProcNo()]); - - // Receive sub field from myself (subField) - const labelList& map = constructMap[Pstream::myProcNo()]; - - forAll(map, i) - { - cop(newField[map[i]], subField[i]); - } - - // Schedule will already have pruned 0-sized comms - forAll(schedule, i) - { - const labelPair& twoProcs = schedule[i]; - // twoProcs is a swap pair of processors. The first one is the - // one that needs to send first and then receive. 
- - label sendProc = twoProcs[0]; - label recvProc = twoProcs[1]; - - if (Pstream::myProcNo() == sendProc) - { - // I am send first, receive next - { - OPstream toNbr(Pstream::scheduled, recvProc, 0, tag); - toNbr << UIndirectList<T>(field, subMap[recvProc]); - } - { - IPstream fromNbr(Pstream::scheduled, recvProc, 0, tag); - List<T> subField(fromNbr); - const labelList& map = constructMap[recvProc]; - - checkReceivedSize(recvProc, map.size(), subField.size()); - - forAll(map, i) - { - cop(newField[map[i]], subField[i]); - } - } - } - else - { - // I am receive first, send next - { - IPstream fromNbr(Pstream::scheduled, sendProc, 0, tag); - List<T> subField(fromNbr); - const labelList& map = constructMap[sendProc]; - - checkReceivedSize(sendProc, map.size(), subField.size()); - - forAll(map, i) - { - cop(newField[map[i]], subField[i]); - } - } - { - OPstream toNbr(Pstream::scheduled, sendProc, 0, tag); - toNbr << UIndirectList<T>(field, subMap[sendProc]); - } - } - } - field.transfer(newField); - } - else if (commsType == Pstream::nonBlocking) - { - label nOutstanding = Pstream::nRequests(); - - if (!contiguous<T>()) - { - PstreamBuffers pBufs(Pstream::nonBlocking, tag); - - // Stream data into buffer - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - // Put data into send buffer - UOPstream toDomain(domain, pBufs); - toDomain << UIndirectList<T>(field, map); - } - } - - // Start receiving. Do not block. - pBufs.finishedSends(false); - - { - // Set up 'send' to myself - List<T> mySubField(field, subMap[Pstream::myProcNo()]); - // Combine bits. Note that can reuse field storage - field.setSize(constructSize); - field = nullValue; - // Receive sub field from myself - { - const labelList& map = constructMap[Pstream::myProcNo()]; - - forAll(map, i) - { - cop(field[map[i]], mySubField[i]); - } - } - } - - // Block ourselves, waiting only for the current comms - Pstream::waitRequests(nOutstanding); - - // Consume - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - UIPstream str(domain, pBufs); - List<T> recvField(str); - - checkReceivedSize(domain, map.size(), recvField.size()); - - forAll(map, i) - { - cop(field[map[i]], recvField[i]); - } - } - } - } - else - { - // Set up sends to neighbours - - List<List<T >> sendFields(Pstream::nProcs()); - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - List<T>& subField = sendFields[domain]; - subField.setSize(map.size()); - forAll(map, i) - { - subField[i] = field[map[i]]; - } - - OPstream::write - ( - Pstream::nonBlocking, - domain, - reinterpret_cast<const char*>(subField.begin()), - subField.size()*sizeof(T), - tag - ); - } - } - - // Set up receives from neighbours - - List<List<T >> recvFields(Pstream::nProcs()); - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - recvFields[domain].setSize(map.size()); - UIPstream::read - ( - Pstream::nonBlocking, - domain, - reinterpret_cast<char*>(recvFields[domain].begin()), - recvFields[domain].size()*sizeof(T), - tag - ); - } - } - - // Set up 'send' to myself - - { - const labelList& map = subMap[Pstream::myProcNo()]; - - List<T>& subField 
= sendFields[Pstream::myProcNo()]; - subField.setSize(map.size()); - forAll(map, i) - { - subField[i] = field[map[i]]; - } - } - - - // Combine bits. Note that can reuse field storage - - field.setSize(constructSize); - field = nullValue; - - // Receive sub field from myself (subField) - { - const labelList& map = constructMap[Pstream::myProcNo()]; - const List<T>& subField = sendFields[Pstream::myProcNo()]; - - forAll(map, i) - { - cop(field[map[i]], subField[i]); - } - } - - - // Wait for all to finish - - Pstream::waitRequests(nOutstanding); - - - // Collect neighbour fields - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap[domain]; - - if (domain != Pstream::myProcNo() && map.size()) - { - const List<T>& subField = recvFields[domain]; - - checkReceivedSize(domain, map.size(), subField.size()); - - forAll(map, i) - { - cop(field[map[i]], subField[i]); - } - } - } - } - } - else - { - FatalErrorInFunction - << "Unknown communication schedule " << commsType - << abort(FatalError); - } -} - - -template<class T> -void Foam::mapDistribute::send(PstreamBuffers& pBufs, const List<T>& field) -const -{ - // Stream data into buffer - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = subMap_[domain]; - - if (map.size()) - { - // Put data into send buffer - UOPstream toDomain(domain, pBufs); - toDomain << UIndirectList<T>(field, map); - } - } - - // Start sending and receiving but do not block. - pBufs.finishedSends(false); -} - - -template<class T> -void Foam::mapDistribute::receive(PstreamBuffers& pBufs, List<T>& field) const -{ - // Consume - field.setSize(constructSize_); - - for (label domain = 0; domain < Pstream::nProcs(); domain++) - { - const labelList& map = constructMap_[domain]; - - if (map.size()) - { - UIPstream str(domain, pBufs); - List<T> recvField(str); - - if (recvField.size() != map.size()) - { - FatalErrorInFunction - << "Expected from processor " << domain - << " " << map.size() << " but received " - << recvField.size() << " elements." 
- << abort(FatalError); - } - - forAll(map, i) - { - field[map[i]] = recvField[i]; - } - } - } -} - - template<class T> void Foam::mapDistribute::applyDummyTransforms(List<T>& field) const { @@ -874,77 +125,52 @@ void Foam::mapDistribute::applyInverseTransforms } -template<class T> +template<class T, class negateOp> void Foam::mapDistribute::distribute ( - DynamicList<T>& fld, + List<T>& fld, + const negateOp& negOp, const bool dummyTransform, const int tag ) const { - fld.shrink(); + mapDistributeBase::distribute(fld, negOp, tag); - List<T>& fldList = static_cast<List<T>& >(fld); + //- Fill in transformed slots with copies + if (dummyTransform) + { + applyDummyTransforms(fld); + } +} - distribute(fldList, dummyTransform, tag); - fld.setCapacity(fldList.size()); +template<class T> +void Foam::mapDistribute::distribute +( + List<T>& fld, + const bool dummyTransform, + const int tag +) const +{ + distribute(fld, flipOp(), dummyTransform, tag); } template<class T> void Foam::mapDistribute::distribute ( - List<T>& fld, + DynamicList<T>& fld, const bool dummyTransform, const int tag ) const { - if (Pstream::defaultCommsType == Pstream::nonBlocking) - { - distribute - ( - Pstream::nonBlocking, - List<labelPair>(), - constructSize_, - subMap_, - constructMap_, - fld, - tag - ); - } - else if (Pstream::defaultCommsType == Pstream::scheduled) - { - distribute - ( - Pstream::scheduled, - schedule(), - constructSize_, - subMap_, - constructMap_, - fld, - tag - ); - } - else - { - distribute - ( - Pstream::blocking, - List<labelPair>(), - constructSize_, - subMap_, - constructMap_, - fld, - tag - ); - } + fld.shrink(); - // Fill in transformed slots with copies - if (dummyTransform) - { - applyDummyTransforms(fld); - } + List<T>& fldList = static_cast<List<T>& >(fld); + + distribute(fldList, dummyTransform, tag); + + fld.setCapacity(fldList.size()); } @@ -962,45 +188,7 @@ void Foam::mapDistribute::reverseDistribute applyDummyInverseTransforms(fld); } - if (Pstream::defaultCommsType == Pstream::nonBlocking) - { - distribute - ( - Pstream::nonBlocking, - List<labelPair>(), - constructSize, - constructMap_, - subMap_, - fld, - tag - ); - } - else if (Pstream::defaultCommsType == Pstream::scheduled) - { - distribute - ( - Pstream::scheduled, - schedule(), - constructSize, - constructMap_, - subMap_, - fld, - tag - ); - } - else - { - distribute - ( - Pstream::blocking, - List<labelPair>(), - constructSize, - constructMap_, - subMap_, - fld, - tag - ); - } + mapDistributeBase::reverseDistribute(constructSize, fld, tag); } @@ -1019,51 +207,7 @@ void Foam::mapDistribute::reverseDistribute applyDummyInverseTransforms(fld); } - if (Pstream::defaultCommsType == Pstream::nonBlocking) - { - distribute - ( - Pstream::nonBlocking, - List<labelPair>(), - constructSize, - constructMap_, - subMap_, - fld, - eqOp<T>(), - nullValue, - tag - ); - } - else if (Pstream::defaultCommsType == Pstream::scheduled) - { - distribute - ( - Pstream::scheduled, - schedule(), - constructSize, - constructMap_, - subMap_, - fld, - eqOp<T>(), - nullValue, - tag - ); - } - else - { - distribute - ( - Pstream::blocking, - List<labelPair>(), - constructSize, - constructMap_, - subMap_, - fld, - eqOp<T>(), - nullValue, - tag - ); - } + mapDistributeBase::reverseDistribute(constructSize, nullValue, fld, tag); } diff --git a/src/OpenFOAM/meshes/polyMesh/polyPatches/constraint/cyclic/cyclicPolyPatch.C b/src/OpenFOAM/meshes/polyMesh/polyPatches/constraint/cyclic/cyclicPolyPatch.C index 
ad06adccb304f61dd966077adf7e155cf60408a7..0c9593ca07f19c7ede42a66ec2248d05f0b93d3f 100644 --- a/src/OpenFOAM/meshes/polyMesh/polyPatches/constraint/cyclic/cyclicPolyPatch.C +++ b/src/OpenFOAM/meshes/polyMesh/polyPatches/constraint/cyclic/cyclicPolyPatch.C @@ -29,11 +29,9 @@ License #include "polyMesh.H" #include "demandDrivenData.H" #include "OFstream.H" -#include "patchZones.H" #include "matchPoints.H" #include "EdgeMap.H" #include "Time.H" -#include "diagTensor.H" #include "transformField.H" #include "SubField.H" #include "unitConversion.H" diff --git a/src/OpenFOAM/meshes/polyMesh/polyPatches/constraint/nonuniformTransformCyclic/nonuniformTransformCyclicPolyPatch.H b/src/OpenFOAM/meshes/polyMesh/polyPatches/constraint/nonuniformTransformCyclic/nonuniformTransformCyclicPolyPatch.H index 75431fdaa8250d9dc83ef6aace600e2c346161e9..744031a60def31905058392e102800e6bc1079c2 100644 --- a/src/OpenFOAM/meshes/polyMesh/polyPatches/constraint/nonuniformTransformCyclic/nonuniformTransformCyclicPolyPatch.H +++ b/src/OpenFOAM/meshes/polyMesh/polyPatches/constraint/nonuniformTransformCyclic/nonuniformTransformCyclicPolyPatch.H @@ -2,7 +2,7 @@ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | - \\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation + \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License @@ -44,7 +44,7 @@ namespace Foam { /*---------------------------------------------------------------------------*\ - Class nonuniformTransformCyclicPolyPatch Declaration + Class nonuniformTransformCyclicPolyPatch Declaration \*---------------------------------------------------------------------------*/ class nonuniformTransformCyclicPolyPatch diff --git a/src/OpenFOAM/meshes/polyMesh/syncTools/syncTools.H b/src/OpenFOAM/meshes/polyMesh/syncTools/syncTools.H index 281b891b6c6c16aa1136833663e68cd6bce907a5..55dd19b72c104bf8937f207d799903f9e90a62d5 100644 --- a/src/OpenFOAM/meshes/polyMesh/syncTools/syncTools.H +++ b/src/OpenFOAM/meshes/polyMesh/syncTools/syncTools.H @@ -164,7 +164,8 @@ public: const polyMesh&, UList<T>&, const CombineOp& cop, - const TransformOp& top + const TransformOp& top, + const bool parRun = Pstream::parRun() ); @@ -556,7 +557,8 @@ public: ( const polyMesh& mesh, PackedList<nBits>& faceValues, - const CombineOp& cop + const CombineOp& cop, + const bool parRun = Pstream::parRun() ); template<unsigned nBits> diff --git a/src/OpenFOAM/meshes/polyMesh/syncTools/syncToolsTemplates.C b/src/OpenFOAM/meshes/polyMesh/syncTools/syncToolsTemplates.C index d8d74926554f8270e5c6200eaeb93482d20ba4a8..8646a5e939ffabc6f2656c9d0717afbf41441f95 100644 --- a/src/OpenFOAM/meshes/polyMesh/syncTools/syncToolsTemplates.C +++ b/src/OpenFOAM/meshes/polyMesh/syncTools/syncToolsTemplates.C @@ -1284,7 +1284,8 @@ void Foam::syncTools::syncBoundaryFaceList const polyMesh& mesh, UList<T>& faceValues, const CombineOp& cop, - const TransformOp& top + const TransformOp& top, + const bool parRun ) { const label nBFaces = mesh.nFaces() - mesh.nInternalFaces(); @@ -1299,7 +1300,7 @@ void Foam::syncTools::syncBoundaryFaceList const polyBoundaryMesh& patches = mesh.boundaryMesh(); - if (Pstream::parRun()) + if (parRun) { PstreamBuffers pBufs(Pstream::nonBlocking); @@ -1405,7 +1406,8 @@ void Foam::syncTools::syncFaceList ( const polyMesh& mesh, PackedList<nBits>& faceValues, - const CombineOp& cop + const CombineOp& cop, + const bool parRun ) { if (faceValues.size() != mesh.nFaces()) 
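[Editorial note, not part of the patch] The syncTools hunks above add an optional parRun argument (defaulting to Pstream::parRun()) to syncBoundaryFaceList and syncFaceList, so a caller can force the purely local code path. A minimal usage sketch under that assumption; the function and variable names below are illustrative only:

    #include "syncTools.H"
    #include "PackedBoolList.H"

    void markMasterFaces(const polyMesh& mesh)
    {
        // One flag per mesh face, initially 'true'
        PackedBoolList isMasterFace(mesh.nFaces(), 1u);

        // Default behaviour: exchange across processor patches when
        // running in parallel
        syncTools::syncFaceList(mesh, isMasterFace, andEqOp<unsigned int>());

        // New optional argument: suppress the parallel exchange, e.g.
        // while the mesh is in an intermediate state where processor
        // communication is not allowed
        syncTools::syncFaceList
        (
            mesh,
            isMasterFace,
            andEqOp<unsigned int>(),
            false               // parRun
        );
    }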
@@ -1418,7 +1420,7 @@ void Foam::syncTools::syncFaceList const polyBoundaryMesh& patches = mesh.boundaryMesh(); - if (Pstream::parRun()) + if (parRun) { PstreamBuffers pBufs(Pstream::nonBlocking); diff --git a/src/parallel/decompose/ptscotchDecomp/ptscotchDecompTemplates.C b/src/OpenFOAM/primitives/ops/flipOp.C similarity index 59% rename from src/parallel/decompose/ptscotchDecomp/ptscotchDecompTemplates.C rename to src/OpenFOAM/primitives/ops/flipOp.C index 5cfa0158391dccd861d698344df584b2454e1565..f3681f2c45e578cebdbe1494fdbd5f53858a051f 100644 --- a/src/parallel/decompose/ptscotchDecomp/ptscotchDecompTemplates.C +++ b/src/OpenFOAM/primitives/ops/flipOp.C @@ -2,7 +2,7 @@ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | - \\ / A nd | Copyright (C) 2011 OpenFOAM Foundation + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License @@ -23,53 +23,53 @@ License \*---------------------------------------------------------------------------*/ -#include "ptscotchDecomp.H" +#include "flipOp.H" // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // -// Insert at front of list -template<class Type> -void Foam::ptscotchDecomp::prepend +template<> +Foam::scalar Foam::flipOp::operator()(const scalar& v) const +{ + return -v; +} + + +template<> Foam::vector Foam::flipOp::operator()(const vector& v) const +{ + return -v; +} + + +template<>Foam::sphericalTensor Foam::flipOp::operator() ( - const UList<Type>& extraLst, - List<Type>& lst -) + const sphericalTensor& v +) const { - label nExtra = extraLst.size(); - - // Make space for initial elements - lst.setSize(lst.size() + nExtra); - for (label i = lst.size()-1; i >= nExtra; i--) - { - lst[i] = lst[i-nExtra]; - } - - // Insert at front - forAll(extraLst, i) - { - lst[i] = extraLst[i]; - } + return -v; } -// Insert at back of list -template<class Type> -void Foam::ptscotchDecomp::append +template<> Foam::symmTensor Foam::flipOp::operator() ( - const UList<Type>& extraLst, - List<Type>& lst -) + const symmTensor& v +) const { - label sz = lst.size(); + return -v; +} + - // Make space for initial elements - lst.setSize(sz + extraLst.size()); +template<> Foam::tensor Foam::flipOp::operator()(const tensor& v) const +{ + return -v; +} - // Insert at back - forAll(extraLst, i) - { - lst[sz++] = extraLst[i]; - } + +template<> Foam::triad Foam::flipOp::operator() +( + const triad& v +) const +{ + return -v; } diff --git a/src/OpenFOAM/primitives/ops/flipOp.H b/src/OpenFOAM/primitives/ops/flipOp.H new file mode 100644 index 0000000000000000000000000000000000000000..8aba8a0c766c7e50912f685abe4632d14631ea78 --- /dev/null +++ b/src/OpenFOAM/primitives/ops/flipOp.H @@ -0,0 +1,103 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +Class + Foam::flipOp + +Description + Class containing functor to negate primitives. Dummy for all other types. + + Used in mesh transformations where face can flip. + +SourceFiles + flipOp.C + +\*---------------------------------------------------------------------------*/ + +#ifndef flipOp_H +#define flipOp_H + +#include "fieldTypes.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +/*---------------------------------------------------------------------------*\ + Class flipOp Declaration +\*---------------------------------------------------------------------------*/ + +class flipOp +{ +public: + + template<class Type> + Type operator()(const Type& val) const + { + return val; + } +}; + + +class noOp +{ +public: + + template<class Type> + Type operator()(const Type& val) const + { + return val; + } +}; + + +class flipLabelOp +{ +public: + + label operator()(const label& val) const + { + return -val; + } +}; + + +// Template specialisation for primitives that support negation +template<> scalar flipOp::operator()(const scalar&) const; +template<> vector flipOp::operator()(const vector&) const; +template<> sphericalTensor flipOp::operator()(const sphericalTensor&) const; +template<> symmTensor flipOp::operator()(const symmTensor&) const; +template<> tensor flipOp::operator()(const tensor&) const; +template<> triad flipOp::operator()(const triad&) const; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/dynamicMesh/Make/files b/src/dynamicMesh/Make/files index 851530cd04647d97c55883aa2b0dca69ed7e5f71..a96b93bdf9db363e3b02b293511e38fe4f60d003 100644 --- a/src/dynamicMesh/Make/files +++ b/src/dynamicMesh/Make/files @@ -28,17 +28,22 @@ polyTopoChange/polyTopoChange/addPatchCellLayer.C polyTopoChange/polyTopoChange/pointEdgeCollapse/pointEdgeCollapse.C polyTopoChange/polyTopoChange/edgeCollapser.C polyTopoChange/polyTopoChange/faceCollapser.C -polyTopoChange/polyTopoChange/hexRef8.C polyTopoChange/polyTopoChange/removeCells.C polyTopoChange/polyTopoChange/removeFaces.C polyTopoChange/polyTopoChange/refinementData.C polyTopoChange/polyTopoChange/refinementDistanceData.C -polyTopoChange/polyTopoChange/refinementHistory.C polyTopoChange/polyTopoChange/removePoints.C polyTopoChange/polyTopoChange/combineFaces.C polyTopoChange/polyTopoChange/duplicatePoints.C polyTopoChange/polyTopoChange/tetDecomposer.C + +hexRef8 = polyTopoChange/polyTopoChange/hexRef8 + +$(hexRef8)/hexRef8.C +$(hexRef8)/hexRef8Data.C +$(hexRef8)/refinementHistory.C + slidingInterface/slidingInterface.C slidingInterface/slidingInterfaceProjectPoints.C slidingInterface/coupleSlidingInterface.C @@ -83,6 +88,8 @@ polyMeshAdder/polyMeshAdder.C fvMeshTools/fvMeshTools.C +fvMeshSubset/fvMeshSubset.C + motionSmoother/motionSmoother.C motionSmoother/motionSmootherAlgo.C motionSmoother/motionSmootherAlgoCheck.C diff --git 
a/src/dynamicMesh/fvMeshAdder/fvMeshAdder.C b/src/dynamicMesh/fvMeshAdder/fvMeshAdder.C index 2f2575d782339f869a17d4101a6e7d6629ed5953..2a9965fe4ec0acf00f32079bf1f6d42f4dcd263e 100644 --- a/src/dynamicMesh/fvMeshAdder/fvMeshAdder.C +++ b/src/dynamicMesh/fvMeshAdder/fvMeshAdder.C @@ -28,6 +28,14 @@ License #include "faceCoupleInfo.H" #include "fvMesh.H" +/* * * * * * * * * * * * * * * Static Member Data * * * * * * * * * * * * * */ + +namespace Foam +{ +defineTypeNameAndDebug(fvMeshAdder, 0); +} + + // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // Foam::labelList Foam::fvMeshAdder::calcPatchMap @@ -105,6 +113,12 @@ Foam::autoPtr<Foam::mapAddedPolyMesh> Foam::fvMeshAdder::add fvMeshAdder::MapSurfaceFields<symmTensor>(mapPtr, mesh0, mesh1); fvMeshAdder::MapSurfaceFields<tensor>(mapPtr, mesh0, mesh1); + fvMeshAdder::MapDimFields<scalar>(mapPtr, mesh0, mesh1); + fvMeshAdder::MapDimFields<vector>(mapPtr, mesh0, mesh1); + fvMeshAdder::MapDimFields<sphericalTensor>(mapPtr, mesh0, mesh1); + fvMeshAdder::MapDimFields<symmTensor>(mapPtr, mesh0, mesh1); + fvMeshAdder::MapDimFields<tensor>(mapPtr, mesh0, mesh1); + return mapPtr; } diff --git a/src/dynamicMesh/fvMeshAdder/fvMeshAdder.H b/src/dynamicMesh/fvMeshAdder/fvMeshAdder.H index 9de33e452f6cec467276cb08ff8ed5c1dc2458f4..e86792c396d834ef4abb11533777473263be5daf 100644 --- a/src/dynamicMesh/fvMeshAdder/fvMeshAdder.H +++ b/src/dynamicMesh/fvMeshAdder/fvMeshAdder.H @@ -26,7 +26,7 @@ Class Description Adds two fvMeshes without using any polyMesh morphing. - Uses fvMeshAdder. + Uses polyMeshAdder. SourceFiles fvMeshAdder.C @@ -42,6 +42,7 @@ SourceFiles #include "fvPatchFieldsFwd.H" #include "fvsPatchFieldsFwd.H" #include "fvPatchFieldMapper.H" +#include "DimensionedField.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // @@ -101,8 +102,22 @@ private: const GeometricField<Type, fvsPatchField, surfaceMesh>& fldToAdd ); + //- Update single dimensionedField. + template<class Type> + static void MapDimField + ( + const mapAddedPolyMesh& meshMap, + + DimensionedField<Type, volMesh>& fld, + const DimensionedField<Type, volMesh>& fldToAdd + ); + public: + // Declare name of the class and its debug switch + ClassName("fvMeshAdder"); + + // Member Functions //- Inplace add mesh to fvMesh. Maps all stored fields. Returns map. 
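[Editorial note, not part of the patch] With the MapDimField/MapDimFields additions above, fvMeshAdder::add also maps registered dimensioned (internal) fields when two meshes are merged. A minimal sketch of the intended effect, assuming two fvMeshes and a faceCoupleInfo are available; the field and variable names are illustrative only:

    // A dimensioned (internal-only) field registered on mesh0
    volScalarField::Internal rho
    (
        IOobject("rho", mesh0.time().timeName(), mesh0),
        mesh0,
        dimensionedScalar("rho", dimMass/dimVolume, 1.0)
    );

    // Merge mesh1 into mesh0. Besides vol and surface fields, any
    // DimensionedField<Type, volMesh> registered on mesh0 is now resized
    // to the merged mesh and rmap'ed via the returned mapAddedPolyMesh.
    autoPtr<mapAddedPolyMesh> map = fvMeshAdder::add(mesh0, mesh1, coupleInfo);

    // Cells originating from mesh1 receive values from the field of the
    // same name registered on mesh1, if present; otherwise a warning is
    // issued and those cells keep unmapped values.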
@@ -131,6 +146,15 @@ public: const fvMesh& mesh, const fvMesh& meshToAdd ); + + //- Map all DimensionedFields of Type + template<class Type> + static void MapDimFields + ( + const mapAddedPolyMesh&, + const fvMesh& mesh, + const fvMesh& meshToAdd + ); }; diff --git a/src/dynamicMesh/fvMeshAdder/fvMeshAdderTemplates.C b/src/dynamicMesh/fvMeshAdder/fvMeshAdderTemplates.C index fba219117d01e7de02829adf74a1505c78f6fe79..bc46a7568ce646139b8674a777226b6030fe937d 100644 --- a/src/dynamicMesh/fvMeshAdder/fvMeshAdderTemplates.C +++ b/src/dynamicMesh/fvMeshAdder/fvMeshAdderTemplates.C @@ -280,6 +280,12 @@ void Foam::fvMeshAdder::MapVolFields ++fieldIter ) { + if (debug) + { + Pout<< "MapVolFields : Storing old time for " << fieldIter()->name() + << endl; + } + const_cast<GeometricField<Type, fvPatchField, volMesh>*>(fieldIter()) ->storeOldTimes(); } @@ -304,6 +310,12 @@ void Foam::fvMeshAdder::MapVolFields const GeometricField<Type, fvPatchField, volMesh>& fldToAdd = *fieldsToAdd[fld.name()]; + if (debug) + { + Pout<< "MapVolFields : mapping " << fld.name() + << " and " << fldToAdd.name() << endl; + } + MapVolField<Type>(meshMap, fld, fldToAdd); } else @@ -585,8 +597,13 @@ void Foam::fvMeshAdder::MapSurfaceFields ++fieldIter ) { - const_cast<fldType*>(fieldIter()) - ->storeOldTimes(); + if (debug) + { + Pout<< "MapSurfaceFields : Storing old time for " + << fieldIter()->name() << endl; + } + + const_cast<fldType*>(fieldIter())->storeOldTimes(); } @@ -604,6 +621,12 @@ void Foam::fvMeshAdder::MapSurfaceFields { const fldType& fldToAdd = *fieldsToAdd[fld.name()]; + if (debug) + { + Pout<< "MapSurfaceFields : mapping " << fld.name() + << " and " << fldToAdd.name() << endl; + } + MapSurfaceField<Type>(meshMap, fld, fldToAdd); } else @@ -617,4 +640,80 @@ void Foam::fvMeshAdder::MapSurfaceFields } +template<class Type> +void Foam::fvMeshAdder::MapDimField +( + const mapAddedPolyMesh& meshMap, + + DimensionedField<Type, volMesh>& fld, + const DimensionedField<Type, volMesh>& fldToAdd +) +{ + const fvMesh& mesh = fld.mesh(); + + // Store old field + Field<Type> oldField(fld); + + fld.setSize(mesh.nCells()); + + fld.rmap(oldField, meshMap.oldCellMap()); + fld.rmap(fldToAdd, meshMap.addedCellMap()); +} + + +template<class Type> +void Foam::fvMeshAdder::MapDimFields +( + const mapAddedPolyMesh& meshMap, + const fvMesh& mesh, + const fvMesh& meshToAdd +) +{ + typedef DimensionedField<Type, volMesh> fldType; + + // Note: use strict flag on lookupClass to avoid picking up + // volFields + HashTable<const fldType*> fields + ( + mesh.objectRegistry::lookupClass<fldType>(true) + ); + + HashTable<const fldType*> fieldsToAdd + ( + meshToAdd.objectRegistry::lookupClass<fldType>(true) + ); + + for + ( + typename HashTable<const fldType*>:: + iterator fieldIter = fields.begin(); + fieldIter != fields.end(); + ++fieldIter + ) + { + fldType& fld = const_cast<fldType&>(*fieldIter()); + + if (fieldsToAdd.found(fld.name())) + { + const fldType& fldToAdd = *fieldsToAdd[fld.name()]; + + if (debug) + { + Pout<< "MapDimFields : mapping " << fld.name() + << " and " << fldToAdd.name() << endl; + } + + MapDimField<Type>(meshMap, fld, fldToAdd); + } + else + { + WarningIn("fvMeshAdder::MapDimFields(..)") + << "Not mapping field " << fld.name() + << " since not present on mesh to add" + << endl; + } + } +} + + // ************************************************************************* // diff --git a/src/dynamicMesh/fvMeshDistribute/fvMeshDistribute.C b/src/dynamicMesh/fvMeshDistribute/fvMeshDistribute.C index 
7123919e3a57f1e26c36cdbf5c5268b69641aec8..690944fca9588ffda3d5753772313edf1b4e5fc2 100644 --- a/src/dynamicMesh/fvMeshDistribute/fvMeshDistribute.C +++ b/src/dynamicMesh/fvMeshDistribute/fvMeshDistribute.C @@ -40,17 +40,125 @@ License #include "syncTools.H" #include "CompactListList.H" #include "fvMeshTools.H" +#include "ListOps.H" // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // namespace Foam { defineTypeNameAndDebug(fvMeshDistribute, 0); + +//- Less function class that can be used for sorting processor patches +class lessProcPatches +{ + const labelList& nbrProc_; + const labelList& referPatchID_; + +public: + + lessProcPatches( const labelList& nbrProc, const labelList& referPatchID) + : + nbrProc_(nbrProc), + referPatchID_(referPatchID) + {} + + bool operator()(const label a, const label b) + { + if (nbrProc_[a] < nbrProc_[b]) + { + return true; + } + else if (nbrProc_[a] > nbrProc_[b]) + { + return false; + } + else + { + // Equal neighbour processor + return referPatchID_[a] < referPatchID_[b]; + } + } +}; + } // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // +void Foam::fvMeshDistribute::inplaceRenumberWithFlip +( + const labelUList& oldToNew, + const bool oldToNewHasFlip, + const bool lstHasFlip, + labelUList& lst +) +{ + if (!lstHasFlip && !oldToNewHasFlip) + { + Foam::inplaceRenumber(oldToNew, lst); + } + else + { + // Either input data or map encodes sign so result encodes sign + + forAll(lst, elemI) + { + // Extract old value and sign + label val = lst[elemI]; + label sign = 1; + if (lstHasFlip) + { + if (val > 0) + { + val = val-1; + } + else if (val < 0) + { + val = -val-1; + sign = -1; + } + else + { + FatalErrorInFunction + << "Problem : zero value " << val + << " at index " << elemI << " out of " << lst.size() + << " list with flip bit" << exit(FatalError); + } + } + + + // Lookup new value and possibly change sign + label newVal = oldToNew[val]; + + if (oldToNewHasFlip) + { + if (newVal > 0) + { + newVal = newVal-1; + } + else if (newVal < 0) + { + newVal = -newVal-1; + sign = -sign; + } + else + { + FatalErrorInFunction + << "Problem : zero value " << newVal + << " at index " << elemI << " out of " + << oldToNew.size() + << " list with flip bit" << exit(FatalError); + } + } + + + // Encode new value and sign + lst[elemI] = sign*(newVal+1); + } + } +} + + Foam::labelList Foam::fvMeshDistribute::select ( const bool selectEqual, @@ -453,7 +561,13 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::fvMeshDistribute::repatch forAll(constructFaceMap, proci) { - inplaceRenumber(map().reverseFaceMap(), constructFaceMap[proci]); + inplaceRenumberWithFlip + ( + map().reverseFaceMap(), + false, + true, + constructFaceMap[proci] + ); } @@ -882,12 +996,45 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::fvMeshDistribute::doRemoveCells meshMod ); + + //// Generate test field + //tmp<surfaceScalarField> sfld(generateTestField(mesh_)); + + // Save internal fields (note: not as DimensionedFields since would + // get mapped) + PtrList<Field<scalar>> sFlds; + saveInternalFields(sFlds); + PtrList<Field<vector>> vFlds; + saveInternalFields(vFlds); + PtrList<Field<sphericalTensor>> sptFlds; + saveInternalFields(sptFlds); + PtrList<Field<symmTensor>> sytFlds; + saveInternalFields(sytFlds); + PtrList<Field<tensor>> tFlds; + saveInternalFields(tFlds); + // Change the mesh. No inflation. Note: no parallel comms allowed. 
autoPtr<mapPolyMesh> map = meshMod.changeMesh(mesh_, false, false); // Update fields mesh_.updateMesh(map); + + // Any exposed faces in a surfaceField will not be mapped. Map the value + // of these separately (until there is support in all PatchFields for + // mapping from internal faces ...) + + mapExposedFaces(map(), sFlds); + mapExposedFaces(map(), vFlds); + mapExposedFaces(map(), sptFlds); + mapExposedFaces(map(), sytFlds); + mapExposedFaces(map(), tFlds); + + + //// Test test field + //testField(sfld); + + // Move mesh (since morphing does not do this) if (map().hasMotionPoints()) { @@ -911,10 +1058,18 @@ void Foam::fvMeshDistribute::addProcPatches // contain for all current boundary faces the global patchID (for non-proc // patch) or the processor. + // Determine a visit order such that the processor patches get added + // in order of increasing neighbour processor (and for same neighbour + // processor (in case of processor cyclics) in order of increasing + // 'refer' patch) + labelList indices; + sortedOrder(nbrProc, indices, lessProcPatches(nbrProc, referPatchID)); + procPatchID.setSize(Pstream::nProcs()); - forAll(nbrProc, bFacei) + forAll(indices, i) { + label bFacei = indices[i]; label proci = nbrProc[bFacei]; if (proci != -1 && proci != Pstream::myProcNo()) @@ -927,6 +1082,7 @@ void Foam::fvMeshDistribute::addProcPatches if (referPatchID[bFacei] == -1) { // Ordinary processor boundary + processorPolyPatch pp ( 0, // size @@ -934,7 +1090,7 @@ void Foam::fvMeshDistribute::addProcPatches mesh_.boundaryMesh().size(), mesh_.boundaryMesh(), Pstream::myProcNo(), - nbrProc[bFacei] + proci ); procPatchID[proci].insert @@ -957,7 +1113,6 @@ void Foam::fvMeshDistribute::addProcPatches ( mesh_.boundaryMesh()[referPatchID[bFacei]] ); - processorCyclicPolyPatch pp ( 0, // size @@ -965,7 +1120,7 @@ void Foam::fvMeshDistribute::addProcPatches mesh_.boundaryMesh().size(), mesh_.boundaryMesh(), Pstream::myProcNo(), - nbrProc[bFacei], + proci, pcPatch.name(), pcPatch.transform() ); @@ -1500,6 +1655,33 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute const wordList surfTensors(mesh_.names(surfaceTensorField::typeName)); checkEqualWordList("surfaceTensorFields", surfTensors); + typedef volScalarField::Internal dimScalType; + const wordList dimScalars(mesh_.names(dimScalType::typeName)); + checkEqualWordList("volScalarField::Internal", dimScalars); + + typedef volVectorField::Internal dimVecType; + const wordList dimVectors(mesh_.names(dimVecType::typeName)); + checkEqualWordList("volVectorField::Internal", dimVectors); + + typedef volSphericalTensorField::Internal dimSphereType; + const wordList dimSphereTensors(mesh_.names(dimSphereType::typeName)); + checkEqualWordList + ( + "volSphericalTensorField::Internal", + dimSphereTensors + ); + + typedef volSymmTensorField::Internal dimSymmTensorType; + const wordList dimSymmTensors(mesh_.names(dimSymmTensorType::typeName)); + checkEqualWordList + ( + "volSymmTensorField::Internal", + dimSymmTensors + ); + + typedef volTensorField::Internal dimTensorType; + const wordList dimTensors(mesh_.names(dimTensorType::typeName)); + checkEqualWordList("volTensorField::Internal", dimTensors); @@ -1626,10 +1808,13 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute ); subCellMap[recvProc] = subsetter.cellMap(); - subFaceMap[recvProc] = renumber + subFaceMap[recvProc] = subsetter.faceFlipMap(); + inplaceRenumberWithFlip ( repatchFaceMap, - subsetter.faceMap() + false, // oldToNew has flip + true, // 
subFaceMap has flip + subFaceMap[recvProc] ); subPointMap[recvProc] = subsetter.pointMap(); subPatchMap[recvProc] = subsetter.patchMap(); @@ -1681,6 +1866,8 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute procSourceNewNbrProc, str ); + + // volFields sendFields<volScalarField>(recvProc, volScalars, subsetter, str); sendFields<volVectorField>(recvProc, volVectors, subsetter, str); sendFields<volSphericalTensorField> @@ -1699,6 +1886,7 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute ); sendFields<volTensorField>(recvProc, volTensors, subsetter, str); + // surfaceFields sendFields<surfaceScalarField> ( recvProc, @@ -1734,6 +1922,43 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute subsetter, str ); + + // dimensionedFields + sendFields<volScalarField::Internal> + ( + recvProc, + dimScalars, + subsetter, + str + ); + sendFields<volVectorField::Internal> + ( + recvProc, + dimVectors, + subsetter, + str + ); + sendFields<volSphericalTensorField::Internal> + ( + recvProc, + dimSphereTensors, + subsetter, + str + ); + sendFields<volSymmTensorField::Internal> + ( + recvProc, + dimSymmTensors, + subsetter, + str + ); + sendFields<volTensorField::Internal> + ( + recvProc, + dimTensors, + subsetter, + str + ); } } @@ -1771,12 +1996,24 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute repatchFaceMap, subMap().faceMap() ); + // Insert the sign bit from face flipping + labelList& faceMap = subFaceMap[Pstream::myProcNo()]; + forAll(faceMap, faceI) + { + faceMap[faceI] += 1; + } + const labelHashSet& flip = subMap().flipFaceFlux(); + forAllConstIter(labelHashSet, flip, iter) + { + label faceI = iter.key(); + faceMap[faceI] = -faceMap[faceI]; + } subPointMap[Pstream::myProcNo()] = subMap().pointMap(); subPatchMap[Pstream::myProcNo()] = identity(patches.size()); // Initialize all addressing into current mesh constructCellMap[Pstream::myProcNo()] = identity(mesh_.nCells()); - constructFaceMap[Pstream::myProcNo()] = identity(mesh_.nFaces()); + constructFaceMap[Pstream::myProcNo()] = identity(mesh_.nFaces()) + 1; constructPointMap[Pstream::myProcNo()] = identity(mesh_.nPoints()); constructPatchMap[Pstream::myProcNo()] = identity(patches.size()); @@ -1872,17 +2109,26 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute labelList domainSourceNewNbrProc; autoPtr<fvMesh> domainMeshPtr; + PtrList<volScalarField> vsf; PtrList<volVectorField> vvf; PtrList<volSphericalTensorField> vsptf; PtrList<volSymmTensorField> vsytf; PtrList<volTensorField> vtf; + PtrList<surfaceScalarField> ssf; PtrList<surfaceVectorField> svf; PtrList<surfaceSphericalTensorField> ssptf; PtrList<surfaceSymmTensorField> ssytf; PtrList<surfaceTensorField> stf; + PtrList<volScalarField::Internal> dsf; + PtrList<volVectorField::Internal> dvf; + PtrList<volSphericalTensorField::Internal> dstf; + PtrList<volSymmTensorField::Internal> dsytf; + PtrList<volTensorField::Internal> dtf; + + // Opposite of sendMesh { domainMeshPtr = receiveMesh @@ -1908,6 +2154,7 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute // of problems reading consecutive fields from single stream. 
dictionary fieldDicts(str); + // Vol fields receiveFields<volScalarField> ( sendProc, @@ -1949,6 +2196,7 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute fieldDicts.subDict(volTensorField::typeName) ); + // Surface fields receiveFields<surfaceScalarField> ( sendProc, @@ -1989,12 +2237,70 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute stf, fieldDicts.subDict(surfaceTensorField::typeName) ); + + // Dimensioned fields + receiveFields<volScalarField::Internal> + ( + sendProc, + dimScalars, + domainMesh, + dsf, + fieldDicts.subDict + ( + volScalarField::Internal::typeName + ) + ); + receiveFields<volVectorField::Internal> + ( + sendProc, + dimVectors, + domainMesh, + dvf, + fieldDicts.subDict + ( + volVectorField::Internal::typeName + ) + ); + receiveFields<volSphericalTensorField::Internal> + ( + sendProc, + dimSphereTensors, + domainMesh, + dstf, + fieldDicts.subDict + ( + volSphericalTensorField::Internal:: + typeName + ) + ); + receiveFields<volSymmTensorField::Internal> + ( + sendProc, + dimSymmTensors, + domainMesh, + dsytf, + fieldDicts.subDict + ( + volSymmTensorField::Internal::typeName + ) + ); + receiveFields<volTensorField::Internal> + ( + sendProc, + dimTensors, + domainMesh, + dtf, + fieldDicts.subDict + ( + volTensorField::Internal::typeName + ) + ); } const fvMesh& domainMesh = domainMeshPtr(); constructCellMap[sendProc] = identity(domainMesh.nCells()); - constructFaceMap[sendProc] = identity(domainMesh.nFaces()); + constructFaceMap[sendProc] = identity(domainMesh.nFaces()) + 1; constructPointMap[sendProc] = identity(domainMesh.nPoints()); constructPatchMap[sendProc] = identity(domainMesh.boundaryMesh().size()); @@ -2105,28 +2411,76 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute domainSourceNewNbrProc ); - // Update all addressing so xxProcAddressing points to correct item - // in masterMesh. + // Update all addressing so xxProcAddressing points to correct + // item in masterMesh. const labelList& oldCellMap = map().oldCellMap(); const labelList& oldFaceMap = map().oldFaceMap(); const labelList& oldPointMap = map().oldPointMap(); const labelList& oldPatchMap = map().oldPatchMap(); + //Note: old mesh faces never flipped! 
forAll(constructPatchMap, proci) { if (proci != sendProc && constructPatchMap[proci].size()) { // Processor already in mesh (either myProcNo or received) inplaceRenumber(oldCellMap, constructCellMap[proci]); - inplaceRenumber(oldFaceMap, constructFaceMap[proci]); + inplaceRenumberWithFlip + ( + oldFaceMap, + false, + true, + constructFaceMap[proci] + ); inplaceRenumber(oldPointMap, constructPointMap[proci]); inplaceRenumber(oldPatchMap, constructPatchMap[proci]); } } + + labelHashSet flippedAddedFaces; + { + // Find out if any faces of domain mesh were flipped (boundary + // faces becoming internal) + label nBnd = domainMesh.nFaces()-domainMesh.nInternalFaces(); + flippedAddedFaces.resize(nBnd/4); + + for + ( + label domainFaceI = domainMesh.nInternalFaces(); + domainFaceI < domainMesh.nFaces(); + domainFaceI++ + ) + { + label newFaceI = map().addedFaceMap()[domainFaceI]; + label newCellI = mesh_.faceOwner()[newFaceI]; + + label domainCellI = domainMesh.faceOwner()[domainFaceI]; + + if (newCellI != map().addedCellMap()[domainCellI]) + { + flippedAddedFaces.insert(domainFaceI); + } + } + } + + // Added processor inplaceRenumber(map().addedCellMap(), constructCellMap[sendProc]); - inplaceRenumber(map().addedFaceMap(), constructFaceMap[sendProc]); + // Add flip + forAllConstIter(labelHashSet, flippedAddedFaces, iter) + { + label domainFaceI = iter.key(); + label& val = constructFaceMap[sendProc][domainFaceI]; + val = -val; + } + inplaceRenumberWithFlip + ( + map().addedFaceMap(), + false, + true, // constructFaceMap has flip sign + constructFaceMap[sendProc] + ); inplaceRenumber(map().addedPointMap(), constructPointMap[sendProc]); inplaceRenumber(map().addedPatchMap(), constructPatchMap[sendProc]); @@ -2236,35 +2590,6 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute Zero ); - initPatchFields<surfaceScalarField, processorFvsPatchField<scalar>> - ( - Zero - ); - initPatchFields<surfaceVectorField, processorFvsPatchField<vector>> - ( - Zero - ); - initPatchFields - < - surfaceSphericalTensorField, - processorFvsPatchField<sphericalTensor> - > - ( - Zero - ); - initPatchFields - < - surfaceSymmTensorField, - processorFvsPatchField<symmTensor> - > - ( - Zero - ); - initPatchFields<surfaceTensorField, processorFvsPatchField<tensor>> - ( - Zero - ); - mesh_.setInstance(mesh_.time().timeName()); @@ -2308,7 +2633,10 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute constructPointMap.xfer(), constructFaceMap.xfer(), constructCellMap.xfer(), - constructPatchMap.xfer() + constructPatchMap.xfer(), + + true, // subFaceMap has flip + true // constructFaceMap has flip ) ); } diff --git a/src/dynamicMesh/fvMeshDistribute/fvMeshDistribute.H b/src/dynamicMesh/fvMeshDistribute/fvMeshDistribute.H index c9469a4f2fef3fda64875cb8389309604e01e4d8..3a13d365f982519e8f4fb4bff43c1efc40e52480 100644 --- a/src/dynamicMesh/fvMeshDistribute/fvMeshDistribute.H +++ b/src/dynamicMesh/fvMeshDistribute/fvMeshDistribute.H @@ -82,6 +82,14 @@ class fvMeshDistribute // Private Member Functions + static void inplaceRenumberWithFlip + ( + const labelUList& oldToNew, + const bool oldToNewHasFlip, + const bool lstHasFlip, + labelUList& lst + ); + //- Find indices with value static labelList select ( @@ -117,6 +125,18 @@ class fvMeshDistribute const PtrList<FieldField<fvsPatchField, T>>& oldBflds ); + //- Save internal fields of surfaceFields + template<class T> + void saveInternalFields(PtrList<Field<T>>& iflds) const; + + //- Set value of patch faces resulting from internal 
faces + template<class T> + void mapExposedFaces + ( + const mapPolyMesh& map, + const PtrList<Field<T>>& oldFlds + ); + //- Init patch fields of certain type template<class GeoField, class PatchFieldType> void initPatchFields @@ -151,6 +171,7 @@ class fvMeshDistribute labelListList& constructPointMap ); + // Coupling information //- Construct the local environment of all boundary faces. @@ -240,7 +261,7 @@ class fvMeshDistribute ( const labelList& neighbourNewProc, // new processor per b. face const labelList& referPatchID, // -1 or original patch - const List<Map<label>>& procPatchID// patchID + const List<Map<label>>& procPatchID // patchID ); //- Send mesh and coupling data. diff --git a/src/dynamicMesh/fvMeshDistribute/fvMeshDistributeTemplates.C b/src/dynamicMesh/fvMeshDistribute/fvMeshDistributeTemplates.C index 79c02c23b5c7a350f1bc70596fc543ff2aeed68b..d3eedf6e22b44e2de0a6174acf0cf8631cf5cea9 100644 --- a/src/dynamicMesh/fvMeshDistribute/fvMeshDistributeTemplates.C +++ b/src/dynamicMesh/fvMeshDistribute/fvMeshDistributeTemplates.C @@ -55,13 +55,14 @@ void Foam::fvMeshDistribute::printFieldInfo(const fvMesh& mesh) } -// Save whole boundary field template<class T, class Mesh> void Foam::fvMeshDistribute::saveBoundaryFields ( PtrList<FieldField<fvsPatchField, T>>& bflds ) const { + // Save whole boundary field + typedef GeometricField<T, fvsPatchField, Mesh> fldType; HashTable<const fldType*> flds @@ -84,7 +85,6 @@ void Foam::fvMeshDistribute::saveBoundaryFields } -// Map boundary field template<class T, class Mesh> void Foam::fvMeshDistribute::mapBoundaryFields ( @@ -92,6 +92,8 @@ void Foam::fvMeshDistribute::mapBoundaryFields const PtrList<FieldField<fvsPatchField, T>>& oldBflds ) { + // Map boundary field + const labelList& oldPatchStarts = map.oldPatchStarts(); const labelList& faceMap = map.faceMap(); @@ -145,13 +147,103 @@ void Foam::fvMeshDistribute::mapBoundaryFields } -// Init patch fields of certain type +template<class T> +void Foam::fvMeshDistribute::saveInternalFields +( + PtrList<Field<T> >& iflds +) const +{ + typedef GeometricField<T, fvsPatchField, surfaceMesh> fldType; + + HashTable<const fldType*> flds + ( + static_cast<const fvMesh&>(mesh_).objectRegistry::lookupClass<fldType>() + ); + + iflds.setSize(flds.size()); + + label i = 0; + + forAllConstIter(typename HashTable<const fldType*>, flds, iter) + { + const fldType& fld = *iter(); + + iflds.set(i, fld.primitiveField().clone()); + + i++; + } +} + + +template<class T> +void Foam::fvMeshDistribute::mapExposedFaces +( + const mapPolyMesh& map, + const PtrList<Field<T> >& oldFlds +) +{ + // Set boundary values of exposed internal faces + + const labelList& faceMap = map.faceMap(); + + typedef GeometricField<T, fvsPatchField, surfaceMesh> fldType; + + HashTable<fldType*> flds + ( + mesh_.objectRegistry::lookupClass<fldType>() + ); + + if (flds.size() != oldFlds.size()) + { + FatalErrorIn("fvMeshDistribute::mapExposedFaces(..)") << "problem" + << abort(FatalError); + } + + + label fieldI = 0; + + forAllIter(typename HashTable<fldType*>, flds, iter) + { + fldType& fld = *iter(); + typename fldType::Boundary& bfld = fld.boundaryFieldRef(); + + const Field<T>& oldInternal = oldFlds[fieldI++]; + + // Pull from old internal field into bfld. 
+ + forAll(bfld, patchI) + { + fvsPatchField<T>& patchFld = bfld[patchI]; + + forAll(patchFld, i) + { + const label faceI = patchFld.patch().start()+i; + + label oldFaceI = faceMap[faceI]; + + if (oldFaceI < oldInternal.size()) + { + patchFld[i] = oldInternal[oldFaceI]; + + if (map.flipFaceFlux().found(faceI)) + { + patchFld[i] = flipOp()(patchFld[i]); + } + } + } + } + } +} + + template<class GeoField, class PatchFieldType> void Foam::fvMeshDistribute::initPatchFields ( const typename GeoField::value_type& initVal ) { + // Init patch fields of certain type + HashTable<GeoField*> flds ( mesh_.objectRegistry::lookupClass<GeoField>() @@ -161,8 +253,7 @@ void Foam::fvMeshDistribute::initPatchFields { GeoField& fld = *iter(); - typename GeoField::Boundary& bfld = - fld.boundaryFieldRef(); + typename GeoField::Boundary& bfld = fld.boundaryFieldRef(); forAll(bfld, patchi) { @@ -175,10 +266,11 @@ void Foam::fvMeshDistribute::initPatchFields } -// correctBoundaryConditions patch fields of certain type template<class GeoField> void Foam::fvMeshDistribute::correctBoundaryConditions() { + // correctBoundaryConditions patch fields of certain type + HashTable<GeoField*> flds ( mesh_.objectRegistry::lookupClass<GeoField>() @@ -192,24 +284,6 @@ void Foam::fvMeshDistribute::correctBoundaryConditions() } -// Send fields. Note order supplied so we can receive in exactly the same order. -// Note that field gets written as entry in dictionary so we -// can construct from subdictionary. -// (since otherwise the reading as-a-dictionary mixes up entries from -// consecutive fields) -// The dictionary constructed is: -// volScalarField -// { -// p {internalField ..; boundaryField ..;} -// k {internalField ..; boundaryField ..;} -// } -// volVectorField -// { -// U {internalField ... } -// } - -// volVectorField {U {internalField ..; boundaryField ..;}} -// template<class GeoField> void Foam::fvMeshDistribute::sendFields ( @@ -219,6 +293,25 @@ void Foam::fvMeshDistribute::sendFields Ostream& toNbr ) { + // Send fields. Note order supplied so we can receive in exactly the same + // order. + // Note that field gets written as entry in dictionary so we + // can construct from subdictionary. + // (since otherwise the reading as-a-dictionary mixes up entries from + // consecutive fields) + // The dictionary constructed is: + // volScalarField + // { + // p {internalField ..; boundaryField ..;} + // k {internalField ..; boundaryField ..;} + // } + // volVectorField + // { + // U {internalField ... 
} + // } + + // volVectorField {U {internalField ..; boundaryField ..;}} + toNbr << GeoField::typeName << token::NL << token::BEGIN_BLOCK << token::NL; forAll(fieldNames, i) { @@ -244,7 +337,6 @@ void Foam::fvMeshDistribute::sendFields } -// Opposite of sendFields template<class GeoField> void Foam::fvMeshDistribute::receiveFields ( diff --git a/src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubset.C b/src/dynamicMesh/fvMeshSubset/fvMeshSubset.C similarity index 91% rename from src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubset.C rename to src/dynamicMesh/fvMeshSubset/fvMeshSubset.C index 36f7a6f881ae700d0e71152ec3dd532c7ef8caf9..e23cceea12c0f843cd83fa02479ea2b785ee5f69 100644 --- a/src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubset.C +++ b/src/dynamicMesh/fvMeshSubset/fvMeshSubset.C @@ -34,6 +34,9 @@ Description #include "emptyPolyPatch.H" #include "demandDrivenData.H" #include "cyclicPolyPatch.H" +#include "removeCells.H" +#include "polyTopoChange.H" +#include "mapPolyMesh.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // @@ -354,6 +357,39 @@ void Foam::fvMeshSubset::subsetZones() } +Foam::labelList Foam::fvMeshSubset::getCellsToRemove +( + const labelList& region, + const label currentRegion +) const +{ + // Count + label nKeep = 0; + forAll(region, cellI) + { + if (region[cellI] == currentRegion) + { + nKeep++; + } + } + + // Collect cells to remove + label nRemove = baseMesh().nCells() - nKeep; + labelList cellsToRemove(nRemove); + + nRemove = 0; + forAll(region, cellI) + { + if (region[cellI] != currentRegion) + { + cellsToRemove[nRemove++] = cellI; + } + } + + return cellsToRemove; +} + + // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // Foam::fvMeshSubset::fvMeshSubset(const fvMesh& baseMesh) @@ -363,7 +399,8 @@ Foam::fvMeshSubset::fvMeshSubset(const fvMesh& baseMesh) pointMap_(0), faceMap_(0), cellMap_(0), - patchMap_(0) + patchMap_(0), + faceFlipMapPtr_() {} @@ -400,6 +437,10 @@ void Foam::fvMeshSubset::setCellSubset } + // Clear demand driven data + faceFlipMapPtr_.clear(); + + cellMap_ = globalCellMap.toc(); // Sort the cell map in the ascending order @@ -793,6 +834,8 @@ void Foam::fvMeshSubset::setLargeCellSubset << abort(FatalError); } + // Clear demand driven data + faceFlipMapPtr_.clear(); // Get the cells for the current region. cellMap_.setSize(oldCells.size()); @@ -1358,6 +1401,68 @@ void Foam::fvMeshSubset::setLargeCellSubset } +Foam::labelList Foam::fvMeshSubset::getExposedFaces +( + const labelList& region, + const label currentRegion, + const bool syncCouples +) const +{ + // Collect cells to remove + labelList cellsToRemove(getCellsToRemove(region, currentRegion)); + + return removeCells(baseMesh(), syncCouples).getExposedFaces(cellsToRemove); +} + + +void Foam::fvMeshSubset::setLargeCellSubset +( + const labelList& region, + const label currentRegion, + const labelList& exposedFaces, + const labelList& patchIDs, + const bool syncCouples +) +{ + // Collect cells to remove + labelList cellsToRemove(getCellsToRemove(region, currentRegion)); + + // Mesh changing engine. + polyTopoChange meshMod(baseMesh()); + + removeCells cellRemover(baseMesh(), syncCouples); + + cellRemover.setRefinement + ( + cellsToRemove, + exposedFaces, + patchIDs, + meshMod + ); + + // Create mesh, return map from old to new mesh. 
+ autoPtr<mapPolyMesh> map = meshMod.makeMesh + ( + fvMeshSubsetPtr_, + IOobject + ( + baseMesh().name(), + baseMesh().time().timeName(), + baseMesh().time(), + IOobject::NO_READ, + IOobject::NO_WRITE + ), + baseMesh(), + syncCouples + ); + + pointMap_ = map().pointMap(); + faceMap_ = map().faceMap(); + cellMap_ = map().cellMap(); + patchMap_ = identity(baseMesh().boundaryMesh().size()); +} + + bool Foam::fvMeshSubset::hasSubMesh() const { return fvMeshSubsetPtr_.valid(); @@ -1396,6 +1501,44 @@ const labelList& Foam::fvMeshSubset::faceMap() const } +const labelList& Foam::fvMeshSubset::faceFlipMap() const +{ + if (!faceFlipMapPtr_.valid()) + { + const labelList& subToBaseFace = faceMap(); + const labelList& subToBaseCell = cellMap(); + + faceFlipMapPtr_.reset(new labelList(subToBaseFace.size())); + labelList& faceFlipMap = faceFlipMapPtr_(); + + // Only exposed internal faces might be flipped (since we don't do + // any cell renumbering, just compacting) + label subInt = subMesh().nInternalFaces(); + const labelList& subOwn = subMesh().faceOwner(); + const labelList& own = baseMesh_.faceOwner(); + + for (label subFaceI = 0; subFaceI < subInt; subFaceI++) + { + faceFlipMap[subFaceI] = subToBaseFace[subFaceI]+1; + } + for (label subFaceI = subInt; subFaceI < subOwn.size(); subFaceI++) + { + label faceI = subToBaseFace[subFaceI]; + if (subToBaseCell[subOwn[subFaceI]] == own[faceI]) + { + faceFlipMap[subFaceI] = faceI+1; + } + else + { + faceFlipMap[subFaceI] = -faceI-1; + } + } + } + + return faceFlipMapPtr_(); +} + + const labelList& Foam::fvMeshSubset::cellMap() const { checkCellSubset(); diff --git a/src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubset.H b/src/dynamicMesh/fvMeshSubset/fvMeshSubset.H similarity index 75% rename from src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubset.H rename to src/dynamicMesh/fvMeshSubset/fvMeshSubset.H index 471404a98a80a243ead27c11190297ee47b240dc..8f7cd95427416930fe6b972d4c6c71d0f28d6682 100644 --- a/src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubset.H +++ b/src/dynamicMesh/fvMeshSubset/fvMeshSubset.H @@ -41,10 +41,12 @@ Description a face on a coupled patch 'losing' its neighbour it will move the face into the oldInternalFaces patch. - - if a user supplied patch is used the mapping becomes a problem. - Do the new faces get the value of the internal face they came from? - What if e.g. the user supplied patch is a fixedValue 0? So for now - they get the face of existing patch face 0. + - if a user supplied patch is used it is up to the destination + patchField to handle exposed internal faces (mapping from face -1). + If not provided the default is to assign the internalField. All the + basic patch field types (e.g. fixedValue) will give a warning and + preferably derived patch field types should be used that know how to + handle exposed faces (e.g. use uniformFixedValue instead of fixedValue) SourceFiles fvMeshSubset.C @@ -94,6 +96,9 @@ private: //- Patch mapping array labelList patchMap_; + //- Optional face mapping array with flip encoded + mutable autoPtr<labelList> faceFlipMapPtr_; + // Private Member Functions @@ -124,6 +129,13 @@ private: //- Create zones for submesh void subsetZones(); + //- Helper: extract cells-to-remove from cells-to-keep + labelList getCellsToRemove + ( + const labelList& region, + const label currentRegion + ) const; + //- Disallow default bitwise copy construct fvMeshSubset(const fvMeshSubset&); @@ -174,6 +186,32 @@ public: ); + //- Two step subsetting + + //- Get labels of exposed faces. 
+ // These are + // - internal faces that become boundary faces + // - coupled faces that become uncoupled (since one of the + // sides gets deleted) + labelList getExposedFaces + ( + const labelList& region, + const label currentRegion, + const bool syncCouples = true + ) const; + + //- For every exposed face (from above getExposedFaces) + // used supplied (existing!) patch + void setLargeCellSubset + ( + const labelList& region, + const label currentRegion, + const labelList& exposedFaces, + const labelList& patchIDs, + const bool syncCouples = true + ); + + // Access //- Original mesh @@ -196,6 +234,9 @@ public: //- Return face map const labelList& faceMap() const; + //- Return face map with sign to encode flipped faces + const labelList& faceFlipMap() const; + //- Return cell map const labelList& cellMap() const; @@ -224,7 +265,8 @@ public: const GeometricField<Type, fvPatchField, volMesh>& ) const; - //- Map surface field + //- Map surface field. Optionally negates value if flipping + // a face (from exposing an internal face) template<class Type> static tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> interpolate @@ -232,14 +274,17 @@ public: const GeometricField<Type, fvsPatchField, surfaceMesh>&, const fvMesh& sMesh, const labelList& patchMap, - const labelList& faceMap + const labelList& cellMap, + const labelList& faceMap, + const bool negateIfFlipped = true ); template<class Type> tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> interpolate ( - const GeometricField<Type, fvsPatchField, surfaceMesh>& + const GeometricField<Type, fvsPatchField, surfaceMesh>&, + const bool negateIfFlipped = true ) const; //- Map point field @@ -259,6 +304,20 @@ public: ( const GeometricField<Type, pointPatchField, pointMesh>& ) const; + + //- Map dimensioned field + template<class Type> + static tmp<DimensionedField<Type, volMesh>> + interpolate + ( + const DimensionedField<Type, volMesh>&, + const fvMesh& sMesh, + const labelList& cellMap + ); + + template<class Type> + tmp<DimensionedField<Type, volMesh>> + interpolate(const DimensionedField<Type, volMesh>&) const; }; diff --git a/src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubsetInterpolate.C b/src/dynamicMesh/fvMeshSubset/fvMeshSubsetInterpolate.C similarity index 89% rename from src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubsetInterpolate.C rename to src/dynamicMesh/fvMeshSubset/fvMeshSubsetInterpolate.C index 2dc96d4ad66030f1029e1757d6cbd85beb976378..1188fb461c67ee843737bd1178bba33d554f870a 100644 --- a/src/finiteVolume/fvMesh/fvMeshSubset/fvMeshSubsetInterpolate.C +++ b/src/dynamicMesh/fvMeshSubset/fvMeshSubsetInterpolate.C @@ -29,6 +29,7 @@ License #include "emptyFvPatchFields.H" #include "directFvPatchFieldMapper.H" #include "directPointPatchFieldMapper.H" +#include "flipOp.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // @@ -178,7 +179,9 @@ tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate const GeometricField<Type, fvsPatchField, surfaceMesh>& vf, const fvMesh& sMesh, const labelList& patchMap, - const labelList& faceMap + const labelList& cellMap, + const labelList& faceMap, + const bool negateIfFlipped ) { // 1. 
Create the complete field with dummy patch fields @@ -297,14 +300,24 @@ tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate // Postprocess patch field for exposed faces fvsPatchField<Type>& pfld = bf[patchi]; + const labelUList& fc = bf[patchi].patch().faceCells(); + const labelList& own = vf.mesh().faceOwner(); forAll(pfld, i) { label baseFacei = faceMap[subPatch.start()+i]; if (baseFacei < vf.primitiveField().size()) { - // Exposed internal face - pfld[i] = vf.primitiveField()[baseFacei]; + Type val = vf.internalField()[baseFacei]; + + if (cellMap[fc[i]] == own[baseFacei] || !negateIfFlipped) + { + pfld[i] = val; + } + else + { + pfld[i] = flipOp()(val); + } } else { @@ -329,7 +342,8 @@ tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate template<class Type> tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate ( - const GeometricField<Type, fvsPatchField, surfaceMesh>& sf + const GeometricField<Type, fvsPatchField, surfaceMesh>& sf, + const bool negateIfFlipped ) const { return interpolate @@ -337,7 +351,9 @@ tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate sf, subMesh(), patchMap(), - faceMap() + cellMap(), + faceMap(), + negateIfFlipped ); } @@ -489,6 +505,47 @@ tmp<GeometricField<Type, pointPatchField, pointMesh>> fvMeshSubset::interpolate } +template<class Type> +tmp<DimensionedField<Type, volMesh>> fvMeshSubset::interpolate +( + const DimensionedField<Type, volMesh>& df, + const fvMesh& sMesh, + const labelList& cellMap +) +{ + // Create the complete field from the pieces + tmp<DimensionedField<Type, volMesh>> tresF + ( + new DimensionedField<Type, volMesh> + ( + IOobject + ( + "subset"+df.name(), + sMesh.time().timeName(), + sMesh, + IOobject::NO_READ, + IOobject::NO_WRITE + ), + sMesh, + df.dimensions(), + Field<Type>(df, cellMap) + ) + ); + + return tresF; +} + + +template<class Type> +tmp<DimensionedField<Type, volMesh>> fvMeshSubset::interpolate +( + const DimensionedField<Type, volMesh>& df +) const +{ + return interpolate(df, subMesh(), cellMap()); +} + + // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // } // End namespace Foam diff --git a/src/dynamicMesh/meshCut/meshModifiers/multiDirRefinement/multiDirRefinement.C b/src/dynamicMesh/meshCut/meshModifiers/multiDirRefinement/multiDirRefinement.C index 7338228f8675afc0017444bb6bc6470e0a480804..6868a9335035d1594e196676b85fa0c907b81e79 100644 --- a/src/dynamicMesh/meshCut/meshModifiers/multiDirRefinement/multiDirRefinement.C +++ b/src/dynamicMesh/meshCut/meshModifiers/multiDirRefinement/multiDirRefinement.C @@ -256,7 +256,8 @@ void Foam::multiDirRefinement::refineHex8 false ), List<refinementHistory::splitCell8>(0), - labelList(0) + labelList(0), + false ) // refinement history ); diff --git a/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8.C b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8.C similarity index 99% rename from src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8.C rename to src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8.C index 18d8837ce0f661804a683b9d9e7fe069439773f0..ba244cd633244e7f11aa1ac98807a6080d4f1275 100644 --- a/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8.C +++ b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8.C @@ -793,7 +793,7 @@ Foam::label Foam::hexRef8::findLevel // Gets cell level such that the face has four points <= level. 
-Foam::label Foam::hexRef8::getAnchorLevel(const label facei) const +Foam::label Foam::hexRef8::faceLevel(const label facei) const { const face& f = mesh_.faces()[facei]; @@ -2218,7 +2218,8 @@ Foam::hexRef8::hexRef8 IOobject::AUTO_WRITE ), List<refinementHistory::splitCell8>(0), - labelList(0) + labelList(0), + false ), faceRemover_(mesh_, GREAT), // merge boundary faces wherever possible savedPointLevel_(0), @@ -3475,7 +3476,7 @@ Foam::labelListList Foam::hexRef8::setRefinement for (label facei = 0; facei < mesh_.nFaces(); facei++) { - faceAnchorLevel[facei] = getAnchorLevel(facei); + faceAnchorLevel[facei] = faceLevel(facei); } // -1 : no need to split face diff --git a/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8.H b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8.H similarity index 99% rename from src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8.H rename to src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8.H index 6b0ff0ff83d16f6cfd4d856dc7d8c5b3ea021605..1e236e61ebb60e9effa5968e9f581faf4f311d8a 100644 --- a/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8.H +++ b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8.H @@ -411,7 +411,7 @@ public: // Refinement //- Gets level such that the face has four points <= level. - label getAnchorLevel(const label facei) const; + label faceLevel(const label facei) const; //- Given valid mesh and current cell level and proposed // cells to refine calculate any clashes (due to 2:1) and return diff --git a/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8Data.C b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8Data.C new file mode 100644 index 0000000000000000000000000000000000000000..5d8605e1cb59a7ba0c221fabbcf2d1a83a90fa4c --- /dev/null +++ b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8Data.C @@ -0,0 +1,339 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
+ +\*---------------------------------------------------------------------------*/ + +#include "IOobject.H" +#include "UList.H" + +#include "hexRef8Data.H" +#include "mapPolyMesh.H" +#include "mapDistributePolyMesh.H" +#include "polyMesh.H" +#include "syncTools.H" +#include "refinementHistory.H" +#include "fvMesh.H" + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::hexRef8Data::hexRef8Data(const IOobject& io) +{ + { + IOobject rio(io); + rio.rename("cellLevel"); + bool haveFile = returnReduce(rio.headerOk(), orOp<bool>()); + if (haveFile) + { + Info<< "Reading hexRef8 data : " << rio.name() << endl; + cellLevelPtr_.reset(new labelIOList(rio)); + } + } + { + IOobject rio(io); + rio.rename("pointLevel"); + bool haveFile = returnReduce(rio.headerOk(), orOp<bool>()); + if (haveFile) + { + Info<< "Reading hexRef8 data : " << rio.name() << endl; + pointLevelPtr_.reset(new labelIOList(rio)); + } + } + { + IOobject rio(io); + rio.rename("level0Edge"); + bool haveFile = returnReduce(rio.headerOk(), orOp<bool>()); + if (haveFile) + { + Info<< "Reading hexRef8 data : " << rio.name() << endl; + level0EdgePtr_.reset(new uniformDimensionedScalarField(rio)); + } + } + { + IOobject rio(io); + rio.rename("refinementHistory"); + bool haveFile = returnReduce(rio.headerOk(), orOp<bool>()); + if (haveFile) + { + Info<< "Reading hexRef8 data : " << rio.name() << endl; + refHistoryPtr_.reset(new refinementHistory(rio)); + } + } +} + + +Foam::hexRef8Data::hexRef8Data +( + const IOobject& io, + const hexRef8Data& data, + const labelList& cellMap, + const labelList& pointMap +) +{ + if (data.cellLevelPtr_.valid()) + { + IOobject rio(io); + rio.rename(data.cellLevelPtr_().name()); + + cellLevelPtr_.reset + ( + new labelIOList + ( + rio, + UIndirectList<label>(data.cellLevelPtr_(), cellMap)() + ) + ); + } + if (data.pointLevelPtr_.valid()) + { + IOobject rio(io); + rio.rename(data.pointLevelPtr_().name()); + + pointLevelPtr_.reset + ( + new labelIOList + ( + rio, + UIndirectList<label>(data.pointLevelPtr_(), pointMap)() + ) + ); + } + if (data.level0EdgePtr_.valid()) + { + IOobject rio(io); + rio.rename(data.level0EdgePtr_().name()); + + level0EdgePtr_.reset + ( + new uniformDimensionedScalarField(rio, data.level0EdgePtr_()) + ); + } + if (data.refHistoryPtr_.valid()) + { + IOobject rio(io); + rio.rename(data.refHistoryPtr_().name()); + + refHistoryPtr_ = data.refHistoryPtr_().clone(rio, cellMap); + } +} + + +Foam::hexRef8Data::hexRef8Data +( + const IOobject& io, + const UPtrList<const labelList>& cellMaps, + const UPtrList<const labelList>& pointMaps, + const UPtrList<const hexRef8Data>& procDatas +) +{ + const polyMesh& mesh = dynamic_cast<const polyMesh&>(io.db()); + + // cellLevel + + if (procDatas[0].cellLevelPtr_.valid()) + { + IOobject rio(io); + rio.rename(procDatas[0].cellLevelPtr_().name()); + + cellLevelPtr_.reset(new labelIOList(rio, mesh.nCells())); + labelList& cellLevel = cellLevelPtr_(); + + forAll(procDatas, procI) + { + const labelList& procCellLevel = procDatas[procI].cellLevelPtr_(); + UIndirectList<label>(cellLevel, cellMaps[procI]) = procCellLevel; + } + } + + + // pointLevel + + if (procDatas[0].pointLevelPtr_.valid()) + { + IOobject rio(io); + rio.rename(procDatas[0].pointLevelPtr_().name()); + + pointLevelPtr_.reset(new labelIOList(rio, mesh.nPoints())); + labelList& pointLevel = pointLevelPtr_(); + + forAll(procDatas, procI) + { + const labelList& procPointLevel = procDatas[procI].pointLevelPtr_(); + UIndirectList<label>(pointLevel, pointMaps[procI]) 
= procPointLevel; + } + } + + + // level0Edge + + if (procDatas[0].level0EdgePtr_.valid()) + { + IOobject rio(io); + rio.rename(procDatas[0].level0EdgePtr_().name()); + + level0EdgePtr_.reset + ( + new uniformDimensionedScalarField + ( + rio, + procDatas[0].level0EdgePtr_() + ) + ); + } + + + // refinementHistory + + if (procDatas[0].refHistoryPtr_.valid()) + { + IOobject rio(io); + rio.rename(procDatas[0].refHistoryPtr_().name()); + + UPtrList<const refinementHistory> procRefs(procDatas.size()); + forAll(procDatas, i) + { + procRefs.set(i, &procDatas[i].refHistoryPtr_()); + } + + refHistoryPtr_.reset + ( + new refinementHistory + ( + rio, + cellMaps, + procRefs + ) + ); + } +} + + +// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * // + +Foam::hexRef8Data::~hexRef8Data() +{} + + +// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // + +void Foam::hexRef8Data::sync(const IOobject& io) +{ + const polyMesh& mesh = dynamic_cast<const polyMesh&>(io.db()); + + bool hasCellLevel = returnReduce(cellLevelPtr_.valid(), orOp<bool>()); + if (hasCellLevel && !cellLevelPtr_.valid()) + { + IOobject rio(io); + rio.rename("cellLevel"); + rio.readOpt() = IOobject::NO_READ; + cellLevelPtr_.reset(new labelIOList(rio, labelList(mesh.nCells(), 0))); + } + + bool hasPointLevel = returnReduce(pointLevelPtr_.valid(), orOp<bool>()); + if (hasPointLevel && !pointLevelPtr_.valid()) + { + IOobject rio(io); + rio.rename("pointLevel"); + rio.readOpt() = IOobject::NO_READ; + pointLevelPtr_.reset + ( + new labelIOList(rio, labelList(mesh.nPoints(), 0)) + ); + } + + bool hasLevel0Edge = returnReduce(level0EdgePtr_.valid(), orOp<bool>()); + if (hasLevel0Edge) + { + // Get master length + scalar masterLen = level0EdgePtr_().value(); + Pstream::scatter(masterLen); + if (!level0EdgePtr_.valid()) + { + IOobject rio(io); + rio.rename("level0Edge"); + rio.readOpt() = IOobject::NO_READ; + level0EdgePtr_.reset + ( + new uniformDimensionedScalarField + ( + rio, + dimensionedScalar("zero", dimLength, masterLen) + ) + ); + } + } + + bool hasHistory = returnReduce(refHistoryPtr_.valid(), orOp<bool>()); + if (hasHistory && !refHistoryPtr_.valid()) + { + IOobject rio(io); + rio.rename("refinementHistory"); + rio.readOpt() = IOobject::NO_READ; + refHistoryPtr_.reset(new refinementHistory(rio, mesh.nCells(), true)); + } +} + + +void Foam::hexRef8Data::distribute(const mapDistributePolyMesh& map) +{ + if (cellLevelPtr_.valid()) + { + map.cellMap().distribute(cellLevelPtr_()); + } + if (pointLevelPtr_.valid()) + { + map.pointMap().distribute(pointLevelPtr_()); + } + + // No need to distribute the level0Edge + + if (refHistoryPtr_.valid() && refHistoryPtr_().active()) + { + refHistoryPtr_().distribute(map); + } +} + + +bool Foam::hexRef8Data::write() const +{ + bool ok = true; + if (cellLevelPtr_.valid()) + { + ok = ok && cellLevelPtr_().write(); + } + if (pointLevelPtr_.valid()) + { + ok = ok && pointLevelPtr_().write(); + } + if (level0EdgePtr_.valid()) + { + ok = ok && level0EdgePtr_().write(); + } + if (refHistoryPtr_.valid()) + { + ok = ok && refHistoryPtr_().write(); + } + return ok; +} + + +// ************************************************************************* // diff --git a/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8Data.H b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8Data.H new file mode 100644 index 0000000000000000000000000000000000000000..c2a709d6defe410cf5dd16b469886a4bb8e93064 --- /dev/null +++ 
b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/hexRef8Data.H @@ -0,0 +1,136 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +Class + Foam::hexRef8Data + +Description + Various for reading/decomposing/reconstructing/distributing refinement + data. + +SourceFiles + hexRef8Data.C + +\*---------------------------------------------------------------------------*/ + +#ifndef hexRef8Data_H +#define hexRef8Data_H + +#include "labelIOList.H" +#include "uniformDimensionedFields.H" +#include "UPtrList.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +// Forward declaration of classes +class mapPolyMesh; +class mapDistributePolyMesh; +class refinementHistory; +class fvMesh; + +/*---------------------------------------------------------------------------*\ + Class hexRef8Data Declaration +\*---------------------------------------------------------------------------*/ + +class hexRef8Data +{ + +private: + + // Private data + + autoPtr<labelIOList> cellLevelPtr_; + + autoPtr<labelIOList> pointLevelPtr_; + + autoPtr<uniformDimensionedScalarField> level0EdgePtr_; + + autoPtr<refinementHistory> refHistoryPtr_; + + + // Private Member Functions + + //- Disallow default bitwise copy construct + hexRef8Data(const hexRef8Data&); + + //- Disallow default bitwise assignment + void operator=(const hexRef8Data&); + + +public: + + // Constructors + + //- Construct read. Has special provision for only some processors + // having the files so can be used in redistribution. + hexRef8Data(const IOobject& io); + + //- Construct as subset + hexRef8Data + ( + const IOobject& io, + const hexRef8Data&, + const labelList& cellMap, + const labelList& pointMap + ); + + //- Construct from multiple hexRef8Data + hexRef8Data + ( + const IOobject& io, + const UPtrList<const labelList>& cellMaps, + const UPtrList<const labelList>& pointMaps, + const UPtrList<const hexRef8Data>& + ); + + + //- Destructor + ~hexRef8Data(); + + + // Member Functions + + //- Parallel synchronise. This enforces valid objects on all processors + // (even if they don't have a mesh). Used by redistributePar. 
+ void sync(const IOobject& io); + + //- In-place distribute + void distribute(const mapDistributePolyMesh&); + + //- Write + bool write() const; +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/dynamicMesh/polyTopoChange/polyTopoChange/refinementHistory.C b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/refinementHistory.C similarity index 65% rename from src/dynamicMesh/polyTopoChange/polyTopoChange/refinementHistory.C rename to src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/refinementHistory.C index f4940f1d8df7ba1dbe08a916f0fe33deecf9f5b9..0519999c040b081f282a51466acb380e2e1b0a55 100644 --- a/src/dynamicMesh/polyTopoChange/polyTopoChange/refinementHistory.C +++ b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/refinementHistory.C @@ -23,12 +23,11 @@ License \*---------------------------------------------------------------------------*/ -#include "DynamicList.H" #include "refinementHistory.H" -#include "ListOps.H" #include "mapPolyMesh.H" #include "mapDistributePolyMesh.H" #include "polyMesh.H" +#include "syncTools.H" // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // @@ -138,7 +137,59 @@ Foam::refinementHistory::splitCell8::splitCell8(const splitCell8& sc) {} -// * * * * * * * * * * * * * * * Friend Operators * * * * * * * * * * * * * // +// * * * * * * * * * * * * * * Member Operators * * * * * * * * * * * * * * // + +void Foam::refinementHistory::splitCell8::operator=(const splitCell8& s) +{ + //- Assignment operator since autoPtr otherwise 'steals' storage. + + // Check for assignment to self + if (this == &s) + { + FatalErrorIn("splitCell8::operator=(const Foam::splitCell8&)") + << "Attempted assignment to self" + << abort(FatalError); + } + + parent_ = s.parent_; + + addedCellsPtr_.reset + ( + s.addedCellsPtr_.valid() + ? new FixedList<label, 8>(s.addedCellsPtr_()) + : NULL + ); +} + + +bool Foam::refinementHistory::splitCell8::operator==(const splitCell8& s) const +{ + if (addedCellsPtr_.valid() != s.addedCellsPtr_.valid()) + { + return false; + } + else if (parent_ != s.parent_) + { + return false; + } + else if (addedCellsPtr_.valid()) + { + return addedCellsPtr_() == s.addedCellsPtr_(); + } + else + { + return true; + } +} + + +bool Foam::refinementHistory::splitCell8::operator!=(const splitCell8& s) const +{ + return !operator==(s); +} + + +// * * * * * * * * * * * * * * Friend Operators * * * * * * * * * * * * * * // Foam::Istream& Foam::operator>>(Istream& is, refinementHistory::splitCell8& sc) { @@ -183,6 +234,8 @@ Foam::Ostream& Foam::operator<< } +// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // + void Foam::refinementHistory::checkIndices() const { // Check indices. 
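// The next hunk adds the decompositionConstraint-style helpers to
// refinementHistory. A rough calling sequence, assuming a refinementHistory
// named 'history' and a cell-to-processor labelList 'decomposition' (both
// hypothetical names):
//
//     boolList blockedFace;
//     PtrList<labelList> specifiedProcessorFaces;
//     labelList specifiedProcessor;
//     List<labelPair> explicitConnections;
//
//     // Before decomposing: unblocks faces lying inside one refinement
//     // cluster (cells sharing a common split ancestor)
//     history.add
//     (
//         blockedFace,
//         specifiedProcessorFaces,
//         specifiedProcessor,
//         explicitConnections
//     );
//
//     // ... run the chosen decompositionMethod ...
//
//     // Afterwards: moves every cluster in its entirety onto one processor
//     history.apply
//     (
//         blockedFace,
//         specifiedProcessorFaces,
//         specifiedProcessor,
//         explicitConnections,
//         decomposition
//     );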
@@ -319,11 +372,192 @@ void Foam::refinementHistory::markSplit } +// Mark index and all its descendants +void Foam::refinementHistory::mark +( + const label val, + const label index, + labelList& splitToVal +) const +{ + splitToVal[index] = val; + + const splitCell8& split = splitCells_[index]; + + if (split.addedCellsPtr_.valid()) + { + const FixedList<label, 8>& splits = split.addedCellsPtr_(); + + forAll(splits, i) + { + if (splits[i] >= 0) + { + mark(val, splits[i], splitToVal); + } + } + } +} + + +Foam::label Foam::refinementHistory::markCommonCells +( + labelList& cellToCluster +) const +{ + label clusterI = 0; + + labelList splitToCluster(splitCells_.size(), -1); + + // Pass1: find top of all clusters + forAll(visibleCells_, cellI) + { + label index = visibleCells_[cellI]; + + if (index >= 0) + { + // Find highest ancestor + while (splitCells_[index].parent_ != -1) + { + index = splitCells_[index].parent_; + } + + // Mark tree with clusterI + if (splitToCluster[index] == -1) + { + mark(clusterI, index, splitToCluster); + clusterI++; + } + } + } + + // Pass2: mark all cells with cluster + cellToCluster.setSize(visibleCells_.size(), -1); + + forAll(visibleCells_, cellI) + { + label index = visibleCells_[cellI]; + + if (index >= 0) + { + cellToCluster[cellI] = splitToCluster[index]; + } + } + + return clusterI; +} + + +void Foam::refinementHistory::add +( + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections +) const +{ + const polyMesh& mesh = dynamic_cast<const polyMesh&>(db()); + + blockedFace.setSize(mesh.nFaces(), true); + + // Find common parent for all cells + labelList cellToCluster; + markCommonCells(cellToCluster); + + + // Unblock all faces inbetween same cluster + + label nUnblocked = 0; + + forAll(mesh.faceNeighbour(), faceI) + { + label ownCluster = cellToCluster[mesh.faceOwner()[faceI]]; + label neiCluster = cellToCluster[mesh.faceNeighbour()[faceI]]; + + if (ownCluster != -1 && ownCluster == neiCluster) + { + if (blockedFace[faceI]) + { + blockedFace[faceI] = false; + nUnblocked++; + } + } + } + + if (refinementHistory::debug) + { + reduce(nUnblocked, sumOp<label>()); + Info<< type() << " : unblocked " << nUnblocked << " faces" << endl; + } + + syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>()); +} + + +void Foam::refinementHistory::apply +( + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition +) const +{ + const polyMesh& mesh = dynamic_cast<const polyMesh&>(db()); + + // Find common parent for all cells + labelList cellToCluster; + label nClusters = markCommonCells(cellToCluster); + + // Unblock all faces inbetween same cluster + + + labelList clusterToProc(nClusters, -1); + + label nChanged = 0; + + forAll(mesh.faceNeighbour(), faceI) + { + label own = mesh.faceOwner()[faceI]; + label nei = mesh.faceNeighbour()[faceI]; + + label ownCluster = cellToCluster[own]; + label neiCluster = cellToCluster[nei]; + + if (ownCluster != -1 && ownCluster == neiCluster) + { + if (clusterToProc[ownCluster] == -1) + { + clusterToProc[ownCluster] = decomposition[own]; + } + + if (decomposition[own] != clusterToProc[ownCluster]) + { + decomposition[own] = clusterToProc[ownCluster]; + nChanged++; + } + if (decomposition[nei] != clusterToProc[ownCluster]) + { + decomposition[nei] = clusterToProc[ownCluster]; + nChanged++; + } + } + } + + if 
(refinementHistory::debug) + { + reduce(nChanged, sumOp<label>()); + Info<< type() << " : changed decomposition on " << nChanged + << " cells" << endl; + } +} + + // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // Foam::refinementHistory::refinementHistory(const IOobject& io) : - regIOobject(io) + regIOobject(io), + active_(false) { // Temporary warning if (io.readOpt() == IOobject::MUST_READ_IF_MODIFIED) @@ -345,12 +579,18 @@ Foam::refinementHistory::refinementHistory(const IOobject& io) close(); } + // When running in redistributPar + READ_IF_PRESENT it can happen + // that some processors do have refinementHistory and some don't so + // test for active has to be outside of above condition. + active_ = (returnReduce(visibleCells_.size(), sumOp<label>()) > 0); + if (debug) { Pout<< "refinementHistory::refinementHistory :" << " constructed history from IOobject :" << " splitCells:" << splitCells_.size() << " visibleCells:" << visibleCells_.size() + << " active:" << active_ << endl; } } @@ -360,10 +600,12 @@ Foam::refinementHistory::refinementHistory ( const IOobject& io, const List<splitCell8>& splitCells, - const labelList& visibleCells + const labelList& visibleCells, + const bool active ) : regIOobject(io), + active_(active), splitCells_(splitCells), freeSplitCells_(0), visibleCells_(visibleCells) @@ -397,12 +639,12 @@ Foam::refinementHistory::refinementHistory << " constructed history from IOobject or components :" << " splitCells:" << splitCells_.size() << " visibleCells:" << visibleCells_.size() + << " active:" << active_ << endl; } } -// Construct from initial number of cells (all visible) Foam::refinementHistory::refinementHistory ( const IOobject& io, @@ -410,6 +652,7 @@ Foam::refinementHistory::refinementHistory ) : regIOobject(io), + active_(false), freeSplitCells_(0) { // Temporary warning @@ -421,6 +664,67 @@ Foam::refinementHistory::refinementHistory << endl; } + if + ( + io.readOpt() == IOobject::MUST_READ + || io.readOpt() == IOobject::MUST_READ_IF_MODIFIED + || (io.readOpt() == IOobject::READ_IF_PRESENT && headerOk()) + ) + { + readStream(typeName) >> *this; + close(); + } + else + { + visibleCells_.setSize(nCells); + splitCells_.setCapacity(nCells); + + for (label cellI = 0; cellI < nCells; cellI++) + { + visibleCells_[cellI] = cellI; + splitCells_.append(splitCell8()); + } + } + + active_ = (returnReduce(visibleCells_.size(), sumOp<label>()) > 0); + + + // Check indices. + checkIndices(); + + if (debug) + { + Pout<< "refinementHistory::refinementHistory :" + << " constructed history from IOobject or initial size :" + << " splitCells:" << splitCells_.size() + << " visibleCells:" << visibleCells_.size() + << " active:" << active_ + << endl; + } +} + + +// Construct from initial number of cells (all visible) +Foam::refinementHistory::refinementHistory +( + const IOobject& io, + const label nCells, + const bool active +) +: + regIOobject(io), + active_(active), + freeSplitCells_(0) +{ + // Warn for MUST_READ_IF_MODIFIED + if (io.readOpt() == IOobject::MUST_READ_IF_MODIFIED) + { + WarningInFunction + << "Specified IOobject::MUST_READ_IF_MODIFIED but class" + << " does not support automatic rereading." 
+ << endl; + } + if ( io.readOpt() == IOobject::MUST_READ @@ -452,6 +756,7 @@ Foam::refinementHistory::refinementHistory << " constructed history from IOobject or initial size :" << " splitCells:" << splitCells_.size() << " visibleCells:" << visibleCells_.size() + << " active:" << active_ << endl; } } @@ -465,6 +770,7 @@ Foam::refinementHistory::refinementHistory ) : regIOobject(io), + active_(rh.active_), splitCells_(rh.splitCells()), freeSplitCells_(rh.freeSplitCells()), visibleCells_(rh.visibleCells()) @@ -477,6 +783,126 @@ Foam::refinementHistory::refinementHistory } +// Construct from multiple +Foam::refinementHistory::refinementHistory +( + const IOobject& io, + const UPtrList<const labelList>& cellMaps, + const UPtrList<const refinementHistory>& refs +) +: + regIOobject(io), + active_(false) +{ + if + ( + io.readOpt() == IOobject::MUST_READ + || io.readOpt() == IOobject::MUST_READ_IF_MODIFIED + || (io.readOpt() == IOobject::READ_IF_PRESENT && headerOk()) + ) + { + WarningIn + ( + "refinementHistory::refinementHistory(const IOobject&" + ", const labelListList&, const PtrList<refinementHistory>&)" + ) << "read option IOobject::MUST_READ, READ_IF_PRESENT or " + << "MUST_READ_IF_MODIFIED" + << " suggests that a read constructor would be more appropriate." + << endl; + } + + const polyMesh& mesh = dynamic_cast<const polyMesh&>(db()); + + + // Determine offsets into splitCells + labelList offsets(refs.size()+1); + offsets[0] = 0; + forAll(refs, refI) + { + const DynamicList<splitCell8>& subSplits = refs[refI].splitCells(); + offsets[refI+1] = offsets[refI]+subSplits.size(); + } + + // Construct merged splitCells + splitCells_.setSize(offsets.last()); + forAll(refs, refI) + { + const DynamicList<splitCell8>& subSplits = refs[refI].splitCells(); + forAll(subSplits, i) + { + splitCell8& newSplit = splitCells_[offsets[refI]+i]; + + // Copy + newSplit = subSplits[i]; + + // Offset indices + if (newSplit.parent_ >= 0) + { + newSplit.parent_ += offsets[refI]; + } + + if (newSplit.addedCellsPtr_.valid()) + { + FixedList<label, 8>& splits = newSplit.addedCellsPtr_(); + + forAll(splits, i) + { + if (splits[i] >= 0) + { + splits[i] += offsets[refI]; + } + } + } + } + } + + + // Construct merged visibleCells + visibleCells_.setSize(mesh.nCells(), -1); + forAll(refs, refI) + { + const labelList& cellMap = cellMaps[refI]; + const labelList& subVis = refs[refI].visibleCells(); + + forAll(subVis, i) + { + label& newVis = visibleCells_[cellMap[i]]; + + newVis = subVis[i]; + if (newVis >= 0) + { + newVis += offsets[refI]; + } + } + } + + + // Is active if any of the refinementHistories is active (assumes active + // flag parallel synchronised) + active_ = false; + forAll(refs, refI) + { + if (refs[refI].active()) + { + active_ = true; + break; + } + } + + // Check indices. + checkIndices(); + + if (debug) + { + Pout<< "refinementHistory::refinementHistory :" + << " constructed history from multiple refinementHistories :" + << " splitCells:" << splitCells_.size() + << " visibleCells:" << visibleCells_.size() + << endl; + } +} + + // Construct from Istream Foam::refinementHistory::refinementHistory(const IOobject& io, Istream& is) : @@ -485,6 +911,8 @@ Foam::refinementHistory::refinementHistory(const IOobject& io, Istream& is) freeSplitCells_(0), visibleCells_(is) { + active_ = (returnReduce(visibleCells_.size(), sumOp<label>()) > 0); + // Check indices. 
checkIndices(); @@ -501,6 +929,192 @@ Foam::refinementHistory::refinementHistory(const IOobject& io, Istream& is) // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // +Foam::autoPtr<Foam::refinementHistory> Foam::refinementHistory::clone +( + const IOobject& io, + // Per visible cell the processor it is going to + const labelList& decomposition, + // Per splitCell entry the processor it moves to + const labelList& splitCellProc, + // Per splitCell entry the number of live cells that move to that processor + const labelList& splitCellNum, + + const label procI, + + // From old to new splitCells + labelList& oldToNewSplit +) const +{ + oldToNewSplit.setSize(splitCells_.size()); + oldToNewSplit = -1; + + // Compacted splitCells + DynamicList<splitCell8> newSplitCells(splitCells_.size()); + + // Loop over all entries. Note: could recurse like countProc so only + // visit used entries but is probably not worth it. + + forAll(splitCells_, index) + { + if (splitCellProc[index] == procI && splitCellNum[index] == 8) + { + // Entry moves in its whole to procI + oldToNewSplit[index] = newSplitCells.size(); + newSplitCells.append(splitCells_[index]); + } + } + + // Add live cells that are subsetted. + forAll(visibleCells_, cellI) + { + label index = visibleCells_[cellI]; + + if (index >= 0 && decomposition[cellI] == procI) + { + label parent = splitCells_[index].parent_; + + // Create new splitCell with parent + oldToNewSplit[index] = newSplitCells.size(); + newSplitCells.append(splitCell8(parent)); + } + } + + //forAll(oldToNewSplit, index) + //{ + // Pout<< "old:" << index << " new:" << oldToNewSplit[index] + // << endl; + //} + + newSplitCells.shrink(); + + // Renumber contents of newSplitCells + forAll(newSplitCells, index) + { + splitCell8& split = newSplitCells[index]; + + if (split.parent_ >= 0) + { + split.parent_ = oldToNewSplit[split.parent_]; + } + if (split.addedCellsPtr_.valid()) + { + FixedList<label, 8>& splits = split.addedCellsPtr_(); + + forAll(splits, i) + { + if (splits[i] >= 0) + { + splits[i] = oldToNewSplit[splits[i]]; + } + } + } + } + + + // Count number of cells + label nSub = 0; + forAll(decomposition, cellI) + { + if (decomposition[cellI] == procI) + { + nSub++; + } + } + + labelList newVisibleCells(nSub); + nSub = 0; + + forAll(visibleCells_, cellI) + { + if (decomposition[cellI] == procI) + { + label index = visibleCells_[cellI]; + if (index >= 0) + { + index = oldToNewSplit[index]; + } + newVisibleCells[nSub++] = index; + } + } + + return autoPtr<refinementHistory> + ( + new refinementHistory + ( + io, + newSplitCells, + newVisibleCells, + active_ + ) + ); +} + + +Foam::autoPtr<Foam::refinementHistory> Foam::refinementHistory::clone +( + const IOobject& io, + const labelList& cellMap +) const +{ + if (active_) + { + // Mark selected cells with '1' + labelList decomposition(visibleCells_.size(), 0); + forAll(cellMap, i) + { + decomposition[cellMap[i]] = 1; + } + + + // Per splitCell entry the processor it moves to + labelList splitCellProc(splitCells_.size(), -1); + // Per splitCell entry the number of live cells that move to that + // processor + labelList splitCellNum(splitCells_.size(), 0); + + forAll(visibleCells_, cellI) + { + label index = visibleCells_[cellI]; + + if (index >= 0) + { + countProc + ( + splitCells_[index].parent_, + decomposition[cellI], + splitCellProc, + splitCellNum + ); + } + } + + labelList oldToNewSplit; + return clone + ( + io, + decomposition, + splitCellProc, + splitCellNum, + 1, //procI, + oldToNewSplit + ); + } + 
else + { + return autoPtr<refinementHistory> + ( + new refinementHistory + ( + io, + DynamicList<splitCell8>(0), + labelList(0), + false + ) + ); + } +} + + void Foam::refinementHistory::resize(const label size) { label oldSize = visibleCells_.size(); @@ -698,9 +1312,6 @@ void Foam::refinementHistory::distribute(const mapDistributePolyMesh& map) } } -//Pout<< "refinementHistory::distribute :" -// << " destination:" << destination << endl; - // Per splitCell entry the processor it moves to labelList splitCellProc(splitCells_.size(), -1); // Per splitCell entry the number of live cells that move to that processor @@ -746,21 +1357,11 @@ void Foam::refinementHistory::distribute(const mapDistributePolyMesh& map) forAll(splitCells_, index) { -// Pout<< "oldCell:" << index -// << " proc:" << splitCellProc[index] -// << " nCells:" << splitCellNum[index] -// << endl; - if (splitCellProc[index] == proci && splitCellNum[index] == 8) { // Entry moves in its whole to proci oldToNew[index] = newSplitCells.size(); newSplitCells.append(splitCells_[index]); - - //Pout<< "Added oldCell " << index - // << " info " << newSplitCells.last() - // << " at position " << newSplitCells.size()-1 - // << endl; } } @@ -773,10 +1374,6 @@ void Foam::refinementHistory::distribute(const mapDistributePolyMesh& map) { label parent = splitCells_[index].parent_; - //Pout<< "Adding refined cell " << celli - // << " since moves to " - // << proci << " old parent:" << parent << endl; - // Create new splitCell with parent oldToNew[index] = newSplitCells.size(); newSplitCells.append(splitCell8(parent)); @@ -849,7 +1446,9 @@ void Foam::refinementHistory::distribute(const mapDistributePolyMesh& map) // Remove all entries. Leave storage intact. splitCells_.clear(); - visibleCells_.setSize(map.mesh().nCells()); + const polyMesh& mesh = dynamic_cast<const polyMesh&>(db()); + + visibleCells_.setSize(mesh.nCells()); visibleCells_ = -1; for (label proci = 0; proci < Pstream::nProcs(); proci++) @@ -1138,6 +1737,17 @@ void Foam::refinementHistory::combineCells } +bool Foam::refinementHistory::read() +{ + bool ok = readData(readStream(typeName)); + close(); + + active_ = (returnReduce(visibleCells_.size(), sumOp<label>()) > 0); + + return ok; +} + + bool Foam::refinementHistory::readData(Istream& is) { is >> *this; diff --git a/src/dynamicMesh/polyTopoChange/polyTopoChange/refinementHistory.H b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/refinementHistory.H similarity index 71% rename from src/dynamicMesh/polyTopoChange/polyTopoChange/refinementHistory.H rename to src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/refinementHistory.H index a2947e4530b307149580b9130f3783e0dbf3ff6f..88665a56e39e0ed40fcad9610099e2d10d808b34 100644 --- a/src/dynamicMesh/polyTopoChange/polyTopoChange/refinementHistory.H +++ b/src/dynamicMesh/polyTopoChange/polyTopoChange/hexRef8/refinementHistory.H @@ -28,7 +28,7 @@ Description All refinement history. Used in unrefinement. - visibleCells: valid for the current mesh and contains per cell -1 - (cell unrefined) or an index into splitCells_. + (cell unrefined) or an index into splitCells_. - splitCells: for every split contains the parent (also index into splitCells) and optionally a subsplit as 8 indices into splitCells. 
Note that the numbers in splitCells are not cell labels, they are purely @@ -74,9 +74,10 @@ SourceFiles #include "DynamicList.H" #include "labelList.H" #include "FixedList.H" -#include "SLList.H" #include "autoPtr.H" #include "regIOobject.H" +#include "boolList.H" +#include "labelPair.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // @@ -122,50 +123,11 @@ public: splitCell8(const splitCell8&); //- Copy operator since autoPtr otherwise 'steals' storage. - void operator=(const splitCell8& s) - { - // Check for assignment to self - if (this == &s) - { - FatalErrorInFunction - << "Attempted assignment to self" - << abort(FatalError); - } + void operator=(const splitCell8& s); - parent_ = s.parent_; + bool operator==(const splitCell8& s) const; - addedCellsPtr_.reset - ( - s.addedCellsPtr_.valid() - ? new FixedList<label, 8>(s.addedCellsPtr_()) - : NULL - ); - } - - bool operator==(const splitCell8& s) const - { - if (addedCellsPtr_.valid() != s.addedCellsPtr_.valid()) - { - return false; - } - else if (parent_ != s.parent_) - { - return false; - } - else if (addedCellsPtr_.valid()) - { - return addedCellsPtr_() == s.addedCellsPtr_(); - } - else - { - return true; - } - } - - bool operator!=(const splitCell8& s) const - { - return !operator==(s); - } + bool operator!=(const splitCell8& s) const; friend Istream& operator>>(Istream&, splitCell8&); friend Ostream& operator<<(Ostream&, const splitCell8&); @@ -176,6 +138,9 @@ private: // Private data + //- Is active? + bool active_; + //- Storage for splitCells DynamicList<splitCell8> splitCells_; @@ -226,6 +191,15 @@ private: labelList& splitCellNum ) const; + // For distribution: + + //- Mark index and all its descendants + void mark(const label, const label, labelList&) const; + + //- Mark cells according to top parent. Return number of clusters + // (set of cells originating from same parent) + label markCommonCells(labelList& cellToCluster) const; + public: // Declare name of the class and its debug switch @@ -234,25 +208,47 @@ public: // Constructors - //- Construct (read) given an IOobject + //- Construct (read) given an IOobject. If global number of visible + // cells > 0 becomes active refinementHistory(const IOobject&); - //- Construct (read) or construct null + //- Construct (read) or construct from components refinementHistory ( const IOobject&, const List<splitCell8>& splitCells, - const labelList& visibleCells + const labelList& visibleCells, + const bool active ); //- Construct (read) or construct from initial number of cells - // (all visible) + // (all visible). If global number of visible + // cells > 0 becomes active refinementHistory(const IOobject&, const label nCells); + //- Construct (read) or construct from initial number of cells + // (all visible) and active flag + refinementHistory + ( + const IOobject&, + const label nCells, + const bool active + ); + //- Construct as copy refinementHistory(const IOobject&, const refinementHistory&); - //- Construct from Istream + //- Construct from multiple refinement histories. If global number of + // visible cells > 0 becomes active + refinementHistory + ( + const IOobject&, + const UPtrList<const labelList>& cellMaps, + const UPtrList<const refinementHistory>& + ); + + //- Construct from Istream. If global number of + // visible cells > 0 becomes active refinementHistory(const IOobject&, Istream&); @@ -278,12 +274,16 @@ public: return freeSplitCells_; } - //- Is there unrefinement history. 
Note that this will fall over if - // there are 0 cells in the mesh. But this gives problems with - // lots of other programs anyway. + //- Is there unrefinement history? bool active() const { - return visibleCells_.size() > 0; + return active_; + } + + //- Is there unrefinement history? + bool& active() + { + return active_; } //- Get parent of cell @@ -314,6 +314,23 @@ public: const labelList& combinedCells ); + //- Low level clone + autoPtr<refinementHistory> clone + ( + const IOobject& io, + const labelList& decomposition, + const labelList& splitCellProc, + const labelList& splitCellNum, + const label procI, + labelList& oldToNewSplit + ) const; + + //- Create clone from subset + autoPtr<refinementHistory> clone + ( + const IOobject& io, + const labelList& cellMap + ) const; //- Update numbering for mesh changes void updateMesh(const mapPolyMesh&); @@ -343,20 +360,44 @@ public: void writeDebug() const; - //- ReadData function required for regIOobject read operation + //- Read object. If global number of visible cells > 0 becomes active + virtual bool read(); + + //- ReadData function required for regIOobject read operation. Note: + // does not do a reduction - does not set active_ flag virtual bool readData(Istream&); //- WriteData function required for regIOobject write operation virtual bool writeData(Ostream&) const; + // Helpers for decompositionConstraint - // Friend Functions + //- Add my decomposition constraints + void add + ( + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections + ) const; + + //- Apply any additional post-decomposition constraints + void apply + ( + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition + ) const; - // Friend Operators // IOstream Operators + //- Istream operator. 
Note: does not do a reduction - does not set + // active_ flag friend Istream& operator>>(Istream&, refinementHistory&); + friend Ostream& operator<<(Ostream&, const refinementHistory&); }; diff --git a/src/finiteVolume/Make/files b/src/finiteVolume/Make/files index 140b92dc89742c5691368060caa6d470dce8e535..7c64e76cff84e5050cfa2311cc044a181952d078 100644 --- a/src/finiteVolume/Make/files +++ b/src/finiteVolume/Make/files @@ -2,7 +2,6 @@ fvMesh/fvMeshGeometry.C fvMesh/fvMesh.C fvMesh/singleCellFvMesh/singleCellFvMesh.C -fvMesh/fvMeshSubset/fvMeshSubset.C fvBoundaryMesh = fvMesh/fvBoundaryMesh $(fvBoundaryMesh)/fvBoundaryMesh.C diff --git a/src/finiteVolume/fields/fvPatchFields/derived/fixedFluxPressure/fixedFluxPressureFvPatchScalarField.C b/src/finiteVolume/fields/fvPatchFields/derived/fixedFluxPressure/fixedFluxPressureFvPatchScalarField.C index e3a9409b25eeee757a98fff270dd7c3d2fa18b53..c834abd99a9fc7b39a244db29e3364ef6be22994 100644 --- a/src/finiteVolume/fields/fvPatchFields/derived/fixedFluxPressure/fixedFluxPressureFvPatchScalarField.C +++ b/src/finiteVolume/fields/fvPatchFields/derived/fixedFluxPressure/fixedFluxPressureFvPatchScalarField.C @@ -2,7 +2,7 @@ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | - \\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation + \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License @@ -96,6 +96,12 @@ Foam::fixedFluxPressureFvPatchScalarField::fixedFluxPressureFvPatchScalarField patchInternalField() + gradient()*(patch().nf() & patch().delta()) ); } + else + { + // Enforce mapping of values so we have a valid starting value. This + // constructor is used when reconstructing fields + this->map(ptf, mapper); + } } diff --git a/src/mesh/snappyHexMesh/meshRefinement/meshRefinement.C b/src/mesh/snappyHexMesh/meshRefinement/meshRefinement.C index 40edd95c45462bb68e4adc5de2e8b12f79d0d10c..30e95b65f26cd04f02884ba7a0b2c5c03b9cc024 100644 --- a/src/mesh/snappyHexMesh/meshRefinement/meshRefinement.C +++ b/src/mesh/snappyHexMesh/meshRefinement/meshRefinement.C @@ -175,7 +175,7 @@ void Foam::meshRefinement::calcNeighbourData label own = faceCells[i]; label ownLevel = cellLevel[own]; - label faceLevel = meshCutter_.getAnchorLevel(pp.start()+i); + label faceLevel = meshCutter_.faceLevel(pp.start()+i); // Normal distance from face centre to cell centre scalar d = ((faceCentres[i] - cellCentres[own]) & fn); diff --git a/src/meshTools/AMIInterpolation/AMIInterpolation/AMIInterpolation.C b/src/meshTools/AMIInterpolation/AMIInterpolation/AMIInterpolation.C index 4754869fbc9285c8accb8a09aff1aa2820913a15..0ebee5390d23215405ab8fc9a96048b08cc686e1 100644 --- a/src/meshTools/AMIInterpolation/AMIInterpolation/AMIInterpolation.C +++ b/src/meshTools/AMIInterpolation/AMIInterpolation/AMIInterpolation.C @@ -27,6 +27,7 @@ License #include "AMIMethod.H" #include "meshTools.H" #include "mapDistribute.H" +#include "flipOp.H" // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // @@ -933,7 +934,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::update tgtMagSf_, triMode_, reverseTarget_, - requireMatch_ + requireMatch_ && (lowWeightCorrection_ < 0) ) ); @@ -978,27 +979,33 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::update // send data back to originating procs. 
Note that contributions // from different processors get added (ListAppendEqOp) - mapDistribute::distribute + mapDistributeBase::distribute ( Pstream::nonBlocking, List<labelPair>(), tgtPatch.size(), map.constructMap(), + false, // has flip map.subMap(), + false, // has flip tgtAddress_, ListAppendEqOp<label>(), + flipOp(), // flip operation labelList() ); - mapDistribute::distribute + mapDistributeBase::distribute ( Pstream::nonBlocking, List<labelPair>(), tgtPatch.size(), map.constructMap(), + false, map.subMap(), + false, tgtWeights_, ListAppendEqOp<scalar>(), + flipOp(), scalarList() ); @@ -1050,7 +1057,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::update tgtMagSf_, triMode_, reverseTarget_, - requireMatch_ + requireMatch_ && (lowWeightCorrection_ < 0) ) ); diff --git a/src/meshTools/AMIInterpolation/AMIInterpolation/AMIInterpolationParallelOps.C b/src/meshTools/AMIInterpolation/AMIInterpolation/AMIInterpolationParallelOps.C index 4d9ddde95410af64e3c8472fee36673fd954d1dd..0708b334bbda0eb10a7d4c2ca3260e4ac7cc63a8 100644 --- a/src/meshTools/AMIInterpolation/AMIInterpolation/AMIInterpolationParallelOps.C +++ b/src/meshTools/AMIInterpolation/AMIInterpolation/AMIInterpolationParallelOps.C @@ -97,7 +97,7 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::calcOverlappingProcs forAll(procBb, proci) { - const List<treeBoundBox>& bbs = procBb[proci]; + const treeBoundBoxList& bbs = procBb[proci]; forAll(bbs, bbI) { diff --git a/src/meshTools/AMIInterpolation/AMIInterpolation/AMIMethod/AMIMethod/AMIMethod.C b/src/meshTools/AMIInterpolation/AMIInterpolation/AMIMethod/AMIMethod/AMIMethod.C index 5ca35f44fc2a6818a7506cf0b20e6ec31095db44..8341460fcc76ecd05ffd1e312ce9944de01ea286 100644 --- a/src/meshTools/AMIInterpolation/AMIInterpolation/AMIMethod/AMIMethod/AMIMethod.C +++ b/src/meshTools/AMIInterpolation/AMIInterpolation/AMIMethod/AMIMethod/AMIMethod.C @@ -200,7 +200,7 @@ void Foam::AMIMethod<SourcePatch, TargetPatch>::resetTree() // Clear the old octree treePtr_.clear(); - treeBoundBox bb(tgtPatch_.points()); + treeBoundBox bb(tgtPatch_.points(), tgtPatch_.meshPoints()); bb.inflate(0.01); if (!treePtr_.valid()) diff --git a/src/meshTools/AMIInterpolation/AMIInterpolation/AMIMethod/directAMI/directAMI.C b/src/meshTools/AMIInterpolation/AMIInterpolation/AMIMethod/directAMI/directAMI.C index f1728daebab53e3f88efb8993b1a2799cdf66180..7048221fd07c9fd790a7ca07bb1f6ef79da54f96 100644 --- a/src/meshTools/AMIInterpolation/AMIInterpolation/AMIMethod/directAMI/directAMI.C +++ b/src/meshTools/AMIInterpolation/AMIInterpolation/AMIMethod/directAMI/directAMI.C @@ -310,16 +310,14 @@ void Foam::directAMI<SourcePatch, TargetPatch>::calculate forAll(srcAddr, i) { scalar magSf = this->srcMagSf_[i]; -// srcWeights[i] = scalarList(srcAddr[i].size(), magSf); - srcWeights[i] = scalarList(1, magSf); srcAddress[i].transfer(srcAddr[i]); + srcWeights[i] = scalarList(1, magSf); } forAll(tgtAddr, i) { scalar magSf = this->tgtMagSf_[i]; -// tgtWeights[i] = scalarList(tgtAddr[i].size(), magSf); - tgtWeights[i] = scalarList(1, magSf); tgtAddress[i].transfer(tgtAddr[i]); + tgtWeights[i] = scalarList(1, magSf); } } diff --git a/src/meshTools/mappedPatches/mappedPolyPatch/mappedPatchBaseTemplates.C b/src/meshTools/mappedPatches/mappedPolyPatch/mappedPatchBaseTemplates.C index bfd9152ceb82116f61e3642172173d477419e965..4801476320e3cf88e78987e36153face9e99bd08 100644 --- a/src/meshTools/mappedPatches/mappedPolyPatch/mappedPatchBaseTemplates.C +++ b/src/meshTools/mappedPatches/mappedPolyPatch/mappedPatchBaseTemplates.C 
@@ -61,15 +61,18 @@ void Foam::mappedPatchBase::distribute } default: { - map().distribute + mapDistributeBase::distribute ( Pstream::defaultCommsType, map().schedule(), map().constructSize(), map().subMap(), + false, map().constructMap(), + false, lst, cop, + flipOp(), Type(Zero) ); } @@ -117,15 +120,18 @@ void Foam::mappedPatchBase::reverseDistribute default: { label cSize = sampleSize(); - map().distribute + mapDistributeBase::distribute ( Pstream::defaultCommsType, map().schedule(), cSize, map().constructMap(), + false, map().subMap(), + false, lst, cop, + flipOp(), Type(Zero) ); break; diff --git a/src/parallel/decompose/decompose/Make/files b/src/parallel/decompose/decompose/Make/files index 2d0f959eaed2fa913a61f4cdbaf873f4d465ece5..5fabd38ccef57caf31d6d495eb45184fa8bb9fa9 100644 --- a/src/parallel/decompose/decompose/Make/files +++ b/src/parallel/decompose/decompose/Make/files @@ -1,3 +1,4 @@ +decompositionModel.C fvFieldDecomposer.C LIB = $(FOAM_LIBBIN)/libdecompose diff --git a/src/parallel/decompose/decompose/Make/options b/src/parallel/decompose/decompose/Make/options index 7a728f9dd7cf75800c9b44943a2964c781376576..cf471bcee58641da9b8c1e78fa83f55598b88906 100644 --- a/src/parallel/decompose/decompose/Make/options +++ b/src/parallel/decompose/decompose/Make/options @@ -1,9 +1,11 @@ EXE_INC = \ -I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \ + -I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \ -I$(LIB_SRC)/lagrangian/basic/lnInclude LIB_LIBS = \ -lfiniteVolume \ -lmeshTools \ + -ldecompositionMethods \ -llagrangian diff --git a/src/parallel/decompose/decompose/decompositionModel.C b/src/parallel/decompose/decompose/decompositionModel.C new file mode 100644 index 0000000000000000000000000000000000000000..eb84639cf511a5e540104953b3216c4134f6cf93 --- /dev/null +++ b/src/parallel/decompose/decompose/decompositionModel.C @@ -0,0 +1,164 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2014-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
+ +\*---------------------------------------------------------------------------*/ + +#include "decompositionModel.H" +#include "polyMesh.H" +#include "Time.H" + +// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // + +namespace Foam +{ + defineTypeNameAndDebug(decompositionModel, 0); +} + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::decompositionModel::decompositionModel +( + const polyMesh& mesh, + const fileName& decompDictFile +) +: + MeshObject + < + polyMesh, + Foam::UpdateableMeshObject, + decompositionModel + >(mesh), + IOdictionary + ( + selectIO + ( + IOobject + ( + "decomposeParDict", + mesh.time().system(), + mesh.local(), + mesh.db(), + IOobject::MUST_READ, + IOobject::NO_WRITE, + false //io.registerObject() + ), + decompDictFile + ) + ) +{} + + +Foam::decompositionModel::decompositionModel +( + const polyMesh& mesh, + const dictionary& dict, + const fileName& decompDictFile +) +: + MeshObject + < + polyMesh, + Foam::UpdateableMeshObject, + decompositionModel + >(mesh), + IOdictionary + ( + selectIO + ( + IOobject + ( + "decomposeParDict", + mesh.time().system(), + mesh.local(), + mesh.db(), + (dict.size() ? IOobject::NO_READ : IOobject::MUST_READ), + IOobject::NO_WRITE, + false //io.registerObject() + ), + decompDictFile + ), + dict + ) +{} + + +// * * * * * * * * * * * * * * * * * Selectors * * * * * * * * * * * * * * * // + +const Foam::decompositionModel& Foam::decompositionModel::New +( + const polyMesh& mesh, + const fileName& decompDictFile +) +{ + return + MeshObject + < + polyMesh, + Foam::UpdateableMeshObject, + decompositionModel + >::New(mesh, decompDictFile); +} + + +const Foam::decompositionModel& Foam::decompositionModel::New +( + const polyMesh& mesh, + const dictionary& dict, + const fileName& decompDictFile +) +{ + return + MeshObject + < + polyMesh, + Foam::UpdateableMeshObject, + decompositionModel + >::New(mesh, dict, decompDictFile); +} + + +// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // + +Foam::IOobject Foam::decompositionModel::selectIO +( + const IOobject& io, + const fileName& f +) +{ + return + ( + f.size() + ? IOobject // construct from filePath instead + ( + f, + io.db(), + io.readOpt(), + io.writeOpt(), + io.registerObject() + ) + : io + ); +} + + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompose/decompositionModel.H b/src/parallel/decompose/decompose/decompositionModel.H new file mode 100644 index 0000000000000000000000000000000000000000..7ed6bd3fa2b89f69168fc240a5e6681d92bbdb47 --- /dev/null +++ b/src/parallel/decompose/decompose/decompositionModel.H @@ -0,0 +1,145 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2014-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +Class + Foam::decompositionModel + +Description + MeshObject wrapper of decompositionMethod + +SourceFiles + +\*---------------------------------------------------------------------------*/ + +#ifndef decompositionModel_H +#define decompositionModel_H + +#include "IOdictionary.H" +#include "MeshObject.H" +#include "decompositionMethod.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +// Forward declaration of classes +class mapPolyMesh; +class polyMesh; + +/*---------------------------------------------------------------------------*\ + Class decompositionModel Declaration +\*---------------------------------------------------------------------------*/ + +class decompositionModel +: + public MeshObject + < + polyMesh, + UpdateableMeshObject, + decompositionModel + >, + public IOdictionary +{ + + // Private data + + mutable autoPtr<decompositionMethod> decomposerPtr_; + + +public: + + // Declare name of the class and its debug switch + ClassName("decompositionModel"); + + + // Selectors + + //- Read (optionallly from absolute path) & register on mesh + static const decompositionModel& New + ( + const polyMesh& mesh, + const fileName& decompDictFile = "" + ); + + //- Read (optionallly from supplied dictionary) & register on mesh + static const decompositionModel& New + ( + const polyMesh& mesh, + const dictionary& dict, + const fileName& decompDictFile = "" + ); + + + // Constructors + + //- Construct from typeName or optional path to controlDictionary + decompositionModel(const polyMesh&, const fileName& = ""); + + + //- Construct from typeName or optional path to controlDictionary + decompositionModel + ( + const polyMesh&, + const dictionary& dict, + const fileName& = "" + ); + + + // Member functions + + decompositionMethod& decomposer() const + { + if (!decomposerPtr_.valid()) + { + decomposerPtr_ = decompositionMethod::New(*this); + } + return decomposerPtr_(); + } + + //- Helper: return IOobject with optionally absolute path provided + static IOobject selectIO(const IOobject&, const fileName&); + + + // UpdateableMeshObject + + virtual bool movePoints() + { + return false; + } + + virtual void updateMesh(const mapPolyMesh&) + {} + +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/Make/files b/src/parallel/decompose/decompositionMethods/Make/files index c2837d2eb320880626cf8fa1cca120dfa718e39a..2aef948ff7a22d15cb43a6f84f1437e200f2edd3 100644 --- a/src/parallel/decompose/decompositionMethods/Make/files +++ b/src/parallel/decompose/decompositionMethods/Make/files @@ -7,4 +7,15 @@ multiLevelDecomp/multiLevelDecomp.C structuredDecomp/structuredDecomp.C noDecomp/noDecomp.C + +decompositionConstraints = decompositionConstraints + +$(decompositionConstraints)/decompositionConstraint/decompositionConstraint.C +$(decompositionConstraints)/preserveBaffles/preserveBafflesConstraint.C +$(decompositionConstraints)/preserveFaceZones/preserveFaceZonesConstraint.C +$(decompositionConstraints)/preservePatches/preservePatchesConstraint.C 
+$(decompositionConstraints)/singleProcessorFaceSets/singleProcessorFaceSetsConstraint.C +$(decompositionConstraints)/refinementHistory/refinementHistoryConstraint.C + + LIB = $(FOAM_LIBBIN)/libdecompositionMethods diff --git a/src/parallel/decompose/decompositionMethods/Make/options b/src/parallel/decompose/decompositionMethods/Make/options index f3070a731e46532261f92d0a3e815cf03a1ff2bc..45133caabbc96216595fb48cf8889878c1a620c4 100644 --- a/src/parallel/decompose/decompositionMethods/Make/options +++ b/src/parallel/decompose/decompositionMethods/Make/options @@ -1,7 +1,9 @@ EXE_INC = \ -I$(LIB_SRC)/meshTools/lnInclude \ + -I$(LIB_SRC)/dynamicMesh/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude LIB_LIBS = \ -lmeshTools \ + -ldynamicMesh \ -lfiniteVolume diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/decompositionConstraint/decompositionConstraint.C b/src/parallel/decompose/decompositionMethods/decompositionConstraints/decompositionConstraint/decompositionConstraint.C new file mode 100644 index 0000000000000000000000000000000000000000..ca9a1981074e26fd619bb2e993528aa4fb13d63a --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/decompositionConstraint/decompositionConstraint.C @@ -0,0 +1,86 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
+ +\*---------------------------------------------------------------------------*/ + +#include "decompositionConstraint.H" + +// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // + +namespace Foam +{ +defineTypeNameAndDebug(decompositionConstraint, 1); +defineRunTimeSelectionTable(decompositionConstraint, dictionary); +} + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::decompositionConstraint::decompositionConstraint +( + const dictionary& constraintsDict, + const word& type +) +: + //coeffDict_(constraintsDict.subOrEmptyDict(type + "Coeffs")) + coeffDict_(constraintsDict) +{} + + +// * * * * * * * * * * * * * * * * Selectors * * * * * * * * * * * * * * * * // + +Foam::autoPtr<Foam::decompositionConstraint> +Foam::decompositionConstraint::New +( + const dictionary& dict, + const word& modelType +) +{ + Info<< "Selecting decompositionConstraint " << modelType << endl; + + dictionaryConstructorTable::iterator cstrIter = + dictionaryConstructorTablePtr_->find(modelType); + + if (cstrIter == dictionaryConstructorTablePtr_->end()) + { + FatalIOErrorInFunction(dict) + << "Unknown decompositionConstraint type " + << modelType << nl << nl + << "Valid decompositionConstraint types:" << endl + << dictionaryConstructorTablePtr_->sortedToc() + << exit(FatalIOError); + } + + return autoPtr<decompositionConstraint> + ( + cstrIter()(dict, modelType) + ); +} + + +// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * // + +Foam::decompositionConstraint::~decompositionConstraint() +{} + + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/decompositionConstraint/decompositionConstraint.H b/src/parallel/decompose/decompositionMethods/decompositionConstraints/decompositionConstraint/decompositionConstraint.H new file mode 100644 index 0000000000000000000000000000000000000000..9adb44da1fefcae4c811522ba015dcac3edee6b0 --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/decompositionConstraint/decompositionConstraint.H @@ -0,0 +1,155 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
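
For orientation, a constraint is meant to be obtained through the run-time selector defined above from a small dictionary; a minimal sketch (the dictionary contents and the preservePatches type it names are illustrative only, relying on the constraint added later in this change):

    // Illustrative sketch only: select a constraint by its "type" entry
    IStringStream is("type preservePatches; patches (\"cyclic.*\");");
    dictionary constraintDict(is);

    const word constraintType(constraintDict.lookup("type"));

    autoPtr<decompositionConstraint> constraintPtr
    (
        decompositionConstraint::New(constraintDict, constraintType)
    );
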
+ +Class + Foam::decompositionConstraint + +Description + +SourceFiles + decompositionConstraint.C + +\*---------------------------------------------------------------------------*/ + +#ifndef decompositionConstraint_H +#define decompositionConstraint_H + +#include "dictionary.H" +#include "runTimeSelectionTables.H" +#include "boolList.H" +#include "labelList.H" +#include "labelPair.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +// Forward declaration of classes +class polyMesh; + +/*---------------------------------------------------------------------------*\ + Class decompositionConstraint Declaration +\*---------------------------------------------------------------------------*/ + +class decompositionConstraint +{ +protected: + + // Protected data + + //- Model coefficients dictionary + dictionary coeffDict_; + +private: + + // Private Member Functions + + //- Disallow default bitwise copy construct + decompositionConstraint(const decompositionConstraint&); + + //- Disallow default bitwise assignment + void operator=(const decompositionConstraint&); + + +public: + + //- Runtime type information + TypeName("decompositionConstraint"); + + + // Declare run-time constructor selection table + + declareRunTimeSelectionTable + ( + autoPtr, + decompositionConstraint, + dictionary, + ( + const dictionary& constraintsDict, + const word& type + ), + (constraintsDict, type) + ); + + + // Constructors + + //- Construct with generic dictionary with optional entry for type + decompositionConstraint + ( + const dictionary& constraintsDict, + const word& type + ); + + // Selectors + + //- Return a reference to the selected decompositionConstraint + static autoPtr<decompositionConstraint> New + ( + const dictionary& constraintsDict, + const word& type + ); + + + //- Destructor + virtual ~decompositionConstraint(); + + + // Member Functions + + //- Add my constraints to list of constraints + virtual void add + ( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections + ) const = 0; + + //- Apply any additional post-decomposition constraints. 
Usually no + // need to do anything since decomposition method should have already + // obeyed the constraints + virtual void apply + ( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition + ) const + {} +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveBaffles/preserveBafflesConstraint.C b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveBaffles/preserveBafflesConstraint.C new file mode 100644 index 0000000000000000000000000000000000000000..d2c1b50e9476d0f06f4d350c9f2b21ca9b0c5be7 --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveBaffles/preserveBafflesConstraint.C @@ -0,0 +1,243 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
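
The add/apply pair declared above is intended to bracket the actual decomposition; a rough usage sketch (the names constraints, mesh and cellToProc are assumptions here, the real call sites being in decompositionMethod):

    // Phase 1: let every constraint mark faces that may not become processor
    // boundaries and record faces/cells that must stay together
    boolList blockedFace;
    PtrList<labelList> specifiedProcessorFaces;
    labelList specifiedProcessor;
    List<labelPair> explicitConnections;

    forAll(constraints, i)
    {
        constraints[i].add
        (
            mesh,
            blockedFace,
            specifiedProcessorFaces,
            specifiedProcessor,
            explicitConnections
        );
    }

    // ... run the decomposition method with this information to obtain cellToProc ...

    // Phase 2: let every constraint repair anything the method did not honour
    forAll(constraints, i)
    {
        constraints[i].apply
        (
            mesh,
            blockedFace,
            specifiedProcessorFaces,
            specifiedProcessor,
            explicitConnections,
            cellToProc
        );
    }
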
+ +\*---------------------------------------------------------------------------*/ + +#include "preserveBafflesConstraint.H" +#include "addToRunTimeSelectionTable.H" +#include "syncTools.H" +#include "localPointRegion.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ +namespace decompositionConstraints +{ + defineTypeName(preserveBafflesConstraint); + + addToRunTimeSelectionTable + ( + decompositionConstraint, + preserveBafflesConstraint, + dictionary + ); +} +} + + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::decompositionConstraints::preserveBafflesConstraint:: +preserveBafflesConstraint +( + const dictionary& constraintsDict, + const word& modelType +) +: + decompositionConstraint(constraintsDict, typeName) +{ + if (decompositionConstraint::debug) + { + Info<< type() << " : setting constraints to preserve baffles" + //<< returnReduce(bafflePairs.size(), sumOp<label>()) + << endl; + } +} + + +Foam::decompositionConstraints::preserveBafflesConstraint:: +preserveBafflesConstraint() +: + decompositionConstraint(dictionary(), typeName) +{ + if (decompositionConstraint::debug) + { + Info<< type() << " : setting constraints to preserve baffles" + //<< returnReduce(bafflePairs.size(), sumOp<label>()) + << endl; + } +} + + +// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // + +void Foam::decompositionConstraints::preserveBafflesConstraint::add +( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections +) const +{ + const labelPairList bafflePairs + ( + localPointRegion::findDuplicateFacePairs(mesh) + ); + + if (decompositionConstraint::debug & 2) + { + Info<< type() << " : setting constraints to preserve " + << returnReduce(bafflePairs.size(), sumOp<label>()) + << " baffles" << endl; + } + + + // Merge into explicitConnections + { + // Convert into face-to-face addressing + labelList faceToFace(mesh.nFaces(), -1); + forAll(explicitConnections, i) + { + const labelPair& p = explicitConnections[i]; + faceToFace[p[0]] = p[1]; + faceToFace[p[1]] = p[0]; + } + + // Merge in bafflePairs + forAll(bafflePairs, i) + { + const labelPair& p = bafflePairs[i]; + + if (faceToFace[p[0]] == -1 && faceToFace[p[1]] == -1) + { + faceToFace[p[0]] = p[1]; + faceToFace[p[1]] = p[0]; + } + else if (labelPair::compare(p, labelPair(p[0], faceToFace[p[0]]))) + { + // Connection already present + } + else + { + label p0Slave = faceToFace[p[0]]; + label p1Slave = faceToFace[p[1]]; + IOWarningInFunction(coeffDict_) + << "When adding baffle between faces " + << p[0] << " at " << mesh.faceCentres()[p[0]] + << " and " + << p[1] << " at " << mesh.faceCentres()[p[1]] + << " : face " << p[0] << " already is connected to face " + << p0Slave << " at " << mesh.faceCentres()[p0Slave] + << " and face " << p[1] << " already is connected to face " + << p1Slave << " at " << mesh.faceCentres()[p1Slave] + << endl; + } + } + + // Convert back into labelPairList + label n = 0; + forAll(faceToFace, faceI) + { + label otherFaceI = faceToFace[faceI]; + if (otherFaceI != -1 && faceI < otherFaceI) + { + // I am master of slave + n++; + } + } + explicitConnections.setSize(n); + n = 0; + forAll(faceToFace, faceI) + { + label otherFaceI = faceToFace[faceI]; + if (otherFaceI != -1 && faceI < otherFaceI) + { + explicitConnections[n++] = labelPair(faceI, otherFaceI); + } + } + } + + // Make sure blockedFace is 
uptodate + blockedFace.setSize(mesh.nFaces(), true); + forAll(explicitConnections, i) + { + blockedFace[explicitConnections[i].first()] = false; + blockedFace[explicitConnections[i].second()] = false; + } + syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>()); +} + + +void Foam::decompositionConstraints::preserveBafflesConstraint::apply +( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition +) const +{ + const labelPairList bafflePairs + ( + localPointRegion::findDuplicateFacePairs(mesh) + ); + + label nChanged = 0; + + forAll(bafflePairs, i) + { + const labelPair& baffle = bafflePairs[i]; + label f0 = baffle.first(); + label f1 = baffle.second(); + + const label procI = decomposition[mesh.faceOwner()[f0]]; + + if (mesh.isInternalFace(f0)) + { + label nei0 = mesh.faceNeighbour()[f0]; + if (decomposition[nei0] != procI) + { + decomposition[nei0] = procI; + nChanged++; + } + } + + label own1 = mesh.faceOwner()[f1]; + if (decomposition[own1] != procI) + { + decomposition[own1] = procI; + nChanged++; + } + if (mesh.isInternalFace(f1)) + { + label nei1 = mesh.faceNeighbour()[f1]; + if (decomposition[nei1] != procI) + { + decomposition[nei1] = procI; + } + } + } + + if (decompositionConstraint::debug & 2) + { + reduce(nChanged, sumOp<label>()); + Info<< type() << " : changed decomposition on " << nChanged + << " cells" << endl; + } +} + + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveBaffles/preserveBafflesConstraint.H b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveBaffles/preserveBafflesConstraint.H new file mode 100644 index 0000000000000000000000000000000000000000..e593a5af7b429a0dd6d8f0cf9d12a33a6175fefe --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveBaffles/preserveBafflesConstraint.H @@ -0,0 +1,116 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +Class + Foam::preserveBafflesConstraint + +Description + Detects baffles and keeps owner and neighbour on same processor. 
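
In use this constraint is selected from decomposeParDict through the optional constraints sub-dictionary that the updated decompositionMethod constructor (at the end of this change) reads; an illustrative entry, with the entry name chosen freely:

    constraints
    {
        baffles
        {
            type    preserveBaffles;
        }
    }
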
+ +SourceFiles + preserveBafflesConstraint.C + +\*---------------------------------------------------------------------------*/ + +#ifndef preserveBafflesConstraint_H +#define preserveBafflesConstraint_H + +#include "decompositionConstraint.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +namespace decompositionConstraints +{ + +/*---------------------------------------------------------------------------*\ + Class preserveBafflesConstraint Declaration +\*---------------------------------------------------------------------------*/ + +class preserveBafflesConstraint +: + public decompositionConstraint +{ + // Private data + +public: + + //- Runtime type information + TypeName("preserveBaffles"); + + + // Constructors + + //- Construct with generic dictionary with optional entry for type + preserveBafflesConstraint + ( + const dictionary& constraintsDict, + const word& type + ); + + //- Construct from components + preserveBafflesConstraint(); + + + //- Destructor + virtual ~preserveBafflesConstraint() + {} + + + // Member Functions + + //- Add my constraints to list of constraints + virtual void add + ( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections + ) const; + + //- Apply any additional post-decomposition constraints + virtual void apply + ( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition + ) const; +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace decompositionConstraints +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveFaceZones/preserveFaceZonesConstraint.C b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveFaceZones/preserveFaceZonesConstraint.C new file mode 100644 index 0000000000000000000000000000000000000000..9a2a7de2f7640b547befd1e0e2d780a260c5047b --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveFaceZones/preserveFaceZonesConstraint.C @@ -0,0 +1,216 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
+ +\*---------------------------------------------------------------------------*/ + +#include "preserveFaceZonesConstraint.H" +#include "addToRunTimeSelectionTable.H" +#include "syncTools.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ +namespace decompositionConstraints +{ + defineTypeName(preserveFaceZonesConstraint); + + addToRunTimeSelectionTable + ( + decompositionConstraint, + preserveFaceZonesConstraint, + dictionary + ); +} +} + + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::decompositionConstraints::preserveFaceZonesConstraint:: +preserveFaceZonesConstraint +( + const dictionary& constraintsDict, + const word& modelType +) +: + decompositionConstraint(constraintsDict, typeName), + zones_(coeffDict_.lookup("zones")) +{ + if (decompositionConstraint::debug) + { + Info<< type() << " : adding constraints to keep owner and neighbour" + << " of faces in zones " << zones_ + << " on same processor" << endl; + } +} + + +Foam::decompositionConstraints::preserveFaceZonesConstraint:: +preserveFaceZonesConstraint +( + const wordReList& zones +) +: + decompositionConstraint(dictionary(), typeName), + zones_(zones) +{ + if (decompositionConstraint::debug) + { + Info<< type() << " : adding constraints to keep owner and neighbour" + << " of faces in zones " << zones_ + << " on same processor" << endl; + } +} + + +// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // + +void Foam::decompositionConstraints::preserveFaceZonesConstraint::add +( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections +) const +{ + blockedFace.setSize(mesh.nFaces(), true); + + const faceZoneMesh& fZones = mesh.faceZones(); + + const labelList zoneIDs = findStrings(zones_, fZones.names()); + + label nUnblocked = 0; + + forAll(zoneIDs, i) + { + const faceZone& fz = fZones[zoneIDs[i]]; + + forAll(fz, i) + { + if (blockedFace[fz[i]]) + { + blockedFace[fz[i]] = false; + nUnblocked++; + } + } + } + + if (decompositionConstraint::debug & 2) + { + reduce(nUnblocked, sumOp<label>()); + Info<< type() << " : unblocked " << nUnblocked << " faces" << endl; + } + + syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>()); +} + + +void Foam::decompositionConstraints::preserveFaceZonesConstraint::apply +( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition +) const +{ + // If the decomposition has not enforced the constraint do it over + // here. 
+ + + // Synchronise decomposition on boundary + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + const polyBoundaryMesh& pbm = mesh.boundaryMesh(); + + labelList destProc(mesh.nFaces()-mesh.nInternalFaces(), labelMax); + + forAll(pbm, patchI) + { + const polyPatch& pp = pbm[patchI]; + + const labelUList& faceCells = pp.faceCells(); + + forAll(faceCells, i) + { + label bFaceI = pp.start()+i-mesh.nInternalFaces(); + destProc[bFaceI] = decomposition[faceCells[i]]; + } + } + + syncTools::syncBoundaryFaceList(mesh, destProc, minEqOp<label>()); + + + // Override if differing + // ~~~~~~~~~~~~~~~~~~~~~ + + const faceZoneMesh& fZones = mesh.faceZones(); + + const labelList zoneIDs = findStrings(zones_, fZones.names()); + + label nChanged = 0; + + forAll(zoneIDs, i) + { + const faceZone& fz = fZones[zoneIDs[i]]; + + forAll(fz, i) + { + label faceI = fz[i]; + + label own = mesh.faceOwner()[faceI]; + + if (mesh.isInternalFace(faceI)) + { + label nei = mesh.faceNeighbour()[faceI]; + if (decomposition[own] != decomposition[nei]) + { + decomposition[nei] = decomposition[own]; + nChanged++; + } + } + else + { + label bFaceI = faceI-mesh.nInternalFaces(); + if (decomposition[own] != destProc[bFaceI]) + { + decomposition[own] = destProc[bFaceI]; + nChanged++; + } + } + } + } + + if (decompositionConstraint::debug & 2) + { + reduce(nChanged, sumOp<label>()); + Info<< type() << " : changed decomposition on " << nChanged + << " cells" << endl; + } +} + + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveFaceZones/preserveFaceZonesConstraint.H b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveFaceZones/preserveFaceZonesConstraint.H new file mode 100644 index 0000000000000000000000000000000000000000..97bdd313d6d766d7a64cf0e79659ff385a651eb9 --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preserveFaceZones/preserveFaceZonesConstraint.H @@ -0,0 +1,122 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +Class + Foam::preserveFaceZonesConstraint + +Description + Constraint to keep/move owner and neighbour of faceZone onto same + processor. 
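
An illustrative decomposeParDict entry for this constraint in the same constraints sub-dictionary (zone names are examples only; regular expressions are accepted since the zones entry is read as a wordReList):

    faceZones
    {
        type    preserveFaceZones;
        zones   (rotor ".*Zone");
    }
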
+ +SourceFiles + preserveFaceZonesConstraint.C + +\*---------------------------------------------------------------------------*/ + +#ifndef preserveFaceZonesConstraint_H +#define preserveFaceZonesConstraint_H + +#include "decompositionConstraint.H" +#include "wordReList.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +namespace decompositionConstraints +{ + +/*---------------------------------------------------------------------------*\ + Class preserveFaceZonesConstraint Declaration +\*---------------------------------------------------------------------------*/ + +class preserveFaceZonesConstraint +: + public decompositionConstraint +{ + // Private data + + //- List of zones to keep together + wordReList zones_; + + +public: + + //- Runtime type information + TypeName("preserveFaceZones"); + + + // Constructors + + //- Construct with generic dictionary with optional entry for type + preserveFaceZonesConstraint + ( + const dictionary& constraintsDict, + const word& type + ); + + //- Construct from components + preserveFaceZonesConstraint(const wordReList& zones); + + + //- Destructor + virtual ~preserveFaceZonesConstraint() + {} + + + // Member Functions + + //- Add my constraints to list of constraints + virtual void add + ( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections + ) const; + + //- Apply any additional post-decomposition constraints + virtual void apply + ( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition + ) const; +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace decompositionConstraints +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/preservePatches/preservePatchesConstraint.C b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preservePatches/preservePatchesConstraint.C new file mode 100644 index 0000000000000000000000000000000000000000..7341e2b15ce009dbe480f29570ff27dfb779df99 --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preservePatches/preservePatchesConstraint.C @@ -0,0 +1,200 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. 
+ + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +\*---------------------------------------------------------------------------*/ + +#include "preservePatchesConstraint.H" +#include "addToRunTimeSelectionTable.H" +#include "syncTools.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ +namespace decompositionConstraints +{ + defineTypeName(preservePatchesConstraint); + + addToRunTimeSelectionTable + ( + decompositionConstraint, + preservePatchesConstraint, + dictionary + ); +} +} + + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::decompositionConstraints::preservePatchesConstraint:: +preservePatchesConstraint +( + const dictionary& constraintsDict, + const word& modelType +) +: + decompositionConstraint(constraintsDict, typeName), + patches_(coeffDict_.lookup("patches")) +{ + if (decompositionConstraint::debug) + { + Info<< type() << " : adding constraints to keep owner of faces" + << " in patches " << patches_ + << " on same processor. This only makes sense for cyclics." << endl; + } +} + + +Foam::decompositionConstraints::preservePatchesConstraint:: +preservePatchesConstraint +( + const wordReList& patches +) +: + decompositionConstraint(dictionary(), typeName), + patches_(patches) +{ + if (decompositionConstraint::debug) + { + Info<< type() << " : adding constraints to keep owner of faces" + << " in patches " << patches_ + << " on same processor. This only makes sense for cyclics." << endl; + } +} + + +// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // + +void Foam::decompositionConstraints::preservePatchesConstraint::add +( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections +) const +{ + const polyBoundaryMesh& pbm = mesh.boundaryMesh(); + + blockedFace.setSize(mesh.nFaces(), true); + + const labelList patchIDs(pbm.patchSet(patches_).sortedToc()); + + label nUnblocked = 0; + + forAll(patchIDs, i) + { + const polyPatch& pp = pbm[patchIDs[i]]; + + forAll(pp, i) + { + if (blockedFace[pp.start() + i]) + { + blockedFace[pp.start() + i] = false; + nUnblocked++; + } + } + } + + if (decompositionConstraint::debug & 2) + { + reduce(nUnblocked, sumOp<label>()); + Info<< type() << " : unblocked " << nUnblocked << " faces" << endl; + } + + syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>()); +} + + +void Foam::decompositionConstraints::preservePatchesConstraint::apply +( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition +) const +{ + // If the decomposition has not enforced the constraint do it over + // here. 
+ + // Synchronise decomposition on patchIDs + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + const polyBoundaryMesh& pbm = mesh.boundaryMesh(); + + labelList destProc(mesh.nFaces()-mesh.nInternalFaces(), labelMax); + + forAll(pbm, patchI) + { + const polyPatch& pp = pbm[patchI]; + + const labelUList& faceCells = pp.faceCells(); + + forAll(faceCells, i) + { + label bFaceI = pp.start()+i-mesh.nInternalFaces(); + destProc[bFaceI] = decomposition[faceCells[i]]; + } + } + + syncTools::syncBoundaryFaceList(mesh, destProc, minEqOp<label>()); + + + // Override if differing + // ~~~~~~~~~~~~~~~~~~~~~ + + const labelList patchIDs(pbm.patchSet(patches_).sortedToc()); + + label nChanged = 0; + + forAll(patchIDs, i) + { + const polyPatch& pp = pbm[patchIDs[i]]; + + const labelUList& faceCells = pp.faceCells(); + + forAll(faceCells, i) + { + label bFaceI = pp.start()+i-mesh.nInternalFaces(); + + if (decomposition[faceCells[i]] != destProc[bFaceI]) + { + decomposition[faceCells[i]] = destProc[bFaceI]; + nChanged++; + } + } + } + + if (decompositionConstraint::debug & 2) + { + reduce(nChanged, sumOp<label>()); + Info<< type() << " : changed decomposition on " << nChanged + << " cells" << endl; + } +} + + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/preservePatches/preservePatchesConstraint.H b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preservePatches/preservePatchesConstraint.H new file mode 100644 index 0000000000000000000000000000000000000000..1428880a00171b923eaed3ac6a2b961e2a87a88f --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/preservePatches/preservePatchesConstraint.H @@ -0,0 +1,123 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +Class + Foam::preservePatchesConstraint + +Description + Constraint to keep owner and neighbour of (cyclic) patch on same + processor. 
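
An illustrative entry (patch names are examples only). The pre-existing flat preservePatches keyword of decomposeParDict remains recognised and is converted into this constraint by the backwards-compatibility handling added to the decompositionMethod constructor later in this change:

    cyclics
    {
        type    preservePatches;
        patches (".*cyclic.*");
    }
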
+ +SourceFiles + preservePatchesConstraint.C + +\*---------------------------------------------------------------------------*/ + +#ifndef preservePatchesConstraint_H +#define preservePatchesConstraint_H + +#include "decompositionConstraint.H" +#include "wordReList.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +namespace decompositionConstraints +{ + +/*---------------------------------------------------------------------------*\ + Class preservePatchesConstraint Declaration +\*---------------------------------------------------------------------------*/ + +class preservePatchesConstraint +: + public decompositionConstraint +{ + // Private data + + //- List of patches to keep together + wordReList patches_; + + +public: + + //- Runtime type information + TypeName("preservePatches"); + + + // Constructors + + //- Construct with generic dictionary with optional entry for type + preservePatchesConstraint + ( + const dictionary& constraintsDict, + const word& type + ); + + //- Construct from components + preservePatchesConstraint(const wordReList& patches); + + + + //- Destructor + virtual ~preservePatchesConstraint() + {} + + + // Member Functions + + //- Add my constraints to list of constraints + virtual void add + ( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections + ) const; + + //- Apply any additional post-decomposition constraints + virtual void apply + ( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition + ) const; +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace decompositionConstraints +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/refinementHistory/refinementHistoryConstraint.C b/src/parallel/decompose/decompositionMethods/decompositionConstraints/refinementHistory/refinementHistoryConstraint.C new file mode 100644 index 0000000000000000000000000000000000000000..a60917157f46e6c92df80ba08d3192ee640dba1f --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/refinementHistory/refinementHistoryConstraint.C @@ -0,0 +1,212 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. 
+ + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +\*---------------------------------------------------------------------------*/ + +#include "refinementHistoryConstraint.H" +#include "addToRunTimeSelectionTable.H" +#include "syncTools.H" +#include "refinementHistory.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + defineTypeName(refinementHistoryConstraint); + + addToRunTimeSelectionTable + ( + decompositionConstraint, + refinementHistoryConstraint, + dictionary + ); +} + + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::refinementHistoryConstraint::refinementHistoryConstraint +( + const dictionary& constraintsDict, + const word& modelType +) +: + decompositionConstraint(constraintsDict, typeName) +{ + if (decompositionConstraint::debug) + { + Info<< type() << " : setting constraints to preserve refinement history" + << endl; + } +} + + +Foam::refinementHistoryConstraint::refinementHistoryConstraint() +: + decompositionConstraint(dictionary(), typeName) +{ + if (decompositionConstraint::debug) + { + Info<< type() << " : setting constraints to refinement history" + << endl; + } +} + + +// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // + +void Foam::refinementHistoryConstraint::add +( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections +) const +{ + autoPtr<const refinementHistory> storagePtr; + refinementHistory const* refPtr = NULL; + + if (mesh.foundObject<refinementHistory>("refinementHistory")) + { + if (decompositionConstraint::debug) + { + Info<< type() << " : found refinementHistory" << endl; + } + refPtr = &mesh.lookupObject<refinementHistory>("refinementHistory"); + } + else + { + if (decompositionConstraint::debug) + { + Info<< type() << " : reading refinementHistory from time " + << mesh.facesInstance() << endl; + } + storagePtr.reset + ( + new refinementHistory + ( + IOobject + ( + "refinementHistory", + mesh.facesInstance(), + polyMesh::meshSubDir, + mesh, + IOobject::READ_IF_PRESENT, + IOobject::NO_WRITE + ), + mesh.nCells() + ) + ); + } + + const refinementHistory& history = + ( + storagePtr.valid() + ? 
storagePtr() + : *refPtr + ); + + if (history.active()) + { + // refinementHistory itself implements decompositionConstraint + history.add + ( + blockedFace, + specifiedProcessorFaces, + specifiedProcessor, + explicitConnections + ); + } +} + + +void Foam::refinementHistoryConstraint::apply +( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition +) const +{ + autoPtr<const refinementHistory> storagePtr; + refinementHistory const* refPtr = NULL; + + if (mesh.foundObject<refinementHistory>("refinementHistory")) + { + //if (decompositionConstraint::debug) + //{ + // Info<< type() << " : found refinementHistory" << endl; + //} + refPtr = &mesh.lookupObject<refinementHistory>("refinementHistory"); + } + else + { + //if (decompositionConstraint::debug) + //{ + // Info<< type() << " : reading refinementHistory from time " + // << mesh.facesInstance() << endl; + //} + storagePtr.reset + ( + new refinementHistory + ( + IOobject + ( + "refinementHistory", + mesh.facesInstance(), + polyMesh::meshSubDir, + mesh, + IOobject::READ_IF_PRESENT, + IOobject::NO_WRITE + ), + mesh.nCells() + ) + ); + } + + const refinementHistory& history = + ( + storagePtr.valid() + ? storagePtr() + : *refPtr + ); + + if (history.active()) + { + // refinementHistory itself implements decompositionConstraint + history.apply + ( + blockedFace, + specifiedProcessorFaces, + specifiedProcessor, + explicitConnections, + decomposition + ); + } +} + + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/refinementHistory/refinementHistoryConstraint.H b/src/parallel/decompose/decompositionMethods/decompositionConstraints/refinementHistory/refinementHistoryConstraint.H new file mode 100644 index 0000000000000000000000000000000000000000..b4adc677a38b2ba441ec2bfec4911ee56c24d968 --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/refinementHistory/refinementHistoryConstraint.H @@ -0,0 +1,114 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +Class + Foam::refinementHistoryConstraint + +Description + Constraint to keep all cells originating from refining the same cell + onto the same processor. Reads polyMesh/refinementHistory. 
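
This constraint needs no coefficients, so an illustrative entry is just the type; if no refinementHistory is registered on the mesh it is read from polyMesh/refinementHistory with READ_IF_PRESENT, and the constraint simply stays inactive when no history exists:

    refinement
    {
        type    refinementHistory;
    }
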
+ +SourceFiles + refinementHistoryConstraint.C + +\*---------------------------------------------------------------------------*/ + +#ifndef refinementHistoryConstraint_H +#define refinementHistoryConstraint_H + +#include "decompositionConstraint.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + + +/*---------------------------------------------------------------------------*\ + Class refinementHistoryConstraint Declaration +\*---------------------------------------------------------------------------*/ + +class refinementHistoryConstraint +: + public decompositionConstraint +{ + // Private data + +public: + + //- Runtime type information + TypeName("refinementHistory"); + + + // Constructors + + //- Construct with generic dictionary with optional entry for type + refinementHistoryConstraint + ( + const dictionary& constraintsDict, + const word& type + ); + + //- Construct from components + refinementHistoryConstraint(); + + + //- Destructor + virtual ~refinementHistoryConstraint() + {} + + + // Member Functions + + //- Add my constraints to list of constraints + virtual void add + ( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections + ) const; + + //- Apply any additional post-decomposition constraints + virtual void apply + ( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition + ) const; +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/singleProcessorFaceSets/singleProcessorFaceSetsConstraint.C b/src/parallel/decompose/decompositionMethods/decompositionConstraints/singleProcessorFaceSets/singleProcessorFaceSetsConstraint.C new file mode 100644 index 0000000000000000000000000000000000000000..da140d6658876ef80fd4076b39f22e3f3bd73a66 --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/singleProcessorFaceSets/singleProcessorFaceSetsConstraint.C @@ -0,0 +1,319 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. 
+ +\*---------------------------------------------------------------------------*/ + +#include "singleProcessorFaceSetsConstraint.H" +#include "addToRunTimeSelectionTable.H" +#include "syncTools.H" +#include "faceSet.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ +namespace decompositionConstraints +{ + defineTypeName(singleProcessorFaceSetsConstraint); + + addToRunTimeSelectionTable + ( + decompositionConstraint, + singleProcessorFaceSetsConstraint, + dictionary + ); +} +} + + +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // + +Foam::decompositionConstraints::singleProcessorFaceSetsConstraint:: +singleProcessorFaceSetsConstraint +( + const dictionary& constraintsDict, + const word& modelType +) +: + decompositionConstraint(constraintsDict, typeName), + setNameAndProcs_(coeffDict_.lookup("singleProcessorFaceSets")) +{ + if (decompositionConstraint::debug) + { + Info<< type() + << " : adding constraints to keep" << endl; + + forAll(setNameAndProcs_, setI) + { + Info<< " all cells connected to faceSet " + << setNameAndProcs_[setI].first() + << " on processor " << setNameAndProcs_[setI].second() << endl; + } + } +} + + +Foam::decompositionConstraints::singleProcessorFaceSetsConstraint:: +singleProcessorFaceSetsConstraint +( + const List<Tuple2<word, label> >& setNameAndProcs +) +: + decompositionConstraint(dictionary(), typeName), + setNameAndProcs_(setNameAndProcs) +{ + if (decompositionConstraint::debug) + { + Info<< type() + << " : adding constraints to keep" << endl; + + forAll(setNameAndProcs_, setI) + { + Info<< " all cells connected to faceSet " + << setNameAndProcs_[setI].first() + << " on processor " << setNameAndProcs_[setI].second() << endl; + } + } +} + + +// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // + +void Foam::decompositionConstraints::singleProcessorFaceSetsConstraint::add +( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections +) const +{ + blockedFace.setSize(mesh.nFaces(), true); + + // Mark faces already in set + labelList faceToSet(mesh.nFaces(), -1); + forAll(specifiedProcessorFaces, setI) + { + const labelList& faceLabels = specifiedProcessorFaces[setI]; + forAll(faceLabels, i) + { + faceToSet[faceLabels[i]] = setI; + } + } + + + forAll(setNameAndProcs_, setI) + { + //Info<< "Keeping all cells connected to faceSet " + // << setNameAndProcs_[setI].first() + // << " on processor " << setNameAndProcs_[setI].second() << endl; + + const label destProcI = setNameAndProcs_[setI].second(); + + // Read faceSet + const faceSet fz(mesh, setNameAndProcs_[setI].first()); + + // Check that it does not overlap with existing specifiedProcessorFaces + labelList nMatch(specifiedProcessorFaces.size(), 0); + forAllConstIter(faceSet, fz, iter) + { + label setI = faceToSet[iter.key()]; + if (setI != -1) + { + nMatch[setI]++; + } + } + + + // Only store if all faces are not yet in specifiedProcessorFaces + // (on all processors) + bool store = true; + + forAll(nMatch, setI) + { + if (nMatch[setI] == fz.size()) + { + // full match + store = false; + break; + } + else if (nMatch[setI] > 0) + { + // partial match + store = false; + break; + } + } + + reduce(store, andOp<bool>()); + + + if (store) + { + specifiedProcessorFaces.append(new labelList(fz.sortedToc())); + specifiedProcessor.append(destProcI); + } + } + + + // Unblock all point connected faces + // 
1. Mark all points on specifiedProcessorFaces + boolList procFacePoint(mesh.nPoints(), false); + forAll(specifiedProcessorFaces, setI) + { + const labelList& set = specifiedProcessorFaces[setI]; + forAll(set, fI) + { + const face& f = mesh.faces()[set[fI]]; + forAll(f, fp) + { + procFacePoint[f[fp]] = true; + } + } + } + syncTools::syncPointList(mesh, procFacePoint, orEqOp<bool>(), false); + + // 2. Unblock all faces on procFacePoint + + label nUnblocked = 0; + + forAll(procFacePoint, pointI) + { + if (procFacePoint[pointI]) + { + const labelList& pFaces = mesh.pointFaces()[pointI]; + forAll(pFaces, i) + { + if (blockedFace[pFaces[i]]) + { + blockedFace[pFaces[i]] = false; + nUnblocked++; + } + } + } + } + + if (decompositionConstraint::debug & 2) + { + reduce(nUnblocked, sumOp<label>()); + Info<< type() << " : unblocked " << nUnblocked << " faces" << endl; + } + + syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>()); +} + + +void Foam::decompositionConstraints::singleProcessorFaceSetsConstraint::apply +( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition +) const +{ + // For specifiedProcessorFaces rework the cellToProc to enforce + // all on one processor since we can't guarantee that the input + // to regionSplit was a single region. + // E.g. faceSet 'a' with the cells split into two regions + // by a notch formed by two walls + // + // \ / + // \ / + // ---a----+-----a----- + // + // + // Note that reworking the cellToProc might make the decomposition + // unbalanced. + label nChanged = 0; + + forAll(specifiedProcessorFaces, setI) + { + const labelList& set = specifiedProcessorFaces[setI]; + + // Get the processor to use for the set + label procI = specifiedProcessor[setI]; + if (procI == -1) + { + // If no processor specified use the one from the + // 0th element + if (set.size()) + { + procI = decomposition[mesh.faceOwner()[set[0]]]; + } + reduce(procI, maxOp<label>()); + } + + // Get all points on the sets + boolList procFacePoint(mesh.nPoints(), false); + forAll(set, fI) + { + const face& f = mesh.faces()[set[fI]]; + forAll(f, fp) + { + procFacePoint[f[fp]] = true; + } + } + syncTools::syncPointList(mesh, procFacePoint, orEqOp<bool>(), false); + + // 2. 
Unblock all faces on procFacePoint + forAll(procFacePoint, pointI) + { + if (procFacePoint[pointI]) + { + const labelList& pFaces = mesh.pointFaces()[pointI]; + forAll(pFaces, i) + { + label faceI = pFaces[i]; + + label own = mesh.faceOwner()[faceI]; + if (decomposition[own] != procI) + { + decomposition[own] = procI; + nChanged++; + } + if (mesh.isInternalFace(faceI)) + { + label nei = mesh.faceNeighbour()[faceI]; + if (decomposition[nei] != procI) + { + decomposition[nei] = procI; + nChanged++; + } + } + } + } + } + } + + if (decompositionConstraint::debug & 2) + { + reduce(nChanged, sumOp<label>()); + Info<< type() << " : changed decomposition on " << nChanged + << " cells" << endl; + } +} + + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionConstraints/singleProcessorFaceSets/singleProcessorFaceSetsConstraint.H b/src/parallel/decompose/decompositionMethods/decompositionConstraints/singleProcessorFaceSets/singleProcessorFaceSetsConstraint.H new file mode 100644 index 0000000000000000000000000000000000000000..185b5425233a6995576368d3eb30a5bb5669d592 --- /dev/null +++ b/src/parallel/decompose/decompositionMethods/decompositionConstraints/singleProcessorFaceSets/singleProcessorFaceSetsConstraint.H @@ -0,0 +1,124 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation + \\/ M anipulation | +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. + +Class + Foam::singleProcessorFaceSetsConstraint + +Description + Constraint to keep all cells connected to face or point of faceSet on + a single processor. 
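
An illustrative entry; each element pairs a faceSet name with the processor that should receive all connected cells, where a processor of -1 means any single processor (resolved from the owner of the first face in the set):

    couplingFaces
    {
        type                        singleProcessorFaceSets;
        singleProcessorFaceSets     ((f0 -1) (f1 0));
    }
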
+ +SourceFiles + singleProcessorFaceSetsConstraint.C + +\*---------------------------------------------------------------------------*/ + +#ifndef singleProcessorFaceSetsConstraint_H +#define singleProcessorFaceSetsConstraint_H + +#include "decompositionConstraint.H" +#include "Tuple2.H" + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ + +namespace decompositionConstraints +{ + +/*---------------------------------------------------------------------------*\ + Class singleProcessorFaceSetsConstraint Declaration +\*---------------------------------------------------------------------------*/ + +class singleProcessorFaceSetsConstraint +: + public decompositionConstraint +{ + // Private data + + //- List of faceSet+processor + List<Tuple2<word, label> > setNameAndProcs_; + +public: + + //- Runtime type information + TypeName("singleProcessorFaceSets"); + + + // Constructors + + //- Construct with generic dictionary with optional entry for type + singleProcessorFaceSetsConstraint + ( + const dictionary& constraintsDict, + const word& type + ); + + //- Construct from components + singleProcessorFaceSetsConstraint + ( + const List<Tuple2<word, label> >& setNameAndProcs + ); + + + //- Destructor + virtual ~singleProcessorFaceSetsConstraint() + {} + + + // Member Functions + + //- Add my constraints to list of constraints + virtual void add + ( + const polyMesh& mesh, + boolList& blockedFace, + PtrList<labelList>& specifiedProcessorFaces, + labelList& specifiedProcessor, + List<labelPair>& explicitConnections + ) const; + + //- Apply any additional post-decomposition constraints + virtual void apply + ( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition + ) const; +}; + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace decompositionConstraints +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/parallel/decompose/decompositionMethods/decompositionMethod/decompositionMethod.C b/src/parallel/decompose/decompositionMethods/decompositionMethod/decompositionMethod.C index 5ad9046091eff2f6f0ef9d5c63cc3c0af4e80cce..8b4d98c50ad2cb20e027976a928f3bc22a345592 100644 --- a/src/parallel/decompose/decompositionMethods/decompositionMethod/decompositionMethod.C +++ b/src/parallel/decompose/decompositionMethods/decompositionMethod/decompositionMethod.C @@ -36,6 +36,11 @@ InClass #include "minData.H" #include "FaceCellWave.H" +#include "preserveBafflesConstraint.H" +#include "preservePatchesConstraint.H" +#include "preserveFaceZonesConstraint.H" +#include "singleProcessorFaceSetsConstraint.H" + // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // namespace Foam @@ -44,20 +49,137 @@ namespace Foam defineRunTimeSelectionTable(decompositionMethod, dictionary); } -// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // +// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // -Foam::autoPtr<Foam::decompositionMethod> Foam::decompositionMethod::New +Foam::decompositionMethod::decompositionMethod ( const dictionary& decompositionDict ) +: + decompositionDict_(decompositionDict), + nProcessors_ + ( + 
readLabel(decompositionDict.lookup("numberOfSubdomains"))
+    )
 {
-    word methodType(decompositionDict.lookup("method"));
+    // Read any constraints
+    wordList constraintTypes_;
+    if (decompositionDict_.found("constraints"))
+    {
+        //PtrList<dictionary> constraintsList
+        //(
+        //    decompositionDict_.lookup("constraints")
+        //);
+        //forAll(constraintsList, i)
+        //{
+        //    const dictionary& dict = constraintsList[i];
+        const dictionary& constraintsList = decompositionDict_.subDict
+        (
+            "constraints"
+        );
+        forAllConstIter(dictionary, constraintsList, iter)
+        {
+            const dictionary& dict = iter().dict();
+
+            constraintTypes_.append(dict.lookup("type"));
-    if (methodType == "scotch" && Pstream::parRun())
+            constraints_.append
+            (
+                decompositionConstraint::New
+                (
+                    dict,
+                    constraintTypes_.last()
+                )
+            );
+        }
+    }
+
+    // Backwards compatibility
+    if
+    (
+        decompositionDict_.found("preserveBaffles")
+     && findIndex
+        (
+            constraintTypes_,
+            decompositionConstraints::preserveBafflesConstraint::typeName
+        ) == -1
+    )
+    {
+        constraints_.append
+        (
+            new decompositionConstraints::preserveBafflesConstraint()
+        );
+    }
+
+    if
+    (
+        decompositionDict_.found("preservePatches")
+     && findIndex
+        (
+            constraintTypes_,
+            decompositionConstraints::preservePatchesConstraint::typeName
+        ) == -1
+    )
     {
-        methodType = "ptscotch";
+        const wordReList pNames(decompositionDict_.lookup("preservePatches"));
+
+        constraints_.append
+        (
+            new decompositionConstraints::preservePatchesConstraint(pNames)
+        );
     }

+    if
+    (
+        decompositionDict_.found("preserveFaceZones")
+     && findIndex
+        (
+            constraintTypes_,
+            decompositionConstraints::preserveFaceZonesConstraint::typeName
+        ) == -1
+    )
+    {
+        const wordReList zNames(decompositionDict_.lookup("preserveFaceZones"));
+
+        constraints_.append
+        (
+            new decompositionConstraints::preserveFaceZonesConstraint(zNames)
+        );
+    }
+
+    if
+    (
+        decompositionDict_.found("singleProcessorFaceSets")
+     && findIndex
+        (
+            constraintTypes_,
+            decompositionConstraints::singleProcessorFaceSetsConstraint::typeName
+        ) == -1
+    )
+    {
+        const List<Tuple2<word, label>> zNameAndProcs
+        (
+            decompositionDict_.lookup("singleProcessorFaceSets")
+        );
+
+        constraints_.append
+        (
+            new decompositionConstraints::singleProcessorFaceSetsConstraint
+            (
+                zNameAndProcs
+            )
+        );
+    }
+}
+
+// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
+
+Foam::autoPtr<Foam::decompositionMethod> Foam::decompositionMethod::New
+(
+    const dictionary& decompositionDict
+)
+{
+    word methodType(decompositionDict.lookup("method"));

     Info<< "Selecting decompositionMethod " << methodType << endl;
@@ -367,6 +489,210 @@ void Foam::decompositionMethod::calcCellCells
 }


+void Foam::decompositionMethod::calcCellCells
+(
+    const polyMesh& mesh,
+    const labelList& agglom,
+    const label nLocalCoarse,
+    const bool parallel,
+    CompactListList<label>& cellCells,
+    CompactListList<scalar>& cellCellWeights
+)
+{
+    const labelList& faceOwner = mesh.faceOwner();
+    const labelList& faceNeighbour = mesh.faceNeighbour();
+    const polyBoundaryMesh& patches = mesh.boundaryMesh();
+
+
+    // Create global cell numbers
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    globalIndex globalAgglom
+    (
+        nLocalCoarse,
+        Pstream::msgType(),
+        Pstream::worldComm,
+        parallel
+    );
+
+
+    // Get agglomerate owner on other side of coupled faces
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    labelList globalNeighbour(mesh.nFaces()-mesh.nInternalFaces());
+
+    forAll(patches, patchI)
+    {
+        const polyPatch& pp = patches[patchI];
+
+        if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
+        {
+            label faceI = pp.start();
+            label bFaceI = pp.start() - mesh.nInternalFaces();
+
+            forAll(pp, i)
+            {
+                globalNeighbour[bFaceI] = globalAgglom.toGlobal
+                (
+                    agglom[faceOwner[faceI]]
+                );
+
+                bFaceI++;
+                faceI++;
+            }
+        }
+    }
+
+    // Get the cell on the other side of coupled patches
+    syncTools::swapBoundaryFaceList(mesh, globalNeighbour);
+
+
+    // Count number of faces (internal + coupled)
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    // Number of faces per coarse cell
+    labelList nFacesPerCell(nLocalCoarse, 0);
+
+    for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
+    {
+        label own = agglom[faceOwner[faceI]];
+        label nei = agglom[faceNeighbour[faceI]];
+
+        nFacesPerCell[own]++;
+        nFacesPerCell[nei]++;
+    }
+
+    forAll(patches, patchI)
+    {
+        const polyPatch& pp = patches[patchI];
+
+        if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
+        {
+            label faceI = pp.start();
+            label bFaceI = pp.start()-mesh.nInternalFaces();
+
+            forAll(pp, i)
+            {
+                label own = agglom[faceOwner[faceI]];
+
+                label globalNei = globalNeighbour[bFaceI];
+                if
+                (
+                    !globalAgglom.isLocal(globalNei)
+                 || globalAgglom.toLocal(globalNei) != own
+                )
+                {
+                    nFacesPerCell[own]++;
+                }
+
+                faceI++;
+                bFaceI++;
+            }
+        }
+    }
+
+
+    // Fill in offset and data
+    // ~~~~~~~~~~~~~~~~~~~~~~~
+
+    cellCells.setSize(nFacesPerCell);
+    cellCellWeights.setSize(nFacesPerCell);
+
+    nFacesPerCell = 0;
+
+    labelList& m = cellCells.m();
+    scalarList& w = cellCellWeights.m();
+    const labelList& offsets = cellCells.offsets();
+
+    // For internal faces it is just the offsetted owner and neighbour
+    for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
+    {
+        label own = agglom[faceOwner[faceI]];
+        label nei = agglom[faceNeighbour[faceI]];
+
+        label ownIndex = offsets[own] + nFacesPerCell[own]++;
+        label neiIndex = offsets[nei] + nFacesPerCell[nei]++;
+
+        m[ownIndex] = globalAgglom.toGlobal(nei);
+        w[ownIndex] = mag(mesh.faceAreas()[faceI]);
+        m[neiIndex] = globalAgglom.toGlobal(own);
+        w[neiIndex] = mag(mesh.faceAreas()[faceI]);
+    }
+
+    // For boundary faces it is the offsetted coupled neighbour
+    forAll(patches, patchI)
+    {
+        const polyPatch& pp = patches[patchI];
+
+        if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
+        {
+            label faceI = pp.start();
+            label bFaceI = pp.start()-mesh.nInternalFaces();
+
+            forAll(pp, i)
+            {
+                label own = agglom[faceOwner[faceI]];
+
+                label globalNei = globalNeighbour[bFaceI];
+
+                if
+                (
+                    !globalAgglom.isLocal(globalNei)
+                 || globalAgglom.toLocal(globalNei) != own
+                )
+                {
+                    label ownIndex = offsets[own] + nFacesPerCell[own]++;
+                    m[ownIndex] = globalNei;
+                    w[ownIndex] = mag(mesh.faceAreas()[faceI]);
+                }
+
+                faceI++;
+                bFaceI++;
+            }
+        }
+    }
+
+
+    // Check for duplicate connections between cells
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    // Done as postprocessing step since we now have cellCells.
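+    // As an illustration (hypothetical numbers): a coarse cell whose row in
+    // cellCells currently reads (3 7 7 9) is compacted in place to (3 7 9),
+    // the matching entries of cellCellWeights are kept in step, and any entry
+    // equal to the cell's own global index is dropped as well.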
+ label newIndex = 0; + labelHashSet nbrCells; + + + if (cellCells.size() == 0) + { + return; + } + + label startIndex = cellCells.offsets()[0]; + + forAll(cellCells, cellI) + { + nbrCells.clear(); + nbrCells.insert(globalAgglom.toGlobal(cellI)); + + label endIndex = cellCells.offsets()[cellI+1]; + + for (label i = startIndex; i < endIndex; i++) + { + if (nbrCells.insert(cellCells.m()[i])) + { + cellCells.m()[newIndex] = cellCells.m()[i]; + cellCellWeights.m()[newIndex] = cellCellWeights.m()[i]; + newIndex++; + } + } + startIndex = endIndex; + cellCells.offsets()[cellI+1] = newIndex; + cellCellWeights.offsets()[cellI+1] = newIndex; + } + + cellCells.m().setSize(newIndex); + cellCellWeights.m().setSize(newIndex); +} + + //void Foam::decompositionMethod::calcCellCells //( // const polyMesh& mesh, @@ -1101,172 +1427,45 @@ void Foam::decompositionMethod::setConstraints { blockedFace.setSize(mesh.nFaces()); blockedFace = true; - //label nUnblocked = 0; specifiedProcessorFaces.clear(); explicitConnections.clear(); - - if (decompositionDict_.found("preservePatches")) - { - wordList pNames(decompositionDict_.lookup("preservePatches")); - - Info<< nl - << "Keeping owner of faces in patches " << pNames - << " on same processor. This only makes sense for cyclics." << endl; - - const polyBoundaryMesh& patches = mesh.boundaryMesh(); - - forAll(pNames, i) - { - const label patchi = patches.findPatchID(pNames[i]); - - if (patchi == -1) - { - FatalErrorInFunction - << "Unknown preservePatch " << pNames[i] - << endl << "Valid patches are " << patches.names() - << exit(FatalError); - } - - const polyPatch& pp = patches[patchi]; - - forAll(pp, i) - { - if (blockedFace[pp.start() + i]) - { - blockedFace[pp.start() + i] = false; - //nUnblocked++; - } - } - } - } - if (decompositionDict_.found("preserveFaceZones")) - { - wordList zNames(decompositionDict_.lookup("preserveFaceZones")); - - Info<< nl - << "Keeping owner and neighbour of faces in zones " << zNames - << " on same processor" << endl; - - const faceZoneMesh& fZones = mesh.faceZones(); - - forAll(zNames, i) - { - label zoneI = fZones.findZoneID(zNames[i]); - - if (zoneI == -1) - { - FatalErrorInFunction - << "Unknown preserveFaceZone " << zNames[i] - << endl << "Valid faceZones are " << fZones.names() - << exit(FatalError); - } - - const faceZone& fz = fZones[zoneI]; - - forAll(fz, i) - { - if (blockedFace[fz[i]]) - { - blockedFace[fz[i]] = false; - //nUnblocked++; - } - } - } - } - - bool preserveBaffles = decompositionDict_.lookupOrDefault - ( - "preserveBaffles", - false - ); - if (preserveBaffles) + forAll(constraints_, constraintI) { - Info<< nl - << "Keeping owner of faces in baffles " - << " on same processor." 
<< endl; - - explicitConnections = localPointRegion::findDuplicateFacePairs(mesh); - forAll(explicitConnections, i) - { - blockedFace[explicitConnections[i].first()] = false; - blockedFace[explicitConnections[i].second()] = false; - } - } - - if - ( - decompositionDict_.found("preservePatches") - || decompositionDict_.found("preserveFaceZones") - || preserveBaffles - ) - { - syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>()); - //reduce(nUnblocked, sumOp<label>()); + constraints_[constraintI].add + ( + mesh, + blockedFace, + specifiedProcessorFaces, + specifiedProcessor, + explicitConnections + ); } +} - - // Specified processor for group of cells connected to faces - - label nProcSets = 0; - if (decompositionDict_.found("singleProcessorFaceSets")) +void Foam::decompositionMethod::applyConstraints +( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& decomposition +) +{ + forAll(constraints_, constraintI) { - List<Tuple2<word, label>> zNameAndProcs + constraints_[constraintI].apply ( - decompositionDict_.lookup("singleProcessorFaceSets") + mesh, + blockedFace, + specifiedProcessorFaces, + specifiedProcessor, + explicitConnections, + decomposition ); - - specifiedProcessorFaces.setSize(zNameAndProcs.size()); - specifiedProcessor.setSize(zNameAndProcs.size()); - - forAll(zNameAndProcs, setI) - { - Info<< "Keeping all cells connected to faceSet " - << zNameAndProcs[setI].first() - << " on processor " << zNameAndProcs[setI].second() << endl; - - // Read faceSet - faceSet fz(mesh, zNameAndProcs[setI].first()); - - specifiedProcessorFaces.set(setI, new labelList(fz.sortedToc())); - specifiedProcessor[setI] = zNameAndProcs[setI].second(); - nProcSets += fz.size(); - } - reduce(nProcSets, sumOp<label>()); - - - // Unblock all point connected faces - // 1. Mark all points on specifiedProcessorFaces - boolList procFacePoint(mesh.nPoints(), false); - forAll(specifiedProcessorFaces, setI) - { - const labelList& set = specifiedProcessorFaces[setI]; - forAll(set, fI) - { - const face& f = mesh.faces()[set[fI]]; - forAll(f, fp) - { - procFacePoint[f[fp]] = true; - } - } - } - syncTools::syncPointList(mesh, procFacePoint, orEqOp<bool>(), false); - - // 2. 
Unblock all faces on procFacePoint - forAll(procFacePoint, pointI) - { - if (procFacePoint[pointI]) - { - const labelList& pFaces = mesh.pointFaces()[pointI]; - forAll(pFaces, i) - { - blockedFace[pFaces[i]] = false; - } - } - } - syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>()); } } @@ -1277,6 +1476,8 @@ Foam::labelList Foam::decompositionMethod::decompose const scalarField& cellWeights ) { + // Collect all constraints + boolList blockedFace; PtrList<labelList> specifiedProcessorFaces; labelList specifiedProcessor; @@ -1304,6 +1505,19 @@ Foam::labelList Foam::decompositionMethod::decompose explicitConnections // baffles ); + + // Give any constraint the option of modifying the decomposition + + applyConstraints + ( + mesh, + blockedFace, + specifiedProcessorFaces, + specifiedProcessor, + explicitConnections, + finalDecomp + ); + return finalDecomp; } diff --git a/src/parallel/decompose/decompositionMethods/decompositionMethod/decompositionMethod.H b/src/parallel/decompose/decompositionMethods/decompositionMethod/decompositionMethod.H index c48a185d55d5deee86b05090764eda4c525de8df..eea0d631cf55b101085b14a14ea9942e81d15355 100644 --- a/src/parallel/decompose/decompositionMethods/decompositionMethod/decompositionMethod.H +++ b/src/parallel/decompose/decompositionMethods/decompositionMethod/decompositionMethod.H @@ -2,7 +2,7 @@ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | - \\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation + \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License @@ -36,8 +36,8 @@ SourceFiles #define decompositionMethod_H #include "polyMesh.H" -#include "pointField.H" #include "CompactListList.H" +#include "decompositionConstraint.H" namespace Foam { @@ -56,6 +56,8 @@ protected: const dictionary& decompositionDict_; label nProcessors_; + //- Optional constraints + PtrList<decompositionConstraint> constraints_; private: @@ -98,14 +100,7 @@ public: // Constructors //- Construct given the decomposition dictionary - decompositionMethod(const dictionary& decompositionDict) - : - decompositionDict_(decompositionDict), - nProcessors_ - ( - readLabel(decompositionDict.lookup("numberOfSubdomains")) - ) - {} + decompositionMethod(const dictionary& decompositionDict); //- Destructor @@ -226,6 +221,19 @@ public: CompactListList<label>& cellCells ); + //- Helper: determine (local or global) cellCells and face weights + // from mesh agglomeration. + // Uses mag of faceArea as weights + static void calcCellCells + ( + const polyMesh& mesh, + const labelList& agglom, + const label nLocalCoarse, + const bool parallel, + CompactListList<label>& cellCells, + CompactListList<scalar>& cellCellWeights + ); + //- Helper: extract constraints: // blockedface: existing faces where owner and neighbour on same // proc @@ -241,6 +249,19 @@ public: List<labelPair>& explicitConnections ); + //- Helper: apply constraints to a decomposition. 
This gives + // constraints opportunity to modify decomposition in case + // the native decomposition method has not obeyed all constraints + void applyConstraints + ( + const polyMesh& mesh, + const boolList& blockedFace, + const PtrList<labelList>& specifiedProcessorFaces, + const labelList& specifiedProcessor, + const List<labelPair>& explicitConnections, + labelList& finalDecomp + ); + // Decompose a mesh with constraints: // - blockedFace : whether owner and neighbour should be on same // processor diff --git a/src/parallel/decompose/ptscotchDecomp/ptscotchDecomp.H b/src/parallel/decompose/ptscotchDecomp/ptscotchDecomp.H index 6aa45a29030941fdce41829602268ea21b604024..5cb64e4be6de35f31437472f02bc5594e8664a2f 100644 --- a/src/parallel/decompose/ptscotchDecomp/ptscotchDecomp.H +++ b/src/parallel/decompose/ptscotchDecomp/ptscotchDecomp.H @@ -67,13 +67,6 @@ class ptscotchDecomp { // Private Member Functions - //- Insert list in front of list. - template<class Type> - static void prepend(const UList<Type>&, List<Type>&); - //- Insert list at end of list. - template<class Type> - static void append(const UList<Type>&, List<Type>&); - //- Check and print error message static void check(const int, const char*); @@ -177,12 +170,6 @@ public: // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // -#ifdef NoRepository - #include "ptscotchDecompTemplates.C" -#endif - -// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // - #endif // ************************************************************************* // diff --git a/src/renumber/renumberMethods/Make/options b/src/renumber/renumberMethods/Make/options index cc28bc2f00d0c12f1bf9380ef0f5c95bdcabf9ec..a97467a3329785d833b648b1d0576772420c3f86 100644 --- a/src/renumber/renumberMethods/Make/options +++ b/src/renumber/renumberMethods/Make/options @@ -1,9 +1,9 @@ EXE_INC = \ -I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \ + -I$(LIB_SRC)/dynamicMesh/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude LIB_LIBS = \ -ldecompositionMethods \ - -lfiniteVolume \ - -lmeshTools + -ldynamicMesh
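
For reference, the constraint handling added above can be driven from decomposeParDict either through the new optional "constraints" sub-dictionary (each entry supplies a "type" that selects the decompositionConstraint at run time) or through the pre-existing top-level keywords, which remain honoured when no equivalent constraint entry is given. A minimal sketch with illustrative names, values and processor numbers; any per-constraint keywords beyond "type" are defined by the individual constraint's dictionary constructor and are therefore omitted here:

    numberOfSubdomains  4;
    method              scotch;

    constraints
    {
        procFaces
        {
            type    singleProcessorFaceSets;
            // further keywords as required by the selected constraint
        }
    }

    // Backwards-compatible top-level keywords
    preservePatches          ("cyclic.*");
    preserveFaceZones        (zone1 zone2);
    preserveBaffles          true;
    singleProcessorFaceSets  ((faceSetA 0));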