Commit 73b83058 authored by henry

Added new extended stencil handling from Mattijs.

parent 3fd4253e
......@@ -83,6 +83,26 @@ const Type& Foam::MeshObject<Mesh, Type>::New
}
template<class Mesh, class Type>
template<class Data1, class Data2>
const Type& Foam::MeshObject<Mesh, Type>::New
(
const Mesh& mesh,
const Data1& d1,
const Data2& d2
)
{
if (!mesh.db().objectRegistry::foundObject<Type>(Type::typeName))
{
return store(new Type(mesh, d1, d2));
}
else
{
return mesh.db().objectRegistry::lookupObject<Type>(Type::typeName);
}
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * //
template<class Mesh, class Type>
......
......@@ -72,6 +72,9 @@ public:
template<class Data>
static const Type& New(const Mesh& mesh, const Data& d);
template<class Data1, class Data2>
static const Type& New(const Mesh& mesh, const Data1&, const Data2&);
// Destructor
......
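The new overload lets a MeshObject-derived type be constructed on demand with two extra arguments while still being cached in the mesh registry. A minimal usage sketch, assuming a hypothetical derived type demandDrivenStencil whose constructor takes the mesh plus two extra arguments (the class name and arguments are placeholders, not part of this commit):

    // First call constructs and registers the object with both arguments;
    // later calls return the cached instance from the mesh database.
    // 'demandDrivenStencil', 'width' and 'usePoints' are hypothetical.
    const label width = 2;
    const bool usePoints = true;

    const demandDrivenStencil& stencil =
        demandDrivenStencil::New(mesh, width, usePoints);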
......@@ -271,4 +271,144 @@ Foam::mapDistribute::mapDistribute
}
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void Foam::mapDistribute::compact(const boolList& elemIsUsed)
{
// 1. Send the usage flags back to the senders so they can delete the
// corresponding elements from their subMap; the same pruning is done to
// the local constructMap (and in the same order, see step 2 below).
// Send the elemIsUsed field to the neighbours. Uses the non-blocking
// code from mapDistribute but with the maps applied in reverse order.
{
List<boolList> sendFields(Pstream::nProcs());
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap_[domain];
if (domain != Pstream::myProcNo() && map.size() > 0)
{
boolList& subField = sendFields[domain];
subField.setSize(map.size());
forAll(map, i)
{
subField[i] = elemIsUsed[map[i]];
}
OPstream::write
(
Pstream::nonBlocking,
domain,
reinterpret_cast<const char*>(subField.begin()),
subField.size()*sizeof(bool)
);
}
}
// Set up receives from neighbours
List<boolList> recvFields(Pstream::nProcs());
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap_[domain];
if (domain != Pstream::myProcNo() && map.size() > 0)
{
recvFields[domain].setSize(map.size());
IPstream::read
(
Pstream::nonBlocking,
domain,
reinterpret_cast<char*>(recvFields[domain].begin()),
recvFields[domain].size()*sizeof(bool)
);
}
}
// Set up 'send' to myself - write directly into recvFields
{
const labelList& map = constructMap_[Pstream::myProcNo()];
recvFields[Pstream::myProcNo()].setSize(map.size());
forAll(map, i)
{
recvFields[Pstream::myProcNo()][i] = elemIsUsed[map[i]];
}
}
// Wait for all to finish
OPstream::waitRequests();
IPstream::waitRequests();
// Compact out all submap entries that are referring to unused elements
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap_[domain];
labelList newMap(map.size());
label newI = 0;
forAll(map, i)
{
if (recvFields[domain][i])
{
// So element is used on destination side
newMap[newI++] = map[i];
}
}
if (newI < map.size())
{
newMap.setSize(newI);
subMap_[domain].transfer(newMap);
}
}
}
// 2. Remove from the local constructMap all entries whose end-result
// (the element flagged in elemIsUsed) is not used.
label maxConstructIndex = -1;
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap_[domain];
labelList newMap(map.size());
label newI = 0;
forAll(map, i)
{
label destinationI = map[i];
// Keep the element only if it is used on the destination side
if (elemIsUsed[destinationI])
{
maxConstructIndex = max(maxConstructIndex, destinationI);
newMap[newI++] = destinationI;
}
}
if (newI < map.size())
{
newMap.setSize(newI);
constructMap_[domain].transfer(newMap);
}
}
constructSize_ = maxConstructIndex+1;
// Clear the schedule (note: not necessary if nothing changed)
schedulePtr_.clear();
}
// ************************************************************************* //
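compact() is intended to be called after a map has been built and the caller has worked out which constructed (received) elements it still needs. A rough sketch of that calling pattern, assuming the usual constructSize() accessor and a hypothetical stencilAddressing list of used slots:

    // Mark which constructed elements are still referenced locally.
    // 'map' is an existing mapDistribute; 'stencilAddressing' is hypothetical.
    boolList elemIsUsed(map.constructSize(), false);

    forAll(stencilAddressing, i)
    {
        elemIsUsed[stencilAddressing[i]] = true;
    }

    // Prune the local constructMap and (via the reverse communication in
    // compact) the senders' subMaps, so later distribute() calls move less data.
    map.compact(elemIsUsed);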
......@@ -51,6 +51,7 @@ SourceFiles
#include "labelList.H"
#include "labelPair.H"
#include "Pstream.H"
#include "boolList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
......@@ -155,6 +156,12 @@ public:
// Other
//- Compact the maps. Takes a bool per constructed element indicating
// whether it is used locally, and works out what this side and the
// sender side can remove from their maps.
void compact(const boolList& elemIsUsed);
//- Distribute data. Note: the schedule is only used for
// Pstream::scheduled for now; all other comms types use
// send-to-all, receive-from-all.
template<class T>
......@@ -168,6 +175,21 @@ public:
List<T>&
);
//- Distribute data. If multiple processors write to the same
// position their contributions are combined using cop.
template<class T, class CombineOp>
static void distribute
(
const Pstream::commsTypes commsType,
const List<labelPair>& schedule,
const label constructSize,
const labelListList& subMap,
const labelListList& constructMap,
List<T>&,
const CombineOp& cop,
const T& nullValue
);
//- Distribute data using default commsType.
template<class T>
void distribute(List<T>& fld) const
......
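As a hedged illustration of the new combining overload declared above: the sketch below sums the contributions of all processors that write to the same construct slot, assuming the standard OpenFOAM plusEqOp functor and the subMap()/constructMap() accessors; nLocalElems and the field contents are placeholders.

    // 'fld' holds the local source values before distribution; slots that
    // nobody writes to keep the nullValue (scalar zero here).
    scalarList fld(nLocalElems, 1.0);

    mapDistribute::distribute
    (
        Pstream::nonBlocking,
        List<labelPair>(),      // schedule: only used for Pstream::scheduled
        map.constructSize(),
        map.subMap(),
        map.constructMap(),
        fld,
        plusEqOp<scalar>(),
        scalar(0)
    );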
......@@ -268,8 +268,122 @@ void Foam::mapDistribute::distribute
}
// Set up 'send' to myself
{
const labelList& map = subMap[Pstream::myProcNo()];
List<T>& subField = sendFields[Pstream::myProcNo()];
subField.setSize(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
}
// Combine the pieces. Note that the field storage can be reused
field.setSize(constructSize);
// Receive sub field from myself (sendFields[Pstream::myProcNo()])
{
const labelList& map = constructMap[Pstream::myProcNo()];
const List<T>& subField = sendFields[Pstream::myProcNo()];
forAll(map, i)
{
field[map[i]] = subField[i];
}
}
// Wait for all to finish
OPstream::waitRequests();
IPstream::waitRequests();
// Collect neighbour fields
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size() > 0)
{
if (recvFields[domain].size() != map.size())
{
FatalErrorIn
(
"template<class T>\n"
"void mapDistribute::distribute\n"
"(\n"
" const Pstream::commsTypes commsType,\n"
" const List<labelPair>& schedule,\n"
" const label constructSize,\n"
" const labelListList& subMap,\n"
" const labelListList& constructMap,\n"
" List<T>& field\n"
")\n"
) << "Expected from processor " << domain
<< " " << map.size() << " but received "
<< recvFields[domain].size() << " elements."
<< abort(FatalError);
}
forAll(map, i)
{
field[map[i]] = recvFields[domain][i];
}
}
}
}
else
{
FatalErrorIn("mapDistribute::distribute(..)")
<< "Unknown communication schedule " << commsType
<< abort(FatalError);
}
}
// Distribute list.
template<class T, class CombineOp>
void Foam::mapDistribute::distribute
(
const Pstream::commsTypes commsType,
const List<labelPair>& schedule,
const label constructSize,
const labelListList& subMap,
const labelListList& constructMap,
List<T>& field,
const CombineOp& cop,
const T& nullValue
)
{
if (commsType == Pstream::blocking)
{
// Since the sends are buffered we can reuse the field to collect the
// received data.
// Send sub field to neighbour
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap[domain];
if (domain != Pstream::myProcNo() && map.size() > 0)
{
List<T> subField(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
OPstream toNbr(Pstream::blocking, domain);
toNbr << subField;
}
}
// Subset myself
const labelList& mySubMap = subMap[Pstream::myProcNo()];
......@@ -279,14 +393,231 @@ void Foam::mapDistribute::distribute
subField[i] = field[mySubMap[i]];
}
// Receive sub field from myself (subField)
const labelList& map = constructMap[Pstream::myProcNo()];
field.setSize(constructSize);
field = nullValue;
forAll(map, i)
{
cop(field[map[i]], subField[i]);
}
// Receive sub field from neighbour
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size() > 0)
{
IPstream fromNbr(Pstream::blocking, domain);
List<T> subField(fromNbr);
if (subField.size() != map.size())
{
FatalErrorIn
(
"template<class T, class CombineOp>\n"
"void mapDistribute::distribute\n"
"(\n"
" const Pstream::commsTypes commsType,\n"
" const List<labelPair>& schedule,\n"
" const label constructSize,\n"
" const labelListList& subMap,\n"
" const labelListList& constructMap,\n"
" List<T>& field,\n"
" const CombineOp& cop,\n"
" const T& nullValue\n"
")\n"
) << "Expected from processor " << domain
<< " " << map.size() << " but received "
<< subField.size() << " elements."
<< abort(FatalError);
}
forAll(map, i)
{
cop(field[map[i]], subField[i]);
}
}
}
}
else if (commsType == Pstream::scheduled)
{
// Need to make sure I don't overwrite field with received data
// since the data might need to be sent to another processor. So
// allocate a new field for the results.
List<T> newField(constructSize, nullValue);
// Subset myself
const labelList& mySubMap = subMap[Pstream::myProcNo()];
List<T> subField(mySubMap.size());
forAll(mySubMap, i)
{
subField[i] = field[mySubMap[i]];
}
// Receive sub field from myself (subField)
const labelList& map = constructMap[Pstream::myProcNo()];
forAll(map, i)
{
cop(newField[map[i]], subField[i]);
}
// Schedule will already have pruned 0-sized comms
forAll(schedule, i)
{
const labelPair& twoProcs = schedule[i];
label sendProc = twoProcs[0];
label recvProc = twoProcs[1];
if (Pstream::myProcNo() == sendProc)
{
// I am sender. Send to recvProc.
const labelList& map = subMap[recvProc];
List<T> subField(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
OPstream toNbr(Pstream::scheduled, recvProc);
toNbr << subField;
}
else
{
// I am receiver. Receive from sendProc.
IPstream fromNbr(Pstream::scheduled, sendProc);
List<T> subField(fromNbr);
const labelList& map = constructMap[sendProc];
if (subField.size() != map.size())
{
FatalErrorIn
(
"template<class T, class CombineOp>\n"
"void mapDistribute::distribute\n"
"(\n"
" const Pstream::commsTypes commsType,\n"
" const List<labelPair>& schedule,\n"
" const label constructSize,\n"
" const labelListList& subMap,\n"
" const labelListList& constructMap,\n"
" List<T>& field,\n"
" const CombineOp& cop,\n"
" const T& nullValue\n"
")\n"
) << "Expected from processor " << sendProc
<< " " << map.size() << " but received "
<< subField.size() << " elements."
<< abort(FatalError);
}
forAll(map, i)
{
cop(newField[map[i]], subField[i]);
}
}
}
field.transfer(newField);
}
else if (commsType == Pstream::nonBlocking)
{
if (!contiguous<T>())
{
FatalErrorIn
(
"template<class T, class CombineOp>\n"
"void mapDistribute::distribute\n"
"(\n"
" const Pstream::commsTypes commsType,\n"
" const List<labelPair>& schedule,\n"
" const label constructSize,\n"
" const labelListList& subMap,\n"
" const labelListList& constructMap,\n"
" List<T>& field,\n"
" const CombineOp& cop,\n"
" const T& nullValue\n"
")\n"
) << "Non-blocking only supported for contiguous data."
<< exit(FatalError);
}
// Set up sends to neighbours
List<List<T > > sendFields(Pstream::nProcs());
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap[domain];
if (domain != Pstream::myProcNo() && map.size() > 0)
{
List<T>& subField = sendFields[domain];
subField.setSize(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
OPstream::write
(
Pstream::nonBlocking,
domain,
reinterpret_cast<const char*>(subField.begin()),
subField.size()*sizeof(T)
);
}
}
// Set up receives from neighbours
List<List<T > > recvFields(Pstream::nProcs());
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size() > 0)
{
recvFields[domain].setSize(map.size());
IPstream::read
(
Pstream::nonBlocking,
domain,
reinterpret_cast<char*>(recvFields[domain].begin()),
recvFields[domain].size()*sizeof(T)
);
}
}
// Set up 'send' to myself
{
const labelList& map = subMap[Pstream::myProcNo()];
List<T>& subField = sendFields[Pstream::myProcNo()];
subField.setSize(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
}
// Combine the pieces. Note that the field storage can be reused
field.setSize(constructSize);
field = nullValue;
// Receive sub field from myself (sendFields[Pstream::myProcNo()])
{
const labelList& map = constructMap[Pstream::myProcNo()];
const List<T>& subField = sendFields[Pstream::myProcNo()];
forAll(map, i)
{
cop(field[map[i]], subField[i]);
}
}
......@@ -325,7 +656,7 @@ void Foam::mapDistribute::distribute