Updated cfMesh to v1.0.1

This commit is contained in:
Franjo Juretic 2014-11-08 23:10:28 -01:00 committed by Dominik Christ
parent ae870dc409
commit fee6383c54
138 changed files with 653487 additions and 944 deletions

View file

@ -0,0 +1,166 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
Converts an FMS surface file into another surface format. Optionally exports
surface subsets as separate surface files and feature edges in VTK format
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "Time.H"
#include "triSurf.H"
#include "triSurfaceCopyParts.H"
#include "demandDrivenData.H"
#include "OFstream.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void exportFeatureEdges
(
const triSurf& origSurf,
const fileName& edgeFileName
)
{
OFstream file(edgeFileName);
const pointField& points = origSurf.points();
labelList newPointLabel(points.size(), -1);
label nPoints(0);
const edgeLongList& featureEdges = origSurf.featureEdges();
forAll(featureEdges, feI)
{
const edge& e = featureEdges[feI];
if( newPointLabel[e[0]] == -1 )
newPointLabel[e[0]] = nPoints++;
if( newPointLabel[e[1]] == -1 )
newPointLabel[e[1]] = nPoints++;
}
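//- newPointLabel now maps each original surface point used by a feature
//- edge to its position in the compacted point list written below; points
//- not referenced by any feature edge keep the value -1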
pointField pCopy(nPoints);
forAll(newPointLabel, pI)
{
if( newPointLabel[pI] < 0 )
continue;
pCopy[newPointLabel[pI]] = points[pI];
}
//- write the header
file << "# vtk DataFile Version 3.0\n";
file << "vtk output\n";
file << "ASCII\n";
file << "DATASET POLYDATA\n";
//- write points
file << "POINTS " << pCopy.size() << " float\n";
forAll(pCopy, pI)
{
const point& p = pCopy[pI];
file << p.x() << ' ' << p.y() << ' ' << p.z() << '\n';
}
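//- legacy VTK LINES header: number of lines followed by the total size of
//- the cell list; each two-point line is stored as three integers (the
//- count 2 followed by the two point labels), hence 3*featureEdges.size()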
file << "\nLINES " << featureEdges.size()
<< ' ' << 3*featureEdges.size() << nl;
forAll(featureEdges, edgeI)
{
const edge& e = featureEdges[edgeI];
file << "2 " << newPointLabel[e[0]]
<< token::SPACE << newPointLabel[e[1]] << nl;
}
file << nl;
if( !file )
FatalErrorIn
(
"void exportFeatureEdges(const triSurf&, const fileName&)"
) << "Writting of feature edges failed!" << exit(FatalError);
}
int main(int argc, char *argv[])
{
argList::noParallel();
argList::validArgs.clear();
argList::validArgs.append("input surface file");
argList::validArgs.append("output surface file");
argList::validOptions.insert("exportSubsets", "");
argList::validOptions.insert("exportFeatureEdges", "");
argList args(argc, argv);
fileName inFileName(args.args()[1]);
fileName outFileName(args.args()[2]);
fileName outFileNoExt = outFileName.lessExt();
fileName outExtension = outFileName.ext();
Info << "Out file no ext " << outFileNoExt << endl;
Info << "Extension " << outExtension << endl;
//- read the input surface
triSurf origSurf(inFileName);
//- write the surface in the requested format
origSurf.writeSurface(outFileName);
//- export surface subsets as separate surface meshes
if( args.options().found("exportSubsets") )
{
DynList<label> subsetIDs;
origSurf.facetSubsetIndices(subsetIDs);
triSurfaceCopyParts copyParts(origSurf);
forAll(subsetIDs, subsetI)
{
//- get the name of the subset
triSurf copySurf;
wordList subsetName(1);
subsetName[0] = origSurf.facetSubsetName(subsetIDs[subsetI]);
//- create a surface mesh corresponding to the subset
copyParts.copySurface(subsetName, copySurf);
//- write the mesh on disk
fileName fName = outFileNoExt+"_facetSubset_"+subsetName[0];
fName += '.'+outExtension;
copySurf.writeSurface(fName);
}
}
if( args.options().found("exportFeatureEdges") )
{
fileName fName = outFileNoExt+"_featureEdges";
fName += ".vtk";
exportFeatureEdges(origSurf, fName);
}
Info << "End\n" << endl;
return 0;
}
// ************************************************************************* //

View file

@ -0,0 +1,3 @@
FMSToSurface.C
EXE = $(FOAM_APPBIN)/FMSToSurface

View file

@ -0,0 +1,9 @@
EXE_INC = \
-I$(FOAM_SRC)/mesh/cfMesh/meshLibrary/lnInclude \
-I$(FOAM_SRC)/meshTools/lnInclude \
-I$(FOAM_SRC)/triSurface/lnInclude
EXE_LIBS = \
-ltriSurface \
-lmeshTools \
-lmeshLibrary

View file

@ -0,0 +1,545 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
cfMesh utility to convert a surface file to VTK multiblock dataset
format, including the patches, feature edges and surface features.
Author
Ivor Clifford <ivor.clifford@psi.ch>
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "autoPtr.H"
#include "OFstream.H"
#include "triSurf.H"
#include "triSurfModifier.H"
#include "xmlTag.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
//- Write the supplied pointList in serial vtkPolyData format
void writePointsToVTK
(
const fileName& fn,
const string& title,
const UList<point>& points
)
{
xmlTag xmlRoot("VTKFile");
xmlRoot.addAttribute("type", "PolyData");
xmlTag& xmlPolyData = xmlRoot.addChild("PolyData");
xmlTag& xmlPiece = xmlPolyData.addChild("Piece");
xmlPiece.addAttribute("NumberOfPoints", points.size());
xmlTag& xmlPoints = xmlPiece.addChild("Points");
xmlTag& xmlPointData = xmlPoints.addChild("DataArray");
xmlPointData.addAttribute("type", "Float32");
xmlPointData.addAttribute("NumberOfComponents", 3);
xmlPointData.addAttribute("format", "ascii");
xmlPointData << points;
OFstream os(fn);
os << xmlRoot << endl;
Info << "Created " << fn << endl;
}
//- Write the supplied addressed pointList in serial vtkPolyData format
void writePointsToVTK
(
const fileName& fn,
const string& title,
const UList<point>& points,
unallocLabelList& addr
)
{
// Create subaddressed points
pointField newPoints(addr.size());
forAll(addr, i)
{
newPoints[i] = points[addr[i]];
}
writePointsToVTK
(
fn,
title,
newPoints
);
}
//- Write the supplied edgeList in serial vtkPolyData format
void writeEdgesToVTK
(
const fileName& fn,
const string& title,
const UList<point>& points,
const LongList<edge>& edges
)
{
labelList connectivity(edges.size());
forAll(edges, edgeI)
{
connectivity[edgeI] = 2*(edgeI+1);
}
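// Despite its name, this list holds the VTK XML "offsets" array: a running
// total of points per line (2, 4, 6, ...). It is written to the "offsets"
// DataArray below, while the edge point labels go into "connectivity".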
xmlTag xmlRoot("VTKFile");
xmlRoot.addAttribute("type", "PolyData");
xmlTag& xmlPolyData = xmlRoot.addChild("PolyData");
xmlTag& xmlPiece = xmlPolyData.addChild("Piece");
xmlPiece.addAttribute("NumberOfPoints", points.size());
xmlPiece.addAttribute("NumberOfLines", edges.size());
xmlTag& xmlPoints = xmlPiece.addChild("Points");
xmlTag& xmlPointData = xmlPoints.addChild("DataArray");
xmlPointData.addAttribute("type", "Float32");
xmlPointData.addAttribute("NumberOfComponents", 3);
xmlPointData.addAttribute("format", "ascii");
xmlPointData << points;
xmlTag& xmlEdges = xmlPiece.addChild("Lines");
xmlTag& xmlEdgeData = xmlEdges.addChild("DataArray");
xmlEdgeData.addAttribute("type", "Int32");
xmlEdgeData.addAttribute("Name", "connectivity");
xmlEdgeData.addAttribute("format", "ascii");
xmlEdgeData << edges;
xmlTag& xmlConnectData = xmlEdges.addChild("DataArray");
xmlConnectData.addAttribute("type", "Int32");
xmlConnectData.addAttribute("Name", "offsets");
xmlConnectData.addAttribute("format", "ascii");
xmlConnectData << connectivity;
OFstream os(fn);
os << xmlRoot << endl;
Info << "Created " << fn << endl;
}
//- Write the supplied edgeList subset in serial vtkPolyData format
void writeEdgesToVTK
(
const fileName& fn,
const string& title,
const UList<point>& points,
const LongList<edge>& edges,
const unallocLabelList& addr
)
{
// Remove unused points and create subaddressed edges
DynamicList<point> newPoints;
labelList newPointAddr(points.size(), -1);
LongList<edge> newEdges(addr.size());
forAll(addr, addrI)
{
label edgeI = addr[addrI];
const edge& curEdge = edges[edgeI];
edge& newEdge = newEdges[addrI];
forAll(curEdge, i)
{
label pointId = curEdge[i];
if (newPointAddr[pointId] == -1)
{
newPoints.append(points[pointId]);
newPointAddr[pointId] = newPoints.size()-1;
}
newEdge[i] = newPointAddr[pointId];
}
}
writeEdgesToVTK
(
fn,
title,
newPoints,
newEdges
);
}
//- Write the supplied facet list in serial vtkPolyData format
void writeFacetsToVTK
(
const fileName& fn,
const string& title,
const UList<point>& points,
const LongList<labelledTri>& facets
)
{
labelList connectivity(facets.size());
forAll(facets, faceI)
{
connectivity[faceI] = 3*(faceI+1);
}
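// As in writeEdgesToVTK, this list is the VTK XML "offsets" array for the
// triangles (3, 6, 9, ...); the facet point labels themselves are written
// to the "connectivity" DataArray.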
labelList regionData(facets.size());
forAll(facets, faceI)
{
regionData[faceI] = facets[faceI].region();
}
xmlTag xmlRoot("VTKFile");
xmlRoot.addAttribute("type", "PolyData");
xmlTag& xmlPolyData = xmlRoot.addChild("PolyData");
xmlTag& xmlPiece = xmlPolyData.addChild("Piece");
xmlPiece.addAttribute("NumberOfPoints", points.size());
xmlPiece.addAttribute("NumberOfPolys", facets.size());
xmlTag& xmlPoints = xmlPiece.addChild("Points");
xmlTag& xmlPointData = xmlPoints.addChild("DataArray");
xmlPointData.addAttribute("type", "Float32");
xmlPointData.addAttribute("NumberOfComponents", 3);
xmlPointData.addAttribute("format", "ascii");
xmlPointData << points;
xmlTag& xmlPolys = xmlPiece.addChild("Polys");
xmlTag& xmlPolyDataArray = xmlPolys.addChild("DataArray");
xmlPolyDataArray.addAttribute("type", "Int32");
xmlPolyDataArray.addAttribute("Name", "connectivity");
xmlPolyDataArray.addAttribute("format", "ascii");
xmlPolyDataArray << facets;
xmlTag& xmlConnectData = xmlPolys.addChild("DataArray");
xmlConnectData.addAttribute("type", "Int32");
xmlConnectData.addAttribute("Name", "offsets");
xmlConnectData.addAttribute("format", "ascii");
xmlConnectData << connectivity;
xmlTag& xmlCellData = xmlPiece.addChild("CellData");
xmlTag& xmlCellDataArray = xmlCellData.addChild("DataArray");
xmlCellDataArray.addAttribute("type", "Int32");
xmlCellDataArray.addAttribute("Name", "region");
xmlCellDataArray.addAttribute("format", "ascii");
xmlCellDataArray << regionData;
OFstream os(fn);
os << xmlRoot << endl;
Info << "Created " << fn << endl;
}
//- Write an addressed subset of the supplied facet list
//- in serial vtkPolyData format
void writeFacetsToVTK
(
const fileName& fn,
const string& title,
const pointField& points,
const LongList<labelledTri>& facets,
const unallocLabelList& addr
)
{
// Remove unused points and create subaddressed facets
DynamicList<point> newPoints;
labelList newPointAddr(points.size(), -1);
LongList<labelledTri> newFacets(addr.size());
forAll(addr, addrI)
{
label faceI = addr[addrI];
const labelledTri& facet = facets[faceI];
const FixedList<label, 3>& pointIds = facet;
FixedList<label, 3> newPointIds;
forAll(pointIds, i)
{
label pointId = pointIds[i];
if (newPointAddr[pointId] == -1)
{
newPoints.append(points[pointId]);
newPointAddr[pointId] = newPoints.size()-1;
}
newPointIds[i] = newPointAddr[pointId];
}
newFacets[addrI] = labelledTri
(
newPointIds[0],
newPointIds[1],
newPointIds[2],
facet.region()
);
}
writeFacetsToVTK
(
fn,
title,
newPoints,
newFacets
);
}
int main(int argc, char *argv[])
{
argList::noParallel();
argList::validArgs.clear();
argList::validArgs.append("input surface file");
argList::validArgs.append("output prefix");
argList args(argc, argv);
// Process commandline arguments
fileName inFileName(args.args()[1]);
fileName outPrefix(args.args()[2]);
// Read original surface
triSurf origSurf(inFileName);
const pointField& points = origSurf.points();
const LongList<labelledTri>& facets = origSurf.facets();
const LongList<edge>& edges = origSurf.featureEdges();
const geometricSurfacePatchList& patches = origSurf.patches();
label index = 0;
// Create file structure for multiblock dataset
mkDir(outPrefix);
mkDir(outPrefix + "/patches");
mkDir(outPrefix + "/pointSubsets");
mkDir(outPrefix + "/edgeSubsets");
mkDir(outPrefix + "/faceSubsets");
// Create VTK multiblock dataset file
xmlTag xmlRoot("VTKFile");
xmlRoot.addAttribute("type", "vtkMultiBlockDataSet");
xmlRoot.addAttribute("version", "1.0");
xmlRoot.addAttribute("byte_order", "LittleEndian");
xmlTag& xmlDataSet = xmlRoot.addChild("vtkMultiBlockDataSet");
// Write faces and feature edges
{
fileName fn = outPrefix / "facets.vtp";
writeFacetsToVTK
(
outPrefix / "facets.vtp",
outPrefix,
points,
facets
);
xmlTag& tag = xmlDataSet.addChild("DataSet");
tag.addAttribute("index", Foam::name(index++));
tag.addAttribute("name", "facets");
tag.addAttribute("file", fn);
}
{
fileName fn = outPrefix / "featureEdges.vtp";
writeEdgesToVTK
(
outPrefix / "featureEdges.vtp",
"featureEdges",
points,
edges
);
xmlTag& tag = xmlDataSet.addChild("DataSet");
tag.addAttribute("index", Foam::name(index++));
tag.addAttribute("name", "featureEdges");
tag.addAttribute("file", fn);
}
// Write patches
// Create patch addressing
List<DynamicList<label> > patchAddr(patches.size());
forAll(facets, faceI)
{
patchAddr[facets[faceI].region()].append(faceI);
}
{
xmlTag& xmlBlock = xmlDataSet.addChild("Block");
xmlBlock.addAttribute("index", Foam::name(index++));
xmlBlock.addAttribute("name", "patches");
forAll(patches, patchI)
{
word patchName = patches[patchI].name();
fileName fn = outPrefix / "patches" / patchName + ".vtp";
writeFacetsToVTK
(
fn,
patchName,
points,
facets,
patchAddr[patchI]
);
xmlTag& tag = xmlBlock.addChild("DataSet");
tag.addAttribute("index", Foam::name(patchI));
tag.addAttribute("name", patchName);
tag.addAttribute("file", fn);
}
}
// Write point subsets
{
xmlTag& xmlBlock = xmlDataSet.addChild("Block");
xmlBlock.addAttribute("index", Foam::name(index++));
xmlBlock.addAttribute("name", "pointSubsets");
DynList<label> subsetIndices;
labelList subsetAddr;
origSurf.pointSubsetIndices(subsetIndices);
forAll(subsetIndices, id)
{
word subsetName = origSurf.pointSubsetName(id);
origSurf.pointsInSubset(id, subsetAddr);
fileName fn = outPrefix / "pointSubsets" / subsetName + ".vtp";
writePointsToVTK
(
fn,
subsetName,
points,
subsetAddr
);
xmlTag& tag = xmlBlock.addChild("DataSet");
tag.addAttribute("index", Foam::name(id));
tag.addAttribute("name", subsetName);
tag.addAttribute("file", fn);
}
}
// Write edge subsets
{
xmlTag& xmlBlock = xmlDataSet.addChild("Block");
xmlBlock.addAttribute("index", Foam::name(index++));
xmlBlock.addAttribute("name", "edgeSubsets");
DynList<label> subsetIndices;
labelList subsetAddr;
origSurf.edgeSubsetIndices(subsetIndices);
forAll(subsetIndices, id)
{
word subsetName = origSurf.edgeSubsetName(id);
origSurf.edgesInSubset(id, subsetAddr);
fileName fn = outPrefix / "edgeSubsets" / subsetName + ".vtp";
writeEdgesToVTK
(
fn,
subsetName,
points,
edges,
subsetAddr
);
xmlTag& tag = xmlBlock.addChild("DataSet");
tag.addAttribute("index", Foam::name(id));
tag.addAttribute("name", subsetName);
tag.addAttribute("file", fn);
}
}
// Write facet subsets
{
xmlTag& xmlBlock = xmlDataSet.addChild("Block");
xmlBlock.addAttribute("index", Foam::name(index++));
xmlBlock.addAttribute("name", "faceSubsets");
DynList<label> subsetIndices;
labelList subsetAddr;
origSurf.facetSubsetIndices(subsetIndices);
forAll(subsetIndices, id)
{
word subsetName = origSurf.facetSubsetName(id);
origSurf.facetsInSubset(id, subsetAddr);
fileName fn = outPrefix / "faceSubsets" / subsetName + ".vtp";
writeFacetsToVTK
(
fn,
subsetName,
points,
facets,
subsetAddr
);
xmlTag& tag = xmlBlock.addChild("DataSet");
tag.addAttribute("index", Foam::name(id));
tag.addAttribute("name", subsetName);
tag.addAttribute("file", fn);
}
}
OFstream os(outPrefix + ".vtm");
os << xmlRoot << endl;
Info << "Created " << outPrefix + ".vtm" << endl;
Info << "End\n" << endl;
return 0;
}
// ************************************************************************* //

View file

@ -0,0 +1,3 @@
FMSToVTK.C
EXE = $(FOAM_APPBIN)/FMSToVTK

View file

@ -0,0 +1,9 @@
EXE_INC = \
-I$(FOAM_SRC)/mesh/cfMesh/meshLibrary/lnInclude \
-I$(FOAM_SRC)/meshTools/lnInclude \
-I$(FOAM_SRC)/triSurface/lnInclude
EXE_LIBS = \
-ltriSurface \
-lmeshLibrary \
-lmeshTools

View file

@ -0,0 +1,297 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | foam-extend: Open Source CFD
\\ / O peration |
\\ / A nd | For copyright notice see file Copyright
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of foam-extend.
foam-extend is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
foam-extend is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::xmlTag
Description
Simple XML tag class allowing child tags and attributes. Specialized
output stream operators are provided to display or write the XML
structure.
Author
Ivor Clifford <ivor.clifford@psi.ch>
\*---------------------------------------------------------------------------*/
#ifndef XMLtag_H
#define XMLtag_H
#include "OStringStream.H"
#include "HashTable.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class xmlTag Declaration
\*---------------------------------------------------------------------------*/
class xmlTag
:
public OStringStream
{
// Private data
//- Tag name
word name_;
//- Attributes
HashTable<string> attributes_;
//- Child tags
DynamicList<xmlTag> children_;
public:
// Constructors
//- Null constructor
xmlTag()
:
OStringStream(),
name_("unknown"),
attributes_(),
children_()
{}
//- Construct given tag name
xmlTag(const word& name)
:
OStringStream(),
name_(name),
attributes_(),
children_()
{}
//- Construct as copy
xmlTag(const xmlTag& tag)
:
OStringStream(tag),
name_(tag.name_),
attributes_(tag.attributes_),
children_(tag.children_)
{}
//- Destructor
~xmlTag()
{}
// Member Functions
//- Add an attribute
template<class T>
void addAttribute(const keyType& key, const T& value)
{
OStringStream os;
os << value;
attributes_.insert(key, os.str());
};
//- Add a fileName attribute
void addAttribute(const keyType& key, const fileName& value)
{
attributes_.insert(key, value);
};
//- Add a string attribute
void addAttribute(const keyType& key, const string& value)
{
attributes_.insert(key, value);
};
//- Add a word attribute
void addAttribute(const keyType& key, const word& value)
{
attributes_.insert(key, value);
};
//- Add a child
xmlTag& addChild(const xmlTag& tag)
{
children_.append(tag);
return children_[children_.size()-1];
};
//- Create and add a new child
xmlTag& addChild(const word& name)
{
return addChild(xmlTag(name));
}
// Member Operators
void operator=(const xmlTag& tag)
{
name_ = tag.name_;
attributes_ = tag.attributes_;
children_ = tag.children_;
OStringStream::rewind();
Foam::operator<<(*this, tag.str().c_str());
};
// Friend IOstream Operators
friend Ostream& operator<<(Ostream&, const xmlTag&);
template<class Form, class Cmpt, int nCmpt>
friend xmlTag& operator<<(xmlTag&, const VectorSpace<Form, Cmpt, nCmpt>&);
friend xmlTag& operator<<(xmlTag&, const labelledTri&);
template<class T, unsigned Size>
friend xmlTag& operator<<(xmlTag&, const FixedList<T, Size>&);
template<class T>
friend xmlTag& operator<<(xmlTag&, const LongList<T>&);
template<class T>
friend xmlTag& operator<<(xmlTag&, const UList<T>&);
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
//- Write the tag in XML format to the supplied output stream
Ostream& operator<<(Ostream& os, const xmlTag& tag)
{
// Tag name
os << indent << '<' << tag.name_;
// Attributes and text
for
(
HashTable<string>::const_iterator iter = tag.attributes_.cbegin();
iter != tag.attributes_.cend();
++iter
)
{
os << token::SPACE << iter.key() << '=' << iter();
}
if (tag.str().size() || tag.children_.size())
{
os << '>' << nl;
// Children
os.incrIndent();
forAll(tag.children_, i)
{
os << tag.children_[i];
}
os.decrIndent();
// Tag text
os << tag.str().c_str();
// Close tag
os << indent << "</" << tag.name_ << '>' << endl;
}
else
{
// Empty element tag
os << "/>" << endl;
}
return os;
}
//- Append the supplied data to the tag text
template<class T>
xmlTag& operator<<(xmlTag& tag, const UList<T>& data)
{
forAll(data, i)
{
tag << data[i] << token::SPACE;
}
tag << nl;
return tag;
}
//- Append the supplied data to the tag text
template<class T>
xmlTag& operator<<(xmlTag& tag, const LongList<T>& data)
{
forAll(data, i)
{
tag << data[i] << token::SPACE;
}
tag << nl;
return tag;
}
//- Append the supplied data to the tag text
template<class Form, class Cmpt, int nCmpt>
xmlTag& operator<<(xmlTag& tag, const VectorSpace<Form, Cmpt, nCmpt>& data)
{
forAll(data, i)
{
tag << data[i] << token::SPACE;
}
tag << nl;
return tag;
}
//- Append the supplied data to the tag text
template<class T, unsigned Size>
xmlTag& operator<<(xmlTag& tag, const FixedList<T, Size>& data)
{
forAll(data, i)
{
tag << data[i] << token::SPACE;
}
tag << nl;
return tag;
}
//- Append the supplied data to the tag text
xmlTag& operator<<(xmlTag& tag, const labelledTri& data)
{
const triFace& tFace = data;
return tag << tFace;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
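For orientation, a minimal usage sketch of the xmlTag class declared above (not part of this commit; it assumes a foam-extend build environment, and the file name and main() wrapper are only illustrative, mirroring how FMSToVTK.C drives the class):

    #include "xmlTag.H"
    #include "OFstream.H"

    using namespace Foam;

    int main()
    {
        // builds <VTKFile type="PolyData"><PolyData><Piece NumberOfPoints="0"/></PolyData></VTKFile>
        xmlTag root("VTKFile");
        root.addAttribute("type", "PolyData");

        xmlTag& piece = root.addChild("PolyData").addChild("Piece");
        piece.addAttribute("NumberOfPoints", 0);

        // the Ostream operator writes the tag tree as indented XML
        OFstream os("example.vtp");
        os << root << endl;

        return 0;
    }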

View file

@ -0,0 +1,3 @@
importSurfaceAsSubset.C
EXE = $(FOAM_APPBIN)/importSurfaceAsSubset

View file

@ -0,0 +1,9 @@
EXE_INC = \
-I$(FOAM_SRC)/mesh/cfMesh/meshLibrary/lnInclude \
-I$(LIB_SRC)/triSurface/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude
EXE_LIBS = \
-ltriSurface \
-lmeshLibrary \
-lmeshTools

View file

@ -0,0 +1,84 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
Imports a surface mesh as a facet subset of a master surface mesh. The
master surface is written in fms format so that the subset is preserved
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "IFstream.H"
#include "fileName.H"
#include "triSurf.H"
#include "OFstream.H"
#include "OSspecific.H"
#include "demandDrivenData.H"
#include "triSurfaceImportSurfaceAsSubset.H"
#include <cstdlib>
#include <sstream>
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Main program:
using namespace Foam;
int main(int argc, char *argv[])
{
argList::noParallel();
argList::validArgs.clear();
argList::validArgs.append("master surface file");
argList::validArgs.append("import surface file");
argList args(argc, argv);
fileName inFileName(args.args()[1]);
fileName importFileName(args.args()[2]);
triSurf originalSurface(inFileName);
triSurf importedSurface(importFileName);
triSurfaceImportSurfaceAsSubset importSurf(originalSurface);
importSurf.addSurfaceAsSubset(importedSurface, importFileName.lessExt());
if( inFileName.ext() == "fms" )
{
originalSurface.writeSurface(inFileName);
}
else
{
fileName newName = inFileName.lessExt();
newName.append(".fms");
Warning << "Writting surface as " << newName
<< " to preserve the subset!!" << endl;
originalSurface.writeSurface(newName);
}
Info << "End\n" << endl;
return 0;
}
// ************************************************************************* //

View file

@ -0,0 +1,3 @@
improveSymmetryPlanes.C
EXE = $(FOAM_APPBIN)/improveSymmetryPlanes

View file

@ -0,0 +1,7 @@
EXE_INC = \
-I$(FOAM_SRC)/mesh/cfMesh/meshLibrary/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude
EXE_LIBS = \
-lmeshTools \
-lmeshLibrary

View file

@ -0,0 +1,57 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
Ensures that all mesh points belonging to a symmetryPlane are
in a plane.
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "Time.H"
#include "polyMeshGenModifier.H"
#include "symmetryPlaneOptimisation.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
int main(int argc, char *argv[])
{
# include "setRootCase.H"
# include "createTime.H"
polyMeshGen pmg(runTime);
pmg.read();
Info << "Starting optimisation of symmetry planes" << endl;
symmetryPlaneOptimisation(pmg).optimizeSymmetryPlanes();
Info << "Writing mesh" << endl;
pmg.write();
Info << "End\n" << endl;
return 0;
}
// ************************************************************************* //

View file

@ -0,0 +1,3 @@
mergeSurfacePatches.C
EXE = $(FOAM_APPBIN)/mergeSurfacePatches

View file

@ -0,0 +1,9 @@
EXE_INC = \
-I$(FOAM_SRC)/mesh/cfMesh/meshLibrary/lnInclude \
-I$(FOAM_SRC)/meshTools/lnInclude \
-I$(FOAM_SRC)/triSurface/lnInclude
EXE_LIBS = \
-ltriSurface \
-lmeshLibrary \
-lmeshTools

View file

@ -0,0 +1,403 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
cfMesh utility to merge the supplied list of patches onto a single
patch.
Author
Ivor Clifford <ivor.clifford@psi.ch>
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "autoPtr.H"
#include "Time.H"
#include "triSurf.H"
#include "triSurfModifier.H"
#include "demandDrivenData.H"
#include "Pair.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Find the supplied list of patch names and return a list of patch Ids
void getPatchIds
(
const triSurf& origSurf,
const wordList& patchNames,
DynamicList<label>& patchIds
)
{
const geometricSurfacePatchList& origPatches = origSurf.patches();
// Create patch name map
HashSet<word> patchNameHash(patchNames);
// Find selected patches
label nFound = 0;
forAll(origPatches, patchI)
{
if (patchNameHash.found(origPatches[patchI].name()))
{
patchIds.append(patchI);
nFound++;
}
}
if (nFound != patchNames.size())
{
WarningIn("getPatchIds")
<< "Not all supplied patch names were found on the surface mesh" << endl;
}
}
// Copy all face subsets from one triSurf to another
void copyFaceSubsets
(
const triSurf& origSurf,
triSurf& newSurf
)
{
DynList<label> subsetIds;
origSurf.facetSubsetIndices(subsetIds);
forAll(subsetIds, subsetI)
{
label newSubsetId = newSurf.addFacetSubset
(
origSurf.facetSubsetName(subsetI)
);
labelList origFaces;
origSurf.facetsInSubset(subsetI, origFaces);
forAll(origFaces, faceI)
{
newSurf.addFacetToSubset
(
newSubsetId,
origFaces[faceI]
);
}
}
}
// Copy all edge subsets from one triSurf to another
void copyEdgeSubsets
(
const triSurf& origSurf,
triSurf& newSurf
)
{
DynList<label> subsetIds;
origSurf.edgeSubsetIndices(subsetIds);
forAll(subsetIds, subsetI)
{
label newSubsetId = newSurf.addEdgeSubset
(
origSurf.edgeSubsetName(subsetI)
);
labelList origEdges;
origSurf.edgesInSubset(subsetI, origEdges);
forAll(origEdges, faceI)
{
newSurf.addEdgeToSubset
(
newSubsetId,
origEdges[faceI]
);
}
}
}
// Copy all point subsets from one triSurf to another
void copyPointSubsets
(
const triSurf& origSurf,
triSurf& newSurf
)
{
DynList<label> subsetIds;
origSurf.pointSubsetIndices(subsetIds);
forAll(subsetIds, subsetI)
{
label newSubsetId = newSurf.addPointSubset
(
origSurf.pointSubsetName(subsetI)
);
labelList origPoints;
origSurf.pointsInSubset(subsetI, origPoints);
forAll(origPoints, faceI)
{
newSurf.addPointToSubset
(
newSubsetId,
origPoints[faceI]
);
}
}
}
// Merge the supplied list of patchIds onto a new patch
autoPtr<triSurf> mergeSurfacePatches
(
const triSurf& origSurf, // Surface
const UList<label>& patchIds, // Ids of patches to merge
const word& newPatchName, // Name of new (merged) patch
bool keepPatches // Keep the original patches - they will be emptied
)
{
const geometricSurfacePatchList& origPatches = origSurf.patches();
const LongList<labelledTri>& origFacets = origSurf.facets();
label newPatchId = origPatches.size();
// Determine new patch type
word newPatchType = origPatches[patchIds[0]].geometricType();
// Create patch addressing
List<DynamicList<label> > patchAddr(origPatches.size()+1);
forAll(origFacets, faceI)
{
patchAddr[origFacets[faceI].region()].append(faceI);
}
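// patchAddr[patchI] now lists the facets whose region() is patchI; the
// extra slot at index origPatches.size() collects the facets of the
// patches being merged in the loop below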
// Move selected patches to new patch
forAll(patchIds, patchI)
{
patchAddr[newPatchId].append(patchAddr[patchIds[patchI]]);
patchAddr[patchIds[patchI]].clear();
}
// Create new facets list
LongList<labelledTri> newFacets(origFacets.size());
labelList newFaceAddr(origFacets.size(), -1);
label patchCount = 0;
label faceI = 0;
forAll(patchAddr, patchI)
{
const unallocLabelList& addr = patchAddr[patchI];
if(addr.size())
{
forAll(addr, i)
{
newFacets[faceI] = origFacets[addr[i]];
newFacets[faceI].region() = patchCount;
newFaceAddr[addr[i]] = faceI;
faceI++;
}
}
if(addr.size() || keepPatches)
{
patchCount++;
}
}
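// newFaceAddr now maps each original facet index to its position in
// newFacets; it is used below to remap the facet subsets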
// Create new patch list
geometricSurfacePatchList newPatches(patchCount);
patchCount = 0;
forAll(origPatches, patchI)
{
// Only add patches if they contain faces
if(patchAddr[patchI].size())
{
newPatches[patchCount] = origPatches[patchI];
newPatches[patchCount].index() = patchCount;
}
if(patchAddr[patchI].size() || keepPatches)
{
patchCount++;
}
}
// Add new patch if it contains faces
if(patchAddr[patchAddr.size()-1].size())
{
newPatches[patchCount] = geometricSurfacePatch
(
newPatchType,
newPatchName,
patchCount
);
}
if(patchAddr[patchAddr.size()-1].size() || keepPatches)
{
patchCount++;
}
// Create new surface
autoPtr<triSurf> newSurf
(
new triSurf
(
newFacets,
newPatches,
origSurf.featureEdges(),
origSurf.points()
)
);
// Transfer face subsets
copyFaceSubsets(origSurf, newSurf());
newSurf->updateFacetsSubsets(newFaceAddr);
// Transfer feature edge subsets
copyEdgeSubsets(origSurf, newSurf());
// Transfer point subsets
copyPointSubsets(origSurf, newSurf());
// Done
return newSurf;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
int main(int argc, char *argv[])
{
argList::noParallel();
argList::validArgs.clear();
argList::validArgs.append("input surface file");
argList::validArgs.append("new patch");
argList::validOptions.insert("patchNames", "list of names");
argList::validOptions.insert("patchIds", "list of patchIds");
argList::validOptions.insert("patchIdRange", "( start end )");
argList::validOptions.insert("output", "file name (default overwrite)");
argList::validOptions.insert("keep", "");
argList args(argc, argv);
// Process commandline arguments
fileName inFileName(args.args()[1]);
word newPatchName(args.args()[2]);
fileName outFileName(inFileName);
if( args.options().found("output") )
{
outFileName = args.options()["output"];
}
bool keepPatches = false;
if( args.options().found("keep") )
{
keepPatches = true;
}
// Read original surface
triSurf origSurf(inFileName);
// Get patch ids
DynamicList<label> patchIds;
if (args.options().found("patchNames"))
{
if (args.options().found("patchIds"))
{
FatalError() << "Cannot specify both patch names and ids"
<< Foam::abort(FatalError);
}
IStringStream is(args.options()["patchNames"]);
wordList patchNames(is);
getPatchIds
(
origSurf,
patchNames,
patchIds
);
}
if (args.options().found("patchIds"))
{
IStringStream is(args.options()["patchIds"]);
patchIds = labelList(is);
}
if (args.options().found("patchIds"))
{
IStringStream is(args.options()["patchIds"]);
patchIds.append(labelList(is));
}
if (args.options().found("patchIdRange"))
{
IStringStream is(args.options()["patchIdRange"]);
Pair<label> idRange(is);
for(label id = idRange.first(); id <= idRange.second(); id++)
{
patchIds.append(id);
}
}
if (!patchIds.size())
{
FatalError() << "No patches specified"
<< Foam::abort(FatalError);
}
// Merge patches
autoPtr<triSurf> newSurf = mergeSurfacePatches
(
origSurf,
patchIds,
newPatchName,
keepPatches
);
// Write new surface mesh
newSurf->writeSurface(outFileName);
Info << "Original surface patches: " << origSurf.patches().size() << endl;
Info << "Final surface patches: " << newSurf->patches().size() << endl;
Info << "Surface written to " << outFileName << endl;
Info << "End\n" << endl;
return 0;
}
// ************************************************************************* //

View file

@ -0,0 +1,131 @@
#!python
# =============================================================================
# Salome GEOM script to extract the feature edges from a body and add them
# to the group named 'featureEdges'.
# Tested on Salome 7.4.0 and python 2.7 on 64-bit Windows
#
# Author: Ivor Clifford <ivor.clifford@psi.ch>
#
# =============================================================================
def extractFeatureEdges(body, minFeatureAngle = 5):
'''
Find all feature edges on the supplied body and return them as a list
of edge ids.
body - A Salome solid, compound, shell or face object to find all
feature edges on.
minFeatureAngle - the angle (in degrees) between adjacent surfaces
above which the edge will be considered a feature edge.
'''
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
# Check the body type
if not (body.GetShapeType() in [GEOM.SHELL, GEOM.SOLID, GEOM.FACE, GEOM.COMPOUND]):
raise RuntimeError('Supplied object is not a solid, shell or face.')
print 'Extracting edges of %s with feature angle > %g.' % (body.GetName(), minFeatureAngle)
# Extract basic info
faces = geompy.SubShapeAll(body, geompy.ShapeType["FACE"])
curves = geompy.SubShapeAll(body, geompy.ShapeType["EDGE"])
points = geompy.SubShapeAll(body, geompy.ShapeType["VERTEX"])
faceIds = geompy.GetSubShapesIDs(body, faces)
curveIds = geompy.GetSubShapesIDs(body, curves)
nodeIds = geompy.GetSubShapesIDs(body, points)
maxFaceId = max(faceIds)
maxCurveId = max(curveIds)
maxNodeId = max(nodeIds)
# Reverse mapping from curve id to local curve arrays
faceMap = [-1 for i in xrange(maxFaceId+1)]
for localId, id in enumerate(faceIds):
faceMap[id] = localId
curveMap = [-1 for i in xrange(maxCurveId+1)]
for localId, id in enumerate(curveIds):
curveMap[id] = localId
nodeMap = [-1 for i in xrange(maxNodeId+1)]
for localId, id in enumerate(nodeIds):
nodeMap[id] = localId
# Get curves on each face
faceCurveIds = [[curveMap[id] for id in geompy.GetSubShapesIDs(
body,
geompy.SubShapeAll(face, geompy.ShapeType["EDGE"])
)] for face in faces]
# Get faces attached to each curve
curveFaceIds = [[] for id in curveIds]
for faceI, ids in enumerate(faceCurveIds):
for id in ids:
curveFaceIds[id].append(faceI)
# Now that we have the connectivity for curves and faces find the
# feature edges
featureEdgeIds = []
for curveId, curve, adjFaceIds in zip(curveIds, curves, curveFaceIds):
if len(adjFaceIds) == 2:
# Curve with 2 adjacent faces - Test feature angle
# Determine break angle at each curve
# If greater than the feature edge angle, add the curve to group featureEdges
face1 = faces[adjFaceIds[0]]
face2 = faces[adjFaceIds[1]]
point = geompy.GetFirstVertex(curve) # Test at the first vertex
n1 = geompy.GetNormal(face1, point)
n2 = geompy.GetNormal(face2, point)
angle = geompy.GetAngle(n1, n2)
if angle > minFeatureAngle:
featureEdgeIds.append(curveId)
elif len(adjFaceIds) == 1:
# Curve on standalone face - Add by default
featureEdgeIds.append(curveId)
elif len(adjFaceIds) == 0:
# Standalone curve - Ignore
None
else:
raise RuntimeError('Curve found sharing %d faces. This is unexpected for fully enclosed bodies.' % len(adjFaceIds))
# Done
print "%d feature edges found" % len(featureEdgeIds)
return featureEdgeIds
# If run as a standalone script, use the current Salome GUI selection
# and add the feature edges to group named 'featureEdges'
if __name__ == '__main__':
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
# Get current GUI selection
selected = salome.sg.getAllSelected()
if len(selected) != 1:
raise RuntimeError('A single solid, shell or face object must be selected.')
body = salome.myStudy.FindObjectID(selected[0]).GetObject()
# Get feature edges and add to the group 'featureEdges'
featureEdges = geompy.CreateGroup(body, geompy.ShapeType["EDGE"])
geompy.UnionIDs(featureEdges, extractFeatureEdges(body))
geompy.addToStudyInFather(body, featureEdges, 'featureEdges')
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)

View file

@ -0,0 +1,363 @@
#!python
# =============================================================================
# Python module for writing OpenFOAM feature edge and triSurface files from
# within Salome platform.
# Tested on Salome 7.4.0 and python 2.7 on 64-bit Windows
#
# Author: Ivor Clifford <ivor.clifford@psi.ch>
#
# =============================================================================
def foamHeader(className, objectName):
'''
Return the OpenFOAM file header block as a string.
'''
return '''/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\\ / O peration | Version: 2.2.1 |
| \\\\ / A nd | Web: www.OpenFOAM.org |
| \\\\/ M anipulation | |
\\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class %s;
object %s;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
''' % (className, objectName)
class triSurf:
def __init__(self, object = None, allEdges = False):
'''
Construct from the supplied Salome mesh.
object - the mesh object (must be a triangular surface mesh). If no
object is supplied, the current Salome selection is used.
allEdges - If true, all edges on the mesh are included as feature
edges, otherwise only edges contained in groups are included
NOTE: All face groups are assumed to represent patches. No face subsets
are written. All edge groups are added as feature edge subsets. All point
groups are added as point subsets.
'''
# =============================================================================
# Initialize salome
import salome
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
from operator import itemgetter
from collections import OrderedDict
import SMESH, SALOMEDS
# =============================================================================
# Get the Salome mesh object
if object is None:
selected = salome.sg.getAllSelected()
if len(selected) != 1:
raise RuntimeError('A single Salome mesh object must be selected.')
object = salome.myStudy.FindObjectID(selected[0]).GetObject()
try:
object.GetMesh()
except:
raise RuntimeError('Supplied object is not a Salome SMESH mesh.')
smesh = smeshBuilder.New(salome.myStudy)
mesh = smesh.Mesh(object)
print "Converting SMESH Mesh '%s'" % mesh.GetName()
# =============================================================================
# Get basic mesh info
nNodes = mesh.NbNodes()
nFaces = mesh.NbFaces()
nTris = mesh.NbTriangles()
nEdges = mesh.NbEdges()
nodeIds = mesh.GetNodesId()
faceIds = mesh.GetElementsByType(SMESH.FACE)
edgeIds = mesh.GetElementsByType(SMESH.EDGE)
# Check that mesh is strictly triangular
if nFaces != nTris:
raise RuntimeError('Mesh is not strictly triangular')
# Get patch and subset names & ids
# All SMESH.FACE groups are assumed to be patches
# All SMESH.EDGE groups are assumed to be feature subsets
# All SMESH.NODE groups are assumed to be point subsets
patches = OrderedDict()
pointSubsets = OrderedDict()
featureEdgeSubsets = OrderedDict()
for group in mesh.GetGroups():
if group.GetType() == SMESH.FACE:
patches[group.GetName()] = group.GetIDs()
elif group.GetType() == SMESH.EDGE:
featureEdgeSubsets[group.GetName()] = group.GetIDs()
elif group.GetType() == SMESH.NODE:
pointSubsets[group.GetName()] = group.GetIDs()
# =============================================================================
# Process faces and patches
# Get patchId for each face
lastPatchId = len(patches)
patchIds = [lastPatchId] * max(faceIds)
patchId = 0
for name, ids in patches.iteritems():
for faceId in ids:
if patchIds[faceId-1] == lastPatchId:
patchIds[faceId-1] = patchId
else:
print "Face %d is assigned to both groups %s and %s" % (faceId, name, patches.keys()[patchIds[faceId-1]])
raise RuntimeError('Groups of faces are not unique, i.e. they overlap.')
patchId += 1
# Compact and reorder patchIds to match faceIds
patchIds = [patchIds[faceId-1] for faceId in faceIds]
# Reorder faces by increasing group id
faceAndpatchIds = sorted(zip(faceIds, patchIds), key=itemgetter(1))
faceIds, patchIds = zip(*faceAndpatchIds)
# Add unused faces to the default patch
defaultFaces = [faceId for faceId, patchId in faceAndpatchIds if patchId == lastPatchId]
if len(defaultFaces) > 0:
patches['defaultFaces'] = defaultFaces
defaultFaces = None
# =============================================================================
# Process feature edges
if not allEdges:
edgeIds = []
for name, ids in featureEdgeSubsets.iteritems():
edgeIds += ids
edgeIds = list(set(edgeIds))
nEdges = len(edgeIds)
# Reverse mapping of edge ids since they aren't necessarily numbered 1..nEdges
if len(edgeIds):
edgeMap = [-1] * max(edgeIds)
else:
edgeMap = []
i=0
for edgeId in edgeIds:
edgeMap[edgeId-1] = i
i += 1
# =============================================================================
# Process nodes
# Reverse mapping of node ids since nodes aren't necessarily numbered 1..nNodes
nodeMap = [-1] * max(nodeIds)
i=0
for nodeId in nodeIds:
nodeMap[nodeId-1] = i
i += 1
# =============================================================================
self._mesh = mesh
self._nodeIds = nodeIds
self._edgeIds = edgeIds
self._faceIds = faceIds
self._nodeMap = nodeMap
self._edgeMap = edgeMap
self._faceMap = []
self._patches = patches
self._pointSubsets = pointSubsets
self._featureEdgeSubsets = featureEdgeSubsets
self._faceSubsets = {}
print 'Done'
def nNodes(self):
'''
Return the number of nodes
'''
return len(self._nodeIds)
def nEdges(self):
'''
Return the number of edges
'''
return len(self._edgeIds)
def nFacets(self):
'''
Return the number of triangular facets
'''
return len(self._faceIds)
def nPatches(self):
'''
Return the number of patches
'''
return len(self._patches)
def _writePatchDefs(self, f, typeName = 'wall'):
'''
Write the patch definitions to file as an OpenFOAM geometricSurfacePatchList.
NOTE: All patches are assumed to be walls.
'''
patches = self._patches
f.write('%d\n(\n' % len(patches))
for name in patches.iterkeys():
f.write('%s\t%s\n' % (name, typeName))
f.write(')\n')
def _writeNodes(self, f):
'''
Write the nodes to file as an OpenFOAM pointField.
'''
mesh = self._mesh
nodeIds = self._nodeIds
f.write('%d\n(\n' % len(nodeIds))
for x, y, z in [mesh.GetNodeXYZ(nodeId) for nodeId in nodeIds]:
f.write( '( %g %g %g )\n' % (x, y, z))
f.write(')\n')
def _writeFeatureEdges(self, f):
'''
Write the feature edges to file as an OpenFOAM edgeList.
'''
mesh = self._mesh
nodeMap = self._nodeMap
edgeIds = self._edgeIds
f.write('%d\n(\n' % len(edgeIds))
for edgeId in edgeIds:
nodes = mesh.GetElemNodes(edgeId)
f.write( '(' + ' '.join([str(nodeMap[nodeId-1]) for nodeId in nodes]) + ')\n')
f.write(')\n')
def _writeFacets(self, f):
'''
Write the facets to file as an OpenFOAM List of labelledTri.
'''
from itertools import chain
mesh = self._mesh
nodeMap = self._nodeMap
patches = self._patches
f.write('%d\n(\n' % sum([len(patch) for patch in patches.itervalues()]))
patchId = 0
for patchId, (patchName, faceIds) in enumerate(patches.iteritems()):
for faceId in faceIds:
nodes = mesh.GetElemNodes(faceId)
f.write( '((' + ' '.join([str(nodeMap[nodeId-1]) for nodeId in nodes]) + ') %d)\n' % patchId)
f.write(')\n')
def _writeSubsets(self, f, subsets, map, typeId):
'''
General function to write a subset to file as an OpenFOAM Map<meshSubset>.
'''
f.write('%d\n(\n' % len(subsets))
for name, ids in subsets.iteritems():
f.write('%s %s %d ( %s )\n' % (name, typeId, len(ids), ' '.join([str(map[id-1]) for id in ids])))
f.write(')\n')
def _writePointSubsets(self, f):
'''
Write the point subsets to file as an OpenFOAM Map<meshSubset>.
'''
self._writeSubsets(f, self._pointSubsets, self._nodeMap, '2')
def _writeFaceSubsets(self, f):
'''
Write the face subsets to file as an OpenFOAM Map<meshSubset>.
'''
self._writeSubsets(f, self._faceSubsets, self._faceMap, '4')
def _writeFeatureEdgeSubsets(self, f):
'''
Write the feature edge subsets to file as an OpenFOAM Map<meshSubset>.
'''
self._writeSubsets(f, self._featureEdgeSubsets, self._edgeMap, '8')
def writeEdgeMesh(self, fileName):
'''
Write to file as an OpenFOAM edgeMesh
fileName - The file name to write
'''
# Create file
f = open(fileName, 'wb') # NOTE: file opened as binary to ensure unix-style line breaks
# Write header
f.write(foamHeader('edgeMesh', self._mesh.GetName()))
self._writeNodes(f)
self._writeFeatureEdges(f)
f.close()
print 'edgeMesh written to %s' % fileName
def writeFtr(self, fileName):
'''
Write to file as an OpenFOAM cfMesh FTR file
fileName - the file name to write
'''
# Create file
f = open(fileName, 'wb') # NOTE: file opened as binary to ensure unix-style line breaks
self._writePatchDefs(f)
self._writeNodes(f)
self._writeFacets(f)
f.close()
print 'triSurf written to %s' % fileName
def writeFms(self, fileName):
'''
Write to file as an OpenFOAM cfMesh FMS file
fileName - the file name to write
'''
# Create file
f = open(fileName, 'wb') # NOTE: file opened as binary to ensure unix-style line breaks
self._writePatchDefs(f)
self._writeNodes(f)
self._writeFacets(f)
self._writeFeatureEdges(f)
self._writePointSubsets(f)
self._writeFaceSubsets(f)
self._writeFeatureEdgeSubsets(f)
f.close()
print 'triSurf written to %s' % fileName
if __name__ == '__main__':
import salome
salome.salome_init()
import SMESH, SALOMEDS

View file

@ -0,0 +1,3 @@
surfaceToFMS.C
EXE = $(FOAM_APPBIN)/surfaceToFMS

View file

@ -0,0 +1,9 @@
EXE_INC = \
-I$(FOAM_SRC)/mesh/cfMesh/meshLibrary/lnInclude \
-I$(LIB_SRC)/triSurface/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude
EXE_LIBS = \
-ltriSurface \
-lmeshLibrary \
-lmeshTools

View file

@ -0,0 +1,63 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
Reads the specified surface and writes it in the fms format.
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "IFstream.H"
#include "fileName.H"
#include "triSurf.H"
#include "demandDrivenData.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Main program:
using namespace Foam;
int main(int argc, char *argv[])
{
argList::noParallel();
argList::validArgs.clear();
argList::validArgs.append("input surface file");
argList args(argc, argv);
const fileName inFileName(args.args()[1]);
if( inFileName.ext() == "fms" )
FatalError << "trying to convert a fms file to itself"
<< exit(FatalError);
fileName outFileName(inFileName.lessExt()+".fms");
const triSurf surface(inFileName);
surface.writeSurface(outFileName);
Info << "End\n" << endl;
return 0;
}
// ************************************************************************* //

Binary file not shown.

View file

@ -24,6 +24,9 @@ findCellsIntersectingSurface = $(intersectionTools)/findCellsIntersectingSurface
meshOptimizer = utilities/smoothers/geometry/meshOptimizer
tetMeshOptimisation = $(meshOptimizer)/tetMeshOptimisation
symmetryPlaneOptimisation = $(meshOptimizer)/symmetryPlaneOptimisation
simplexSmoother = $(tetMeshOptimisation)/advancedSmoothers/simplexSmoother
knuppMetric = $(tetMeshOptimisation)/advancedSmoothers/knuppMetric
meshUntangler = $(tetMeshOptimisation)/advancedSmoothers/meshUntangler
@ -44,10 +47,12 @@ meshOctreeModifier = utilities/octrees/meshOctree/meshOctreeModifier
meshOctreeAutoRef = utilities/octrees/meshOctree/meshOctreeAutomaticRefinement
patchRefinement = utilities/octrees/meshOctree/refinementControls/patchRefinement
objectRefinement = utilities/octrees/meshOctree/refinementControls/objectRefinement
helperFunctions = utilities/helperFunctions
createFacesFromChain = utilities/helperClasses/createFacesFromChain
sortEdgesIntoChains = utilities/helperClasses/sortEdgesIntoChains
trianglePlaneIntersections = utilities/helperClasses/trianglePlaneIntersections
tetCreatorOctree = utilities/tetrahedra/tetCreatorOctree
faceDecomposition = utilities/faceDecomposition
decomposeCells = utilities/decomposeCells
@ -60,12 +65,16 @@ checkIrregularSurfaceConnections = $(topology)/checkIrregularSurfaceConnections
checkNonMappableCellConnections = $(topology)/checkNonMappableCellConnections
triSurfaceTools = utilities/triSurfaceTools
triSurface2DCheck = $(triSurfaceTools)/triSurface2DCheck
triSurfaceCleanupDuplicates = $(triSurfaceTools)/triSurfaceCleanupDuplicates
triSurfaceCleanupDuplicateTriangles = $(triSurfaceTools)/triSurfaceCleanupDuplicateTriangles
triSurfaceCopyParts = $(triSurfaceTools)/triSurfaceCopyParts
triSurfaceCurvatureEstimator = $(triSurfaceTools)/triSurfaceCurvatureEstimator
triSurfacePartitioner = $(triSurfaceTools)/triSurfacePartitioner
triSurfaceDetectFeatureEdges = $(triSurfaceTools)/triSurfaceDetectFeatureEdges
triSurfaceClassifyEdges = $(triSurfaceTools)/triSurfaceClassifyEdges
triSurfaceImportSurfaceAsSubset = $(triSurfaceTools)/triSurfaceImportSurfaceAsSubset
triSurfacePatchManipulator = $(triSurfaceTools)/triSurfacePatchManipulator
triSurfaceRemoveFacets = $(triSurfaceTools)/triSurfaceRemoveFacets
triSurfaceExtrude2DEdges = $(triSurfaceTools)/triSurfaceExtrude2DEdges
@ -272,6 +281,8 @@ $(meshOptimizer)/optimizeMeshFV.C
$(tetMeshOptimisation)/tetMeshOptimisation.C
$(tetMeshOptimisation)/tetMeshOptimisationParallel.C
$(symmetryPlaneOptimisation)/symmetryPlaneOptimisation.C
$(simplexSmoother)/simplexSmoother.C
$(knuppMetric)/knuppMetric.C
@ -343,9 +354,14 @@ $(objectRefinement)/lineRefinement.C
$(objectRefinement)/coneRefinement.C
$(objectRefinement)/boxRefinement.C
$(triSurface2DCheck)/triSurface2DCheck.C
$(triSurfaceCleanupDuplicates)/triSurfaceCleanupDuplicates.C
$(triSurfaceCleanupDuplicates)/triSurfaceCleanupDuplicatesFunctions.C
$(triSurfaceCleanupDuplicateTriangles)/triSurfaceCleanupDuplicateTriangles.C
$(triSurfaceCleanupDuplicateTriangles)/triSurfaceCleanupDuplicateTrianglesFunctions.C
$(triSurfaceCopyParts)/triSurfaceCopyParts.C
$(triSurfacePartitioner)/triSurfacePartitioner.C
@ -360,6 +376,8 @@ $(triSurfaceDetectFeatureEdges)/triSurfaceDetectFeatureEdgesFunctions.C
$(triSurfaceClassifyEdges)/triSurfaceClassifyEdges.C
$(triSurfaceClassifyEdges)/triSurfaceClassifyEdgesFunctions.C
$(triSurfaceImportSurfaceAsSubset)/triSurfaceImportSurfaceAsSubset.C
$(triSurfacePatchManipulator)/triSurfacePatchManipulator.C
$(triSurfacePatchManipulator)/triSurfacePatchManipulatorFunctions.C

View file

@ -26,9 +26,11 @@ Description
\*---------------------------------------------------------------------------*/
#include "cartesian2DMeshGenerator.H"
#include "triSurface2DCheck.H"
#include "polyMeshGen2DEngine.H"
#include "triSurf.H"
#include "triSurfacePatchManipulator.H"
#include "triSurfaceCleanupDuplicateTriangles.H"
#include "demandDrivenData.H"
#include "Time.H"
#include "meshOctreeCreator.H"
@ -212,25 +214,35 @@ void cartesian2DMeshGenerator::renumberMesh()
void cartesian2DMeshGenerator::generateMesh()
{
createCartesianMesh();
try
{
createCartesianMesh();
surfacePreparation();
surfacePreparation();
mapMeshToSurface();
mapMeshToSurface();
mapEdgesAndCorners();
mapEdgesAndCorners();
optimiseMeshSurface();
optimiseMeshSurface();
generateBoundaryLayers();
generateBoundaryLayers();
optimiseMeshSurface();
optimiseMeshSurface();
refBoundaryLayers();
refBoundaryLayers();
renumberMesh();
renumberMesh();
replaceBoundaries();
replaceBoundaries();
}
catch(...)
{
WarningIn
(
"void cartesian2DMeshGenerator::generateMesh()"
) << "Meshing process terminated!" << endl;
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
@ -273,10 +285,23 @@ cartesian2DMeshGenerator::cartesian2DMeshGenerator(const Time& time)
mesh_.metaData().add("surfaceFile", surfaceFile);
mesh_.metaData().add("surfaceMeta", surfMetaDict);
triSurface2DCheck surfCheck(*surfacePtr_);
if( !surfCheck.is2DSurface() )
{
surfCheck.createSubsets();
Info << "Writting surface with subsets to file "
<< "badSurfaceWithSubsets.fms" << endl;
surfacePtr_->writeSurface("badSurfaceWithSubsets.fms");
}
}
if( surfacePtr_->featureEdges().size() != 0 )
{
//- get rid of duplicate triangles as they cause strange problems
triSurfaceCleanupDuplicateTriangles(const_cast<triSurf&>(*surfacePtr_));
//- create surface patches based on the feature edges
//- and update the meshDict based on the given data
triSurfacePatchManipulator manipulator(*surfacePtr_);

View file

@ -135,7 +135,7 @@ void cartesianMeshGenerator::mapMeshToSurface()
# ifdef DEBUG
mesh_.write();
//::exit(EXIT_SUCCESS);
::exit(EXIT_SUCCESS);
# endif
deleteDemandDrivenData(msePtr);
@ -192,12 +192,30 @@ void cartesianMeshGenerator::refBoundaryLayers()
void cartesianMeshGenerator::optimiseFinalMesh()
{
//- untangle the surface if needed
meshSurfaceEngine mse(mesh_);
meshSurfaceOptimizer(mse, *octreePtr_).optimizeSurface();
bool enforceConstraints(false);
if( meshDict_.found("enforceGeometryConstraints") )
{
enforceConstraints =
readBool(meshDict_.lookup("enforceGeometryConstraints"));
}
if( true )
{
meshSurfaceEngine mse(mesh_);
meshSurfaceOptimizer surfOpt(mse, *octreePtr_);
if( enforceConstraints )
surfOpt.enforceConstraints();
surfOpt.optimizeSurface();
}
deleteDemandDrivenData(octreePtr_);
//- final optimisation
meshOptimizer optimizer(mesh_);
if( enforceConstraints )
optimizer.enforceConstraints();
optimizer.optimizeMeshFV();
optimizer.optimizeLowQualityFaces();
optimizer.untangleMeshFV();
@ -230,25 +248,35 @@ void cartesianMeshGenerator::renumberMesh()
void cartesianMeshGenerator::generateMesh()
{
createCartesianMesh();
try
{
createCartesianMesh();
surfacePreparation();
surfacePreparation();
mapMeshToSurface();
mapMeshToSurface();
mapEdgesAndCorners();
mapEdgesAndCorners();
optimiseMeshSurface();
optimiseMeshSurface();
generateBoundaryLayers();
generateBoundaryLayers();
optimiseFinalMesh();
optimiseFinalMesh();
refBoundaryLayers();
refBoundaryLayers();
renumberMesh();
renumberMesh();
replaceBoundaries();
replaceBoundaries();
}
catch(...)
{
WarningIn
(
"void cartesianMeshGenerator::generateMesh()"
) << "Meshing process terminated!" << endl;
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //

View file

@ -102,8 +102,6 @@ void tetMeshExtractorOctree::createPolyMesh()
const partTet& elmt = tets[elmtI];
//tessellationElement telmt(elmt[0], elmt[1], elmt[2], elmt[3]);
label faceI = 4 * elmtI;
//- first face

View file

@ -167,7 +167,16 @@ void tetMeshGenerator::generateBoudaryLayers()
void tetMeshGenerator::optimiseFinalMesh()
{
//- final optimisation
bool enforceConstraints(false);
if( meshDict_.found("enforceGeometryConstraints") )
{
enforceConstraints =
readBool(meshDict_.lookup("enforceGeometryConstraints"));
}
meshOptimizer optimizer(mesh_);
if( enforceConstraints )
optimizer.enforceConstraints();
optimizer.optimizeSurface(*octreePtr_);
@ -221,25 +230,35 @@ void tetMeshGenerator::renumberMesh()
void tetMeshGenerator::generateMesh()
{
createTetMesh();
try
{
createTetMesh();
surfacePreparation();
surfacePreparation();
mapMeshToSurface();
mapMeshToSurface();
mapEdgesAndCorners();
mapEdgesAndCorners();
optimiseMeshSurface();
optimiseMeshSurface();
generateBoudaryLayers();
generateBoudaryLayers();
optimiseFinalMesh();
optimiseFinalMesh();
refBoundaryLayers();
refBoundaryLayers();
renumberMesh();
renumberMesh();
replaceBoundaries();
replaceBoundaries();
}
catch(...)
{
WarningIn
(
"void tetMeshGenerator::generateMesh()"
) << "Meshing process terminated!" << endl;
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //

View file

@ -243,6 +243,15 @@ void boundaryLayers::createLayerCells(const labelList& patchLabels)
const label pKeyI = patchKey_[patchI];
const label pKeyJ = patchKey_[patchJ];
if( pKeyI < 0 || pKeyJ < 0 )
{
continue;
FatalErrorIn
(
"void boundaryLayers::createLayerCells(const labelList&)"
) << "Patch key is negative at concave edge" << abort(FatalError);
}
if( pKeyI == pKeyJ )
continue;
@ -333,6 +342,10 @@ void boundaryLayers::createLayerCells(const labelList& patchLabels)
faceAtPatches.append(labelPair(patchI, patchJ));
}
# ifdef DEBUGLayer
Info << "Adding new cell at edge " << cellFaces << endl;
# endif
//- append cell to the queue
cellsToAdd.appendGraph(cellFaces);
}
@ -628,10 +641,6 @@ void boundaryLayers::createLayerCells(const labelList& patchLabels)
//- delete meshSurfaceEngine
this->clearOut();
# ifdef DEBUGLayer
mesh_.addressingData().checkMesh(true);
# endif
Info << "Finished creating layer cells" << endl;
}

View file

@ -93,7 +93,7 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
{
const label bpI = it.key();
if( mPart.numberOfFeatureEdgesAtPoint(bpI) != 3 )
if( mPart.numberOfFeatureEdgesAtPoint(bpI) > 3 )
{
labelHashSet commonPatches;
DynList<label> allPatches;
@ -133,18 +133,18 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
const labelHashSet& invertedVertices = vertexCheck.invertedVertices();
std::map<std::pair<label, label>, Pair<label> > edgeClassification;
labelLongList procEdges;
forAll(eFaces, eI)
{
if( eFaces.sizeOfRow(eI) != 2 )
{
procEdges.append(eI);
continue;
}
//- check if any of the face vertices is tangled
const edge& e = edges[eI];
if( invertedVertices.found(e[0]) || invertedVertices.found(e[1]) )
if
(
!is2DMesh_ &&
(invertedVertices.found(e[0]) || invertedVertices.found(e[1]))
)
continue;
const label patch0 = boundaryFacePatches[eFaces(eI, 0)];
@ -164,7 +164,12 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
const face& f1 = bFaces[eFaces(eI, 0)];
const face& f2 = bFaces[eFaces(eI, 1)];
if( !help::isSharedEdgeConvex(points, f1, f2) )
if
(
!help::isSharedEdgeConvex(points, f1, f2) ||
(help::angleBetweenFaces(points, f1, f2) > 0.75 * M_PI)
)
{
++edgeClassification[pp].second();
}
@ -177,9 +182,10 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
if( Pstream::parRun() )
{
const labelList& bPoints = mse.boundaryPoints();
//- check faces over processor edges
const labelList& globalEdgeLabel = mse.globalBoundaryEdgeLabel();
const VRWGraph& beAtProcs = mse.beAtProcs();
const Map<label>& globalToLocal = mse.globalToLocalBndEdgeAddressing();
const DynList<label>& neiProcs = mse.beNeiProcs();
@ -203,14 +209,20 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
}
//- store faces for sending
forAll(procEdges, eI)
forAllConstIter(Map<label>, otherFaceProc, it)
{
const label beI = procEdges[eI];
const label beI = it.key();
if( eFaces.sizeOfRow(beI) == 0 )
continue;
const edge& e = edges[beI];
if( invertedVertices.found(e[0]) || invertedVertices.found(e[1]) )
if
(
!is2DMesh_ &&
(invertedVertices.found(e[0]) || invertedVertices.found(e[1]))
)
continue;
//- do not send data if the face on other processor
@ -220,30 +232,25 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
const face& f = bFaces[eFaces(beI, 0)];
forAllRow(beAtProcs, beI, procI)
{
const label neiProc = beAtProcs(beI, procI);
if( neiProc == Pstream::myProcNo() )
continue;
const label neiProc = it();
//- each face is sent as follows
//- 1. global edge label
//- 2. number of face nodes
//- 3. face nodes and vertex coordinates
LongList<labelledPoint>& dps = exchangePoints[neiProc];
dps.append(labelledPoint(globalEdgeLabel[beI], point()));
dps.append(labelledPoint(f.size(), point()));
forAll(f, pI)
{
dps.append
//- each face is sent as follows
//- 1. global edge label
//- 2. number of face nodes
//- 3. face nodes and vertex coordinates
LongList<labelledPoint>& dps = exchangePoints[neiProc];
dps.append(labelledPoint(globalEdgeLabel[beI], point()));
dps.append(labelledPoint(f.size(), point()));
forAll(f, pI)
{
dps.append
(
labelledPoint
(
labelledPoint
(
globalPointLabel[bp[f[pI]]],
points[f[pI]]
)
);
}
globalPointLabel[bp[f[pI]]],
points[f[pI]]
)
);
}
}
@ -258,14 +265,16 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
{
const label beI =
globalToLocal[receivedData[counter++].pointLabel()];
face f(receivedData[counter++].pointLabel());
DynList<label> f(receivedData[counter++].pointLabel());
forAll(f, pI)
{
const labelledPoint& lp = receivedData[counter++];
if( globalPointToLocal.found(lp.pointLabel()) )
{
//- this point already exists on this processor
f[pI] = globalPointToLocal[lp.pointLabel()];
f[pI] = bPoints[globalPointToLocal[lp.pointLabel()]];
}
else
{
@ -287,6 +296,8 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
}
}
const face& bf = bFaces[eFaces(beI, 0)];
const label patch0 = boundaryFacePatches[eFaces(beI, 0)];
const label patch1 = otherProcPatches[beI];
@ -303,7 +314,10 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
if(
(otherFaceProc[beI] > Pstream::myProcNo()) &&
!help::isSharedEdgeConvex(points, bFaces[eFaces(beI, 0)], f)
(
!help::isSharedEdgeConvex(points, bf, f) ||
(help::angleBetweenFaces(points, bf, f) > 0.75 * M_PI)
)
)
{
++edgeClassification[pp].second();
@ -443,6 +457,35 @@ void boundaryLayers::findPatchesToBeTreatedTogether()
Info << "Patch names " << patchNames_ << endl;
Info << "Treat patches with patch " << treatPatchesWithPatch_ << endl;
label layerI(0), subsetId;
boolList usedPatch(treatPatchesWithPatch_.size(), false);
const PtrList<boundaryPatch>& boundaries = mesh_.boundaries();
forAll(treatPatchesWithPatch_, patchI)
{
if( usedPatch[patchI] || (boundaries[patchI].patchSize() == 0) )
continue;
Info << "Adding layer subset " << layerI
<< " for patch " << patchI << endl;
usedPatch[patchI] = true;
subsetId = mesh_.addFaceSubset("layer_"+help::scalarToText(layerI));
++layerI;
forAll(treatPatchesWithPatch_[patchI], i)
{
const label cPatch = treatPatchesWithPatch_[patchI][i];
usedPatch[cPatch] = true;
label start = boundaries[cPatch].patchStart();
const label size = boundaries[cPatch].patchSize();
for(label i=0;i<size;++i)
mesh_.addFaceToSubset(subsetId, start++);
}
}
mesh_.write();
# endif
geometryAnalysed_ = true;
@ -498,6 +541,7 @@ boundaryLayers::boundaryLayers
meshPartitionerPtr_(NULL),
patchWiseLayers_(true),
terminateLayersAtConcaveEdges_(false),
is2DMesh_(false),
patchNames_(),
treatedPatch_(),
treatPatchesWithPatch_(),
@ -597,6 +641,8 @@ void boundaryLayers::activate2DMode()
if( treatedPatch_[patches[i]] )
patches.removeElement(i);
}
is2DMesh_ = true;
}
void boundaryLayers::addLayerForAllPatches()

View file

@ -35,8 +35,6 @@ SourceFiles
#ifndef boundaryLayers_H
#define boundaryLayers_H
#include "objectRegistry.H"
#include "Time.H"
#include "polyMeshGenModifier.H"
#include "meshSurfaceEngine.H"
#include "meshSurfacePartitioner.H"
@ -78,6 +76,9 @@ class boundaryLayers
//- shall the layers be terminated at concave edges (true)
bool terminateLayersAtConcaveEdges_;
//- is it a 2D mesh
bool is2DMesh_;
//- patch names
wordList patchNames_;

View file

@ -408,7 +408,6 @@ void boundaryLayers::createNewVertices(const labelList& patchLabels)
const meshSurfaceEngine& mse = surfaceEngine();
const labelList& bPoints = mse.boundaryPoints();
const label nBndPts = bPoints.size();
const meshSurfacePartitioner& mPart = surfacePartitioner();
const VRWGraph& pPatches = mPart.pointPatches();
@ -422,10 +421,10 @@ void boundaryLayers::createNewVertices(const labelList& patchLabels)
pointFieldPMG& points = mesh_.points();
boolList treatPatches(mesh_.boundaries().size());
List<direction> patchVertex(nBndPts);
List<direction> patchVertex(bPoints.size());
//- make sure that the points are never re-allocated during the process
points.reserve(points.size() + 2 * nBndPts);
points.reserve(points.size() + 2 * bPoints.size());
//- generate new layer vertices for each patch
forAll(patchLabels, patchI)
@ -457,12 +456,9 @@ void boundaryLayers::createNewVertices(const labelList& patchLabels)
//- classify vertices belonging to this patch
findPatchVertices(treatPatches, patchVertex);
//- go through the vertices and create the new ones
labelLongList procPoints;
# ifdef USE_OMP
# pragma omp parallel for if( nBndPts > 1000 ) schedule(dynamic, 50)
# endif
for(label bpI=0;bpI<nBndPts;++bpI)
//- create indices and allocate maps for new points
labelLongList procPoints, patchPoints;
forAll(bPoints, bpI)
{
if( !patchVertex[bpI] )
continue;
@ -470,53 +466,67 @@ void boundaryLayers::createNewVertices(const labelList& patchLabels)
//- skip vertices at parallel boundaries
if( patchVertex[bpI] & PARALLELBOUNDARY )
{
# ifdef USE_OMP
# pragma omp critical(appendProcPoints)
# endif
procPoints.append(bpI);
continue;
}
patchPoints.append(bpI);
const label pointI = bPoints[bpI];
if( patchVertex[bpI] & EDGENODE )
{
if( otherVrts_.find(pointI) == otherVrts_.end() )
{
std::map<std::pair<label, label>, label> m;
otherVrts_.insert(std::make_pair(pointI, m));
}
std::pair<label, label> pr(pKey, pKey);
otherVrts_[pointI].insert(std::make_pair(pr, nPoints_++));
}
else
{
//- this is the only new point
newLabelForVertex_[pointI] = nPoints_++;
}
}
//- set the size of points
points.setSize(nPoints_);
//- calculate coordinates of new points
# ifdef USE_OMP
# pragma omp parallel for schedule(dynamic, 50)
# endif
forAll(patchPoints, i)
{
const label bpI = patchPoints[i];
const label pointI = bPoints[bpI];
label nPoints;
# ifdef USE_OMP
# pragma omp critical(incrementNumPoints)
# endif
nPoints = nPoints_++;
//- create new point
const point p = createNewVertex(bpI, treatPatches, patchVertex);
if( patchVertex[bpI] & EDGENODE )
{
# ifdef USE_OMP
# pragma omp critical(adjustMap)
# endif
//- set the new point or an edge point
if( otherVrts_.find(pointI) == otherVrts_.end() )
{
if( otherVrts_.find(pointI) == otherVrts_.end() )
{
std::map<std::pair<label, label>, label> m;
std::map<std::pair<label, label>, label> m;
otherVrts_.insert(std::make_pair(pointI, m));
}
std::pair<label, label> pr(pKey, pKey);
otherVrts_[pointI].insert(std::make_pair(pr, nPoints));
otherVrts_.insert(std::make_pair(pointI, m));
}
std::pair<label, label> pr(pKey, pKey);
const label npI = otherVrts_[pointI][pr];
points[npI] = p;
}
else
{
//- this is the only new point for a given vertex
newLabelForVertex_[pointI] = nPoints;
}
//- store the point
# ifdef USE_OMP
# pragma omp critical(addVertex)
# endif
{
points.newElmt(nPoints) = p;
//- set the new point
points[newLabelForVertex_[pointI]] = p;
}
}
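The rework above replaces the per-point OpenMP critical sections with a two-pass scheme: point indices are handed out serially first, and only the geometry is computed in parallel afterwards. A condensed sketch of the pattern, with the edge-node and processor-boundary bookkeeping omitted:

    //- pass 1 (serial): hand out an index for every new point
    labelLongList patchPoints;
    forAll(bPoints, bpI)
    {
        if( !patchVertex[bpI] )
            continue;

        patchPoints.append(bpI);
        newLabelForVertex_[bPoints[bpI]] = nPoints_++;
    }

    //- allocate storage once, so the parallel loop never reallocates
    points.setSize(nPoints_);

    //- pass 2 (parallel): compute the coordinates, no locking required
    # ifdef USE_OMP
    # pragma omp parallel for schedule(dynamic, 50)
    # endif
    forAll(patchPoints, i)
    {
        const label bpI = patchPoints[i];
        points[newLabelForVertex_[bPoints[bpI]]] =
            createNewVertex(bpI, treatPatches, patchVertex);
    }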

View file

@ -40,12 +40,13 @@ label boundaryLayers::findNewNodeLabel
const label pKey
) const
{
if( otherVrts_.find(pointI) != otherVrts_.end() )
const std::map
<
label, std::map<std::pair<label, label>, label>
>::const_iterator it = otherVrts_.find(pointI);
if( it != otherVrts_.end() )
{
const std::map
<
label, std::map<std::pair<label, label>, label>
>::const_iterator it = otherVrts_.find(pointI);
const std::map<std::pair<label, label>, label>& m = it->second;
std::map<std::pair<label, label>, label>::const_iterator mit;
@ -90,7 +91,7 @@ inline void boundaryLayers::createNewCellFromEdge
otherVrts_.find(e.end())->second;
# ifdef DEBUGLayer
Info << "Creating cell for edge " << edgeI << " with nodes " << e << endl;
Info << "Creating cell for edge with nodes " << e << endl;
Info << "pKeyI " << pKeyI << endl;
Info << "pKeyJ " << pKeyJ << endl;
std::map<std::pair<label, label>, label>::const_iterator iter;
@ -208,6 +209,22 @@ inline void boundaryLayers::createNewCellFromEdge
cellFaces[5][1] = p0s;
cellFaces[5][2] = ns;
cellFaces[5][3] = p1s;
# ifdef DEBUGLayer
forAll(cellFaces, fI)
{
forAll(cellFaces[fI], pI)
{
if
(
cellFaces[fI][pI] < 0 ||
cellFaces[fI][pI] >= mesh_.points().size()
)
FatalError << "Invalid point indices found!"
<< abort(FatalError);
}
}
# endif
}
inline void boundaryLayers::createNewCellFromNode
@ -258,7 +275,6 @@ inline void boundaryLayers::createNewCellFromNode
const label p12 = m.find(pr)->second;
//- create the cell and append it
//- F0
cellFaces[0][0] = pointI;
cellFaces[0][1] = p02;

View file

@ -36,8 +36,6 @@ SourceFiles
#ifndef extrudeLayer_H
#define extrudeLayer_H
#include "objectRegistry.H"
#include "Time.H"
#include "polyMeshGenModifier.H"
#include "VRWGraphList.H"
#include "labelPair.H"

View file

@ -156,7 +156,12 @@ void refineBoundaryLayers::setNumberOfLayersForPatch
return;
}
numLayersForPatch_[patchName] = nLayers;
const labelList matchedIDs = mesh_.findPatches(patchName);
forAll(matchedIDs, matchI)
{
numLayersForPatch_[mesh_.getPatchName(matchedIDs[matchI])] = nLayers;
}
}
void refineBoundaryLayers::setThicknessRatioForPatch
@ -177,7 +182,13 @@ void refineBoundaryLayers::setThicknessRatioForPatch
return;
}
thicknessRatioForPatch_[patchName] = thicknessRatio;
const labelList matchedIDs = mesh_.findPatches(patchName);
forAll(matchedIDs, matchI)
{
const word pName = mesh_.getPatchName(matchedIDs[matchI]);
thicknessRatioForPatch_[pName] = thicknessRatio;
}
}
void refineBoundaryLayers::setMaxThicknessOfFirstLayerForPatch
@ -198,12 +209,24 @@ void refineBoundaryLayers::setMaxThicknessOfFirstLayerForPatch
return;
}
maxThicknessForPatch_[patchName] = maxThickness;
const labelList matchedIDs = mesh_.findPatches(patchName);
forAll(matchedIDs, matchI)
{
const word pName = mesh_.getPatchName(matchedIDs[matchI]);
maxThicknessForPatch_[pName] = maxThickness;
}
}
void refineBoundaryLayers::setInteruptForPatch(const word& patchName)
{
discontinuousLayersForPatch_.insert(patchName);
const labelList matchedIDs = mesh_.findPatches(patchName);
forAll(matchedIDs, matchI)
{
const word pName = mesh_.getPatchName(matchedIDs[matchI]);
discontinuousLayersForPatch_.insert(pName);
}
}
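Because every setter now expands its patch name through mesh_.findPatches(), a single call can address all patches matched by a regular expression. A hedged usage sketch; the refineBoundaryLayers(polyMeshGen&) constructor and the patch names are assumptions for the example and are not part of this hunk:

    refineBoundaryLayers refiner(mesh);

    //- "wall.*" is expanded via findPatches(), so every patch whose name
    //- matches the regex receives the same layer settings
    refiner.setNumberOfLayersForPatch("wall.*", 3);
    refiner.setThicknessRatioForPatch("wall.*", 1.2);

    refiner.refineLayers();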
void refineBoundaryLayers::refineLayers()

View file

@ -36,8 +36,6 @@ SourceFiles
#ifndef refineBoundaryLayers_H
#define refineBoundaryLayers_H
#include "objectRegistry.H"
#include "Time.H"
#include "polyMeshGenModifier.H"
#include "meshSurfaceEngine.H"
#include "DynList.H"

View file

@ -38,6 +38,85 @@ namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void checkMeshDict::checkBasicSettings() const
{
//- check if maxCellSize is valid
const scalar maxCellSize = readScalar(meshDict_.lookup("maxCellSize"));
if( maxCellSize < 0 )
FatalErrorIn
(
"void checkMeshDict::checkBasicSettings() const"
) << "maxCellSize is negative! Cannot generate the mesh!!"
<< exit(FatalError);
//- check if boundaryCellSize makes sense
if( meshDict_.found("boundaryCellSize") )
{
const scalar bcs = readScalar(meshDict_.lookup("boundaryCellSize"));
if( bcs < 0 )
{
WarningIn
(
"void checkMeshDict::checkBasicSettings() const"
) << "Boundary cell size is negative!!" << endl;
}
if( meshDict_.found("boundaryCellSizeRefinementThickness") )
{
const scalar thickness =
readScalar
(
meshDict_.lookup("boundaryCellSizeRefinementThickness")
);
if( thickness < 0 )
{
WarningIn
(
"void checkMeshDict::checkBasicSettings() const"
) << "Boundary cell size refinement thickness is negative!!"
<< endl;
}
}
}
//- check if minCellSize is valid
if( meshDict_.found("minCellSize") )
{
const scalar mcs = readScalar(meshDict_.lookup("minCellSize"));
if( mcs < 0 )
{
FatalErrorIn
(
"void checkMeshDict::checkBasicSettings() const"
) << "Minimum cell size for automatic refinement is negative!!"
<< exit(FatalError);
}
}
//- check if keepCellsIntersectingBoundary can be read correctly
if( meshDict_.found("keepCellsIntersectingBoundary") )
{
const bool keep =
readBool(meshDict_.lookup("keepCellsIntersectingBoundary"));
if( keep && meshDict_.found("checkForGluedMesh") )
{
readBool(meshDict_.lookup("checkForGluedMesh"));
}
}
//- check if enforceConstraints is available
if( meshDict_.found("enforceGeometryConstraints") )
{
readBool(meshDict_.lookup("enforceGeometryConstraints"));
}
}
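checkBasicSettings() only validates entries that are present; the generators read the same optional switches with a found()/lookup pair or, equivalently, with dictionary::lookupOrDefault. A minimal sketch using the entry names checked above (meshDict stands for the IOdictionary already held by the caller):

    const scalar maxCellSize = readScalar(meshDict.lookup("maxCellSize"));

    //- optional entries fall back to defaults when absent
    const bool enforceConstraints =
        meshDict.lookupOrDefault<bool>("enforceGeometryConstraints", false);

    scalar boundaryCellSize = maxCellSize;
    if( meshDict.found("boundaryCellSize") )
        boundaryCellSize = readScalar(meshDict.lookup("boundaryCellSize"));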
void checkMeshDict::checkPatchCellSize() const
{
if( meshDict_.found("patchCellSize") )
@ -92,14 +171,14 @@ void checkMeshDict::checkLocalRefinementLevel() const
{
const scalar cs = readScalar(dict.lookup("cellSize"));
if( cs > 0.0 )
continue;
WarningIn
(
if( cs < 0.0 )
{
WarningIn
(
"void checkMeshDict::checkLocalRefinementLevel() const"
) << "Cell size for " << entries[dictI]
<< " is negative" << endl;
) << "Cell size for " << entries[dictI]
<< " is negative" << endl;
}
}
else if( dict.found("additionalRefinementLevels") )
{
@ -107,13 +186,13 @@ void checkMeshDict::checkLocalRefinementLevel() const
readLabel(dict.lookup("additionalRefinementLevels"));
if( nLevels > 0 )
continue;
WarningIn
(
{
WarningIn
(
"void checkMeshDict::checkLocalRefinementLevel() const"
) << "Refinement level for " << entries[dictI]
<< " is negative" << endl;
) << "Refinement level for " << entries[dictI]
<< " is negative" << endl;
}
}
else
{
@ -124,6 +203,21 @@ void checkMeshDict::checkLocalRefinementLevel() const
<< " additionalRefinementLevels or cellSize"
<< "for " << entries[dictI] << exit(FatalError);
}
if( dict.found("refinementThickness") )
{
const scalar s =
readScalar(dict.lookup("refinementThickness"));
if( s < 0 )
{
WarningIn
(
"void checkMeshDict::checkLocalRefinementLevel() const"
) << "Refinement thickness for " << entries[dictI]
<< " is negative" << endl;
}
}
}
}
else
@ -223,6 +317,130 @@ void checkMeshDict::checkObjectRefinements() const
);
}
}
forAll(refObjects, oI)
{
if( refObjects[oI].cellSize() < 0.0 )
{
WarningIn
(
"void checkMeshDict::checkObjectRefinements() const"
) << "Cell size specified for object " << refObjects[oI].name()
<< " is negative!!" << endl;
}
if( refObjects[oI].refinementThickness() < 0.0 )
{
WarningIn
(
"void checkMeshDict::checkObjectRefinements() const"
) << "Refinement thickness specified for object "
<< refObjects[oI].name() << " is negative!!" << endl;
}
}
}
}
void checkMeshDict::checkSurfaceRefinements() const
{
if( meshDict_.found("surfaceMeshRefinement") )
{
const dictionary& surfaces = meshDict_.subDict("surfaceMeshRefinement");
const wordList surfaceSources = surfaces.toc();
forAll(surfaceSources, surfI)
{
if( surfaces.isDict(surfaceSources[surfI]) )
{
const dictionary& dict =
surfaces.subDict(surfaceSources[surfI]);
if( dict.found("surfaceFile") )
{
const fileName fName(dict.lookup("surfaceFile"));
if( !isFile(fName) )
FatalErrorIn
(
"void checkMeshDict::checkSurfaceRefinements() const"
) << "Surface file " << fName
<< " does not exist or is not readable!!"
<< exit(FatalError);
}
else
{
FatalErrorIn
(
"void checkMeshDict::checkSurfaceRefinements() const"
) << "Missing surfaceFile for entry "
<< surfaceSources[surfI] << exit(FatalError);
}
if( dict.found("cellSize") )
{
const scalar cs = readScalar(dict.lookup("cellSize"));
if( cs < VSMALL )
FatalErrorIn
(
"void checkMeshDict::"
"checkSurfaceRefinements() const"
) << "Cell size for entry " << surfaceSources[surfI]
<< " is extremely small or negative!!"
<< exit(FatalError);
}
else if( dict.found("additionalRefinementLevels") )
{
const label nLev =
readLabel(dict.lookup("additionalRefinementLevels"));
if( nLev < 0 )
{
FatalErrorIn
(
"void checkMeshDict::"
"checkSurfaceRefinements() const"
) << "Number refinement levels for entry "
<< surfaceSources[surfI] << " is negative!!"
<< exit(FatalError);
}
}
else
{
FatalErrorIn
(
"void checkMeshDict::checkSurfaceRefinements() const"
) << "Missing cellSize or additionalRefinementLevels"
<< " for entry " << surfaceSources[surfI]
<< exit(FatalError);
}
if( dict.found("refinementThickness") )
{
const scalar cs =
readScalar(dict.lookup("refinementThickness"));
if( cs < VSMALL )
WarningIn
(
"void checkMeshDict::"
"checkSurfaceRefinements() const"
) << "Refinement thickness for entry "
<< surfaceSources[surfI]
<< " is extremely small or negative!!" << endl;
}
}
else
{
FatalErrorIn
(
"void checkMeshDict::checkSurfaceRefinements() const"
) << "Dictionary " << surfaceSources[surfI]
<< " does not exist!!"
<< exit(FatalError);
}
}
}
}
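This check mirrors what meshOctreeCreator::refineBoxesIntersectingSurfaces() reads later in this commit. A minimal sketch of the read side; the sub-dictionary layout shown in the comment is illustrative only:

    //- expected layout of one entry inside surfaceMeshRefinement:
    //-     refZone { surfaceFile "refRegion.stl"; cellSize 0.05;
    //-               refinementThickness 0.2; }
    const dictionary& surfDict = meshDict.subDict("surfaceMeshRefinement");
    const wordList entries = surfDict.toc();

    forAll(entries, i)
    {
        const dictionary& dict = surfDict.subDict(entries[i]);

        const fileName surfFile(dict.lookup("surfaceFile"));
        const scalar cs = readScalar(dict.lookup("cellSize"));

        //- every octree box intersected by the surface in surfFile is
        //- refined until its size drops below cs
    }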
@ -352,10 +570,14 @@ void checkMeshDict::checkRenameBoundary() const
void checkMeshDict::checkEntries() const
{
checkBasicSettings();
checkPatchCellSize();
checkSubsetCellSize();
checkSurfaceRefinements();
checkKeepCellsIntersectingPatches();
checkRemoveCellsIntersectingPatches();

View file

@ -35,8 +35,6 @@ SourceFiles
#ifndef checkMeshDict_H
#define checkMeshDict_H
#include "objectRegistry.H"
#include "Time.H"
#include "IOdictionary.H"
#include <map>
@ -56,6 +54,9 @@ class checkMeshDict
IOdictionary& meshDict_;
// Private member functions
//- check settings for cell size in meshDict
void checkBasicSettings() const;
//- check patchCellSize entry
void checkPatchCellSize() const;
@ -74,6 +75,9 @@ class checkMeshDict
//- check objectRefinements entry
void checkObjectRefinements() const;
//- check surfaceRefinements entry
void checkSurfaceRefinements() const;
//- check entry for boundary layers
void checkBoundaryLayers() const;

View file

@ -27,6 +27,8 @@ License
#include "Ostream.H"
#include "token.H"
#include "Time.H"
// * * * * * * * * * * * * * * * IOstream Operators * * * * * * * * * * * * //
template<class T, Foam::label Offset>

View file

@ -42,7 +42,6 @@ SourceFiles
#include "label.H"
#include "bool.H"
#include "IOstreams.H"
#include "error.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View file

@ -66,11 +66,21 @@ namespace help
bool isinf(const ListType&);
//- check if the faces share a convex edge
bool isSharedEdgeConvex
template<class Face1, class Face2>
inline bool isSharedEdgeConvex
(
const pointField& points,
const face& f1,
const face& f2
const Face1& f1,
const Face2& f2
);
//- angle between the two faces in radians
template<class Face1, class Face2>
inline scalar angleBetweenFaces
(
const pointField& points,
const Face1& f1,
const Face2& f2
);
// merge faces which are in the same patch

View file

@ -70,35 +70,43 @@ bool isinf(const ListType& l)
return false;
}
template<class Face1, class Face2>
inline bool isSharedEdgeConvex
(
const pointField& points,
const face& f1,
const face& f2
const Face1& f1,
const Face2& f2
)
{
face triOwn(3);
face triNei(3);
DynList<label, 3> triOwn(3);
DynList<label, 3> triNei(3);
forAll(f1, pI)
{
label pos = f2.which(f1[pI]);
if( pos == -1 )
label pos(-1);
forAll(f2, pJ)
if( f2[pJ] == f1[pI] )
{
pos = pJ;
break;
}
if( pos < 0 )
continue;
triNei[0] = f2[pos];
triNei[1] = f2.nextLabel(pos);
triNei[2] = f2.prevLabel(pos);
triNei[1] = f2[f2.fcIndex(pos)];
triNei[2] = f2[f2.rcIndex(pos)];
triOwn[0] = f1[pI];
triOwn[1] = f1.nextLabel(pI);
triOwn[2] = f1.prevLabel(pI);
triOwn[1] = f1[f1.fcIndex(pI)];
triOwn[2] = f1[f1.rcIndex(pI)];
scalar vol(0.0);
forAll(triOwn, pJ)
{
if( triNei.which(triOwn[pJ]) == -1 )
if( !triNei.contains(triOwn[pJ]) )
{
tetrahedron<point, point> tet
(
@ -120,6 +128,107 @@ inline bool isSharedEdgeConvex
return true;
}
template<class Face1, class Face2>
inline scalar angleBetweenFaces
(
const pointField& points,
const Face1& f1,
const Face2& f2
)
{
DynList<label, 3> triOwn(3);
DynList<label, 3> triNei(3);
scalar angle(0.0);
label counter(0);
forAll(f1, pI)
{
label pos(-1);
forAll(f2, pJ)
if( f2[pJ] == f1[pI] )
{
pos = pJ;
break;
}
if( pos < 0 )
continue;
triNei[0] = f2[pos];
triNei[1] = f2[f2.fcIndex(pos)];
triNei[2] = f2[f2.rcIndex(pos)];
triOwn[0] = f1[pI];
triOwn[1] = f1[f1.fcIndex(pI)];
triOwn[2] = f1[f1.rcIndex(pI)];
scalar vol(0.0);
forAll(triOwn, pJ)
{
if( !triNei.contains(triOwn[pJ]) )
{
tetrahedron<point, point> tet
(
points[triNei[0]],
points[triNei[1]],
points[triNei[2]],
points[triOwn[pJ]]
);
vol = tet.mag();
break;
}
}
vector nOwn
(
(points[triOwn[1]] - points[triOwn[0]]) ^
(points[triOwn[2]] - points[triOwn[0]])
);
nOwn /= (mag(nOwn) + VSMALL);
vector nNei
(
(points[triNei[1]] - points[triNei[0]]) ^
(points[triNei[2]] - points[triNei[0]])
);
nNei /= (mag(nNei) + VSMALL);
const scalar dot = Foam::max(-1.0, Foam::min(nOwn & nNei, 1.0));
if( vol > -VSMALL )
{
//- the angle is in the interval [Pi, 2Pi>
const scalar ang = Foam::acos(dot);
angle += ang + M_PI;
++counter;
}
else
{
//- the angle is in the interval [0, Pi>
const scalar ang = Foam::acos(-dot);
angle += ang;
++counter;
}
}
if( counter == 0 )
{
FatalErrorIn
(
"scalar angleBetweenFaces"
"(const pointField&, const face&, const face&)"
) << "Faces " << f1 << " and " << f2
<< " do no share an edge" << abort(FatalError);
}
return angle / counter;
}
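The helper averages the dihedral angle over the shared edge, measured so that a flat connection evaluates to roughly pi. The callers changed in this commit flag edges whose angle exceeds 0.75*M_PI, i.e. anything flatter than 135 degrees, alongside edges that fail the exact convexity test. A minimal sketch of that combined test as used in boundaryLayers::findPatchesToBeTreatedTogether():

    //- treat the edge shared by boundary faces f1 and f2 as non-convex if
    //- it fails the exact test or if the averaged dihedral angle is above
    //- 135 degrees
    const bool sharpOrConcave =
        !help::isSharedEdgeConvex(points, f1, f2)
     || (help::angleBetweenFaces(points, f1, f2) > 0.75*M_PI);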
inline faceList mergePatchFaces
(
const List< DynList<label> >& pfcs,
@ -769,7 +878,7 @@ inline point nearestPointOnTheEdgeExact
if( d < VSMALL )
return edgePoint0;
const scalar t = (e & k) / (d * d);
const scalar t = (e & k) / (d * d + VSMALL);
if( t > 1.0 )
{
return edgePoint1;

View file

@ -362,6 +362,9 @@ void partTriMesh::updateVerticesSMP(const List<LongList<labelledPoint> >& np)
pts[pI] = centre / faceArea;
}
}
if( Pstream::parRun() )
updateBufferLayers();
}
void partTriMesh::updateVertices()
@ -398,6 +401,9 @@ void partTriMesh::updateVertices(const labelLongList& movedPoints)
const label pointI = bPoints[bpI];
const label triPointI = meshSurfacePointLabelInTriMesh_[bpI];
if( triPointI < 0 )
continue;
pts[triPointI] = points[pointI];
updateType[triPointI] |= SMOOTH;
@ -495,6 +501,9 @@ void partTriMesh::updateVertices(const labelLongList& movedPoints)
pts[pI] = centre / faceArea;
}
}
if( Pstream::parRun() )
updateBufferLayers();
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View file

@ -278,7 +278,7 @@ void partTriMesh::createBufferLayers()
std::make_pair(neiProcs[procI], LongList<parTriFace>())
);
//- go through the tets and add the ones having vertices at parallel
//- loop over triangles and add the ones having vertices at parallel
//- boundaries for sending
forAll(surf_, triI)
{
@ -328,75 +328,93 @@ void partTriMesh::createBufferLayers()
}
}
LongList<parTriFace> receivedTrias;
help::exchangeMap(exchangeTrias, receivedTrias);
//- receive triangles sent to this processor
std::map<label, List<parTriFace> > receivedTriangles;
help::exchangeMap(exchangeTrias, receivedTriangles);
exchangeTrias.clear();
//- add triangles into the mesh and update the addressing
Map<label> newGlobalToLocal;
std::map<label, point> addCoordinates;
label nPoints = pts.size();
forAll(receivedTrias, i)
for
(
std::map<label, List<parTriFace> >::const_iterator it =
receivedTriangles.begin();
it!=receivedTriangles.end();
++it
)
{
const parTriFace& tri = receivedTrias[i];
const List<parTriFace>& receivedTrias = it->second;
DynList<label, 3> triPointLabels;
for(label j=0;j<3;++j)
forAll(receivedTrias, i)
{
const label gpI = tri.globalLabelOfPoint(j);
const parTriFace& tri = receivedTrias[i];
if( globalToLocal.found(gpI) )
DynList<label, 3> triPointLabels(3);
for(label j=0;j<3;++j)
{
const label pI = globalToLocal[gpI];
triPointLabels.append(pI);
}
else if( newGlobalToLocal.found(gpI) )
{
triPointLabels.append(newGlobalToLocal[gpI]);
}
else
{
newGlobalToLocal.insert(gpI, nPoints);
triPointLabels.append(nPoints);
const label gpI = tri.globalLabelOfPoint(j);
point tp;
if( j == 0 )
if( globalToLocal.found(gpI) )
{
tp = tri.trianglePoints().a();
//- point already exists in the triangulation
const label pI = globalToLocal[gpI];
triPointLabels[j] = pI;
}
else if( j == 1 )
else if( newGlobalToLocal.found(gpI) )
{
tp = tri.trianglePoints().b();
//- point is already added into the triangulation
triPointLabels[j] = newGlobalToLocal[gpI];
pProcs.appendIfNotIn(newGlobalToLocal[gpI], it->first);
}
else
{
tp = tri.trianglePoints().c();
//- point does not exist in the triangulation
//- and is not yet added in
newGlobalToLocal.insert(gpI, nPoints);
triPointLabels[j] = nPoints;
point tp;
if( j == 0 )
{
tp = tri.trianglePoints().a();
}
else if( j == 1 )
{
tp = tri.trianglePoints().b();
}
else
{
tp = tri.trianglePoints().c();
}
addCoordinates[nPoints] = tp;
++nPoints;
pointLabelInMeshSurface_.append(-1);
pointType_.append(NONE);
DynList<label> triAtProcs;
triAtProcs.append(it->first);
globalPointLabel.append(gpI);
triAtProcs.append(Pstream::myProcNo());
pProcs.appendList(triAtProcs);
}
addCoordinates[nPoints] = tp;
++nPoints;
pointLabelInMeshSurface_.append(-1);
pointType_.append(NONE);
DynList<label> helper;
helper.append(surf_.size());
globalPointLabel.append(gpI);
helper[0] = Pstream::myProcNo();
pProcs.appendList(helper);
}
}
//- append triangle
surf_.appendTriangle
(
labelledTri
//- append triangle
surf_.appendTriangle
(
triPointLabels[0],
triPointLabels[1],
triPointLabels[2],
-1
)
);
labelledTri
(
triPointLabels[0],
triPointLabels[1],
triPointLabels[2],
-1
)
);
}
}
//- store newly added points
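The send/receive logic above follows the map-based exchange idiom used throughout cfMesh: data intended for each neighbouring processor is collected in a std::map keyed by the processor number and passed to help::exchangeMap(), which returns everything received from the other ranks. A stripped-down sketch of the pattern as it appears in this hunk:

    //- one list of triangles per neighbouring processor
    std::map<label, LongList<parTriFace> > exchangeTrias;
    forAll(neiProcs, procI)
        exchangeTrias.insert
        (
            std::make_pair(neiProcs[procI], LongList<parTriFace>())
        );

    //- ... append the parTriFace entries lying at inter-processor
    //- boundaries to the list of their destination processor ...

    //- exchange; the received data is keyed by the sending processor
    std::map<label, List<parTriFace> > receivedTriangles;
    help::exchangeMap(exchangeTrias, receivedTriangles);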

View file

@ -56,8 +56,7 @@ boundaryPatch::boundaryPatch
boundaryPatch::boundaryPatch(const word& name, const dictionary& dict)
:
boundaryPatchBase(name, dict)
{
}
{}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -80,7 +79,7 @@ void boundaryPatch::write(Ostream& os) const
void boundaryPatch::writeDict(Ostream& os) const
{
this->operator<<(os);
}
Ostream& boundaryPatch::operator<<(Ostream& os) const

View file

@ -31,6 +31,7 @@ Description
#include "IOobjectList.H"
#include "faceSet.H"
#include "demandDrivenData.H"
#include "stringListOps.H"
namespace Foam
{
@ -66,8 +67,7 @@ polyMeshGenFaces::polyMeshGenFaces(const Time& runTime)
nIntFaces_(0),
ownerPtr_(NULL),
neighbourPtr_(NULL)
{
}
{}
//- Construct from components without the boundary
polyMeshGenFaces::polyMeshGenFaces
@ -95,8 +95,7 @@ polyMeshGenFaces::polyMeshGenFaces
nIntFaces_(0),
ownerPtr_(NULL),
neighbourPtr_(NULL)
{
}
{}
//- Construct from components with the boundary
polyMeshGenFaces::polyMeshGenFaces
@ -201,6 +200,65 @@ label polyMeshGenFaces::faceIsInPatch(const label faceLabel) const
return -1;
}
wordList polyMeshGenFaces::patchNames() const
{
wordList t(boundaries_.size());
forAll(boundaries_, patchI)
{
t[patchI] = boundaries_[patchI].patchName();
}
return t;
}
label polyMeshGenFaces::getPatchID(const word& patchName) const
{
forAll(boundaries_, patchI)
{
if(boundaries_.set(patchI))
{
if(boundaries_[patchI].patchName() == patchName)
{
return patchI;
}
}
}
// If the code gets here, it implies that the patch was not found.
// return a -1 in this case
return -1;
}
word polyMeshGenFaces::getPatchName(const label patchID) const
{
if((patchID < 0) || (patchID >= boundaries_.size()))
{
FatalErrorIn
(
"polyMeshGenFaces::getPatchName(const label patchID) const"
) << "invalid patch ID supplied"
<< abort(FatalError);
}
return boundaries_[patchID].patchName();
}
labelList polyMeshGenFaces::findPatches(const word& patchName) const
{
wordList allPatches = patchNames();
labelList patchIDs = findStrings(patchName, allPatches);
if(patchIDs.empty())
{
WarningIn("polyMeshGenFaces::findPatches(const word&)")
<< "Cannot find any patch names matching " << patchName << endl;
}
return patchIDs;
}
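These accessors give the volume mesh the same name-based patch lookup that the surface classes receive in this commit; findPatches() expands regular expressions via findStrings(). A short usage sketch with illustrative patch names (mesh stands for any polyMeshGen-derived object):

    const label inletID = mesh.getPatchID("inlet");
    if( inletID < 0 )
        Info << "No patch named inlet" << endl;

    //- expand a regex into the indices of all matching patches
    const labelList wallIDs = mesh.findPatches("wall.*");
    forAll(wallIDs, i)
        Info << "Matched patch " << mesh.getPatchName(wallIDs[i]) << endl;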
label polyMeshGenFaces::addFaceSubset(const word& setName)
{
label id = faceSubsetIndex(setName);
@ -313,7 +371,7 @@ void polyMeshGenFaces::read()
if( neighbourPtr_->size() != ownerPtr_->size() )
neighbourPtr_->setSize(ownerPtr_->size(), -1);
//- read boundary information
//- read boundary information
IOPtrList<boundaryPatchBase> patches
(
IOobject

View file

@ -49,7 +49,7 @@ namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
class polyMeshGenFaces
: public polyMeshGenPoints
: public polyMeshGenPoints
{
protected:
@ -143,6 +143,19 @@ public:
//- return patch label for the given face label
label faceIsInPatch(const label faceLabel) const;
//- return list of patches in the boundary
wordList patchNames() const;
//- return the index of a patch given its name
label getPatchID(const word& patchName) const;
//- return the name of a patch given its ID
word getPatchName(const label patchID) const;
//- return a list of patch indices corresponding to the given
// name, expanding regular expressions
labelList findPatches(const word& patchName) const;
// Subsets
label addFaceSubset(const word&);

View file

@ -37,7 +37,6 @@ SourceFiles
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#include "objectRegistry.H"
#include "Time.H"
#include "meshSubset.H"
#include "pointFieldPMG.H"

View file

@ -29,7 +29,10 @@ Description
#include "demandDrivenData.H"
#include "IFstream.H"
#include "OFstream.H"
#include "Time.H"
#include "gzstream.h"
#include "triSurface.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View file

@ -29,6 +29,7 @@ Description
#include "pointIOField.H"
#include "IOobjectList.H"
#include "pointSet.H"
#include "stringListOps.H"
namespace Foam
{
@ -72,6 +73,35 @@ triSurfFacets::~triSurfFacets()
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
wordList triSurfFacets::patchNames() const
{
wordList t(patches_.size());
forAll(patches_, patchI)
{
t[patchI] = patches_[patchI].name();
}
return t;
}
labelList triSurfFacets::findPatches(const word& patchName) const
{
const wordList allPatches = patchNames();
const labelList patchIDs = findStrings(patchName, allPatches);
# ifdef DEBUGtriSurf
if(patchIDs.empty())
{
WarningIn("triSurfFacets::findPatches(const word&)")
<< "Cannot find any patch names matching " << patchName << endl;
}
# endif
return patchIDs;
}
label triSurfFacets::addFacetSubset(const word& subsetName)
{
label id = facetSubsetIndex(subsetName);

View file

@ -98,6 +98,13 @@ public:
//- access to patches
inline const geometricSurfacePatchList& patches() const;
//- return list of patches in the boundary
wordList patchNames() const;
//- return a list of patch indices corresponding to the given
// name, expanding regular expressions
labelList findPatches(const word& patchName) const;
//- append a triangle to the end of the list
inline void appendTriangle(const labelledTri& tria);

View file

@ -374,15 +374,17 @@ void meshOctreeAddressing::findUsedBoxes() const
boolList removeFacets(ts.size(), false);
//- remove facets in patches
forAll(ts.patches(), patchI)
forAllConstIter(HashSet<word>, patchesToRemove, it)
{
if( patchesToRemove.found(ts.patches()[patchI].name()) )
const labelList matchedPatches = ts.findPatches(it.key());
boolList activePatch(ts.patches().size(), false);
forAll(matchedPatches, ptchI)
activePatch[matchedPatches[ptchI]] = true;
forAll(ts, triI)
{
forAll(ts, triI)
{
if( ts[triI].region() == patchI )
removeFacets[triI] = true;
}
if( activePatch[ts[triI].region()] )
removeFacets[triI] = true;
}
}
@ -441,20 +443,22 @@ void meshOctreeAddressing::findUsedBoxes() const
const triSurf& ts = octree_.surface();
boolList keepFacets(ts.size(), false);
//- remove facets in patches
forAll(ts.patches(), patchI)
//- keep facets in patches
forAllConstIter(HashSet<word>, patchesToKeep, it)
{
if( patchesToKeep.found(ts.patches()[patchI].name()) )
const labelList matchedPatches = ts.findPatches(it.key());
boolList activePatch(ts.patches().size(), false);
forAll(matchedPatches, ptchI)
activePatch[matchedPatches[ptchI]] = true;
forAll(ts, triI)
{
forAll(ts, triI)
{
if( ts[triI].region() == patchI )
keepFacets[triI] = true;
}
if( activePatch[ts[triI].region()] )
keepFacets[triI] = true;
}
}
//- remove facets in subsets
//- keep facets in subsets
forAllConstIter(wordHashSet, patchesToKeep, it)
{
const label subsetID = ts.facetSubsetIndex(it.key());
@ -900,7 +904,11 @@ void meshOctreeAddressing::createOctreeFaces() const
forAll(sum, lfI)
{
if( Pstream::parRun() && octree_.returnLeaf(lfI).procNo() != Pstream::myProcNo() )
if
(
Pstream::parRun() &&
octree_.returnLeaf(lfI).procNo() != Pstream::myProcNo()
)
continue;
if( (boxType[lfI] & MESHCELL) && (mag(sum[lfI]) > SMALL) )
Info << "Leaf " << lfI << " is not closed " << sum[lfI] << endl;

View file

@ -37,7 +37,6 @@ SourceFiles
#define meshOctreeAutomaticRefinement_H
#include "meshOctreeModifier.H"
#include "IOdictionary.H"
//#include "volFields.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -61,7 +60,7 @@ class meshOctreeAutomaticRefinement
meshOctree& octree_;
//- const reference to mesh dictionary
const dictionary& meshDict_;
const IOdictionary& meshDict_;
//- use DATA boxes
bool useDATABoxes_;

View file

@ -47,7 +47,8 @@ meshOctreeCreator::meshOctreeCreator(meshOctree& mo)
meshDictPtr_(NULL),
hexRefinement_(false),
globalRefLevel_(0),
surfRefLevel_(mo.surface().size(), direction(0))
surfRefLevel_(mo.surface().size(), direction(0)),
surfRefThickness_(mo.surface().size(), 0.0)
{}
meshOctreeCreator::meshOctreeCreator
@ -61,7 +62,8 @@ meshOctreeCreator::meshOctreeCreator
meshDictPtr_(&dict),
hexRefinement_(false),
globalRefLevel_(0),
surfRefLevel_(mo.surface().size(), direction(0))
surfRefLevel_(mo.surface().size(), direction(0)),
surfRefThickness_(mo.surface().size(), 0.0)
{}
/*
@ -77,7 +79,8 @@ meshOctreeCreator::meshOctreeCreator
meshDict_(dict),
hexRefinement_(false),
globalRefLevel_(0),
surfRefLevel_(mo.surface().size(), direction(0))
surfRefLevel_(mo.surface().size(), direction(0)),
surfRefThickness_(mo.surface().size(), 0.0)
{
FatalErrorIn
(

View file

@ -37,7 +37,6 @@ SourceFiles
#include "boolList.H"
#include "DynList.H"
#include "IOdictionary.H"
#include "meshOctreeModifier.H"
//#include "volFields.H"
#include "patchRefinementList.H"
@ -50,6 +49,7 @@ namespace Foam
{
// Forward declarations
class IOdictionary;
class meshOctreeCube;
/*---------------------------------------------------------------------------*\
@ -80,6 +80,10 @@ private:
//- refine boxes contained inside the objects for refinement
void refineBoxesContainedInObjects();
//- refine boxes intersected by surface meshes
//- used as refinement sources
void refineBoxesIntersectingSurfaces();
//- refine boxes near DATA boxes to get a nice smooth surface
void refineBoxesNearDataBoxes(const direction nLayers = 1);
@ -104,6 +108,9 @@ private:
//- this list contains ref level for each surface triangle
List<direction> surfRefLevel_;
//- refinement thickness for each surface triangle
scalarList surfRefThickness_;
//- set the boundBox such that maxCellSize is achieved
void setRootCubeSizeAndRefParameters();
@ -124,15 +131,6 @@ public:
//- Construct from meshOctree and dictionary
meshOctreeCreator(meshOctree& mo, const IOdictionary& dict);
//- Construct from surface, dictionary and cell sizes
/* meshOctreeCreator
(
meshOctree& mo,
const IOdictionary& dict,
const volScalarField& localCellSize
);
*/
// Destructor
~meshOctreeCreator();

View file

@ -32,6 +32,7 @@ Description
#include "objectRefinementList.H"
#include "VRWGraph.H"
#include "meshOctreeModifier.H"
#include "helperFunctions.H"
#include "HashSet.H"
# ifdef USE_OMP
@ -54,7 +55,7 @@ void meshOctreeCreator::refineBoundary()
const LongList<meshOctreeCube*>& leaves = octreeModifier.leavesAccess();
//- refine DATA boxes to the given level
Info << "Refining data boxes to the given size" << endl;
Info << "Refining boundary boxes to the given size" << endl;
label nMarked;
do
@ -66,6 +67,9 @@ void meshOctreeCreator::refineBoundary()
# endif
List<direction> refineCubes(leaves.size(), direction(0));
labelList nLayers(leaves.size(), 0);
List<direction> targetLevel(leaves.size(), direction(0));
bool useNLayers(false);
//- select boxes which need to be refined
# ifdef USE_OMP
@ -86,20 +90,56 @@ void meshOctreeCreator::refineBoundary()
const VRWGraph& containedTriangles =
oc.slotPtr()->containedTriangles_;
const scalar cs = oc.size(octree_.rootBox());
bool refine(false);
forAllRow(containedTriangles, elRowI, tI)
{
const label triI = containedTriangles(elRowI, tI);
if( surfRefLevel_[triI] > oc.level() )
{
++nMarked;
refineCubes[leafI] = 1;
break;
refine = true;
}
if( surfRefThickness_[triI] > VSMALL )
{
useNLayers = true;
nLayers[leafI] =
Foam::max
(
nLayers[leafI],
Foam::max(label(surfRefThickness_[triI]/cs), 1)
);
targetLevel[leafI] =
Foam::max(targetLevel[leafI], surfRefLevel_[triI]);
}
}
if( refine )
{
refineCubes[leafI] = 1;
++nMarked;
}
}
}
//- mark additional boxes for refinement to achieve
//- correct refinement distance
reduce(useNLayers, maxOp<label>());
if( useNLayers )
{
nMarked +=
octreeModifier.markAdditionalLayers
(
refineCubes,
nLayers,
targetLevel
);
}
//- refine boxes
octreeModifier.refineSelectedBoxes(refineCubes, hexRefinement_);
@ -132,7 +172,7 @@ void meshOctreeCreator::refineBoundary()
} while( nMarked );
Info << "Finished refining data boxes" << endl;
Info << "Finished refining boundary boxes" << endl;
}
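The refinement thickness attached to a triangle is converted into a number of box layers at the current octree level; a layer is one box of size cs wide, so the code requests at least surfRefThickness/cs layers and never fewer than one. A self-contained restatement of that conversion, with example values in the comment:

    //- number of cs-sized box layers needed to cover a given refinement
    //- thickness around the intersected boxes
    label layersForThickness(const scalar thickness, const scalar cs)
    {
        return Foam::max(label(thickness/cs), 1);
    }

    //- e.g. a refinementThickness of 0.2 around boxes of size 0.05 marks
    //- 4 layers of boxes, which markAdditionalLayers() then brings to the
    //- target refinement level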
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -147,7 +187,7 @@ void meshOctreeCreator::refineBoxesContainedInObjects()
Info << "Refining boxes inside objects" << endl;
objectRefinementList refObjects;
// Read polyPatchList
// Read objects
if( meshDictPtr_->isDict("objectRefinements") )
{
const dictionary& dict = meshDictPtr_->subDict("objectRefinements");
@ -197,16 +237,23 @@ void meshOctreeCreator::refineBoxesContainedInObjects()
scalar s(readScalar(meshDictPtr_->lookup("maxCellSize")));
List<direction> refLevels(refObjects.size(), globalRefLevel_);
scalarList refThickness(refObjects.size(), 0.0);
forAll(refThickness, oI)
refThickness[oI] = refObjects[oI].refinementThickness();
label nMarked;
do
{
nMarked = 0;
forAll(refObjects, oI)
{
if( refObjects[oI].cellSize() <= s * (1.+SMALL) )
{
++nMarked;
++refLevels[oI];
}
}
s /= 2.0;
@ -238,6 +285,9 @@ void meshOctreeCreator::refineBoxesContainedInObjects()
nMarked = 0;
List<direction> refineCubes(leaves.size(), direction(0));
labelList nLayers(leaves.size(), 0);
List<direction> targetRefLevel(leaves.size(), direction(0));
bool useNLayers(false);
//- select boxes which need to be refined
# ifdef USE_OMP
@ -254,21 +304,55 @@ void meshOctreeCreator::refineBoxesContainedInObjects()
boundBox bb;
oc.cubeBox(rootBox, bb.min(), bb.max());
bool refine(false);
forAll(refObjects, oI)
if(
(oc.level() < refLevels[oI]) &&
refObjects[oI].intersectsObject(bb)
)
{
if( refObjects[oI].intersectsObject(bb) )
{
# ifdef DEBUGSearch
Info << "Marking leaf " << leafI
<< " for refinement" << endl;
# endif
++nMarked;
refineCubes[leafI] = 1;
break;
if( oc.level() < refLevels[oI] )
refine = true;
if( refThickness[oI] > VSMALL )
{
const scalar cs = bb.max().x() - bb.min().x();
nLayers[leafI] =
Foam::max
(
nLayers[leafI],
Foam::max(label(refThickness[oI]/cs), 1)
);
targetRefLevel[leafI] =
Foam::max(targetRefLevel[leafI], refLevels[oI]);
useNLayers = true;
}
}
}
if( refine )
{
refineCubes[leafI] = 1;
++nMarked;
}
}
//- mark additional boxes for refinement to achieve
//- correct refinement distance
reduce(useNLayers, maxOp<label>());
if( useNLayers )
{
nMarked +=
octreeModifier.markAdditionalLayers
(
refineCubes,
nLayers,
targetRefLevel
);
}
//- refine boxes
@ -309,6 +393,234 @@ void meshOctreeCreator::refineBoxesContainedInObjects()
createInsideOutsideInformation();
}
void meshOctreeCreator::refineBoxesIntersectingSurfaces()
{
if( !meshDictPtr_ || !meshDictPtr_->found("surfaceMeshRefinement") )
{
return;
}
Info << "Refining boxes intersecting surface meshes" << endl;
label nMarked;
//- read surface meshes and calculate the refinement level for each
//- surface mesh
const dictionary& surfDict = meshDictPtr_->subDict("surfaceMeshRefinement");
const wordList surfaces = surfDict.toc();
PtrList<triSurf> surfaceMeshesPtr(surfaces.size());
List<direction> refLevels(surfaces.size(), globalRefLevel_);
scalarList refThickness(surfaces.size());
//- load surface meshes into memory
forAll(surfaceMeshesPtr, surfI)
{
const dictionary& dict = surfDict.subDict(surfaces[surfI]);
const fileName fName(dict.lookup("surfaceFile"));
surfaceMeshesPtr.set
(
surfI,
new triSurf(fName)
);
direction addLevel(0);
if( dict.found("cellSize") )
{
scalar s(readScalar(meshDictPtr_->lookup("maxCellSize")));
const scalar cs = readScalar(dict.lookup("cellSize"));
do
{
nMarked = 0;
if( cs <= s * (1.+SMALL) )
{
++nMarked;
++addLevel;
}
s /= 2.0;
} while( nMarked != 0 );
}
else if( dict.found("additionalRefinementLevels") )
{
addLevel =
readLabel(dict.lookup("additionalRefinementLevels"));
}
if( dict.found("refinementThickness") )
{
refThickness[surfI] =
readScalar(dict.lookup("refinementThickness"));
}
//- set the refinement level for the current surface
refLevels[surfI] += addLevel;
}
if( octree_.neiProcs().size() )
forAll(refLevels, oI)
{
label l = refLevels[oI];
reduce(l, maxOp<label>());
refLevels[oI] = l;
}
//- start refining boxes intersecting triangles in each refinement surface
const boundBox& rootBox = octree_.rootBox();
const vector tol = SMALL * rootBox.span();
meshOctreeModifier octreeModifier(octree_);
const LongList<meshOctreeCube*>& leaves = octreeModifier.leavesAccess();
DynList<label> leavesInBox, intersectedLeaves;
do
{
# ifdef OCTREETiming
const scalar startIter = omp_get_wtime();
# endif
nMarked = 0;
List<direction> refineCubes(leaves.size(), direction(0));
labelList nLayers(leaves.size(), 0);
List<direction> targetRefLevel(leaves.size(), direction(0));
bool useNLayers(false);
//- select boxes which need to be refined
forAll(surfaceMeshesPtr, surfI)
{
const triSurf& surf = surfaceMeshesPtr[surfI];
const pointField& points = surf.points();
# ifdef USE_OMP
# pragma omp parallel for \
reduction( + : nMarked) schedule(dynamic, 10) \
private(leavesInBox,intersectedLeaves)
# endif
forAll(surf, triI)
{
//- find the bounding box of the current triangle
const labelledTri& tri = surf[triI];
boundBox triBB(points[tri[0]], points[tri[0]]);
for(label pI=1;pI<3;++pI)
{
triBB.min() = Foam::min(triBB.min(), points[tri[pI]]);
triBB.max() = Foam::max(triBB.max(), points[tri[pI]]);
}
triBB.min() -= tol;
triBB.max() += tol;
//- find octree leaves inside the bounding box
leavesInBox.clear();
octree_.findLeavesContainedInBox(triBB, leavesInBox);
//- check which of the leaves are intersected by the triangle
intersectedLeaves.clear();
forAll(leavesInBox, i)
{
const label leafI = leavesInBox[i];
const meshOctreeCube& oc = *leaves[leafI];
if( oc.intersectsTriangleExact(surf, rootBox, triI) )
{
intersectedLeaves.append(leafI);
if( oc.level() < refLevels[surfI] )
{
# ifdef DEBUGSearch
Info << "Marking leaf " << leafI
<< " with coordinates " << oc
<< " for refinement" << endl;
# endif
if( !refineCubes[leafI] )
++nMarked;
refineCubes[leafI] = 1;
}
}
}
if( refThickness[surfI] > VSMALL )
{
useNLayers = true;
forAll(intersectedLeaves, i)
{
const label leafI = intersectedLeaves[i];
const meshOctreeCube& oc = *leaves[leafI];
const scalar cs = oc.size(rootBox);
nLayers[leafI] =
Foam::max
(
nLayers[leafI],
max(label(refThickness[surfI]/cs), 1)
);
targetRefLevel[leafI] =
Foam::max
(
targetRefLevel[leafI],
refLevels[surfI]
);
}
}
}
}
//- mark additional boxes for refinement to achieve
//- correct refinement distance
reduce(useNLayers, maxOp<label>());
if( useNLayers )
{
nMarked +=
octreeModifier.markAdditionalLayers
(
refineCubes,
nLayers,
targetRefLevel
);
}
//- refine boxes
octreeModifier.refineSelectedBoxes(refineCubes, hexRefinement_);
# ifdef OCTREETiming
const scalar refTime = omp_get_wtime();
Info << "Time for refinement " << (refTime-startIter) << endl;
# endif
if( octree_.neiProcs().size() != 0 )
{
reduce(nMarked, sumOp<label>());
if( nMarked )
{
octreeModifier.distributeLeavesToProcessors();
# ifdef OCTREETiming
const scalar distTime = omp_get_wtime();
Info << "Time for distributing data to processors "
<< (distTime-refTime) << endl;
# endif
loadDistribution(false);
# ifdef OCTREETiming
Info << "Time for load distribution "
<< (omp_get_wtime()-distTime) << endl;
# endif
}
}
} while( nMarked != 0 );
Info << "Finished refinement of boxes intersecting surface meshes" << endl;
}
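The do/while near the top of this function converts an absolute cellSize into a number of additional octree levels: maxCellSize is halved until it drops below the requested size, and the halvings are counted. A self-contained sketch of that computation, with example values in the comment:

    //- number of octree halvings after which boxes of size maxCellSize
    //- become no larger than the requested cellSize
    label levelsForCellSize(const scalar maxCellSize, const scalar cellSize)
    {
        label nLevels(0);
        scalar s = maxCellSize;

        while( cellSize <= s*(1.0 + SMALL) )
        {
            ++nLevels;
            s /= 2.0;
        }

        return nLevels;
    }

    //- e.g. maxCellSize = 1.0 and cellSize = 0.05 give 5 levels, i.e. boxes
    //- of size 1/32 = 0.03125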
void meshOctreeCreator::refineBoxesNearDataBoxes(const direction nLayers)
{
# ifdef OCTREETiming

View file

@ -156,6 +156,16 @@ void meshOctreeCreator::setRootCubeSizeAndRefParameters()
Info << "Requested boundary cell size corresponds to octree level "
<< label(boundaryRefLevel_) << endl;
if( meshDictPtr_->found("boundaryCellSizeRefinementThickness") )
{
const scalar s =
readScalar
(
meshDictPtr_->lookup("boundaryCellSizeRefinementThickness")
);
surfRefThickness_ = mag(s);
}
surfRefLevel_ = boundaryRefLevel_;
}
@ -169,7 +179,10 @@ void meshOctreeCreator::setRootCubeSizeAndRefParameters()
const dictionary& dict = meshDictPtr_->subDict("patchCellSize");
const wordList patchNames = dict.toc();
refPatches.setSize(patchNames.size());
const wordList allPatches = surface.patchNames();
refPatches.setSize(allPatches.size());
label counter(0);
forAll(patchNames, patchI)
@ -180,8 +193,13 @@ void meshOctreeCreator::setRootCubeSizeAndRefParameters()
const dictionary& patchDict = dict.subDict(patchNames[patchI]);
const scalar cs = readScalar(patchDict.lookup("cellSize"));
refPatches[counter] = patchRefinement(patchNames[patchI], cs);
++counter;
labelList matchedIDs = surface.findPatches(patchNames[patchI]);
forAll(matchedIDs, matchI)
{
refPatches[counter] =
patchRefinement(allPatches[matchedIDs[matchI]], cs);
++counter;
}
}
refPatches.setSize(counter);
@ -317,11 +335,6 @@ void meshOctreeCreator::setRootCubeSizeAndRefParameters()
const dictionary& dict = meshDictPtr_->subDict("localRefinement");
const wordList entries = dict.toc();
//- map patch name to its index
std::map<word, label> patchToIndex;
forAll(surface.patches(), patchI)
patchToIndex[surface.patches()[patchI].name()] = patchI;
//- map a facet subset name to its index
std::map<word, label> setToIndex;
DynList<label> setIDs;
@ -367,18 +380,35 @@ void meshOctreeCreator::setRootCubeSizeAndRefParameters()
} while( !finished );
}
scalar refinementThickness(0.0);
if( patchDict.found("refinementThickness") )
{
refinementThickness =
readScalar(patchDict.lookup("refinementThickness"));
}
const direction level = globalRefLevel_ + nLevel;
if( patchToIndex.find(pName) != patchToIndex.end() )
const labelList matchedPatches = surface.findPatches(pName);
forAll(matchedPatches, matchI)
{
//- patch-based refinement
const label patchI = patchToIndex[pName];
const label patchI = matchedPatches[matchI];
forAll(surface, triI)
{
if( surface[triI].region() == patchI )
{
surfRefLevel_[triI] =
Foam::max(surfRefLevel_[triI], level);
surfRefThickness_[triI] =
Foam::max
(
surfRefThickness_[triI],
refinementThickness
);
}
}
}
if( setToIndex.find(pName) != setToIndex.end() )
@ -394,6 +424,13 @@ void meshOctreeCreator::setRootCubeSizeAndRefParameters()
const label triI = facetsInSubset[i];
surfRefLevel_[triI] =
Foam::max(surfRefLevel_[triI], level);
surfRefThickness_[triI] =
Foam::max
(
surfRefThickness_[triI],
refinementThickness
);
}
}
}
@ -418,6 +455,9 @@ void meshOctreeCreator::createOctreeBoxes()
Info << "Refining boundary" << endl;
refineBoundary();
//- refine boxes intersected by surface meshes serving as refinement sources
refineBoxesIntersectingSurfaces();
//- perform automatic octree refinement
if( !Pstream::parRun() )
{

View file

@ -97,9 +97,6 @@ class meshOctreeCube
//- this is needed for parallel coarsening of the octree
//inline void reclaimDataFromChild(const label scI);
//- Disallow default bitwise copy construct
meshOctreeCube(const meshOctreeCube&);
//- Disallow copy construct from meshOctreeCubeBasic
meshOctreeCube(const meshOctreeCubeBasic&);
@ -110,6 +107,9 @@ public:
//- Default constructor
inline meshOctreeCube();
//- Copy construct
inline meshOctreeCube(const meshOctreeCube&);
//- Construct from coordinates
meshOctreeCube(const meshOctreeCubeCoordinates&);

View file

@ -45,6 +45,16 @@ inline meshOctreeCube::meshOctreeCube()
containedEdgesLabel_(-1)
{}
inline meshOctreeCube::meshOctreeCube(const meshOctreeCube& moc)
:
meshOctreeCubeBasic(moc.coordinates(), moc.cubeType(), moc.procNo()),
activeSlotPtr_(moc.activeSlotPtr_),
subCubesPtr_(moc.subCubesPtr_),
cubeLabel_(moc.cubeLabel_),
containedElementsLabel_(moc.containedElementsLabel_),
containedEdgesLabel_(moc.containedEdgesLabel_)
{}
inline const meshOctreeSlot* meshOctreeCube::slotPtr() const
{
return activeSlotPtr_;

View file

@ -46,9 +46,8 @@ void meshOctreeCube::leavesInBox
DynList<const meshOctreeCube*, 256>& leaves
) const
{
point min, max;
this->cubeBox(rootBox, min, max);
const boundBox cubeBox(min, max);
boundBox cubeBox;
this->cubeBox(rootBox, cubeBox.min(), cubeBox.max());
if( cubeBox.overlaps(searchingBox) )
{
@ -74,8 +73,10 @@ void meshOctreeCube::leavesInBox
else if( Pstream::parRun() )
{
meshOctreeCubeCoordinates cc = refineForPosition(scI);
cc.cubeBox(rootBox, min, max);
const boundBox bb(min, max);
boundBox bb;
cc.cubeBox(rootBox, bb.min(), bb.max());
if( bb.overlaps(searchingBox) )
leaves.append(this);
}

View file

@ -225,7 +225,7 @@ bool meshOctree::findNearestEdgePoint
DynList<const meshOctreeCube*, 256> neighbours;
const pointField& sp = surface_.points();
const pointField& sPoints = surface_.points();
const edgeLongList& edges = surface_.edges();
const VRWGraph& edgeFaces = surface_.edgeFacets();
@ -255,12 +255,14 @@ bool meshOctree::findNearestEdgePoint
forAll(ce, eI)
{
const label edgeI = ce[eI];
//- find if the edge is in correct patches
bool correctPatches(true);
forAllRow(edgeFaces, ce[eI], efI)
forAllRow(edgeFaces, edgeI, efI)
{
const label facetI = edgeFaces(ce[eI], efI);
const label facetI = edgeFaces(edgeI, efI);
if( !regions.contains(surface_[facetI].region()) )
{
@ -272,9 +274,10 @@ bool meshOctree::findNearestEdgePoint
if( !correctPatches )
continue;
const point s = sp[edges[ce[eI]].start()];
const point e = sp[edges[ce[eI]].end()];
const point np = help::nearestPointOnTheEdgeExact(s, e, p);
const edge& e = edges[edgeI];
const point& sp = sPoints[e.start()];
const point& ep = sPoints[e.end()];
const point np = help::nearestPointOnTheEdgeExact(sp, ep, p);
const scalar dSq = Foam::magSqr(np - p);
if( dSq < distSq )
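For reference, help::nearestPointOnTheEdgeExact(sp, ep, p) presumably returns the closest point on the segment [sp, ep]. A minimal standalone sketch of that closest-point-on-a-segment computation (plain C++ types and a hypothetical function name, not the cfMesh helper):

#include <algorithm>
#include <array>
#include <iostream>

using Vec = std::array<double, 3>;

static Vec sub(const Vec& a, const Vec& b) { return {a[0]-b[0], a[1]-b[1], a[2]-b[2]}; }
static double dot(const Vec& a, const Vec& b) { return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]; }

// Closest point to p on the segment [s, e]: project p onto the edge
// direction and clamp the parameter to [0, 1].
Vec nearestPointOnEdge(const Vec& s, const Vec& e, const Vec& p)
{
    const Vec d = sub(e, s);
    const double len2 = dot(d, d);
    double t = (len2 > 1e-30) ? dot(sub(p, s), d) / len2 : 0.0;
    t = std::min(1.0, std::max(0.0, t));
    return {s[0] + t*d[0], s[1] + t*d[1], s[2] + t*d[2]};
}

int main()
{
    const Vec np = nearestPointOnEdge({0,0,0}, {1,0,0}, {0.3, 2.0, 0.0});
    std::cout << np[0] << ' ' << np[1] << ' ' << np[2] << '\n';   // prints 0.3 0 0
    return 0;
}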

View file

@ -130,6 +130,16 @@ public:
const direction nLayers = 1
) const;
//- mark additional layers around the leaves selected for refinement
//- given on a box-by-box basis
//- returns the number of boxes selected for refinement
label markAdditionalLayers
(
List<direction>& refineBox,
labelList& nLayers,
List<direction>& targetRefLevel
) const;
//- refine leaves marked for refinement
//- hexRefinement is activated when it is required to refine all
//- sons of the same father, if a single son gets marked for refinement

View file

@ -28,6 +28,7 @@ Description
#include "meshOctreeModifier.H"
#include "triSurf.H"
#include "HashSet.H"
#include "helperFunctions.H"
# ifdef USE_OMP
#include <omp.h>
@ -163,6 +164,205 @@ void meshOctreeModifier::markAdditionalLayers
}
}
label meshOctreeModifier::markAdditionalLayers
(
List<direction>& refineBox,
labelList& nLayers,
List<direction>& targetRefLevel
) const
{
const FixedList<meshOctreeCubeCoordinates, 26>& rp =
octree_.regularityPositions_;
const LongList<meshOctreeCube*>& leaves = octree_.leaves_;
//- this is needed for parallel runs to reduce the length of messages
labelHashSet transferCoordinates;
FixedList<meshOctreeCube*, 26> neighbours;
labelList currentLayer(leaves.size());
forAll(targetRefLevel, leafI)
{
currentLayer[leafI] = targetRefLevel[leafI] ? 1 : 0;
}
bool changed;
label i(0), nMarked(0);
do
{
++i;
changed = false;
labelList newNLayers = nLayers;
labelList newCurrentLayer = currentLayer;
List<direction> newTargetRefLevel = targetRefLevel;
LongList<meshOctreeCubeCoordinates> processorChecks;
labelLongList processorNLayers;
labelLongList processorTargetLevels;
forAll(leaves, leafI)
{
if( currentLayer[leafI] != i )
continue;
const meshOctreeCube* oc = leaves[leafI];
forAll(rp, posI)
{
const meshOctreeCubeCoordinates cc
(
oc->coordinates() + rp[posI]
);
const label neiLabel = octree_.findLeafLabelForPosition(cc);
if( neiLabel > -1 )
{
neighbours[posI] = leaves[neiLabel];
}
else if( neiLabel == -1 )
{
neighbours[posI] = NULL;
}
else if( neiLabel == meshOctreeCubeBasic::OTHERPROC )
{
neighbours[posI] = NULL;
if( !transferCoordinates.found(leafI) )
{
processorChecks.append(oc->coordinates());
processorNLayers.append(nLayers[leafI]);
processorTargetLevels.append(targetRefLevel[leafI]);
transferCoordinates.insert(leafI);
}
}
}
forAll(neighbours, neiI)
{
const meshOctreeCube* nei = neighbours[neiI];
if( !nei ) continue;
if( !nei->isLeaf() ) continue;
if( i <= nLayers[leafI] )
{
newNLayers[nei->cubeLabel()] =
Foam::max(nLayers[nei->cubeLabel()], nLayers[leafI]);
newTargetRefLevel[nei->cubeLabel()] =
Foam::max
(
targetRefLevel[nei->cubeLabel()],
targetRefLevel[leafI]
);
changed = true;
newCurrentLayer[nei->cubeLabel()] = i+1;
}
}
}
if( octree_.neiProcs().size() )
{
std::map<label, LongList<meshOctreeCubeCoordinates> > eCoords;
std::map<label, labelLongList > eNLayers;
std::map<label, labelLongList > eTargetLevels;
forAll(octree_.neiProcs(), neiI)
{
const label neiProc = octree_.neiProcs()[neiI];
eCoords[neiProc] = processorChecks;
eNLayers[neiProc] = processorNLayers;
eTargetLevels[neiProc] = processorTargetLevels;
}
LongList<meshOctreeCubeCoordinates> receivedCoords;
help::exchangeMap(eCoords, receivedCoords);
labelLongList receivedNLayers;
help::exchangeMap(eNLayers, receivedNLayers);
labelLongList receivedTargetLevels;
help::exchangeMap(eTargetLevels, receivedTargetLevels);
if( receivedCoords.size() != receivedNLayers.size() )
FatalErrorIn
(
"label meshOctreeModifier::markAdditionalLayers("
"List<direction>&, labelList&, List<direction>&) const"
) << "Invalid list size" << abort(FatalError);
if( receivedCoords.size() != receivedTargetLevels.size() )
FatalErrorIn
(
"label meshOctreeModifier::markAdditionalLayers("
"List<direction>&, labelList&, List<direction>&) const"
) << "Invalid list size" << abort(FatalError);
//- check consistency with received cube coordinates
forAll(receivedCoords, ccI)
{
forAll(rp, posI)
{
const meshOctreeCubeCoordinates cc
(
receivedCoords[ccI] + rp[posI]
);
const meshOctreeCube* nei =
octree_.findCubeForPosition(cc);
if( !nei ) continue;
if( !nei->isLeaf() ) continue;
if( i <= receivedNLayers[ccI] )
{
newNLayers[nei->cubeLabel()] =
Foam::max
(
nLayers[nei->cubeLabel()],
receivedNLayers[ccI]
);
newTargetRefLevel[nei->cubeLabel()] =
Foam::max
(
targetRefLevel[nei->cubeLabel()],
direction(receivedTargetLevels[ccI])
);
changed = true;
newCurrentLayer[nei->cubeLabel()] = i+1;
}
}
}
}
nLayers.transfer(newNLayers);
currentLayer.transfer(newCurrentLayer);
targetRefLevel.transfer(newTargetRefLevel);
//- exchange information with all processors
reduce(changed, maxOp<bool>());
} while( changed );
//- update refine data
forAll(targetRefLevel, leafI)
{
if
(
!refineBox[leafI] &&
(targetRefLevel[leafI] > leaves[leafI]->level())
)
{
refineBox[leafI] = 1;
++nMarked;
}
}
return nMarked;
}
void meshOctreeModifier::refineSelectedBoxes
(
List<direction>& refineBox,
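The function above marches the marked region outwards front by front: leaves reached in iteration i push their nLayers and targetRefLevel onto neighbouring leaves until every seed has contributed its requested number of layers, with an extra coordinate exchange for leaves owned by other processors. A serial sketch of the same front-marching idea on a plain adjacency graph (standard C++ containers instead of the octree types; the reused name is for illustration only):

#include <algorithm>
#include <iostream>
#include <vector>

// Propagate refinement marks to nLayers[] additional layers of neighbours.
// adjacency[i] lists the neighbours of cell i; seeds have refine[i] != 0.
int markAdditionalLayers
(
    const std::vector<std::vector<int>>& adjacency,
    std::vector<int>& nLayers,
    std::vector<char>& refine
)
{
    const int n = static_cast<int>(adjacency.size());
    std::vector<int> currentLayer(n, 0);
    for (int i = 0; i < n; ++i)
        currentLayer[i] = refine[i] ? 1 : 0;

    int nMarked = 0;
    bool changed = true;
    for (int iter = 1; changed; ++iter)
    {
        changed = false;
        std::vector<int> newNLayers = nLayers;
        std::vector<int> newCurrent = currentLayer;

        for (int i = 0; i < n; ++i)
        {
            // only cells on the current front that still owe layers propagate
            if (currentLayer[i] != iter || iter > nLayers[i])
                continue;

            for (int nei : adjacency[i])
            {
                newNLayers[nei] = std::max(newNLayers[nei], nLayers[i]);
                if (!refine[nei]) { refine[nei] = 1; ++nMarked; }
                newCurrent[nei] = iter + 1;
                changed = true;
            }
        }

        nLayers.swap(newNLayers);
        currentLayer.swap(newCurrent);
    }

    return nMarked;
}

int main()
{
    // 1-D chain of 6 cells; cell 0 asks for 2 additional layers
    std::vector<std::vector<int>> adj{{1}, {0,2}, {1,3}, {2,4}, {3,5}, {4}};
    std::vector<int> nLayers{2, 0, 0, 0, 0, 0};
    std::vector<char> refine{1, 0, 0, 0, 0, 0};
    std::cout << markAdditionalLayers(adj, nLayers, refine) << " cells marked\n";   // 2
    return 0;
}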

View file

@ -40,7 +40,8 @@ defineRunTimeSelectionTable(objectRefinement, dictionary);
objectRefinement::objectRefinement()
:
name_(),
cellSize_()
cellSize_(),
refThickness_(0.0)
{}
@ -51,8 +52,11 @@ objectRefinement::objectRefinement
)
:
name_(name),
cellSize_(readScalar(dict.lookup("cellSize")))
cellSize_(readScalar(dict.lookup("cellSize"))),
refThickness_(0.0)
{
if( dict.found("refinementThickness") )
refThickness_ = readScalar(dict.lookup("refinementThickness"));
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
@ -62,101 +66,6 @@ objectRefinement::~objectRefinement()
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
/*
dictionary Foam::objectRefinement::dict(bool ignoreType) const
{
dictionary dict;
dict.add("name", name_);
// only write type for derived types
if (!ignoreType && type() != typeName_())
{
dict.add("type", type());
}
dict.add("origin", origin_);
dict.add("e1", e1());
dict.add("e3", e3());
return dict;
}
void objectRefinement::write(Ostream& os) const
{
os << type()
<< " origin: " << origin()
<< " e1: " << e1() << " e3: " << e3();
}
void objectRefinement::writeDict(Ostream& os, bool subDict) const
{
if (subDict)
{
os << indent << name_ << nl
<< indent << token::BEGIN_BLOCK << incrIndent << nl;
}
// only write type for derived types
if (type() != typeName_())
{
os.writeKeyword("type") << type() << token::END_STATEMENT << nl;
}
os.writeKeyword("origin") << origin_ << token::END_STATEMENT << nl;
os.writeKeyword("e1") << e1() << token::END_STATEMENT << nl;
os.writeKeyword("e3") << e3() << token::END_STATEMENT << nl;
if (subDict)
{
os << decrIndent << indent << token::END_BLOCK << endl;
}
}
void coordinateSystem::operator=(const dictionary& rhs)
{
if (debug)
{
Pout<< "coordinateSystem::operator=(const dictionary&) : "
<< "assign from " << rhs << endl;
}
// allow as embedded sub-dictionary "coordinateSystem"
const dictionary& dict =
(
rhs.found(typeName_())
? rhs.subDict(typeName_())
: rhs
);
// unspecified origin is (0 0 0)
if (dict.found("origin"))
{
dict.lookup("origin") >> origin_;
}
else
{
origin_ = vector::zero;
}
// specify via coordinateRotation
if (dict.found("coordinateRotation"))
{
autoPtr<coordinateRotation> cr =
coordinateRotation::New(dict.subDict("coordinateRotation"));
R_ = cr();
}
else
{
// no sub-dictionary - specify via axes
R_ = coordinateRotation(dict);
}
Rtr_ = R_.T();
}
*/
Ostream& operator<<(Ostream& os, const objectRefinement& obr)
{

View file

@ -62,6 +62,9 @@ class objectRefinement
//- cell size for this object
scalar cellSize_;
//- refinement thickness for this object
scalar refThickness_;
public:
// Runtime type information
@ -136,6 +139,18 @@ public:
return cellSize_;
}
//- set refinement thickness
void setRefinementThickness(const scalar refThickness)
{
refThickness_ = refThickness;
}
//- return refinement thickness for this object
scalar refinementThickness() const
{
return refThickness_;
}
//- Return as dictionary of entries
virtual dictionary dict(bool ignoreType = false) const = 0;

View file

@ -139,7 +139,9 @@ meshOptimizer::meshOptimizer(polyMeshGen& mesh)
:
mesh_(mesh),
vertexLocation_(mesh.points().size(), INSIDE),
msePtr_(NULL)
msePtr_(NULL),
enforceConstraints_(false),
badPointsSubsetName_()
{
const meshSurfaceEngine& mse = meshSurface();
const labelList& bPoints = mse.boundaryPoints();
@ -177,6 +179,15 @@ meshOptimizer::~meshOptimizer()
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void meshOptimizer::enforceConstraints(const word subsetName)
{
enforceConstraints_ = true;
badPointsSubsetName_ = subsetName;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //

View file

@ -70,6 +70,12 @@ class meshOptimizer
//- mesh surface
mutable meshSurfaceEngine* msePtr_;
//- enforce constraints
bool enforceConstraints_;
//- name of the subset containing tangled points
word badPointsSubsetName_;
// Private member functions
//- return mesh surface
const meshSurfaceEngine& meshSurface() const;
@ -199,6 +205,12 @@ public:
~meshOptimizer();
// Member Functions
//- Flag stopping the meshing process if it is not possible
//- to untangle the surface without sacrificing geometry constraints
//- Points which must be moved away from the required position are
//- stored into a point subset
void enforceConstraints(const word subsetName="badPoints");
//- smooth surface vertices
void optimizeSurface(const meshOctree&);

View file

@ -48,6 +48,9 @@ void meshOptimizer::optimizeSurface(const meshOctree& octree)
meshSurfaceEngine& mse = const_cast<meshSurfaceEngine&>(meshSurface());
meshSurfaceOptimizer surfaceOptimizer(mse, octree);
if( enforceConstraints_ )
surfaceOptimizer.enforceConstraints(badPointsSubsetName_);
surfaceOptimizer.optimizeSurface();
meshSurfaceMapper(mse, octree).mapVerticesOntoSurfacePatches();

View file

@ -25,6 +25,8 @@ Description
\*---------------------------------------------------------------------------*/
#include <stdexcept>
#include "demandDrivenData.H"
#include "meshOptimizer.H"
#include "polyMeshGenAddressing.H"
@ -154,7 +156,36 @@ void meshOptimizer::untangleMeshFV()
//- perform optimisation
if( nBadFaces == 0 )
{
break;
}
else if( enforceConstraints_ )
{
const label subsetId =
mesh_.addPointSubset(badPointsSubsetName_);
forAllConstIter(labelHashSet, badFaces, it)
{
const face& f = faces[it.key()];
forAll(f, pI)
mesh_.addPointToSubset(subsetId, f[pI]);
}
WarningIn
(
"void meshOptimizer::untangleMeshFV()"
) << "Writing mesh with " << badPointsSubsetName_
<< " subset. These points cannot be untangled"
<< " without sacrificing geometry constraints. Exitting.."
<< endl;
returnReduce(1, sumOp<label>());
throw std::logic_error
(
"void meshOptimizer::untangleMeshFV()"
"Cannot untangle mesh!!"
);
}
partTetMesh tetMesh(mesh_, badFaces, 0);
tetMeshOptimisation tmo(tetMesh);
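With enforceConstraints_ set, untangleMeshFV() no longer sacrifices the geometry constraints: the tangled vertices are collected into a point subset and a std::logic_error is thrown. A hedged usage sketch (it assumes untangleMeshFV() is reachable from the calling code and that a valid polyMeshGen has been built earlier in the workflow; neither is shown in this commit):

#include <iostream>
#include <stdexcept>

#include "meshOptimizer.H"
#include "polyMeshGen.H"

// Sketch only: "mesh" is assumed to be a valid mesh produced earlier
// in the cfMesh workflow.
void untangleWithConstraints(Foam::polyMeshGen& mesh)
{
    Foam::meshOptimizer optimizer(mesh);

    // vertices that cannot be untangled without violating the constraints
    // end up in this point subset
    optimizer.enforceConstraints("badPoints");

    try
    {
        optimizer.untangleMeshFV();
    }
    catch (const std::logic_error& err)
    {
        // the mesh carries the badPoints subset at this stage; inspect it
        std::cerr << "Untangling failed: " << err.what() << std::endl;
    }
}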

View file

@ -0,0 +1,251 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
\*---------------------------------------------------------------------------*/
#include "demandDrivenData.H"
#include "symmetryPlaneOptimisation.H"
#include "polyMeshGenAddressing.H"
#include "helperFunctions.H"
#include "polyMeshGenChecks.H"
#include "meshOptimizer.H"
// #define DEBUGSearch
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void symmetryPlaneOptimisation::detectSymmetryPlanes()
{
const PtrList<boundaryPatch>& boundaries = mesh_.boundaries();
const pointFieldPMG& points = mesh_.points();
const faceListPMG& faces = mesh_.faces();
symmetryPlanes_.clear();
typedef std::map<label, std::pair<vector, label> > mapType;
mapType centreSum, normalSum;
forAll(boundaries, patchI)
{
if( boundaries[patchI].patchType() == "symmetryPlane" )
{
std::pair<vector, label>& cs = centreSum[patchI];
cs = std::pair<vector, label>(vector::zero, 0);
std::pair<vector, label>& ns = normalSum[patchI];
ns = std::pair<vector, label>(vector::zero, 0);
const label start = boundaries[patchI].patchStart();
const label end = start + boundaries[patchI].patchSize();
for(label faceI=start;faceI<end;++faceI)
{
cs.first += faces[faceI].centre(points);
ns.first += faces[faceI].normal(points);
}
cs.second = ns.second = boundaries[patchI].patchSize();
}
}
if( Pstream::parRun() )
{
//- sum up all normals and centres of all processors
//- every symmetry plane patch must be present on all processors
forAllIter(mapType, centreSum, pIter)
{
std::pair<vector, label>& cs = pIter->second;
reduce(cs.second, sumOp<label>());
reduce(cs.first, sumOp<vector>());
std::pair<vector, label>& ns = normalSum[pIter->first];
reduce(ns.first, sumOp<vector>());
ns.second = cs.second;
}
}
//- create planes corresponding to each symmetry plane
forAllConstIter(mapType, centreSum, it)
{
const point c = it->second.first / it->second.second;
const std::pair<vector, label>& ns = normalSum[it->first];
const point n = ns.first / ns.second;
symmetryPlanes_.insert(std::make_pair(it->first, plane(c, n)));
}
}
void symmetryPlaneOptimisation::pointInPlanes(VRWGraph& pointInPlanes) const
{
const PtrList<boundaryPatch>& boundaries = mesh_.boundaries();
const pointFieldPMG& points = mesh_.points();
const faceListPMG& faces = mesh_.faces();
pointInPlanes.clear();
pointInPlanes.setSize(points.size());
forAll(boundaries, patchI)
{
if( boundaries[patchI].patchType() == "symmetryPlane" )
{
const label start = boundaries[patchI].patchStart();
const label end = start + boundaries[patchI].patchSize();
for(label faceI=start;faceI<end;++faceI)
{
const face& f = faces[faceI];
forAll(f, pI)
pointInPlanes.appendIfNotIn(f[pI], patchI);
}
}
}
if( Pstream::parRun() )
{
const Map<label>& globalToLocal =
mesh_.addressingData().globalToLocalPointAddressing();
const VRWGraph& pointAtProcs = mesh_.addressingData().pointAtProcs();
const DynList<label>& neiProcs =
mesh_.addressingData().pointNeiProcs();
std::map<label, labelLongList> exchangeData;
forAll(neiProcs, i)
exchangeData[neiProcs[i]].clear();
forAllConstIter(Map<label>, globalToLocal, it)
{
const label pointI = it();
if( pointInPlanes.sizeOfRow(pointI) == 0 )
continue;
forAllRow(pointAtProcs, pointI, i)
{
const label neiProc = pointAtProcs(pointI, i);
if( neiProc == Pstream::myProcNo() )
continue;
labelLongList& dataToSend = exchangeData[neiProc];
dataToSend.append(it.key());
dataToSend.append(pointInPlanes.sizeOfRow(pointI));
forAllRow(pointInPlanes, pointI, pipI)
dataToSend.append(pointInPlanes(pointI, pipI));
}
}
labelLongList receivedData;
help::exchangeMap(exchangeData, receivedData);
for(label counter=0;counter<receivedData.size();)
{
const label pointI = globalToLocal[receivedData[counter++]];
const label size = receivedData[counter++];
for(label i=0;i<size;++i)
pointInPlanes.appendIfNotIn(pointI, receivedData[counter++]);
}
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
// Construct from mesh
symmetryPlaneOptimisation::symmetryPlaneOptimisation(polyMeshGen& mesh)
:
mesh_(mesh)
{
detectSymmetryPlanes();
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
symmetryPlaneOptimisation::~symmetryPlaneOptimisation()
{}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void symmetryPlaneOptimisation::optimizeSymmetryPlanes()
{
pointFieldPMG& points = mesh_.points();
VRWGraph pointInPlane;
pointInPlanes(pointInPlane);
forAll(pointInPlane, pointI)
{
const label nPlanes = pointInPlane.sizeOfRow(pointI);
if( nPlanes > 3 )
{
WarningIn
(
"void symmetryPlaneOptimisation::optimizeSymmetryPlanes()"
) << "Point " << pointI << " is in more than three symmetry"
<< " planes. Cannot move it" << endl;
}
point& p = points[pointI];
vector disp(vector::zero);
for(label plI=0;plI<nPlanes;++plI)
{
//- point is in a plane
std::map<label, plane>::const_iterator it =
symmetryPlanes_.find(pointInPlane(pointI, 0));
const point newP = it->second.nearestPoint(points[pointI]);
disp += newP - p;
}
p += disp;
}
labelHashSet badFaces;
polyMeshGenChecks::checkFacePyramids(mesh_, false, VSMALL, &badFaces);
if( badFaces.size() )
{
WarningIn
(
"void symmetryPlaneOptimisation::optimizeSymmetryPlanes()"
) << "Bad quality or inverted faces found in the mesh" << endl;
const label badFacesId = mesh_.addFaceSubset("invalidFaces");
forAllConstIter(labelHashSet, badFaces, it)
mesh_.addFaceToSubset(badFacesId, it.key());
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //
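optimizeSymmetryPlanes() moves every point of a symmetryPlane patch to the nearest point of the averaged plane via plane::nearestPoint. A standalone sketch of that projection (plain C++ instead of the OpenFOAM plane class):

#include <array>
#include <iostream>

using Vec = std::array<double, 3>;

static double dot(const Vec& a, const Vec& b) { return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]; }

// Nearest point to p on the plane through c with unit normal n:
// subtract the out-of-plane component of (p - c).
Vec nearestPointOnPlane(const Vec& c, const Vec& n, const Vec& p)
{
    const double d = dot({p[0]-c[0], p[1]-c[1], p[2]-c[2]}, n);
    return {p[0] - d*n[0], p[1] - d*n[1], p[2] - d*n[2]};
}

int main()
{
    // symmetry plane z = 0; a point slightly off the plane snaps back onto it
    const Vec q = nearestPointOnPlane({0,0,0}, {0,0,1}, {1.0, 2.0, 0.01});
    std::cout << q[0] << ' ' << q[1] << ' ' << q[2] << '\n';   // prints 1 2 0
    return 0;
}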

View file

@ -0,0 +1,99 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Class
symmetryPlaneOptimisation
Description
Smoothing of symmetry planes in the mesh such that all points
are in the plane.
SourceFiles
symmetryPlaneOptimisation.C
\*---------------------------------------------------------------------------*/
#ifndef symmetryPlaneOptimisation_H
#define symmetryPlaneOptimisation_H
#include "DynList.H"
#include "polyMeshGenModifier.H"
#include "boundBox.H"
#include "labelLongList.H"
#include "boolList.H"
#include "plane.H"
#include <map>
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class symmetryPlaneOptimisation Declaration
\*---------------------------------------------------------------------------*/
class symmetryPlaneOptimisation
{
// Private data
//- reference to the mesh
polyMeshGen& mesh_;
//- symmetry planes in the mesh
std::map<label, plane> symmetryPlanes_;
// Private member functions
//- detect symmetry planes
void detectSymmetryPlanes();
//- point-planes addressing
void pointInPlanes(VRWGraph&) const;
public:
// Constructors
//- Construct from mesh
symmetryPlaneOptimisation(polyMeshGen& mesh);
// Destructor
~symmetryPlaneOptimisation();
// Member Functions
//- move vertices to the symmetry planes
void optimizeSymmetryPlanes();
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View file

@ -126,7 +126,9 @@ meshSurfaceOptimizer::meshSurfaceOptimizer
vertexType_(surface.boundaryPoints().size()),
partitionerPtr_(new meshSurfacePartitioner(surface)),
deletePartitioner_(true),
triMeshPtr_(NULL)
triMeshPtr_(NULL),
enforceConstraints_(false),
badPointsSubsetName_()
{
classifySurfaceVertices();
}
@ -142,7 +144,9 @@ meshSurfaceOptimizer::meshSurfaceOptimizer
vertexType_(surfaceEngine_.boundaryPoints().size()),
partitionerPtr_(&partitioner),
deletePartitioner_(false),
triMeshPtr_(NULL)
triMeshPtr_(NULL),
enforceConstraints_(false),
badPointsSubsetName_()
{
classifySurfaceVertices();
}
@ -159,6 +163,12 @@ meshSurfaceOptimizer::~meshSurfaceOptimizer()
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void meshSurfaceOptimizer::enforceConstraints(const word subsetName)
{
enforceConstraints_ = true;
badPointsSubsetName_ = subsetName;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View file

@ -79,6 +79,12 @@ class meshSurfaceOptimizer
//- mesh of surface triangles needed for some smoothers
mutable partTriMesh* triMeshPtr_;
//- enforce constraints
bool enforceConstraints_;
//- name of the subset contaning tangled points
word badPointsSubsetName_;
// Private member functions
//- classify surface vertices as PARTITION, EDGE, CORNER
void classifySurfaceVertices();
@ -108,18 +114,6 @@ class meshSurfaceOptimizer
DynList<triFace>& trias
) const;
//- transform into a 2D space in plane for parallel boundaries
inline bool transformIntoPlaneParallel
(
const label bpI,
const plane& pl,
const std::map<label, DynList<parTriFace> >& m,
vector& vecX,
vector& vecY,
DynList<point>& pts,
DynList<triFace>& trias
) const;
//- new position of a node after laplacian smoothing
//- the position is the average of neighbouring vertex positions
inline point newPositionLaplacian
@ -225,12 +219,6 @@ class meshSurfaceOptimizer
const bool transformIntoPlane = true
);
//- smooth nodes at processor boundaries using surface optimizer
void nodeDisplacementSurfaceOptimizerParallel
(
const labelLongList& nodesToSmooth
);
//- smooth edge nodes at processor boundaries
void edgeNodeDisplacementParallel
(
@ -271,6 +259,12 @@ public:
~meshSurfaceOptimizer();
// Member Functions
//- Flag stopping the meshing process if it is not possible
//- to untangle the surface without sacrificing geometry constraints
//- Points which must be moved away from the required position are
//- stored into a point subset
void enforceConstraints(const word subsetName="badPoints");
//- runs a surface smoother on the selected boundary points
bool untangleSurface
(

View file

@ -90,11 +90,14 @@ inline bool meshSurfaceOptimizer::transformIntoPlane
) const
{
# ifdef DEBUGSmooth
Info << "Transforming boundary node " << bpI << endl;
Pout << "Transforming boundary node " << bpI << endl;
# endif
const partTriMesh& triMesh = this->triMesh();
const label triPointI = triMesh.meshSurfacePointLabelInTriMesh()[bpI];
if( triPointI < 0 )
return false;
partTriMeshSimplex simplex(triMesh, triPointI);
const DynList<point, 32>& sPts = simplex.pts();
@ -124,8 +127,8 @@ inline bool meshSurfaceOptimizer::transformIntoPlane
trias = simplex.triangles();
# ifdef DEBUGSmooth
Info << "VecX " << vecX << endl;
Info << "vecY " << vecY << endl;
Pout << "VecX " << vecX << endl;
Pout << "vecY " << vecY << endl;
# endif
//- transform the vertices
@ -180,135 +183,6 @@ inline bool meshSurfaceOptimizer::transformIntoPlane
return found;
}
inline bool meshSurfaceOptimizer::transformIntoPlaneParallel
(
const label bpI,
const plane& pl,
const std::map<label, DynList<parTriFace> >& m,
vector& vecX,
vector& vecY,
DynList<point>& pts,
DynList<triFace>& trias
) const
{
/*
# ifdef DEBUGSmooth
Info << "Transforming boundary node at parallel interface " << bpI << endl;
# endif
const pointFieldPMG& points = surfaceEngine_.points();
const labelList& bPoints = surfaceEngine_.boundaryPoints();
const labelList& globalPointLabel =
surfaceEngine_.globalBoundaryPointLabel();
std::map<label, DynList<parTriFace> >::const_iterator
pIter = m.find(globalPointLabel[bpI]);
if( pIter == m.end() )
{
FatalErrorIn
(
"(const label, const plane&,"
" const std::map<label, DynList<parTriFace> >&,"
" vector&, vector&, DynList<point>&, DynList<triFace>&) const"
) << " Cannot find node " << globalPointLabel[bpI]
<< " in the map" << abort(FatalError);
}
const DynList<parTriFace>& pFcs = pIter->second;
# ifdef DEBUGSmooth
if( globalPointLabel[bpI] != pFcs[0].globalLabelOfPoint(0) )
FatalError << "Wrong points supplied!" << abort(FatalError);
Info << "Triangles containing this node are " << pFcs << endl;
# endif
//- create vecX and vecY
const point& p = points[bPoints[bpI]];
pts.setSize(0);
bool found(false);
forAll(pFcs, tI)
{
const point sp = pl.nearestPoint(pFcs[tI].trianglePoints().centre());
const scalar d = mag(sp - p);
if( d > VSMALL )
{
vecX = sp - p;
vecX /= d;
vecY = pl.normal() ^ vecX;
vecY /= mag(vecY);
found = true;
break;
}
}
if( !found )
return false;
# ifdef DEBUGSmooth
Info << "VecX " << vecX << endl;
Info << "vecY " << vecY << endl;
# endif
//- transform the vertices
//label counter(0);
Map<label> pointMapping;
trias.setSize(pFcs.size());
//- tranfer triangle from other proc first
forAll(pFcs, triI)
{
const parTriFace& tri = pFcs[triI];
triFace& tf = trias[triI];
for(label pI=0;pI<3;++pI)
{
const Map<label>::iterator fnd =
pointMapping.find(tri.globalLabelOfPoint(pI));
if( fnd != pointMapping.end() )
{
tf[pI] = fnd();
}
else
{
point tp;
if( pI == 0 )
{
tp = tri.trianglePoints().a();
}
else if( pI == 1 )
{
tp = tri.trianglePoints().b();
}
else
{
tp = tri.trianglePoints().c();
}
const point pt
(
((tp - p) & vecX),
((tp - p) & vecY),
0.0
);
tf[pI] = pts.size();
pointMapping.insert(tri.globalLabelOfPoint(pI), pts.size());
pts.append(pt);
}
}
}
return found;
*/
return false;
}
inline point meshSurfaceOptimizer::newPositionLaplacian
(
const label bpI,
@ -462,9 +336,9 @@ inline point meshSurfaceOptimizer::newPositionSurfaceOptimizer
const labelList& bPoints = surfaceEngine_.boundaryPoints();
# ifdef DEBUGSmooth
Info << "Smoothing boundary node " << bpI << endl;
Info << "Node label in the mesh is " << bPoints[bpI] << endl;
Info << "Point coordinates " << points[bPoints[bpI]] << endl;
Pout << "Smoothing boundary node " << bpI << endl;
Pout << "Node label in the mesh is " << bPoints[bpI] << endl;
Pout << "Point coordinates " << points[bPoints[bpI]] << endl;
# endif
//- project vertices onto the plane
@ -480,7 +354,7 @@ inline point meshSurfaceOptimizer::newPositionSurfaceOptimizer
bool success = this->transformIntoPlane(bpI, pl, vecX, vecY, pts, trias);
if( !success )
{
Warning << "Cannot transform to plane" << endl;
//Warning << "Cannot transform to plane" << endl;
return points[bPoints[bpI]];
}
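transformIntoPlane builds a local 2D frame in the tangent plane: vecX points towards the in-plane projection of a neighbouring simplex point, vecY = n ^ vecX, and each point is reduced to ((tp - p) & vecX, (tp - p) & vecY). A minimal standalone illustration of that projection (plain C++ types, not the DynList/plane machinery of the class above):

#include <array>
#include <cmath>
#include <iostream>

using Vec = std::array<double, 3>;

static Vec sub(const Vec& a, const Vec& b) { return {a[0]-b[0], a[1]-b[1], a[2]-b[2]}; }
static double dot(const Vec& a, const Vec& b) { return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]; }
static Vec cross(const Vec& a, const Vec& b)
{
    return {a[1]*b[2]-a[2]*b[1], a[2]*b[0]-a[0]*b[2], a[0]*b[1]-a[1]*b[0]};
}
static Vec normalise(const Vec& a)
{
    const double m = std::sqrt(dot(a, a)) + 1e-30;
    return {a[0]/m, a[1]/m, a[2]/m};
}

int main()
{
    const Vec origin{0, 0, 0};          // the smoothed vertex
    const Vec normal{0, 0, 1};          // unit point normal defining the plane
    const Vec reference{1, 0, 0.2};     // a neighbouring point not at the origin

    // in-plane basis: x towards the projected reference point, y = n ^ x
    const double h = dot(sub(reference, origin), normal);
    const Vec refInPlane = sub(reference, {h*normal[0], h*normal[1], h*normal[2]});
    const Vec vecX = normalise(sub(refInPlane, origin));
    const Vec vecY = normalise(cross(normal, vecX));

    // 2D coordinates of a 3D point relative to the vertex
    const Vec p{0.5, 0.5, 0.1};
    std::cout << dot(sub(p, origin), vecX) << ' '
              << dot(sub(p, origin), vecY) << '\n';   // prints 0.5 0.5
    return 0;
}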

View file

@ -279,85 +279,6 @@ void meshSurfaceOptimizer::nodeDisplacementLaplacianFCParallel
}
}
void meshSurfaceOptimizer::nodeDisplacementSurfaceOptimizerParallel
(
const labelLongList& nodesToSmooth
)
{
if( returnReduce(nodesToSmooth.size(), sumOp<label>()) == 0 )
return;
meshSurfaceEngineModifier surfaceModifier(surfaceEngine_);
//- exchange data with other processors
std::map<label, DynList<parTriFace> > m;
exchangeData(nodesToSmooth, m);
const pointField& points = surfaceEngine_.points();
const labelList& bPoints = surfaceEngine_.boundaryPoints();
const vectorField& pNormals = surfaceEngine_.pointNormals();
//- perform smoothing
pointField newPositions(nodesToSmooth.size());
# ifdef USE_OMP
# pragma omp parallel for schedule(dynamic, 10)
# endif
forAll(nodesToSmooth, pI)
{
const label bpI = nodesToSmooth[pI];
if( magSqr(pNormals[bpI]) < VSMALL )
{
newPositions[pI] = points[bPoints[bpI]];
continue;
}
plane pl(points[bPoints[bpI]], pNormals[bpI]);
point vecX, vecY;
DynList<point> pts;
DynList<triFace> trias;
if( !transformIntoPlaneParallel(bpI, pl, m, vecX, vecY, pts, trias) )
{
newPositions[pI] = points[bPoints[bpI]];
continue;
}
surfaceOptimizer so(pts, trias);
point newPoint = so.optimizePoint(0.001);
const point newP
(
points[bPoints[bpI]] +
vecX * newPoint.x() +
vecY * newPoint.y()
);
if( help::isnan(newP) || help::isinf(newP) )
{
newPositions[pI] = points[bPoints[bpI]];
}
else
{
newPositions[pI] = newP;
}
}
# ifdef USE_OMP
# pragma omp parallel for schedule(dynamic, 50)
# endif
forAll(newPositions, pI)
surfaceModifier.moveBoundaryVertexNoUpdate
(
nodesToSmooth[pI],
newPositions[pI]
);
//- make sure that moved points have the same coordinates on all processors
surfaceModifier.syncVerticesAtParallelBoundaries(nodesToSmooth);
}
void meshSurfaceOptimizer::edgeNodeDisplacementParallel
(
const labelLongList& nodesToSmooth

View file

@ -40,6 +40,7 @@ Description
#include "FIFOStack.H"
#include <map>
#include <stdexcept>
# ifdef USE_OMP
#include <omp.h>
@ -282,7 +283,10 @@ void meshSurfaceOptimizer::smoothSurfaceOptimizer
const labelLongList& selectedPoints
)
{
//- create partTriMesh if it is not yet present
this->triMesh();
//- update coordinates of the triangulation
updateTriMesh(selectedPoints);
pointField newPositions(selectedPoints.size());
@ -370,7 +374,40 @@ bool meshSurfaceOptimizer::untangleSurface
nInvertedTria =
findInvertedVertices(smoothVertex, nAdditionalLayers);
if( nInvertedTria == 0 ) break;
if( nInvertedTria == 0 )
{
break;
}
else if( enforceConstraints_ && !remapVertex )
{
polyMeshGen& mesh =
const_cast<polyMeshGen&>(surfaceEngine_.mesh());
const label subsetId =
mesh.addPointSubset(badPointsSubsetName_);
forAll(smoothVertex, bpI)
if( smoothVertex[bpI] )
mesh.addPointToSubset(subsetId, bPoints[bpI]);
WarningIn
(
"bool meshSurfaceOptimizer::untangleSurface"
"(const labelLongList&, const label)"
) << "Writing mesh with " << badPointsSubsetName_
<< " subset. These points cannot be untangled"
<< " without sacrificing geometry constraints. Exitting.."
<< endl;
returnReduce(1, sumOp<label>());
throw std::logic_error
(
"bool meshSurfaceOptimizer::untangleSurface"
"(const labelLongList&, const label)"
"Cannot untangle mesh!!"
);
}
//- find the min number of inverted points and
//- add the last number to the stack
@ -488,7 +525,7 @@ bool meshSurfaceOptimizer::untangleSurface
//- update normals and other geometric data
surfaceModifier.updateGeometry(movedPoints);
if( nGlobalIter > 3 )
if( nGlobalIter > 5 )
remapVertex = false;
}
@ -700,7 +737,7 @@ void meshSurfaceOptimizer::optimizeSurface2D(const label nIterations)
{
Info << "." << flush;
smoothLaplacianFC(movedPoints, procBndPoints, false);
smoothSurfaceOptimizer(movedPoints);
//- move the points which are not at minimum z coordinate
mesh2DEngine.correctPoints();

View file

@ -26,8 +26,6 @@ Description
\*---------------------------------------------------------------------------*/
#include "error.H"
#include "objectRegistry.H"
#include "Time.H"
#include "polyMeshGenModifier.H"
#include "edgeExtractor.H"
#include "meshSurfaceEngine.H"
@ -1778,7 +1776,7 @@ bool edgeExtractor::checkFacePatchesTopology()
}
}
//- eavluate the new situation and ensure that no oscillation occur
//- evaluate the new situation and ensure that no oscillation occur
reduce(nCorrected, sumOp<label>());
if( nCorrected )
{
@ -1808,7 +1806,7 @@ bool edgeExtractor::checkFacePatchesTopology()
facePatch_.transfer(newBoundaryPatches);
}
} while( nCorrected != 0 && (nIter++ < 30) );
} while( nCorrected != 0 && (nIter++ < 3) );
return changed;
}
@ -2089,7 +2087,7 @@ bool edgeExtractor::checkFacePatchesGeometry()
);
//- stop after a certain number of iterations
if( iter++ > 20 )
if( iter++ > 3 )
break;
//- check if there exist any inverted faces
@ -2204,7 +2202,7 @@ bool edgeExtractor::checkFacePatchesGeometry()
//- compare face patches before and after
//- disallow modification which may trigger oscillating behaviour
labelHashSet changedFaces;
labelLongList changedFaces;
forAll(newBoundaryPatches, bfI)
{
if( newBoundaryPatches[bfI] != facePatch_[bfI] )
@ -2214,7 +2212,7 @@ bool edgeExtractor::checkFacePatchesGeometry()
newBoundaryPatches[bfI] = patchI;
if( patchI != facePatch_[bfI] )
changedFaces.insert(bfI);
changedFaces.append(bfI);
}
}
@ -2426,11 +2424,6 @@ void edgeExtractor::extractEdges()
Info << "No geometrical adjustment was needed" << endl;
}
// updateMeshPatches();
// mesh_.write();
// returnReduce(1, sumOp<label>());
// ::exit(0);
# ifdef DEBUGEdgeExtractor
const triSurf* sPtr = surfaceWithPatches();
sPtr->writeSurface("finalDistributionOfPatches.stl");

View file

@ -38,8 +38,6 @@ SourceFiles
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#include "objectRegistry.H"
#include "Time.H"
#include "labelList.H"
#include "labelLongList.H"
#include "VRWGraph.H"

View file

@ -256,7 +256,7 @@ void edgeExtractor::faceEvaluator::neiPatchesOverEdges
neiPatches[feI] = fPatches[nei];
}
else if( edgeFaces.sizeOfRow(beI) == 1 )
else if( Pstream::parRun() && (edgeFaces.sizeOfRow(beI) == 1) )
{
neiPatches[feI] = otherFacePatch[beI];
}
@ -422,8 +422,8 @@ label edgeExtractor::faceEvaluator::bestPatchAfterModification
forAll(neiFaces, eI)
{
const label origPatchI = extractor_.facePatch_[neiFaces[eI]];
const label newPatchI = (*newBoundaryPatchesPtr_)[neiFaces[eI]];
const label origPatchI = oldNeiPatches[eI];
const label newPatchI = newNeiPatches[eI];
if( neiFaces[eI] > bfI )
{

View file

@ -37,8 +37,6 @@ SourceFiles
#ifndef meshSurfaceCheckEdgeTypes_H
#define meshSurfaceCheckEdgeTypes_H
#include "objectRegistry.H"
#include "Time.H"
#include "polyMeshGenModifier.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View file

@ -37,8 +37,6 @@ SourceFiles
#ifndef meshSurfaceCheckInvertedVertices_H
#define meshSurfaceCheckInvertedVertices_H
#include "objectRegistry.H"
#include "Time.H"
#include "polyMeshGenModifier.H"
#include "HashSet.H"

View file

@ -35,8 +35,6 @@ SourceFiles
#ifndef meshSurfaceEngine_H
#define meshSurfaceEngine_H
#include "objectRegistry.H"
#include "Time.H"
#include "polyMeshGenModifier.H"
#include "SubList.H"
#include "boolList.H"

View file

@ -105,7 +105,7 @@ class meshSurfaceMapper
void findMappingDistance
(
const labelLongList& nodesToMap,
std::map<label, scalar>& mappingDistance
scalarList& mappingDistance
) const;
// Private member functions needed for parallel execution

View file

@ -52,7 +52,7 @@ namespace Foam
void meshSurfaceMapper::findMappingDistance
(
const labelLongList& nodesToMap,
std::map<label, scalar>& mappingDistance
scalarList& mappingDistance
) const
{
const vectorField& faceCentres = surfaceEngine_.faceCentres();
@ -61,23 +61,25 @@ void meshSurfaceMapper::findMappingDistance
const pointFieldPMG& points = surfaceEngine_.points();
//- generate search distance for corner nodes
mappingDistance.clear();
mappingDistance.setSize(nodesToMap.size());
# ifdef USE_OMP
# pragma omp parallel for schedule(dynamic, 50)
# endif
forAll(nodesToMap, i)
{
const label bpI = nodesToMap[i];
mappingDistance.insert(std::make_pair(bpI, 0.0));
std::map<label, scalar>::iterator mIter = mappingDistance.find(bpI);
mappingDistance[i] = 0.0;
const point& p = points[bPoints[bpI]];
forAllRow(pFaces, bpI, pfI)
{
const scalar d = magSqr(faceCentres[pFaces(bpI, pfI)] - p);
mIter->second = Foam::max(mIter->second, d);
mappingDistance[i] = Foam::max(mappingDistance[i], d);
}
//- safety factor
mIter->second *= 4.0;
mappingDistance[i] *= 4.0;
}
if( Pstream::parRun() )
@ -87,8 +89,6 @@ void meshSurfaceMapper::findMappingDistance
const VRWGraph& bpAtProcs = surfaceEngine_.bpAtProcs();
const labelList& globalPointLabel =
surfaceEngine_.globalBoundaryPointLabel();
const Map<label>& globalToLocal =
surfaceEngine_.globalToLocalBndPointAddressing();
//- create the map for exchanging data
std::map<label, DynList<labelledScalar> > exchangeData;
@ -96,12 +96,18 @@ void meshSurfaceMapper::findMappingDistance
forAll(neiProcs, i)
exchangeData.insert
(
std::make_pair(neiProcs[i], DynList<labelledScalar>()));
std::make_pair(neiProcs[i], DynList<labelledScalar>())
);
Map<label> globalToLocal;
forAll(nodesToMap, nI)
{
const label bpI = nodesToMap[nI];
if( bpAtProcs.sizeOfRow(bpI) != 0 )
globalToLocal.insert(globalPointLabel[bpI], nI);
forAllRow(bpAtProcs, bpI, i)
{
const label neiProc = bpAtProcs(bpI, i);
@ -110,7 +116,7 @@ void meshSurfaceMapper::findMappingDistance
exchangeData[neiProc].append
(
labelledScalar(globalPointLabel[bpI], mappingDistance[bpI])
labelledScalar(globalPointLabel[bpI], mappingDistance[nI])
);
}
}
@ -124,11 +130,10 @@ void meshSurfaceMapper::findMappingDistance
{
const labelledScalar& ls = receivedData[i];
const label bpI = globalToLocal[ls.scalarLabel()];
const label nI = globalToLocal[ls.scalarLabel()];
//- choose the maximum value for the mapping distance
std::map<label, scalar>::iterator mIter = mappingDistance.find(bpI);
mIter->second = Foam::max(mIter->second, ls.value());
mappingDistance[nI] = Foam::max(mappingDistance[nI], ls.value());
}
}
}
@ -147,7 +152,8 @@ void meshSurfaceMapper::mapCorners(const labelLongList& nodesToMap)
const pointFieldPMG& points = surfaceEngine_.points();
const labelList& bPoints = surfaceEngine_.boundaryPoints();
std::map<label, scalar> mappingDistance;
//std::map<label, scalar> mappingDistance;
scalarList mappingDistance;
findMappingDistance(nodesToMap, mappingDistance);
//- for every corner in the mesh surface find the nearest corner in the
@ -155,7 +161,7 @@ void meshSurfaceMapper::mapCorners(const labelLongList& nodesToMap)
meshSurfaceEngineModifier sMod(surfaceEngine_);
# ifdef USE_OMP
# pragma omp parallel for if( nodesToMap.size() > 10 )
# pragma omp parallel for schedule(dynamic, 50)
# endif
forAll(nodesToMap, cornerI)
{
@ -168,7 +174,7 @@ void meshSurfaceMapper::mapCorners(const labelLongList& nodesToMap)
<< abort(FatalError);
const point& p = points[bPoints[bpI]];
const scalar maxDist = mappingDistance[bpI];
const scalar maxDist = mappingDistance[cornerI];
//- find the nearest position to the given point patches
const DynList<label> patches = pPatches[bpI];
@ -205,7 +211,7 @@ void meshSurfaceMapper::mapCorners(const labelLongList& nodesToMap)
distSqApprox = magSqr(mapPointApprox - p);
//- find the nearest triSurface corner for the given corner
scalar distSq(mappingDistance[bpI]);
scalar distSq(mappingDistance[cornerI]);
point mapPoint(p);
forAll(surfCorners, scI)
{
@ -254,7 +260,7 @@ void meshSurfaceMapper::mapEdgeNodes(const labelLongList& nodesToMap)
const VRWGraph& pPatches = mPart.pointPatches();
//- find mapping distance for selected vertices
std::map<label, scalar> mappingDistance;
scalarList mappingDistance;
findMappingDistance(nodesToMap, mappingDistance);
const VRWGraph* bpAtProcsPtr(NULL);
@ -265,10 +271,9 @@ void meshSurfaceMapper::mapEdgeNodes(const labelLongList& nodesToMap)
meshSurfaceEngineModifier sMod(surfaceEngine_);
//- map point to the nearest vertex on the triSurface
//- map point to the nearest vertex on the surface mesh
# ifdef USE_OMP
# pragma omp parallel for shared(parallelBndNodes) \
if( nodesToMap.size() > 100 )
# pragma omp parallel for schedule(dynamic, 50)
# endif
forAll(nodesToMap, i)
{
@ -276,9 +281,9 @@ void meshSurfaceMapper::mapEdgeNodes(const labelLongList& nodesToMap)
const point& p = points[bPoints[bpI]];
//- find patches at this edge point
DynList<label> patches = pPatches[bpI];
const DynList<label> patches = pPatches[bpI];
const scalar maxDist = mappingDistance[bpI];
const scalar maxDist = mappingDistance[i];
//- find approximate position of the vertex on the edge
point mapPointApprox(p);
@ -312,8 +317,8 @@ void meshSurfaceMapper::mapEdgeNodes(const labelLongList& nodesToMap)
distSqApprox = magSqr(mapPointApprox - p);
//- find the nearest vertex on the triSurface feature edge
point mapPoint;
scalar distSq;
point mapPoint(mapPointApprox);
scalar distSq(distSqApprox);
label nse;
meshOctree_.findNearestEdgePoint(mapPoint, distSq, nse, p, patches);
@ -330,7 +335,7 @@ void meshSurfaceMapper::mapEdgeNodes(const labelLongList& nodesToMap)
//- this indicates possible problems
//- reduce the mapping distance
const scalar f = Foam::sqrt(maxDist / distSq);
distSq = mappingDistance[bpI];
distSq = mappingDistance[i];
mapPoint = f * (mapPoint - p) + p;
}
@ -342,16 +347,18 @@ void meshSurfaceMapper::mapEdgeNodes(const labelLongList& nodesToMap)
# ifdef USE_OMP
# pragma omp critical
# endif
parallelBndNodes.append
(
parMapperHelper
{
parallelBndNodes.append
(
mapPoint,
distSq,
bpI,
-1
)
);
parMapperHelper
(
mapPoint,
distSq,
bpI,
-1
)
);
}
}
}
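findMappingDistance now fills a scalarList indexed by the position in nodesToMap: per node, the maximum squared distance to the attached boundary face centres, inflated by a safety factor of 4, with processor-wise maxima exchanged afterwards. A serial sketch of the per-node part (standard containers and illustrative names, not the surfaceEngine addressing):

#include <algorithm>
#include <array>
#include <iostream>
#include <vector>

using Vec = std::array<double, 3>;

static double magSqr(const Vec& a, const Vec& b)
{
    const double dx = a[0]-b[0], dy = a[1]-b[1], dz = a[2]-b[2];
    return dx*dx + dy*dy + dz*dz;
}

// Search range per node: the largest squared distance to the centres of the
// faces attached to it, multiplied by the safety factor of 4 used above.
std::vector<double> findMappingDistance
(
    const std::vector<Vec>& nodes,
    const std::vector<std::vector<Vec>>& attachedFaceCentres
)
{
    std::vector<double> dist(nodes.size(), 0.0);
    for (std::size_t i = 0; i < nodes.size(); ++i)
    {
        for (const Vec& c : attachedFaceCentres[i])
            dist[i] = std::max(dist[i], magSqr(c, nodes[i]));
        dist[i] *= 4.0;
    }
    return dist;
}

int main()
{
    const std::vector<Vec> nodes{{0, 0, 0}};
    const std::vector<std::vector<Vec>> centres{{{0.5, 0, 0}, {0, 1, 0}}};
    std::cout << findMappingDistance(nodes, centres)[0] << '\n';   // 4 * 1 = 4
    return 0;
}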

View file

@ -366,16 +366,18 @@ void meshSurfaceMapper::mapVerticesOntoSurfacePatches
# ifdef USE_OMP
# pragma omp critical
# endif
parallelBndNodes.append
(
parMapperHelper
{
parallelBndNodes.append
(
mapPoint,
dSq,
bpI,
-1
)
);
parMapperHelper
(
mapPoint,
dSq,
bpI,
-1
)
);
}
}
# ifdef DEBUGMapping

View file

@ -80,7 +80,7 @@ void meshSurfaceMapper2D::findMappingDistance
}
//- safety factor
mIter->second *= 4.0;
mIter->second *= 16.0;
}
if( Pstream::parRun() )
@ -420,7 +420,7 @@ void meshSurfaceMapper2D::mapCorners(const labelLongList& edgesToMap)
scalar distSqApprox;
label iter(0);
while( iter++ < 20 )
while( iter++ < 40 )
{
point newP(vector::zero);
forAll(ePatches, epI)

View file

@ -46,19 +46,6 @@ void renameBoundaryPatches::calculateNewBoundary()
const dictionary& dict = meshDict_.subDict("renameBoundary");
std::map<word, label> patchToLabel;
forAll(mesh_.boundaries(), patchI)
{
patchToLabel.insert
(
std::pair<word, label>
(
mesh_.boundaries()[patchI].patchName(),
patchI
)
);
}
labelList patchToNew(mesh_.boundaries().size(), -1);
wordList newPatchNames(patchToNew.size());
@ -100,9 +87,11 @@ void renameBoundaryPatches::calculateNewBoundary()
{
const word patchName = patchesToRename[patchI].keyword();
if( patchToLabel.find(patchName) == patchToLabel.end() )
labelList matchedPatches = mesh_.findPatches(patchName);
if(matchedPatches.empty())
{
Info << "Patch " << patchName << " does not exist!!" << endl;
Warning << "No matches for " << patchName << " found!!" << endl;
continue;
}
@ -116,32 +105,36 @@ void renameBoundaryPatches::calculateNewBoundary()
const dictionary pDict = patchesToRename[patchI].dict();
word newName(patchName);
if( pDict.found("newName") )
newName = word(pDict.lookup("newName"));
if( newNameToPos.find(newName) != newNameToPos.end() )
forAll(matchedPatches, matchI)
{
//- patch with the same name already exists
patchToNew[patchToLabel[patchName]] = newNameToPos[newName];
continue;
}
word newName(mesh_.getPatchName(matchedPatches[matchI]));
//- add a new patch
newNameToPos.insert(std::pair<word, label>(newName, newPatchI));
newPatchNames[newPatchI] = newName;
if( pDict.found("type") )
{
const word newType(pDict.lookup("type"));
newPatchTypes[newPatchI] = newType;
}
else
{
newPatchTypes[newPatchI] = "wall";
}
if( pDict.found("newName") )
newName = word(pDict.lookup("newName"));
patchToNew[patchToLabel[patchName]] = newPatchI;
++newPatchI;
if( newNameToPos.find(newName) != newNameToPos.end() )
{
//- patch with the same name already exists
patchToNew[matchedPatches[matchI]] = newNameToPos[newName];
continue;
}
//- add a new patch
newNameToPos.insert(std::pair<word, label>(newName, newPatchI));
newPatchNames[newPatchI] = newName;
if( pDict.found("type") )
{
const word newType(pDict.lookup("type"));
newPatchTypes[newPatchI] = newType;
}
else
{
newPatchTypes[newPatchI] = "wall";
}
patchToNew[matchedPatches[matchI]] = newPatchI;
++newPatchI;
}
}
}

View file

@ -0,0 +1,171 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
\*---------------------------------------------------------------------------*/
#include "triSurface2DCheck.H"
#include "triSurfModifier.H"
#include "boundBox.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void triSurface2DCheck::createCovarianceMatrix()
{
const vectorField& fNormals = surf_.facetNormals();
//- find the normal vector of the best-fitting plane
covarianceMatrix_ = symmTensor::zero;
forAll(fNormals, tI)
{
vector fn = fNormals[tI];
fn /= (mag(fn) + VSMALL);
covarianceMatrix_ += symm(fn * fn);
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
triSurface2DCheck::triSurface2DCheck(const triSurf& surface)
:
surf_(surface),
covarianceMatrix_(symmTensor::zero)
{
createCovarianceMatrix();
}
triSurface2DCheck::~triSurface2DCheck()
{}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
bool triSurface2DCheck::is2DSurface() const
{
const pointField& points = surf_.points();
const vector eigenVal = eigenValues(covarianceMatrix_);
//- the smallest eigenvalue must be zero in case all face normals
//- lie in a plane
if( mag(eigenVal[0]) > SMALL )
{
WarningIn("bool triSurface2DCheck::is2DSurface() const")
<< "Surface mesh is in 3D space!"
<< " This may result in an invalid mesh!" << endl;
return false;
}
//- calculate the plane normal as a cross prduct of the two
//- eigenVectors spanning the plane
const vector n
(
eigenVector(covarianceMatrix_, eigenVal[1]) ^
eigenVector(covarianceMatrix_, eigenVal[2])
);
//- check if the plane is in the x-y plane of the coordinate system
if( mag(n.x()) > SMALL || mag(n.y()) > SMALL )
{
//- this could be a 2D surface, but it is not in the x-y plane
WarningIn("bool triSurface2DCheck::is2DSurface() const")
<< "The surface mesh IS NOT IN THE X-Y PLANE!!!!"
<< " This will result in a mesh without any cells" << endl;
return false;
}
//- check if the points in the 2D surface have uniform z coordinates
boundBox bb(points);
forAll(points, pI)
{
const point& p = points[pI];
if
(
mag(p.z() - bb.max().z()) > SMALL &&
mag(p.z() - bb.min().z()) > SMALL
)
{
WarningIn("bool triSurface2DCheck::is2DSurface() const")
<< "z coordinates of the 2D surface are not uniform" << endl;
return false;
}
}
Info << "Detected a 2D surface in the x-y plane" << endl;
return true;
}
void triSurface2DCheck::createSubsets()
{
const pointField& points = surf_.points();
const vectorField& fNormals = surf_.facetNormals();
//- create a subset containing faces having non-zero z coordinate
//- of the normals
triSurf& surf = const_cast<triSurf&>(surf_);
const label badFacetsId = surf.addFacetSubset("badFacets");
forAll(fNormals, triI)
{
vector fn = fNormals[triI];
fn /= (mag(fn) + VSMALL);
if( mag(fn.z()) > SMALL )
surf.addFacetToSubset(badFacetsId, triI);
}
//- create a subset containing points which are not
//- in z-min and z-max planes
const label badPointsId = surf.addPointSubset("badPointsId");
boundBox bb(points);
forAll(points, pI)
{
const point& p = points[pI];
if
(
mag(p.z() - bb.max().z()) > SMALL &&
mag(p.z() - bb.min().z()) > SMALL
)
{
surf.addPointToSubset(badPointsId, pI);
}
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //
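is2DSurface() detects the plane from the covariance matrix of the facet normals (its smallest eigenvalue must vanish), then requires that plane to be x-y and all point z coordinates to sit at the bounding-box extremes. A simplified standalone sketch that skips the eigen-decomposition and tests only the x-y case the full check ultimately enforces:

#include <algorithm>
#include <array>
#include <cmath>
#include <iostream>
#include <vector>

using Vec = std::array<double, 3>;

// Simplified 2D-surface test for the x-y plane: every facet normal must be
// parallel to z and every point must sit at the minimum or maximum z.
bool is2DSurfaceXY(const std::vector<Vec>& points, const std::vector<Vec>& unitNormals)
{
    const double small = 1e-10;

    for (const Vec& n : unitNormals)
        if (std::abs(n[0]) > small || std::abs(n[1]) > small)
            return false;                      // normal has an in-plane component

    double zMin = points[0][2], zMax = points[0][2];
    for (const Vec& p : points)
    {
        zMin = std::min(zMin, p[2]);
        zMax = std::max(zMax, p[2]);
    }
    for (const Vec& p : points)
        if (std::abs(p[2] - zMin) > small && std::abs(p[2] - zMax) > small)
            return false;                      // point between the two z planes

    return true;
}

int main()
{
    // two points on z = 0, two on z = 1, facet normals along +/- z
    const std::vector<Vec> pts{{0,0,0}, {1,0,0}, {0,0,1}, {1,0,1}};
    const std::vector<Vec> normals{{0,0,1}, {0,0,-1}};
    std::cout << (is2DSurfaceXY(pts, normals) ? "2D" : "not 2D") << '\n';
    return 0;
}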

View file

@ -0,0 +1,98 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Class
triSurface2DCheck
Description
SourceFiles
triSurface2DCheck.C
\*---------------------------------------------------------------------------*/
#ifndef triSurface2DCheck_H
#define triSurface2DCheck_H
#include "triSurf.H"
#include "symmTensor.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class triSurface2DCheck Declaration
\*---------------------------------------------------------------------------*/
class triSurface2DCheck
{
// Private data
//- reference to triSurf
const triSurf& surf_;
//- covariance matrix
symmTensor covarianceMatrix_;
// Private member functions
//- create covariance matrix
void createCovarianceMatrix();
//- Disallow default bitwise copy construct
triSurface2DCheck(const triSurface2DCheck&);
//- Disallow default bitwise assignment
void operator=(const triSurface2DCheck&);
public:
// Constructors
//- Construct from triSurf
triSurface2DCheck(const triSurf& surface);
// Destructor
~triSurface2DCheck();
// Member Functions
//- checks if the surface is a 2D triangulation
bool is2DSurface() const;
//- create subset containing invalid facets
void createSubsets();
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View file

@ -0,0 +1,56 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
\*---------------------------------------------------------------------------*/
#include "triSurfaceCleanupDuplicateTriangles.H"
#include "triSurf.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
triSurfaceCleanupDuplicateTriangles::triSurfaceCleanupDuplicateTriangles
(
triSurf& surf
)
:
surf_(surf),
newTriangleLabel_()
{
checkDuplicateTriangles();
}
triSurfaceCleanupDuplicateTriangles::~triSurfaceCleanupDuplicateTriangles()
{}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //

View file

@ -0,0 +1,98 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Class
triSurfaceCleanupDuplicateTriangles
Description
Removes duplicate triangles from the surface.
SourceFiles
triSurfaceCleanupDuplicateTriangles.C
triSurfaceCleanupDuplicateTrianglesFunctions.C
\*---------------------------------------------------------------------------*/
#ifndef triSurfaceCleanupDuplicateTriangles_H
#define triSurfaceCleanupDuplicateTriangles_H
#include "VRWGraph.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// Forward declarations
class triSurf;
/*---------------------------------------------------------------------------*\
Class triSurfaceCleanupDuplicateTriangles Declaration
\*---------------------------------------------------------------------------*/
class triSurfaceCleanupDuplicateTriangles
{
// Private data
//- reference to triSurf
triSurf& surf_;
//- new triangle labels in case some of them are removed
labelLongList newTriangleLabel_;
// Private member functions
//- Check duplicate triangles
void checkDuplicateTriangles();
//- Disallow default bitwise copy construct
triSurfaceCleanupDuplicateTriangles
(
const triSurfaceCleanupDuplicateTriangles&
);
//- Disallow default bitwise assignment
void operator=(const triSurfaceCleanupDuplicateTriangles&);
public:
// Constructors
//- Construct from triSurf
triSurfaceCleanupDuplicateTriangles(triSurf&);
// Destructor
~triSurfaceCleanupDuplicateTriangles();
// Member Functions
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View file

@ -0,0 +1,100 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
\*---------------------------------------------------------------------------*/
#include "triSurfaceCleanupDuplicateTriangles.H"
#include "triSurfModifier.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void triSurfaceCleanupDuplicateTriangles::checkDuplicateTriangles()
{
labelLongList newTriangleLabel(surf_.size(), -1);
const VRWGraph& pointTriangles = surf_.pointFacets();
//- check if there exist duplicate triangles
label counter(0);
forAll(surf_, triI)
{
if( newTriangleLabel[triI] != -1 )
continue;
newTriangleLabel[triI] = counter;
++counter;
const labelledTri& tri = surf_[triI];
forAll(pointTriangles[tri[0]], ptI)
{
const label triJ = pointTriangles(tri[0], ptI);
if( triJ <= triI )
continue;
const labelledTri& otherTri = surf_[triJ];
if( tri == otherTri )
newTriangleLabel[triJ] = newTriangleLabel[triI];
}
}
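//- at this point newTriangleLabel maps each facet onto a compact index and
//- every duplicate shares the index of its first occurrence, e.g. for facets
//- {T0, T1, T2 == T1} the mapping is [0, 1, 1] and counter equals 2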
Info << "Found " << (newTriangleLabel.size()-counter)
<< " duplicate triangles" << endl;
//- return if there exist no duplicate triangles
if( counter == newTriangleLabel.size() )
return;
Info << "Current number of triangles" << surf_.size() << endl;
Info << "New number of triangles " << counter << endl;
//- create new list of triangles and store it in the surface mesh
LongList<labelledTri> newTriangles(counter);
forAll(newTriangleLabel, triI)
{
newTriangles[newTriangleLabel[triI]] = surf_[triI];
}
triSurfModifier(surf_).facetsAccess().transfer(newTriangles);
surf_.updateFacetsSubsets(newTriangleLabel);
surf_.clearAddressing();
surf_.clearGeometry();
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //

@@ -28,6 +28,7 @@ Description
#include "triSurfaceCurvatureEstimator.H"
#include "matrix3D.H"
#include "quadricFitting.H"
#include "helperFunctions.H"
#include "HashSet.H"
#include "boolList.H"
#include "Map.H"
@@ -85,6 +86,69 @@ void writeSurfaceToVTK
}
}
void writeSurfaceToVTK
(
OFstream& file,
const triSurf& surf,
const DynList<label>& triangles,
const labelHashSet& labels
)
{
//- write the header
file << "# vtk DataFile Version 3.0\n";
file << "vtk output\n";
file << "ASCII\n";
file << "DATASET POLYDATA\n";
//- write points
std::map<label, label> newPointLabel;
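//- note: operator[] default-inserts an entry for every referenced point label;
//- the compact point indices are assigned in the renumbering loop further below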
forAll(triangles, tI)
{
const labelledTri& tri = surf[triangles[tI]];
for(label pI=0;pI<3;++pI)
{
newPointLabel[tri[pI]];
}
}
forAllConstIter(labelHashSet, labels, it)
newPointLabel[it.key()];
const pointField& points = surf.points();
file << "POINTS " << label(newPointLabel.size()) << " float\n";
label counter(0);
for
(
std::map<label, label>::iterator it=newPointLabel.begin();
it!=newPointLabel.end();
++it
)
{
it->second = counter++;
const point& p = points[it->first];
file << p.x() << ' ' << p.y() << ' ' << p.z() << ' ';
file << nl;
}
//- write triangles
file << "\n";
file << "\nPOLYGONS " << triangles.size()
<< " " << 4*triangles.size() << endl;
forAll(triangles, tI)
{
const labelledTri& tri = surf[triangles[tI]];
file << 3
<< " " << newPointLabel[tri[0]]
<< " " << newPointLabel[tri[1]]
<< " " << newPointLabel[tri[2]] << endl;
}
}
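//- A possible usage sketch of the overload above (hypothetical helper, not
//- referenced elsewhere): dump a small facet selection plus one extra point
void writeDebugSelectionToVTK(const triSurf& surf, const fileName& fName)
{
    OFstream file(fName);

    //- an example selection consisting of the first two facets
    DynList<label> facets;
    facets.append(0);
    facets.append(1);

    //- additionally keep point 0 in the output
    labelHashSet extraPoints;
    extraPoints.insert(0);

    writeSurfaceToVTK(file, surf, facets, extraPoints);
}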
void writeSurfaceToVTK
(
const word& name,
@@ -209,17 +273,15 @@ void triSurfaceCurvatureEstimator::calculateEdgeCurvature()
{
//- calculate the curvature and store it in the map
vector e1 = edges[features[0]].vec(points);
const scalar d1 = mag(e1);
if( d1 > VSMALL )
e1 /= d1;
const scalar d1 = mag(e1) + VSMALL;
e1 /= d1;
vector e2 = edges[features[1]].vec(points);
const scalar d2 = mag(e2);
if( d2 > VSMALL )
e2 /= d2;
const scalar d2 = mag(e2) + VSMALL;
e2 /= d2;
scalar cs = e1 & e2;
cs = Foam::min(1.0, cs);
cs = Foam::max(1.0, cs);
cs = Foam::max(-1.0, cs);
const scalar curv = Foam::acos(cs) / (0.5 * (d1+d2+VSMALL));
edgePointCurvature_[pI] = Foam::mag(curv);
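//- i.e. the curvature is estimated as the angle between the two feature-edge
//- vectors divided by half of their combined length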
@@ -233,8 +295,6 @@ void triSurfaceCurvatureEstimator::calculateSurfaceCurvatures()
const VRWGraph& pointTriangles = surface_.pointFacets();
const pointField& points = surface_.points();
const VRWGraph& pointEdges = surface_.pointEdges();
const edgeLongList& edges = surface_.edges();
patchPositions_.setSize(surface_.size());
gaussianCurvature_.setSize(points.size());
@@ -292,30 +352,24 @@ void triSurfaceCurvatureEstimator::calculateSurfaceCurvatures()
{
const labelHashSet& currLabels = it();
if( otherLabels[it.key()].size() > 5 )
continue;
labelHashSet additionalPoints;
forAllConstIter(labelHashSet, currLabels, lit)
{
const label neiPointI = lit.key();
const constRow pEdges = pointEdges[neiPointI];
const constRow pTriangles = pointTriangles[neiPointI];
forAll(pEdges, peI)
forAll(pTriangles, ptI)
{
const label neiPointJ =
edges[pEdges[peI]].otherVertex(neiPointI);
const labelledTri& nTri = surface_[pTriangles[ptI]];
if( neiPointJ == pointI )
if( nTri.region() != it.key() )
continue;
bool correctPatch(false);
forAll(pointTriangles[neiPointJ], ntJ)
{
const label pJ = pointTriangles[neiPointJ][ntJ];
if( surface_[pJ].region() == it.key() )
correctPatch = true;
}
if( correctPatch )
additionalPoints.insert(neiPointJ);
forAll(nTri, pI)
additionalPoints.insert(nTri[pI]);
}
}
@@ -363,8 +417,19 @@ void triSurfaceCurvatureEstimator::calculateSurfaceCurvatures()
# ifdef DEBUGCurvatureEstimator
Info << "Point " << pointI << " in patch " << regionI
<< " normal " << normals[it.key()]
<< " evalution points " << labels
<< " has max curvature " << qfit.maxCurvature()
<< " and min curvature " << qfit.minCurvature() << endl;
OFstream file
(
"point_" +
help::scalarToText(pointI) +
"_region_" +
help::scalarToText(regionI) +
"_triangles.vtk"
);
writeSurfaceToVTK(file, surface_, rTriangles, labels);
# endif
//- store curvatures
@@ -390,9 +455,9 @@ void triSurfaceCurvatureEstimator::calculateSurfaceCurvatures()
# ifdef USE_OMP
# pragma omp for schedule(static, 1)
# endif
forAll(pointEdges, pointI)
forAll(pointTriangles, pointI)
{
const constRow pEdges = pointEdges[pointI];
const constRow pTriangles = pointTriangles[pointI];
//- find neighbouring points for each patch
Map<DynList<label> > patchNeiPoints;
@@ -403,17 +468,21 @@ void triSurfaceCurvatureEstimator::calculateSurfaceCurvatures()
DynList<label>()
);
forAll(pEdges, peI)
forAll(pTriangles, ptI)
{
const edge& e = edges[pEdges[peI]];
const label neiPointI = e.otherVertex(pointI);
const labelledTri& tri = surface_[pTriangles[ptI]];
forAll(pointPatches[neiPointI], i)
if( !patchNeiPoints.found(tri.region()) )
patchNeiPoints.insert(tri.region(), DynList<label>());
forAll(tri, pI)
{
const label patchI = pointPatches[neiPointI][i];
const label neiPointI = tri[pI];
if( patchNeiPoints.found(patchI) )
patchNeiPoints[patchI].append(neiPointI);
if( neiPointI == pointI )
continue;
patchNeiPoints[tri.region()].appendIfNotIn(neiPointI);
}
}

@@ -0,0 +1,240 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Description
\*---------------------------------------------------------------------------*/
#include "triSurfaceImportSurfaceAsSubset.H"
#include "meshOctree.H"
#include "meshOctreeCreator.H"
#include "helperFunctions.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void triSurfaceImportSurfaceAsSubset::createOctree
(
const triSurf& surf,
meshOctree& octree
)
{
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
triSurfaceImportSurfaceAsSubset::triSurfaceImportSurfaceAsSubset(triSurf& surface)
:
surf_(surface),
octreePtr_(NULL)
{}
triSurfaceImportSurfaceAsSubset::~triSurfaceImportSurfaceAsSubset()
{
deleteDemandDrivenData(octreePtr_);
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void triSurfaceImportSurfaceAsSubset::addSurfaceAsSubset
(
const triSurf& importSurf,
const word& subsetName,
const scalar angleTol
)
{
if( !octreePtr_ )
{
octreePtr_ = new meshOctree(surf_);
meshOctreeCreator(*octreePtr_).createOctreeWithRefinedBoundary
(
direction(20),
15
);
}
const pointField& points = surf_.points();
const vectorField& fNormals = surf_.facetNormals();
const vectorField& fCentres = surf_.facetCentres();
labelList nearestTriangle(importSurf.size(), -1);
//- check which triangles in the surface fit best to the centres of the
//- triangles in the import surface
const pointField& importSurfPoints = importSurf.points();
const vectorField& importFaceCentres = importSurf.facetCentres();
const vectorField& importFaceNormals = importSurf.facetNormals();
# ifdef USE_OMP
# pragma omp parallel for schedule(dynamic, 40)
# endif
forAll(nearestTriangle, triI)
{
point np;
scalar dSq;
label nt, patch;
octreePtr_->findNearestSurfacePoint
(
np,
dSq,
nt,
patch,
importFaceCentres[triI]
);
//- find the longest edge distance
scalar maxEdgeDSq(0.);
const labelledTri& tri = importSurf[triI];
forAll(tri, pI)
{
const point& s = importSurfPoints[tri[pI]];
const point& e = importSurfPoints[tri[(pI+1)%3]];
maxEdgeDSq = max(maxEdgeDSq, magSqr(e - s));
}
//- check if the triangle has been found
if( (nt < 0) || (dSq > 0.09 * maxEdgeDSq) )
{
Warning << "Could not find a matching triangle " << endl;
Warning << "It seems that your surface meshes do not overlap" << endl;
continue;
}
vector nTri = importFaceNormals[triI];
const scalar magSqrTri = magSqr(nTri);
//- skip sliver triangles
if( magSqrTri < VSMALL )
continue;
vector normal = fNormals[nt];
const scalar dSqNormal = magSqr(normal);
//- skip sliver triangles
if( dSqNormal < VSMALL )
continue;
if( ((nTri & normal) / (magSqrTri * dSqNormal)) > angleTol )
nearestTriangle[triI] = nt;
}
meshOctree otherSurfOctree(importSurf);
meshOctreeCreator(otherSurfOctree).createOctreeWithRefinedBoundary(20, 15);
//- search for nearest facets in the import surface
DynList<label> containedTriangles;
# ifdef USE_OMP
# pragma omp parallel for schedule(dynamic, 40) private(containedTriangles)
# endif
forAll(surf_, triI)
{
//- find the bounding box and the size of the triangle
boundBox bb(fCentres[triI], fCentres[triI]);
scalar maxEdgeDSq(0.);
const labelledTri& tri = surf_[triI];
forAll(tri, pI)
{
//- bounding box of the surface triangle
bb.min() = min(bb.min(), points[tri[pI]]);
bb.max() = max(bb.max(), points[tri[pI]]);
const point& s = points[tri[pI]];
const point& e = points[tri[(pI+1)%3]];
maxEdgeDSq = max(maxEdgeDSq, magSqr(e - s));
}
//- find the nearest triangle in the surface which shall be imported
otherSurfOctree.findTrianglesInBox(bb, containedTriangles);
label nt(-1);
scalar dSq(VGREAT);
forAll(containedTriangles, ctI)
{
const point p =
help::nearestPointOnTheTriangle
(
containedTriangles[ctI],
importSurf,
fCentres[triI]
);
const scalar distSq = magSqr(p - fCentres[triI]);
if( distSq < dSq )
{
nt = containedTriangles[ctI];
dSq = distSq;
}
}
//- check if the triangle has been found
if( (nt < 0) || (dSq > 0.09 * maxEdgeDSq) )
continue;
//- skip further checks if it has found the same triangle
if( nearestTriangle[nt] == triI )
continue;
vector nTri = fNormals[triI];
const scalar magSqrTri = magSqr(nTri);
//- skip sliver triangles
if( magSqrTri < VSMALL )
continue;
vector normal = importFaceNormals[nt];
const scalar dSqNormal = magSqr(normal);
//- skip sliver triangles
if( dSqNormal < VSMALL )
continue;
if( ((nTri & normal) / (magSqrTri * dSqNormal)) > angleTol )
nearestTriangle[nt] = triI;
}
//- create a facet subset in the surface mesh and add the facets into it
const label subsetId = surf_.addFacetSubset(subsetName);
forAll(nearestTriangle, triI)
{
if( nearestTriangle[triI] < 0 )
continue;
surf_.addFacetToSubset(subsetId, nearestTriangle[triI]);
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //

@@ -0,0 +1,103 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Class
triSurfaceImportSurfaceAsSubset
Description
Creates a subset in the master surface consisting of facets which are
near the other surface
SourceFiles
triSurfaceImportSurfaceAsSubset.C
\*---------------------------------------------------------------------------*/
#ifndef triSurfaceImportSurfaceAsSubset_H
#define triSurfaceImportSurfaceAsSubset_H
#include "triSurf.H"
#include "boolList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
class meshOctree;
/*---------------------------------------------------------------------------*\
Class triSurfaceImportSurfaceAsSubset Declaration
\*---------------------------------------------------------------------------*/
class triSurfaceImportSurfaceAsSubset
{
// Private data
//- reference to triSurf
triSurf& surf_;
//- pointer to meshOctree, needed for searching on the master surface
meshOctree* octreePtr_;
// Private member functions
void createOctree(const triSurf&, meshOctree&);
//- Disallow default bitwise copy construct
triSurfaceImportSurfaceAsSubset(const triSurfaceImportSurfaceAsSubset&);
//- Disallow default bitwise assignment
void operator=(const triSurfaceImportSurfaceAsSubset&);
public:
// Constructors
//- Construct from triSurf
triSurfaceImportSurfaceAsSubset(triSurf& surface);
// Destructor
~triSurfaceImportSurfaceAsSubset();
// Member Functions
//- finds the nearest faces in the surface to the import surf
//- and creates a subset
void addSurfaceAsSubset
(
const triSurf& importSurf,
const word& subsetName,
const scalar angleTol = 5.*M_PI/180.
);
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
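// A minimal usage sketch (hypothetical file names; writeSurface() is assumed
// to be the triSurf output routine):
//
//     triSurf masterSurf(fileName("master.fms"));
//     triSurf toolSurf(fileName("tool.fms"));
//
//     triSurfaceImportSurfaceAsSubset importer(masterSurf);
//     importer.addSurfaceAsSubset(toolSurf, "toolContact");
//
//     masterSurf.writeSurface(fileName("masterWithToolSubset.fms"));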

@@ -37,7 +37,6 @@ SourceFiles
#ifndef triSurfacePatchManipulator_H
#define triSurfacePatchManipulator_H
#include "IOdictionary.H"
#include "triSurf.H"
#include "VRWGraph.H"

@@ -0,0 +1,12 @@
#!/bin/bash
# Klas Jareteg, 2012-10-13
# Description:
#    Script to clean the tutorial case.
# Source tutorial clean functions
. $WM_PROJECT_DIR/bin/tools/CleanFunctions
cleanCase
rm -r constant/polyMesh

@@ -0,0 +1,6 @@
#!/bin/sh
# Source tutorial run functions
. $WM_PROJECT_DIR/bin/tools/RunFunctions
runApplication cartesianMesh
runApplication checkMesh -constant
