--HG--
branch : bgschaid/minorAdditionsBranch
Bernhard F.W. Gschaider 2013-07-19 00:36:27 +02:00
commit 72c243a96f
1416 changed files with 18017 additions and 18012 deletions


@ -49,7 +49,7 @@ IF (NOT $ENV{CDASH_SUBMIT_LOCAL_HOST_ID} STREQUAL "")
# $CDASH_SUBMIT_LOCAL_HOST_ID
SET(
SITENAME $ENV{CDASH_SUBMIT_LOCAL_HOST_ID}
CACHE STRING "Name of the local site"
)
ELSE (NOT $ENV{CDASH_SUBMIT_LOCAL_HOST_ID} STREQUAL "")
# Grab the hostname FQN; will be used for the sitename
@ -81,16 +81,16 @@ SET(
)
# Find out the version of the compiler being used.
# Add this information to the buildname
# This is for gcc or icc because they both support the -dumpversion option
EXEC_PROGRAM($ENV{WM_CC}
ARGS -dumpversion
OUTPUT_VARIABLE COMPILER_VERSION
)
SET(BUILDNAME "${BUILDNAME}-$ENV{WM_CC}${COMPILER_VERSION}")
#
# We will support more compilers eventually.
#
# Timeout for running every single test: 4 hours: 4 x 3600 seconds
#SET(
@ -128,12 +128,12 @@ if(GIT_FOUND)
if (GIT_BRANCH_NAME STREQUAL "")
message("No git-branch. Mercurial?")
EXEC_PROGRAM(hg
ARGS branch
OUTPUT_VARIABLE GIT_BRANCH_NAME
)
message("Git branch (mercurial): ${GIT_BRANCH_NAME}")
endif()
SET(BUILDNAME "${BUILDNAME}-git-branch=${GIT_BRANCH_NAME}")
endif()
# Some last minute cleanup
@ -158,7 +158,7 @@ set_property(
# Compile the OpenFOAM unit tests located under applications/test
# This part will not be compiled and run by default.
# This would be a good candidate for a sub-project
add_custom_target (OpenFOAM-$ENV{WM_PROJECT_VERSION}_unitTests
wmake all ${OF_ROOT}/applications/test
)
@ -184,7 +184,7 @@ IF(BUILD_TESTING)
# Modify this variable if you want the full length test case simulations
# Beware, this might take a long time to execute.
# Otherwise, the default behaviour is to run each tutorial for 1 "timestep"
#SET(RUN_FROM_ONE_TIMESTEP 0)
SET(RUN_FROM_ONE_TIMESTEP 1)
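The CTest configuration above derives the CDash BUILDNAME from the compiler version (via -dumpversion) and from the git or Mercurial branch name. As a rough shell sketch of the same queries, assuming WM_CC points at gcc or icc as the comments state (the variable names below are illustrative, not part of the build system):

    # Sketch: reproduce the BUILDNAME ingredients queried by the CMake code above.
    compiler_version=$("${WM_CC:-gcc}" -dumpversion)
    # Prefer the git branch name; fall back to Mercurial, mirroring the CMake logic.
    branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null)
    [ -z "$branch" ] && branch=$(hg branch 2>/dev/null)
    echo "${WM_CC:-gcc}${compiler_version} git-branch=${branch}"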

COPYING

@ -1,12 +1,12 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
@ -56,7 +56,7 @@ patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
@ -255,7 +255,7 @@ make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
@ -277,9 +277,9 @@ YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it


@ -14,7 +14,7 @@ Contributions:
- TU Braunschweig: real gas thermo
- Kornev, Rostock: inlet turbulence generator; locDynV2F hybrid turbulence model
- Christoph Goniva: and multi-mixer dynamic mesh class
-
- segregated pressure-based turbomachinery (Wikki)


@ -1,118 +1,118 @@
The OpenFOAM-Extend Project
Web site: http://www.extend-project.de/
The goal of the OpenFOAM -Extend Project is to open the OpenFOAM CFD toolbox
to community contributed extensions in the spirit of the Open Source
development model.
Software under the name OpenFOAM has been developed by a large group of
volunteers starting in 1991 and has spread through the scientific and engineering
community in the late 1990s and early 2000s. Its main strength was the spirit
of open collaboration in the community: the aim of the OpenFOAM-Extend Project
is to formalize this spirit.
* Trademark notice
Currently, the word "OpenFOAM" is held as a trademark by a commercial
company, not associated with the Open Source development effort of this
software. While the author acknowledges the existence of Trademark,
association of the trademarked phrase with the code developed by third
parties and including the author is misleading in terms of authorship,
ownership of intellectual property of the source code.
In the absence of a legal resolution, we shall refer to the project as the
-Extend project and software as "OpenFOAM", as per agreement between
Prof. Hrvoje Jasak and Mr. Henry G. Weller of 30 September 2004.
The excerpts below are from the correspondences of Dr.Jasak and are to
his knowledge all the relevant writings about this topic. Although
this topic has been discussed before in public forums there exists no
'official' answer by OpenCFD on this issue. In the interest of
fairness a reference (link) to such an answer will be added to this
document as soon as we become aware of it.
09/30/2004 01:45 PM
Dear Dr. Jasak,
I propose that we make FOAM open-source and shutdown Nabla, that way we
are free to pursue whatever future we wish independently.
Henry G. Weller
Managing Director
Nabla Ltd
The first release of the OpenFOAM source code of 10 December 2004
and subsequent clarification of 16 February 2005.
Hi Henry,
...
Regarding the new setup, I would like to thank you for your effort in
making FOAM public domain, with crucial comments regarding some
important details.
- in the header files for the open version of foam there exists a line
which claims copyright for OpenCFD Ltd:
Copyright (C) 1991-2004 OpenCFD Ltd.
I am not sure whether it is possible to claim copyright over a GPL piece
of software, but this is at least misleading. In order for this
statement to be true, OpenCFD Ltd. would need to purchase the copyright
from Nabla Ltd. and from you and me personally (due to unresolved
ownership and the remaining payments Nabla Ltd. needs to make to us
regarding the original transfer of copyright). I can offer a number
of solutions acceptable to me as follows:
1) no Copyright statement apart from the required GPL statement
2) Copyright OpenFOAM.org
3) Copyright Henry Weller, Hrvoje Jasak et al
4) Copyright OpenCFD and Hrvoje Jasak
5) List of authors instead of the copyright statement
I am also prepared to consider any other reasonable ideas along the same
lines (including the sale of copyright, which would be a bit pointless
at this stage) - please advise.
02/17/2005 11:09 PM
Under the GPL there has to be an organization in place to enforce the GPL
if there are violation, these are part of the rules. OpenCFD undertakes
that responsibility hence the copyright.
Henry G. Weller
Managing Director
12 November 2010
From the above it is clear that OpenCFD does not own the rights or
authorship of the source code, which is currently concealed in public.
For further details, I have been attempting to get a formal response by
this company for 6 years now, without success. It is time to move on.
The current Copyright notice does not fairly describe the authorship of
the software. In the spirit of the open source development model, the
-Extend project wishes to formally acknowledge all contributors and
developers of OpenFOAM from its inception to today.
As this issue still remains unresolved, and the list of contributors
has been removed from the OpenFOAM source code by Mr. Weller in 2006,
we shall re-base OpenFOAM as an Open Source community-based project
and associate the authorship and Copyright in each case with the
original author.
Hrvoje Jasak
This offering is not affiliated with OpenCFD Limited, owner of the OpenFOAM
trade mark.
Please find the official website of OpenCFD Limited at http://www.openfoam.org
In particular, OpenCFD Limited hosts the following legal statements:
- licensing and intellectual property: http://www.openfoam.com/legal/index.php
- trademark policy: http://www.openfoam.com/legal/trademark-policy.php


@ -10,7 +10,7 @@ git Repository: Henrik Rusche (h.rusche@wikki.co.uk)
Martin Beaudoin (beaudoin.martin@ireq.ca)
1. SourceForge Access
~~~~~~~~~~~~~~~~~~
To make contributions to the -extend project, you should first obtain an
account at SourceForge.net. (SourceForge will suggest a username
@ -46,7 +46,7 @@ git Repository: Henrik Rusche (h.rusche@wikki.co.uk)
The article listed above should be considered mandatory reading material
for those planning to make contributions to the repository. Some links about
the general usage of GIT can be found in Section 8.
Please do not hesitate to ask one of the "git Repository" contacts at the top
of this document if you are not sure about specific operation relative to the git
@ -81,14 +81,14 @@ git Repository: Henrik Rusche (h.rusche@wikki.co.uk)
+ git checkout -b my-feature-branch
Feature branches should be named after the fix or feature that they contain,
*not* named after the author. There may be more than one author, after all, and
this information is recorded in the commit anyway. As an example, a bug fix
to the mesquite package should be committed to a branch named "hotfix/mesquite".
Carefully organized commits and branches, clear commit messages, and well-chosen
branch names will make it easier for the release committee to review and merge
each contribution.
When you have a feature branch that is ready to be merged, push it to the server
using a command such as this:
@ -125,7 +125,7 @@ git Repository: Henrik Rusche (h.rusche@wikki.co.uk)
The feature branches provided by users will be merged by the release committee
into an integration branch called "nextRelease", and then both the local
and remote copy of the feature branch will be deleted. The merge will be performed
using a "git merge --no-ff" command, which forces the creation of a merge commit
even in the case where the merge could be accomplished by fast-forward.
Note that the automated test loop will be run off of this integration branch.
@ -136,7 +136,7 @@ git Repository: Henrik Rusche (h.rusche@wikki.co.uk)
and hotfixes.
Note that hotfixes should be branched off of the master branch and should be merged
twice - once into the integration branch and once into the master branch - in order to
guarantee that a merge of the integration branch into the master branch can be
accomplished by a fast-forward.
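As a minimal shell sketch of the workflow described above (the branch and remote names origin, hotfix/mesquite and nextRelease are taken from the examples in this document or assumed, so adapt them to the actual repository):

    # Create a branch named after the fix, not the author
    git checkout -b hotfix/mesquite master
    # ...commit the fix, then publish the branch for review...
    git push origin hotfix/mesquite
    # Release committee side: merge with an explicit merge commit into both the
    # integration branch and master, so that a later merge of the integration
    # branch into master can still be a fast-forward.
    git checkout nextRelease
    git merge --no-ff hotfix/mesquite
    git checkout master
    git merge --no-ff hotfix/mesquite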

README

@ -59,7 +59,7 @@
Then update the environment variables by sourcing the $HOME/.bashrc file by
typing in the terminal:
+ . $HOME/.bashrc
2) OR, if running tcsh or csh, source the etc/cshrc file by adding the
following line to the end of your $HOME/.cshrc file:
@ -69,7 +69,7 @@
Then update the environment variables by sourcing the $HOME/.cshrc file by
typing in the terminal:
+ source $HOME/.cshrc
*** Installation in alternative locations
OpenFOAM may also be installed in alternative locations. However, the
@ -79,13 +79,13 @@
The environment variable 'FOAM_INST_DIR' can be used to find and source the
appropriate resource file. Here is a bash/ksh/sh example:
+ export FOAM_INST_DIR=/data/app/OpenFOAM
+ foamDotFile=$FOAM_INST_DIR/OpenFOAM-<VERSION>/etc/bashrc
+ [ -f $foamDotFile ] && . $foamDotFile
and a csh/tcsh example:
+ setenv FOAM_INST_DIR /data/app/OpenFOAM
+ set foamDotFile=$FOAM_INST_DIR/OpenFOAM-<VERSION>/etc/cshrc
+ if ( -f $foamDotFile ) source $foamDotFile
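As a consolidated sketch of the alternative-location setup above, the lines below could be appended to $HOME/.bashrc; the install prefix is the example value from this README, and the 1.6-ext version directory is an assumption, so substitute your actual version:

    # Hypothetical ~/.bashrc fragment for an install under /data/app/OpenFOAM
    export FOAM_INST_DIR=/data/app/OpenFOAM
    foamDotFile=$FOAM_INST_DIR/OpenFOAM-1.6-ext/etc/bashrc
    [ -f "$foamDotFile" ] && . "$foamDotFile"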
@ -93,7 +93,7 @@
of the OpenFOAM installation.
* Building the ThirdParty packages from Sources
A new way of compiling and installing the ThirdParty suite of packages is proposed with OpenFOAM-1.6-ext.
Please consult the file ThirdParty/README.ThirdParty for more information.
* Building from Sources (Optional)
@ -154,7 +154,7 @@
* Reporting Bugs in OpenFOAM
Please report all bugs and feature requests using our bug tracking system:
http://sourceforge.net/apps/mantisbt/openfoam-extend/main_page.php or
http://www.extend-project.de/project-final/openfoam-extend-bug-tracker
Please also report bugs at the CFD-Online User Forum.
@ -188,7 +188,7 @@
David Hill
Niklas Wikstrom
Dubravko Matijasevic
Darrin Stephens
Christian Beck
Oliver Borm
James Criner


@ -1,84 +1,84 @@
# -*- mode: org; -*-
#
#+TITLE: *OpenFOAM release notes for version 1.6-ext*
#+AUTHOR: Hrvoje Jasak.
#+DATE: 12 November 2010
#+LINK: http://www.extend-project.de
#+OPTIONS: author:nil
* Overview
OpenFOAM-1.6-ext is a formal release of the community developments in the
OpenFOAM software library, following the establishment of the -Extend Project
and web portal.
This release includes many improvements, including an automatic test harness
which provides basic quality control by running OpenFOAM over a growing
suite of tutorials and test cases.
* Compatibility
OpenFOAM-1.6-ext is compatible with the OpenFOAM-1.6.x and 1.7.1 versions
of the code and incorporates most developments and changes from the above versions
and ensures top-level compatibility. In some cases, the differences are
caused by bug fixes and algorithmic improvements, considered more important
than inter-operability.
The list of developments is a result of the work of numerous contributors.
The Extend Project Admins would formally like to thank all contributors;
the list will be updated with further code development
* Fundamental developments
Improvements in accuracy and stability on tetrahedral and tet-dominant meshes
Implicit coupled multi-domain solver
Block-implicit multi-equation matrix support
Proper Orthogonal Decomposition (POD) data analysis tools
Rewrite of Ordinary Differential Equation (ODE) and ODE solver classes
Dynamic remeshing classes, based on tetrahedral edge swapping
Radial Basis Function interpolation and mesh motion classes
Turbomachinery features: GGI interface, cyclic GGI, partial overlap GGI
Parallelisation of topological change engine
Support for parallelisation and topological changes in the Finite Area Method
Library preparation for Python/Swig interface for OpenFOAM: VulaSHAKA project
(http://sourceforge.net/projects/vulashaka/)
Basic implementation of OpenMP wrapping for multi-core support
* Ongoing developments
This section lists the applications that existed in versions of OpenFOAM
but were abandoned by OpenCFD due to lack of expertise. In some cases, code
may still be present but it is buggy or unusable. The -Extend project line
maintains, validates and develops the features in contact with original
authors and contributes new features and bug fixes.
Working parallel point mesh and field tools, shared between interpolation
and FEM discretisation solvers
FEM-based automatic mesh motion solver, with working parallelisation
Dynamic mesh features and topological mesh changes
The Finite Area Method: parallelisation, volume-to-surface mapping
New generation of linear equation solvers, including accelerated AMG
* Developments to solvers (applications)
Basic 6-degree-of-freedom (6-DOF) solver
POD solver tools: prototype
Demonstration solver for fully implicit block-matrix coupling
* Developments to utilities
New parallel decomposition and reconstruction formulation, with support
for topologically changing meshes
Level-set support tools
* Model development
Transport models and top-level solver for visco-elasto-plastic flows
Updates to internal combustion engine simulation tools
Updated version of free surface tracking tools for free surface flows
Updated fluid-structure interaction solver with parallelisation support
Updated stress analysis tools, new large deformation solver formulation
* Quality Control
Integrated automatic and publicly available test harness with ~150 cases
http://openfoam-extend.sourceforge.net/CDash/index.php
Community-driven test-case base to enhance code quality and integrity.
Contributions under
http://www.extend-project.de/project-final/openfoam-extend-test-harness
* Other
The number of cumulative bug fixes compared to OpenFOAM-1.6.x is over
5000; we will stop counting.


@ -62,12 +62,12 @@ int main(int argc, char *argv[])
forAll (structure.names(), bodyI)
{
Info<< nl << "Average velocity of " << structure.names()[bodyI]
<< " in time step = "
<< structure()[bodyI].Uaverage().value() << nl
<< "Current velocity in time instant = "
<< structure()[bodyI].U().value() << nl
<< "Average omega of " << structure.names()[bodyI]
<< " in time step = "
<< structure()[bodyI].omegaAverage().value() << nl
<< "Current omega in time instant = "


@ -22,7 +22,7 @@
volScalarField GEta = GEtaCoef/tauEta;
volScalarField XiEqEta = 1.0 + XiCoef*sqrt(up/(Su + SuMin))*Reta;
volScalarField R =
GEta*XiEqEta/(XiEqEta - 0.999) + GIn*XiIn/(XiIn - 0.999);
volScalarField XiEqStar = R/(R - GEta - GIn);
@ -42,7 +42,7 @@
volScalarField GEta = GEtaCoef/tauEta;
volScalarField XiEqEta = 1.0 + XiCoef*sqrt(up/(Su + SuMin))*Reta;
volScalarField R =
GEta*XiEqEta/(XiEqEta - 0.999) + GIn*XiIn/(XiIn - 0.999);
volScalarField XiEqStar = R/(R - GEta - GIn);


@ -91,7 +91,7 @@ bool Foam::XiModels::algebraic::read(const dictionary& XiProperties)
XiModel::read(XiProperties);
XiModelCoeffs_.lookup("XiShapeCoef") >> XiShapeCoef;
return true;
}


@ -117,7 +117,7 @@ inline Foam::scalar Foam::laminarFlameSpeedModels::SCOPE::polyPhi
{
scalar x = phi - 1.0;
return
a[0]
*(
scalar(1)


@ -45,7 +45,7 @@ if (adjustTimeStep)
maxDeltaT
)
);
Info<< "deltaT = " << runTime.deltaT().value() << endl;
}


@ -2,7 +2,7 @@
cd ${0%/*} || exit 1 # run from this directory
set -x
wmake libso BCs
wmake
# ----------------------------------------------------------------- end-of-file


@ -84,7 +84,7 @@ int main(int argc, char *argv[])
(
fvm::ddt(rhoU)
+ fvm::div(phiv, rhoU)
==
- fvc::grad(p)
);


@ -2,7 +2,7 @@
cd ${0%/*} || exit 1 # run from this directory
set -x
wmake libso BCs
wmake
# ----------------------------------------------------------------- end-of-file


@ -131,7 +131,7 @@ void inviscidWallPFvPatchScalarField::updateCoeffs()
const fvPatchField<vector>& rhoUp =
lookupPatchField<volVectorField, vector>("rhoU");
const fvsPatchField<scalar>& phip =
lookupPatchField<surfaceScalarField, scalar>("phi");
const fvsPatchField<scalar>& rAp =
@ -147,7 +147,7 @@ void inviscidWallPFvPatchScalarField::updateCoeffs()
void inviscidWallPFvPatchScalarField::write(Ostream& os) const
{
fixedGradientFvPatchScalarField::write(os);
os.writeKeyword("fluxFraction")
<< fluxFraction_ << token::END_STATEMENT << nl;
writeEntry("value", os);
}


@ -145,7 +145,7 @@ void mixedRhoEFvPatchScalarField::updateCoeffs()
const volScalarField& T = db().lookupObject<volScalarField>("T");
const label patchi = patch().index();
fvPatchScalarField& Tp =
const_cast<fvPatchScalarField&>(T.boundaryField()[patchi]);
Tp.evaluate();
@ -164,7 +164,7 @@ void mixedRhoEFvPatchScalarField::updateCoeffs()
refGrad() =
rhop*Cv.value()*Tp.snGrad()
+ (
refValue()
- (0.5*rhop.patchInternalField()*
magSqr(rhoUp.patchInternalField()/rhop.patchInternalField()))
)*patch().deltaCoeffs();


@ -66,7 +66,7 @@ public:
const dictionary&
);
//- Construct by mapping given fixedRhoUFvPatchVectorField
// onto a new patch
fixedRhoUFvPatchVectorField
(


@ -1,3 +1,3 @@
conjugateHeatFoam.C
EXE = $(FOAM_APPBIN)/conjugateHeatFoam


@ -1,3 +1,3 @@
electrostaticFoam.C
EXE = $(FOAM_APPBIN)/electrostaticFoam


@ -98,7 +98,7 @@ int main(int argc, char *argv[])
U = rUA*UEqn.H();
phi = (fvc::interpolate(U) & mesh.Sf())
+ fvc::ddtPhiCorr(rUA, U, phi);
for (int nonOrth=0; nonOrth<=nNonOrthCorr; nonOrth++)


@ -83,6 +83,6 @@
momentumPredictor = false;
# include "UEqn.H"
momentumPredictor = momentumPredictorSave;
rUA = 1.0/UEqn.A();
}


@ -35,7 +35,7 @@ scalar meanMeshCoNum = 0.0;
if (mesh.nInternalFaces())
{
surfaceScalarField SfUfbyDelta =
mesh.surfaceInterpolation::deltaCoeffs()*mag(mesh.phi());
meshCoNum = max(SfUfbyDelta/mesh.magSf())


@ -28,7 +28,7 @@
pcorr.oldTime() == p.oldTime();
phi = fvc::interpolate(rho)
*((fvc::interpolate(U) & mesh.Sf()) - fvc::meshPhi(rho, U));
for(int nonOrth=0; nonOrth<=nNonOrthCorr; nonOrth++)
{
@ -43,7 +43,7 @@
if (nonOrth == nNonOrthCorr)
{
phi += pcorrEqn.flux();
}
}
}


@ -35,7 +35,7 @@ scalar meanMeshCoNum = 0.0;
if (mesh.nInternalFaces())
{
surfaceScalarField SfUfbyDelta =
mesh.surfaceInterpolation::deltaCoeffs()*mag(mesh.phi());
meshCoNum = max(SfUfbyDelta/mesh.magSf())


@ -28,7 +28,7 @@ Application
Description
Sample application testing the equationReader extension, and demonstrating
its use.
Author
David L. F. Gaden
@ -121,7 +121,7 @@ int main(int argc, char *argv[])
Info << "Reading equation b from testDict, linking an output variable"
<< endl;
eqns.readEquation(testDict, "b", activeOutB);
Info << "Output variable before update() = " << activeOutB << endl;
Info << "Begining .update() - this evaluates all equations with active "
<< "output..." << endl;
@ -161,11 +161,11 @@ int main(int argc, char *argv[])
Info << "done. Evaluating equation f ... ";
passiveOutF = eqns.evaluate("f");
Info << "done." << token::NL << "The result is: " << passiveOutF << endl;
Info << token::NL << "Creating output..." << endl;
OFstream os(path/"outputDict");
os << eqns;
eqns.dataSourceStatus(os);
return(0);
}


@ -28,7 +28,7 @@ Application
Description
Sample application testing the equationReader in a finite volume solver
environment.
Author
David L. F. Gaden
@ -174,7 +174,7 @@ int main(int argc, char *argv[])
eqns.addDataSource(Sj, "Sj");
eqns.addDataSource(Sk, "Sk");
eqns.addDataSource(Sl, "Sl");
label listIndex(0);
eqns.addDataSource(p);
eqns.addDataSource(dummy);
@ -201,7 +201,7 @@ int main(int argc, char *argv[])
scalar saD(readScalar(testDict1.lookup("saD")));
scalar saE(readScalar(testDict1.lookup("saE")));
scalar saF(readScalar(testDict1.lookup("saF")));
dimensionedScalar dsaA(testDict1.lookup("dsaA"));
dimensionedScalar dsaB(testDict1.lookup("dsaB"));
dimensionedScalar dsaC(testDict1.lookup("dsaC"));
@ -244,7 +244,7 @@ int main(int argc, char *argv[])
{
Info<< "Time = " << runTime.timeName() << nl << endl;
DStime.value() = runTime.value();
Info << "Moving p index to ";
listIndex++;
if (listIndex == p.size())
@ -313,7 +313,7 @@ int main(int argc, char *argv[])
volScalarField rUA = 1.0/UEqn.A();
U = rUA*UEqn.H();
phi = (fvc::interpolate(U) & mesh.Sf())
phi = (fvc::interpolate(U) & mesh.Sf())
+ fvc::ddtPhiCorr(rUA, U, phi);
adjustPhi(phi, U, p);


@ -71,7 +71,7 @@ class solidWallMixedTemperatureCoupledFvPatchScalarField
//- Name of field on the neighbour region
const word neighbourFieldName_;
//- Name of thermal conductivity field
const word KName_;


@ -85,6 +85,6 @@
momentumPredictor = false;
# include "UEqn.H"
momentumPredictor = momentumPredictorSave;
rAU = 1.0/UEqn.A();
}


@ -85,6 +85,6 @@
momentumPredictor = false;
# include "UEqn.H"
momentumPredictor = momentumPredictorSave;
rAU = 1.0/UEqn.A();
}


@ -1,3 +1,3 @@
icoFoam.C
EXE = $(FOAM_APPBIN)/icoFoam


@ -1,3 +1,3 @@
nonNewtonianIcoFoam.C
EXE = $(FOAM_APPBIN)/nonNewtonianIcoFoam


@ -1,3 +1,3 @@
simpleFoam.C
EXE = $(FOAM_APPBIN)/simpleFoam


@ -62,7 +62,7 @@ int main(int argc, char *argv[])
# include "solverScalarTransportFoam.H"
multiRun++;
// * * * * * * * * * * * * * * * * icoFoam2 * * * * * * * * * * * * * * * * //
Info << "*** Switching to icoFoam2 ***\n" << endl;


@ -31,7 +31,7 @@
volScalarField rUA = 1.0/UEqn.A();
U = rUA*UEqn.H();
phi = (fvc::interpolate(U) & mesh.Sf())
+ fvc::ddtPhiCorr(rUA, U, phi);
adjustPhi(phi, U, p);


@ -3,12 +3,12 @@
dimensionedScalar totalMass = fvc::domainIntegrate(rho);
scalar sumLocalContErr =
(
fvc::domainIntegrate(mag(rho - thermoRho))/totalMass
).value();
scalar globalContErr =
(
fvc::domainIntegrate(rho - thermoRho)/totalMass
).value();


@ -47,7 +47,7 @@ if (adjustTimeStep)
maxDeltaT
)
);
Info<< "deltaT = " << runTime.deltaT().value() << endl;
}


@ -63,7 +63,7 @@ Ostream& operator<<
)
{
os << tp.theta0_ << token::SPACE
<< tp.uTheta_ << token::SPACE
<< tp.thetaA_ << token::SPACE
<< tp.thetaR_;


@ -166,13 +166,13 @@ Foam::tmp<Foam::surfaceScalarField> Foam::multiphaseMixture::muf() const
{
PtrDictionary<phase>::const_iterator iter = phases_.begin();
tmp<surfaceScalarField> tmuf =
fvc::interpolate(iter().limitedAlpha())*iter().rho()*
fvc::interpolate(iter().nu());
for(++iter; iter != phases_.end(); ++iter)
{
tmuf() +=
fvc::interpolate(iter().limitedAlpha())*iter().rho()*
fvc::interpolate(iter().nu());
}


@ -28,7 +28,7 @@ License
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::phase::phase
(
const word& name,
const dictionary& phaseDict,


@ -3,9 +3,9 @@
// Creates the porosity field for MULES
volScalarField porosity
(
IOobject
(
"porosity",
runTime.timeName(),
mesh,
IOobject::NO_READ,
@ -19,14 +19,14 @@
forAll( pZones, zoneI )
{
const label & zoneId( pZones[zoneI].zoneId() );
const labelList & cells(mesh.cellZones()[zoneId]);
const scalar & zonePorosity( pZones[zoneI].porosity() );
forAll( cells, cellI )
{
porosity[cells[cellI]] = zonePorosity;
}
}


@ -1,5 +1,5 @@
{
mul = muc +
plasticViscosity
(
plasticViscosityCoeff,


@ -26,7 +26,7 @@ if(turbulence)
+ fvm::div(phi, epsilon)
- fvm::laplacian
(
alphaEps*mut + mul, epsilon,
"laplacian(DepsilonEff,epsilon)"
)
==


@ -6,7 +6,7 @@ EXE_INC = \
-IinterfacialModels/lnInclude \
-IphaseModel/lnInclude \
-Iaveraging
EXE_LIBS = \
-lEulerianInterfacialModels \
-lfiniteVolume \


@ -56,7 +56,7 @@ fvVectorMatrix UbEqn(Ub, Ub.dimensions()*dimVol/dimTime);
-fvc::interpolate(nuEffb)*mesh.magSf()*fvc::snGrad(beta)
/fvc::interpolate(beta + scalar(0.001));
UbEqn =
(
(scalar(1) + Cvm*rhob*alpha/rhob)*
(


@ -83,7 +83,7 @@ Foam::tmp<Foam::volScalarField> Foam::GidaspowErgunWenYu::K
Cds[celli] = 0.44;
}
}
// Wen and Yu (1966)
tmp<volScalarField> tKWenYu = 0.75*Cds*phaseb_.rho()*Ur*bp/phasea_.d();
volScalarField& KWenYu = tKWenYu();


@ -41,7 +41,7 @@ Foam::autoPtr<Foam::dragModel> Foam::dragModel::New
interfaceDict.lookup("dragModel" + phasea.name())
);
Info << "Selecting dragModel for phase "
<< phasea.name()
<< ": "
<< dragModelType << endl;


@ -75,7 +75,7 @@ Foam::tmp<Foam::volScalarField> Foam::GidaspowConductivity::kappa
(
2.0*sqr(alpha)*g0*(1.0 + e)/sqrtPi
+ (9.0/8.0)*sqrtPi*g0*0.5*(1.0 + e)*sqr(alpha)
+ (15.0/16.0)*sqrtPi*alpha
+ (25.0/64.0)*sqrtPi/((1.0 + e)*g0)
);
}


@ -76,7 +76,7 @@ Foam::tmp<Foam::volScalarField> Foam::HrenyaSinclairConductivity::kappa
{
const scalar sqrtPi = sqrt(mathematicalConstant::pi);
volScalarField lamda =
scalar(1) + da/(6.0*sqrt(2.0)*(alpha + scalar(1.0e-5)))/L_;
return rhoa*da*sqrt(Theta)*


@ -35,9 +35,9 @@ Foam::autoPtr<Foam::conductivityModel> Foam::conductivityModel::New
{
word conductivityModelType(dict.lookup("conductivityModel"));
Info<< "Selecting conductivityModel "
<< conductivityModelType << endl;
dictionaryConstructorTable::iterator cstrIter =
dictionaryConstructorTablePtr_->find(conductivityModelType);


@ -73,7 +73,7 @@ frictionalPressure
) const
{
return
Fr*pow(max(alpha - alphaMinFriction, scalar(0)), eta)
/pow(max(alphaMax - alpha, scalar(5.0e-2)), p);
}
@ -104,9 +104,9 @@ Foam::tmp<Foam::volScalarField> Foam::JohnsonJacksonFrictionalStress::muf
const dimensionedScalar& alphaMax,
const volScalarField& pf,
const volTensorField& D,
const dimensionedScalar& phi
) const
{
return dimensionedScalar("0.5", dimTime, 0.5)*pf*sin(phi);
}


@ -35,9 +35,9 @@ Foam::autoPtr<Foam::frictionalStressModel> Foam::frictionalStressModel::New
{
word frictionalStressModelType(dict.lookup("frictionalStressModel"));
Info<< "Selecting frictionalStressModel "
<< frictionalStressModelType << endl;
dictionaryConstructorTable::iterator cstrIter =
dictionaryConstructorTablePtr_->find(frictionalStressModelType);


@ -35,9 +35,9 @@ Foam::autoPtr<Foam::granularPressureModel> Foam::granularPressureModel::New
{
word granularPressureModelType(dict.lookup("granularPressureModel"));
Info<< "Selecting granularPressureModel "
<< granularPressureModelType << endl;
dictionaryConstructorTable::iterator cstrIter =
dictionaryConstructorTablePtr_->find(granularPressureModelType);


@ -65,7 +65,7 @@ Foam::tmp<Foam::volScalarField> Foam::CarnahanStarlingRadial::g0
) const
{
return
1.0/(1.0 - alpha)
+ 3.0*alpha/(2.0*sqr(1.0 - alpha))
+ sqr(alpha)/(2.0*pow(1.0 - alpha, 3));
@ -78,8 +78,8 @@ Foam::tmp<Foam::volScalarField> Foam::CarnahanStarlingRadial::g0prime
const dimensionedScalar& alphaMax
) const
{
return
- alpha/sqr(1.0 - alpha)
+ (3.0*(1.0 - alpha) + 6.0*sqr(alpha))/(2.0*(1.0 - alpha))
+ (2.0*alpha*(1.0 - alpha) + 3.0*pow(alpha, 3))
/(2.0*pow(1.0 - alpha, 4));


@ -74,7 +74,7 @@ Foam::tmp<Foam::volScalarField> Foam::GidaspowRadial::g0prime
const dimensionedScalar& alphaMax
) const
{
return
(-1.0/5.0)*pow(alpha/alphaMax, -2.0/3.0)
/(alphaMax*sqr(1.0 - pow(alpha/alphaMax, 1.0/3.0)));
}


@ -74,7 +74,7 @@ Foam::tmp<Foam::volScalarField> Foam::SinclairJacksonRadial::g0prime
const dimensionedScalar& alphaMax
) const
{
return
(-1.0/3.0)*pow(alpha/alphaMax, -2.0/3.0)
/(alphaMax*sqr(1.0 - pow(alpha/alphaMax, 1.0/3.0)));
}


@ -35,9 +35,9 @@ Foam::autoPtr<Foam::radialModel> Foam::radialModel::New
{
word radialModelType(dict.lookup("radialModel"));
Info<< "Selecting radialModel "
<< radialModelType << endl;
dictionaryConstructorTable::iterator cstrIter =
dictionaryConstructorTablePtr_->find(radialModelType);


@ -42,7 +42,7 @@
phib = (fvc::interpolate(Ub) & mesh.Sf()) + fvc::ddtPhiCorr(rUbA, Ub, phib)
+ phiDragb;
phi = alphaf*phia + betaf*phib;
surfaceScalarField Dp("(rho*(1|A(U)))", alphaf*rUaAf/rhoa + betaf*rUbAf/rhob);


@ -14,12 +14,12 @@
// Initializing neighbouring cells contribution
scalar neighboursEx = 0.0;
forAll (neighbourCell, cellj)
{
labelList neighboursNeighbour = neighbour[neighbourCell[cellj]];
scalar neighboursNeighbourCellVolumes = 0.0;
forAll (neighboursNeighbour, cellk)
{
neighboursNeighbourCellVolumes +=


@ -16,7 +16,7 @@ $(rheologyLaws)/PronyViscoelastic/PronyViscoelastic.C
thermalModel/thermalModel.C
thermalLaws = thermalModel/thermalLaws
$(thermalLaws)/thermalLaw/thermalLaw.C
$(thermalLaws)/thermalLaw/newThermalLaw.C
$(thermalLaws)/constantThermal/constantThermal.C
$(thermalLaws)/multiMaterialThermal/multiMaterialThermal.C


@ -71,7 +71,7 @@ public:
DugdaleCohesiveLaw
(
const word& cohesiveLawName,
const dictionary& dict
);
//- Construct as copy


@ -74,7 +74,7 @@ Foam::autoPtr<Foam::cohesiveLaw> Foam::cohesiveLaw::New
Foam::cohesiveLaw::cohesiveLaw
(
const word& cohesiveLawName,
const dictionary& dict
)
:
cohesiveLawCoeffs_(dict.subDict(cohesiveLawName + "Coeffs")),


@ -97,7 +97,7 @@ public:
static autoPtr<cohesiveLaw> New
(
const word& cohesiveLawName,
const dictionary& dict
);
@ -107,7 +107,7 @@ public:
cohesiveLaw
(
const word& cohesiveLawName,
const dictionary& dict
);
//- Construct as copy
@ -124,7 +124,7 @@ public:
// Member Functions
//- Return cohesive law coefficients
const dictionary& cohesiveLawCoeffs() const
{


@ -71,7 +71,7 @@ public:
linearCohesiveLaw
(
const word& cohesiveLawName,
const dictionary& dict
);
//- Construct as copy


@ -171,7 +171,7 @@ public:
{
checkPatchFace(mesh);
}
//- Construct from dictionary
componentReference


@ -245,7 +245,7 @@ void cohesiveLawFvPatchVectorField::write(Ostream& os) const
{
fvPatchVectorField::write(os);
traction_.writeEntry("traction", os);
os.writeKeyword("cohesiveLaw") << law().type()
<< token::END_STATEMENT << nl;
os.writeKeyword("relaxationFactor") << relaxationFactor_
<< token::END_STATEMENT << nl;


@ -99,7 +99,7 @@ cohesiveZoneFvPatchVectorField::cohesiveZoneFvPatchVectorField
{
this->refValue() = vector::zero;
}
if (dict.found("refGradient"))
{
this->refGrad() = vectorField("refGradient", dict, p.size());
@ -111,7 +111,7 @@ cohesiveZoneFvPatchVectorField::cohesiveZoneFvPatchVectorField
if (dict.found("valueFraction"))
{
this->valueFraction() =
symmTensorField("valueFraction", dict, p.size());
}
else
@ -206,7 +206,7 @@ void cohesiveZoneFvPatchVectorField::updateCoeffs()
const rheologyModel& rheology =
this->db().objectRegistry::lookupObject<rheologyModel>(rheologyName_);
const scalarField mu =
rheology.mu()().boundaryField()[patch().index()];
const scalarField lambda =
@ -276,17 +276,17 @@ void cohesiveZoneFvPatchVectorField::updateCoeffs()
if(magSqr(valueFraction()[faceI]) < SMALL)
{
cohesiveTraction =
relaxationFactor_*cohesiveTraction
+ (1.0 - relaxationFactor_)*sigmaN[faceI]*n[faceI];
refGrad()[faceI] =
(
cohesiveTraction
- (
n[faceI]
& (
mu[faceI]*gradU[faceI].T()
- (mu[faceI] + lambda[faceI])*gradU[faceI]
)
)
@ -306,7 +306,7 @@ void cohesiveZoneFvPatchVectorField::write(Ostream& os) const
directionMixedFvPatchVectorField::write(os);
os.writeKeyword("U") << UName_ << token::END_STATEMENT << nl;
os.writeKeyword("rheology") << rheologyName_ << token::END_STATEMENT << nl;
os.writeKeyword("cohesiveLaw") << law().type()
<< token::END_STATEMENT << nl;
os.writeKeyword("relaxationFactor") << relaxationFactor_
<< token::END_STATEMENT << nl;


@ -174,7 +174,7 @@ public:
//- Update the coefficients associated with the patch field
virtual void updateCoeffs();
//- Write
virtual void write(Ostream&) const;


@ -107,7 +107,7 @@ Foam::tmp<Foam::volScalarField> Foam::BurgersViscoelastic::E(scalar t) const
+ eta2_.value()/k2_.value();
scalar p2 = eta1_.value()*eta2_.value()/(k1_.value()*k2_.value());
scalar q1 = eta1_.value();
scalar q2 = eta1_.value()*eta2_.value()/k2_.value();
@ -120,7 +120,7 @@ Foam::tmp<Foam::volScalarField> Foam::BurgersViscoelastic::E(scalar t) const
E = (q1 - q2*r1)*exp(-r1*t)/A - (q1 - q2*r2)*exp(-r2*t)/A;
}
tmp<volScalarField> tresult
(
@ -178,7 +178,7 @@ Foam::tmp<Foam::volScalarField> Foam::BurgersViscoelastic::J(scalar t) const
if(t >= 0)
{
J = 1.0/k1_.value()
+ (1 - exp(-k2_.value()*t/eta2_.value()))/k2_.value()
+ t/eta1_.value();
}


@ -102,14 +102,14 @@ Foam::tmp<Foam::volScalarField> Foam::KelvinSLSViscoelastic::E(scalar t) const
if(t>=0)
{
scalar p1 = eta2_.value()/(k1_.value() + k2_.value());
scalar q0 = k1_.value()*k2_.value()/(k1_.value() + k2_.value());
scalar q1 = k1_.value()*eta2_.value()/(k1_.value() + k2_.value());
E = q0 + (q1/p1 - q0)*exp(-t/p1);
}
tmp<volScalarField> tresult
(
@ -168,7 +168,7 @@ Foam::tmp<Foam::volScalarField> Foam::KelvinSLSViscoelastic::J(scalar t) const
if(t >= 0)
{
scalar p1 = eta2_.value()/(k1_.value() + k2_.value());
scalar q0 = k1_.value()*k2_.value()/(k1_.value() + k2_.value());
scalar q1 = k1_.value()*eta2_.value()/(k1_.value() + k2_.value());


@ -183,8 +183,8 @@ Foam::MaxwellElasticViscoelastic::J(scalar t) const
mesh(),
dimensionedScalar
(
"J",
dimless/k_.dimensions(),
1.0/k_.value() + t/eta_.value()
),
zeroGradientFvPatchScalarField::typeName


@ -101,7 +101,7 @@ Foam::tmp<Foam::volScalarField> Foam::MaxwellSLSViscoelastic::E(scalar t) const
{
E = k2_.value() + k1_.value()*exp(-k1_.value()*t/eta1_.value());
}
tmp<volScalarField> tresult
(


@ -169,8 +169,8 @@ Foam::tmp<Foam::volScalarField> Foam::MaxwellViscoelastic::J(scalar t) const
mesh(),
dimensionedScalar
(
"J",
dimless/k_.dimensions(),
1.0/k_.value() + t/eta_.value()
),
zeroGradientFvPatchScalarField::typeName


@ -105,7 +105,7 @@ Foam::tmp<Foam::volScalarField> Foam::PronyViscoelastic::E(scalar t) const
{
E += k_[i]*exp(-t/tau_[i]);
}
if(t < 0)
{
E = 0;
@ -162,7 +162,7 @@ Foam::tmp<Foam::volScalarField> Foam::PronyViscoelastic::nu(scalar t) const
Foam::tmp<Foam::volScalarField> Foam::PronyViscoelastic::J(scalar t) const
{
notImplemented(type() + "::J(scalar t)");
return 1.0/E(t);
}


@ -104,7 +104,7 @@ Foam::contactPatchPair::contactPatchPair
cp.mesh().boundaryMesh()[slavePatch_.index()], // to patch
intersection::algorithmNames_.read(dict.lookup("projectionAlgo")),
intersection::directionNames_.read(dict.lookup("projectionDir"))
),
slaveToMasterInterpolate_
(
@ -112,7 +112,7 @@ Foam::contactPatchPair::contactPatchPair
cp.mesh().boundaryMesh()[masterPatch_.index()], // to patch
intersection::algorithmNames_.read(dict.lookup("projectionAlgo")),
intersection::directionNames_.read(dict.lookup("projectionDir"))
)
{}


@ -27,7 +27,7 @@ Class
Description
A pair of surfaces in contact.
SourceFiles
contactPatchPair.C


@ -241,7 +241,7 @@ void contactProblem::correct()
(
lambdaPatches[patchI]*tr(gradUpatches[patchI])
)
)/(2.0*muPatches[patchI] + lambdaPatches[patchI]);
// Set the value fractions


@ -1,47 +1,47 @@
if(divDSigmaExpMethod == "standard")
{
divDSigmaExp = fvc::div
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU =
((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*(
- (muf + lambdaf)*(fvc::snGrad(DU)&(I - n*n))
+ lambdaf*tr(shearGradDU&(I - n*n))*n
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU = ((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*
(
- (muf + lambdaf)*(fvc::snGrad(DU)&(I - n*n))
+ lambdaf*tr(shearGradDU&(I - n*n))*n
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
- fvc::laplacian(mu + lambda, DU, "laplacian(DDU,DU)")
+ fvc::div
(
mu*gradDU.T()
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << endl;
}
(
mu*gradDU.T()
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << endl;
}


@ -23,138 +23,131 @@ philipc
//- this is only needed in a parallel runs
if(Pstream::parRun())
{
{
//***** FIX INCORRECT POINT ON PATCHES WITH FACEZONE *****//
contactPatchPairList& contacts = contact;
forAll(contacts, contactI)
{
label masterID = contacts[contactI].masterPatch().index();
label slaveID = contacts[contactI].slavePatch().index();
primitivePatchInterpolation masterInterpolator
(mesh.boundaryMesh()[masterID]);
primitivePatchInterpolation slaveInterpolator
(mesh.boundaryMesh()[slaveID]);
{
label masterID = contacts[contactI].masterPatch().index();
label slaveID = contacts[contactI].slavePatch().index();
//- U must be interpolated to the vertices, this ignores the faceZone
//- points with no U (unlike volPointInterpolation)
vectorField correctMasterPointU =
masterInterpolator.faceToPointInterpolate<vector>
(
U.boundaryField()[masterID]
);
vectorField correctSlavePointU =
slaveInterpolator.faceToPointInterpolate<vector>
(
U.boundaryField()[slaveID]
);
vectorField oldMasterPoints =
mesh.boundaryMesh()[masterID].localPoints();
vectorField oldSlavePoints =
mesh.boundaryMesh()[slaveID].localPoints();
labelList masterPointLabels =
mesh.boundaryMesh()[masterID].meshPoints();
labelList slavePointLabels =
mesh.boundaryMesh()[slaveID].meshPoints();
//- correct the patch newPoints
forAll(masterPointLabels, pointI)
{
label pointGlobalLabel = masterPointLabels[pointI];
newPoints[pointGlobalLabel] =
oldMasterPoints[pointI]
+
correctMasterPointU[pointI];
}
forAll(slavePointLabels, pointI)
{
label pointGlobalLabel = slavePointLabels[pointI];
newPoints[pointGlobalLabel] =
oldSlavePoints[pointI]
+
correctSlavePointU[pointI];
}
}
primitivePatchInterpolation masterInterpolator
(
mesh.boundaryMesh()[masterID]
);
primitivePatchInterpolation slaveInterpolator
(
mesh.boundaryMesh()[slaveID]
);
//- U must be interpolated to the vertices, this ignores the faceZone
//- points with no U (unlike volPointInterpolation)
vectorField correctMasterPointU =
masterInterpolator.faceToPointInterpolate<vector>
(
U.boundaryField()[masterID]
);
vectorField correctSlavePointU =
slaveInterpolator.faceToPointInterpolate<vector>
(
U.boundaryField()[slaveID]
);
vectorField oldMasterPoints =
mesh.boundaryMesh()[masterID].localPoints();
vectorField oldSlavePoints =
mesh.boundaryMesh()[slaveID].localPoints();
labelList masterPointLabels =
mesh.boundaryMesh()[masterID].meshPoints();
labelList slavePointLabels =
mesh.boundaryMesh()[slaveID].meshPoints();
//- correct the patch newPoints
forAll(masterPointLabels, pointI)
{
label pointGlobalLabel = masterPointLabels[pointI];
newPoints[pointGlobalLabel] =
oldMasterPoints[pointI] + correctMasterPointU[pointI];
}
forAll(slavePointLabels, pointI)
{
label pointGlobalLabel = slavePointLabels[pointI];
newPoints[pointGlobalLabel] =
oldSlavePoints[pointI] + correctSlavePointU[pointI];
}
}
//***** NOW FIX AND SYNCHRONISE ALL THE FACEZONE POINTS *****//
forAll(mesh.faceZones(), faceZoneI)
{
//- find the patch corresponding to this faceZone
//- assuming that the FZ is called <patch_name>FaceZone
string faceZoneName = mesh.faceZones().names()[faceZoneI];
//- remove the string FaceZone from the end of the face zone name to get the patch name
string patchName = faceZoneName.substr(0, (faceZoneName.size()-8));
label patchID = mesh.boundaryMesh().findPatchID(patchName);
if(patchID == -1)
{
FatalError << "Patch " << patchName << " not found corresponding for faceZone"
<< faceZoneName << exit(FatalError);
}
vectorField globalFZpoints =
mesh.faceZones()[faceZoneI]().localPoints();
//- new points for the face zone
vectorField globalFZnewPoints(globalFZpoints.size(), vector::zero);
//- inter-proc points are shared by multiple procs
//- pointNumProc is the number of procs which a point lies on
scalarField pointNumProcs(globalFZpoints.size(), 0.0);
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
//if(localPoint < mesh.boundaryMesh()[patchID].localPoints().size())
if(pointOnLocalProcPatch[faceZoneI][localPoint])
{
label procPoint =
mesh.faceZones()[faceZoneI]().meshPoints()[localPoint];
globalFZnewPoints[globalPointI] =
newPoints[procPoint];
pointNumProcs[globalPointI] = 1;
}
}
reduce(globalFZnewPoints, sumOp<vectorField>());
reduce(pointNumProcs, sumOp<scalarField>());
//- now average the newPoints between all procs
if(min(pointNumProcs) < 1)
{
FatalError << "pointNumProc has not been set for all points" << exit(FatalError);
}
globalFZnewPoints /= pointNumProcs;
//- the globalFZnewPoints now contains the correct FZ new points in
//- a global order, now convert them back into the local proc order
vectorField procFZnewPoints(globalFZpoints.size(), vector::zero);
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
procFZnewPoints[localPoint] =
globalFZnewPoints[globalPointI];
}
//- now fix the newPoints points on the globalFaceZones
labelList procFZmeshPoints =
mesh.faceZones()[faceZoneI]().meshPoints();
forAll(procFZmeshPoints, pointI)
{
label procPoint = procFZmeshPoints[pointI];
newPoints[procPoint] =
procFZnewPoints[pointI];
}
}
}
{
//- find the patch corresponding to this faceZone
//- assuming that the FZ is called <patch_name>FaceZone
string faceZoneName = mesh.faceZones().names()[faceZoneI];
//- remove the string FaceZone from the end of the face zone name to get the patch name
string patchName = faceZoneName.substr(0, (faceZoneName.size()-8));
label patchID = mesh.boundaryMesh().findPatchID(patchName);
if(patchID == -1)
{
FatalError << "Patch " << patchName << " not found corresponding for faceZone"
<< faceZoneName << exit(FatalError);
}
vectorField globalFZpoints =
mesh.faceZones()[faceZoneI]().localPoints();
//- new points for the face zone
vectorField globalFZnewPoints(globalFZpoints.size(), vector::zero);
//- inter-proc points are shared by multiple procs
//- pointNumProc is the number of procs which a point lies on
scalarField pointNumProcs(globalFZpoints.size(), 0.0);
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
//if(localPoint < mesh.boundaryMesh()[patchID].localPoints().size())
if(pointOnLocalProcPatch[faceZoneI][localPoint])
{
label procPoint =
mesh.faceZones()[faceZoneI]().meshPoints()[localPoint];
globalFZnewPoints[globalPointI] = newPoints[procPoint];
pointNumProcs[globalPointI] = 1;
}
}
reduce(globalFZnewPoints, sumOp<vectorField>());
reduce(pointNumProcs, sumOp<scalarField>());
//- now average the newPoints between all procs
if(min(pointNumProcs) < 1)
{
FatalError << "pointNumProc has not been set for all points" << exit(FatalError);
}
globalFZnewPoints /= pointNumProcs;
//- the globalFZnewPoints now contains the correct FZ new points in
//- a global order, now convert them back into the local proc order
vectorField procFZnewPoints(globalFZpoints.size(), vector::zero);
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
procFZnewPoints[localPoint] = globalFZnewPoints[globalPointI];
}
//- now fix the newPoints points on the globalFaceZones
labelList procFZmeshPoints = mesh.faceZones()[faceZoneI]().meshPoints();
forAll(procFZmeshPoints, pointI)
{
label procPoint = procFZmeshPoints[pointI];
newPoints[procPoint] = procFZnewPoints[pointI];
}
}
}
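Taken together, the loop above gives every shared face-zone point a single, processor-independent position: each processor writes its own newPoints value into the global slot only where it actually holds the point on the matching patch, the slots are summed across processors with reduce(..., sumOp<...>()), and each sum is divided by the number of contributing processors,

\[
\mathbf{x}_i \;\leftarrow\; \frac{1}{N_i}\sum_{p \in P_i} \mathbf{x}_i^{(p)},
\qquad N_i = |P_i| \ge 1,
\]

where P_i is the set of processors whose patch contains global face-zone point i; the FatalError above fires if any point ends up with N_i = 0.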

View file

@ -25,7 +25,7 @@
IOobject::AUTO_WRITE
),
mesh,
dimensionedVector("zero", dimLength, vector::zero)
dimensionedVector("zero", dimLength, vector::zero)
);
volSymmTensorField DEpsilon
@ -84,22 +84,22 @@
dimensionedSymmTensor("zero", dimForce/dimArea, symmTensor::zero)
);
volVectorField divDSigmaExp
(
volVectorField divDSigmaExp
(
IOobject
(
"divDSigmaExp",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
"divDSigmaExp",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
mesh,
dimensionedVector("zero", dimensionSet(1,-2,-2,0,0,0,0), vector::zero)
);
);
// read rheology properties
// read rheology properties
rheologyModel rheology(sigma);
volScalarField rho = rheology.rho();
@ -111,5 +111,5 @@
surfaceVectorField n = mesh.Sf()/mesh.magSf();
//- create contact problem
contactProblem contact(DU);
//- create contact problem
contactProblem contact(DU);

View file

@ -19,117 +19,120 @@ philipc
//- these are read if present to allow restarting of contact cases
IOList<labelList> procToGlobalFZmap
(
IOobject
(
"procToGlobalFZmap",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
IOobject
(
"procToGlobalFZmap",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
IOList<labelList> pointOnLocalProcPatch
(
IOobject
(
"pointOnLocalProcPatch",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
IOobject
(
"pointOnLocalProcPatch",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
//- if they have been read then don't recalculate it
bool globalFaceZoneMappingSet = false;
if(gMax(procToGlobalFZmap[0]) > 0 && gMax(pointOnLocalProcPatch[0]) > 0)
{
{
Info << "Reading procToGlobalFZmap and pointOnLocalProcPatch allowing restart of contact cases"
<< endl;
<< endl;
globalFaceZoneMappingSet = true;
}
else
{
Info << "procToGlobalFZmap and pointOnLocalProcPatch will be calculated as they have not been found" << nl
<< "this message should only appear when starting a new analysis" << endl;
}
}
else
{
Info << "procToGlobalFZmap and pointOnLocalProcPatch will be calculated as they have not been found" << nl
<< "this message should only appear when starting a new analysis" << endl;
}
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
if(!globalFaceZoneMappingSet)
{
forAll(mesh.faceZones(), faceZoneI)
{
vectorField globalFZpoints = mesh.faceZones()[faceZoneI]().localPoints();
procToGlobalFZmap[faceZoneI].setSize(globalFZpoints.size(), 0);
//- set all slave points to zero because only the master order is used
if(!Pstream::master())
globalFZpoints *= 0.0;
//- pass points to all procs
reduce(globalFZpoints, sumOp<vectorField>());
//- now every proc has the master's list of FZ points
//- every proc must now find the mapping from their local FZpoints to
//- the globalFZpoints
vectorField procFZpoints = mesh.faceZones()[faceZoneI]().localPoints();
forAll(globalFZpoints, globalPointI)
{
forAll(procFZpoints, procPointI)
{
if(procFZpoints[procPointI] == globalFZpoints[globalPointI])
{
procToGlobalFZmap[faceZoneI][globalPointI] = procPointI;
break;
}
}
}
//- procToGlobalFZmap now contains the local FZpoint label for each
//- global FZ point label - for each faceZone
//- check what points are on the current proc patch
pointOnLocalProcPatch[faceZoneI].setSize(globalFZpoints.size(), 0);
//- find corresponding patch
string faceZoneName = mesh.faceZones().names()[faceZoneI];
//- remove the string FaceZone from the end of the face zone name to get the patch name
string patchName = faceZoneName.substr(0, (faceZoneName.size()-8));
label patchID = mesh.boundaryMesh().findPatchID(patchName);
if(patchID == -1)
{
FatalError << "Patch " << patchName << " not found corresponding to faceZone "
<< faceZoneName << exit(FatalError);
}
forAll(mesh.faceZones()[faceZoneI]().localPoints(), fzpi)
{
forAll(mesh.boundaryMesh()[patchID].localPoints(), pi)
{
if(mesh.faceZones()[faceZoneI]().localPoints()[fzpi] == mesh.boundaryMesh()[patchID].localPoints()[pi])
{
pointOnLocalProcPatch[faceZoneI][fzpi] = 1;
break;
}
}
}
}
} //- end if(!globalFaceZoneMappingSet)
}
{
forAll(mesh.faceZones(), faceZoneI)
{
vectorField globalFZpoints = mesh.faceZones()[faceZoneI]().localPoints();
procToGlobalFZmap[faceZoneI].setSize(globalFZpoints.size(), 0);
//- set all slave points to zero because only the master order is used
if(!Pstream::master())
{
globalFZpoints *= 0.0;
}
//- pass points to all procs
reduce(globalFZpoints, sumOp<vectorField>());
//- now every proc has the master's list of FZ points
//- every proc must now find the mapping from their local FZpoints to
//- the globalFZpoints
vectorField procFZpoints = mesh.faceZones()[faceZoneI]().localPoints();
forAll(globalFZpoints, globalPointI)
{
forAll(procFZpoints, procPointI)
{
if(procFZpoints[procPointI] == globalFZpoints[globalPointI])
{
procToGlobalFZmap[faceZoneI][globalPointI] = procPointI;
break;
}
}
}
//- procToGlobalFZmap now contains the local FZpoint label for each
//- global FZ point label - for each faceZone
//- check what points are on the current proc patch
pointOnLocalProcPatch[faceZoneI].setSize(globalFZpoints.size(), 0);
//- find corresponding patch
string faceZoneName = mesh.faceZones().names()[faceZoneI];
//- remove the string FaceZone from the end of the face zone name to get the patch name
string patchName = faceZoneName.substr(0, (faceZoneName.size()-8));
label patchID = mesh.boundaryMesh().findPatchID(patchName);
if(patchID == -1)
{
FatalError << "Patch " << patchName << " not found corresponding to faceZone "
<< faceZoneName << exit(FatalError);
}
forAll(mesh.faceZones()[faceZoneI]().localPoints(), fzpi)
{
forAll(mesh.boundaryMesh()[patchID].localPoints(), pi)
{
if(mesh.faceZones()[faceZoneI]().localPoints()[fzpi] == mesh.boundaryMesh()[patchID].localPoints()[pi])
{
pointOnLocalProcPatch[faceZoneI][fzpi] = 1;
break;
}
}
}
}
} //- end if(!globalFaceZoneMappingSet)
}
//- write to disk to allow restart of cases
//- because it is not possible to calculate the
//- because it is not possible to calculate the
//- mapping after the meshes have moved
if(!globalFaceZoneMappingSet && Pstream::parRun())
{
{
procToGlobalFZmap.write();
pointOnLocalProcPatch.write();
}
}
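In short, for each face zone z the two stored lists encode the invariant (written here in the notation of the code above):

\[
\texttt{procToGlobalFZmap}[z][g] = l \quad\Longleftrightarrow\quad \mathbf{x}^{\mathrm{proc}}_{l} = \mathbf{x}^{\mathrm{master}}_{g},
\]

while pointOnLocalProcPatch[z][l] is 1 exactly when local face-zone point l also lies on this processor's matching <patch_name> patch. That is precisely what correctGlobalFaceZoneMesh.H needs to gather and scatter the shared points, and writing both lists to disk keeps the mapping usable for restarts after the mesh has moved.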

View file

@ -4,22 +4,22 @@ solidInterface* solidInterfacePtr(NULL);
{
const dictionary& stressControl =
mesh.solutionDict().subDict("stressedFoam");
mesh.solutionDict().subDict("stressedFoam");
solidInterfaceCorr = Switch(stressControl.lookup("solidInterface"));
if(solidInterfaceCorr)
{
Info << "Creating solid interface correction" << endl;
solidInterfacePtr = new solidInterface(mesh, rheology);
solidInterfacePtr->modifyProperties(muf, lambdaf);
gradDU = solidInterfacePtr->grad(DU);
//- solidInterface needs muf and lambdaf to be used for divSigmaExp
if(divDSigmaExpMethod != "surface" && divDSigmaExpMethod != "decompose")
{
FatalError << "divDSigmaExp must be decompose or surface when solidInterface is on"
<< exit(FatalError);
}
}
{
Info << "Creating solid interface correction" << endl;
solidInterfacePtr = new solidInterface(mesh, rheology);
solidInterfacePtr->modifyProperties(muf, lambdaf);
gradDU = solidInterfacePtr->grad(DU);
//- solidInterface needs muf and lambdaf to be used for divDSigmaExp
if(divDSigmaExpMethod != "surface" && divDSigmaExpMethod != "decompose")
{
FatalError << "divDSigmaExp must be decompose or surface when solidInterface is on"
<< exit(FatalError);
}
}
}
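Both the solidInterface Switch and the divDSigmaExp method word (read from readDivDSigmaExpMethod.H) come from the stressedFoam sub-dictionary of the solution dictionary (fvSolution). A minimal sketch of the entries involved; only the keywords visible in this diff are shown, and the remaining controls read by readStressedFoamControls.H are omitted because they are not part of this excerpt:

    stressedFoam
    {
        // explicit stress divergence treatment: standard, surface, decompose or laplacian
        divDSigmaExp    surface;

        // solid interface correction; when on, divDSigmaExp must be surface or decompose
        solidInterface  yes;
    }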

View file

@ -61,35 +61,35 @@ Author
int main(int argc, char *argv[])
{
# include "setRootCase.H"
# include "setRootCase.H"
# include "createTime.H"
# include "createTime.H"
# include "createMesh.H"
# include "createMesh.H"
# include "createFields.H"
# include "createFields.H"
# include "readDivDSigmaExpMethod.H"
# include "readDivDSigmaExpMethod.H"
# include "createGlobalToLocalFaceZonePointMap.H"
# include "createGlobalToLocalFaceZonePointMap.H"
# include "createSolidInterface.H"
# include "createSolidInterface.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info<< "\nStarting time loop\n" << endl;
for (runTime++; !runTime.end(); runTime++)
{
Info<< "Time: " << runTime.timeName() << endl;
# include "readContactControls.H"
# include "readStressedFoamControls.H"
//-- for moving the mesh and then back again
vectorField oldMeshPoints = mesh.allPoints();
int iCorr = 0;
lduMatrix::solverPerformance solverPerf;
word solverName;
@ -101,116 +101,118 @@ int main(int argc, char *argv[])
//- reset DU to zero at the start of the time-step if
//- a predictor is not required
if(!predictor)
DU = dimensionedVector("zero", dimLength, vector::zero);
{
DU = dimensionedVector("zero", dimLength, vector::zero);
}
do //- start of momentum loop
{
DU.storePrevIter();
//- correct the contact boundaries
if(iCorr % uEqnContactCorrFreq == 0)
{
Info << "\t\tCorrecting contact in the momentum loop "
<< "iteration: " << iCorr
<< ", residual: " << residual
<< endl;
//# include "moveMeshLeastSquares.H"
{
DU.storePrevIter();
//- correct the contact boundaries
if(iCorr % uEqnContactCorrFreq == 0)
{
Info << "\t\tCorrecting contact in the momentum loop "
<< "iteration: " << iCorr
<< ", residual: " << residual
<< endl;
//# include "moveMeshLeastSquares.H"
# include "moveSolidMesh.H"
contact.correct();
mesh.movePoints(oldMeshPoints);
}
contact.correct();
mesh.movePoints(oldMeshPoints);
}
# include "calculateDivDSigmaExp.H"
fvVectorMatrix DUEqn
(
fvm::d2dt2(rho, DU)
==
fvm::laplacian(2*mu + lambda, DU, "laplacian(DDU,DU)")
+ divDSigmaExp
);
fvVectorMatrix DUEqn
(
fvm::d2dt2(rho, DU)
==
fvm::laplacian(2*mu + lambda, DU, "laplacian(DDU,DU)")
+ divDSigmaExp
);
if(solidInterfaceCorr)
{
{
solidInterfacePtr->correct(DUEqn);
}
}
solverPerf = DUEqn.solve();
DU.relax();
solverName = solverPerf.solverName();
solverPerf = DUEqn.solve();
DU.relax();
solverName = solverPerf.solverName();
if(solidInterfaceCorr)
{
{
gradDU = solidInterfacePtr->grad(DU);
}
}
else
{
{
gradDU = fvc::grad(DU);
}
U = U.oldTime() + DU;
}
residual = solverPerf.initialResidual();
//****************************************************//
// The contact residual is the initial residual for the
// first iteration of the momentum equation
//****************************************************//
if(iCorr == 0)
{
initialResidual = solverPerf.initialResidual();
}
# include "calculateRelativeResidual.H"
U = U.oldTime() + DU;
Info << "\tTime " << runTime.value()
<< ", Corrector " << iCorr
<< ", Solving for " << DU.name()
<< " using " << solverPerf.solverName()
<< ", residual = " << solverPerf.initialResidual()
<< ", relative residual = " << relativeResidual << endl;
} //- end of momentum loop
residual = solverPerf.initialResidual();
//****************************************************//
// The contact residual is the initial residual for the
// first iteration of the momentum equation
//****************************************************//
if(iCorr == 0)
{
initialResidual = solverPerf.initialResidual();
}
# include "calculateRelativeResidual.H"
Info << "\tTime " << runTime.value()
<< ", Corrector " << iCorr
<< ", Solving for " << DU.name()
<< " using " << solverPerf.solverName()
<< ", residual = " << solverPerf.initialResidual()
<< ", relative residual = " << relativeResidual << endl;
} //- end of momentum loop
while
(
relativeResidual > convergenceTolerance
//residual > convergenceTolerance
&&
++iCorr < nCorr
);
(
relativeResidual > convergenceTolerance
//residual > convergenceTolerance
&&
++iCorr < nCorr
);
// Print out info per contact iteration
Info << "\t\tSolving for " << DU.name()
<< " using " << solverName
<< ", Initial residual = " << initialResidual
<< ", Final residual = " << solverPerf.initialResidual()
<< ", No outer iterations " << iCorr << endl;
<< " using " << solverName
<< ", Initial residual = " << initialResidual
<< ", Final residual = " << solverPerf.initialResidual()
<< ", No outer iterations " << iCorr << endl;
lduMatrix::debug = 1;
# include "calculateDEpsilonDSigma.H"
epsilon += DEpsilon;
sigma += DSigma;
# include "writeFields.H"
//# include "writeBoundaryNetForces.H"
//# include "moveMeshLeastSquares.H"
//# include "moveSolidMesh.H"
//# include "printContactResults.H"
//mesh.movePoints(oldMeshPoints);
Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
<< " ClockTime = " << runTime.elapsedClockTime() << " s"
<< endl << endl;
<< " ClockTime = " << runTime.elapsedClockTime() << " s"
<< endl << endl;
}
Info<< "End\n" << endl;
return(0);
}

View file

@ -2,55 +2,54 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Moving mesh using least squares interpolation" << endl;
leastSquaresVolPointInterpolation pointInterpolation(mesh);
// Create point mesh
pointMesh pMesh(mesh);
wordList types
(
pMesh.boundary().size(),
calculatedFvPatchVectorField::typeName
);
(
pMesh.boundary().size(),
calculatedFvPatchVectorField::typeName
);
pointVectorField pointDU
(
IOobject
(
"pointDU",
runTime.timeName(),
mesh
(
IOobject
(
"pointDU",
runTime.timeName(),
mesh
),
pMesh,
dimensionedVector("zero", dimLength, vector::zero),
types
);
pMesh,
dimensionedVector("zero", dimLength, vector::zero),
types
);
pointInterpolation.interpolate(DU, pointDU);
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
//- Move mesh
vectorField newPoints = mesh.allPoints();
forAll (pointDUI, pointI)
{
{
newPoints[pointI] += pointDUI[pointI];
}
}
twoDPointCorrector twoDCorrector(mesh);
twoDCorrector.correctPoints(newPoints);
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
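The outer guard only moves the mesh while min(J.internalField()) stays positive; J is defined elsewhere in the solver (not part of this diff), but it plays the role of the deformation Jacobian,

\[
J = \det\mathbf{F} > 0,
\]

so the update is refused, and the run stopped with the "Negative Jacobian" error, as soon as the displacement field would invert a cell.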

View file

@ -1,4 +1,4 @@
{
{
//- move mesh for the contact correction
// Create point interpolation
@ -8,20 +8,20 @@
pointVectorField pointU = pointInterpolation.interpolate(U);
const vectorField& pointUI = pointU.internalField();
// Move mesh
vectorField newPoints = mesh.allPoints();
forAll (pointUI, pointI)
{
newPoints[pointI] += pointUI[pointI];
}
# include "correctGlobalFaceZoneMesh.H"
twoDPointCorrector twoDCorrector(mesh);
twoDCorrector.correctPoints(newPoints);
twoDCorrector.correctPoints(newPoints);
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);

View file

@ -1,55 +1,55 @@
if (runTime.outputTime())
{
{
// FAILS IN PARALLEL - FIX
// Info << "Print contact area" << endl;
//volScalarField ca = contact.contactArea();
//ca.write();
//-------------------------------------------------------------//
// I couldn't get tmp to return the pointScalarField correctly //
// so I had to make the pointScalarField here and pass it to //
// contactGapPoints and pointContactForce to populate //
//-------------------------------------------------------------//
//- This is the point distance for each contact vertex
pointScalarField cGapPoints
(
IOobject
(
"pointContactGap",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
pMesh,
dimensionedScalar("scalar", dimLength, 0.0),
"calculated"
);
pointScalarField cGapPoints
(
IOobject
(
"pointContactGap",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
pMesh,
dimensionedScalar("scalar", dimLength, 0.0),
"calculated"
);
contact.contactGapPoints(cGapPoints);
cGapPoints.write();
//- This is the point force for each contact vertex
pointVectorField cPointForce
(
IOobject
(
"pointContactForce",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
pMesh,
dimensionedVector("vector", dimForce, vector::zero),
"calculated"
);
//- This is the point force for each contact vertex
pointVectorField cPointForce
(
IOobject
(
"pointContactForce",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
pMesh,
dimensionedVector("vector", dimForce, vector::zero),
"calculated"
);
contact.contactPointForce(cPointForce);
cPointForce.write();
//- this is the actual ((sigma & n) & n) on the contact patches
//- SHOULD THIS BE A REF TO A TMP...?
volScalarField cPressure = contact.contactPressure();
cPressure.write();
}
}

View file

@ -1,9 +1,15 @@
//- how the explicit component of sigma is to be calculated
word divDSigmaExpMethod(mesh.solutionDict().subDict("stressedFoam").lookup("divDSigmaExp"));
Info << divDSigmaExpMethod << " method chosen for calculation of sigmaExp" << endl;
if(divDSigmaExpMethod != "standard" && divDSigmaExpMethod != "surface" && divDSigmaExpMethod != "decompose" && divDSigmaExpMethod != "laplacian")
{
if
(
divDSigmaExpMethod != "standard"
&& divDSigmaExpMethod != "surface"
&& divDSigmaExpMethod != "decompose"
&& divDSigmaExpMethod != "laplacian"
)
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << nl
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}

View file

@ -6,9 +6,9 @@ Info << nl;
forAll(netForces, patchI)
{
netForces[patchI] = gSum(mesh.Sf().boundaryField()[patchI] & sigma.boundaryField()[patchI]);
Info << "patch\t" << mesh.boundary()[patchI].name() << "\t\tnet force is\t"
<< netForces[patchI] << " N" << endl;
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View file

@ -1,77 +1,78 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
(
"epsilonEq",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
sqrt((2.0/3.0)*magSqr(dev(epsilon)))
);
(
IOobject
(
"epsilonEq",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
sqrt((2.0/3.0)*magSqr(dev(epsilon)))
);
Info<< "Max epsilonEq = " << max(epsilonEq).value()
<< endl;
<< endl;
volScalarField sigmaEq
(
IOobject
(
"sigmaEq",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
sqrt((3.0/2.0)*magSqr(dev(sigma)))
);
(
IOobject
(
"sigmaEq",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
sqrt((3.0/2.0)*magSqr(dev(sigma)))
);
Info<< "Max sigmaEq = " << max(sigmaEq).value()
<< endl;
<< endl;
volScalarField pressure
(
IOobject
(
"pressure",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
(
IOobject
(
"pressure",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
tr(sigma)/3.0
);
tr(sigma)/3.0
);
//- boundary surface pressure
forAll(pressure.boundaryField(), patchi)
{
{
const vectorField& nb = n.boundaryField()[patchi];
pressure.boundaryField()[patchi] =
-(nb & ( nb & sigma.boundaryField()[patchi] ));
}
}
//- contact slave penetration
# include "moveSolidMesh.H"
pointMesh pMesh(mesh);
pointScalarField cGapPoints
(
IOobject
(
"pointContactGap",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
(
IOobject
(
"pointContactGap",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
pMesh,
dimensionedScalar("scalar", dimLength, 0.0),
"calculated"
);
);
contact.contactGapPoints(cGapPoints);
cGapPoints.write();
mesh.movePoints(oldMeshPoints);
mesh.movePoints(oldMeshPoints);
runTime.write();
}
}
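For reference, the derived fields written above are the usual equivalent (von Mises) strain and stress and the mean stress,

\[
\varepsilon_{eq}=\sqrt{\tfrac{2}{3}\,\operatorname{dev}\boldsymbol{\varepsilon}:\operatorname{dev}\boldsymbol{\varepsilon}},
\qquad
\sigma_{eq}=\sqrt{\tfrac{3}{2}\,\operatorname{dev}\boldsymbol{\sigma}:\operatorname{dev}\boldsymbol{\sigma}},
\qquad
p=\tfrac{1}{3}\operatorname{tr}\boldsymbol{\sigma},
\]

with the boundary values of pressure overwritten by the normal traction component -(n & (n & sigma)) on each patch, and the contact gap written after temporarily moving the mesh to the deformed configuration.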

View file

@ -1,47 +1,47 @@
if(divDSigmaExpMethod == "standard")
{
{
divDSigmaExp = fvc::div
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU =
((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*(
- (muf + lambdaf)*(fvc::snGrad(DU)&(I - n*n))
+ lambdaf*tr(shearGradDU&(I - n*n))*n
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU =
((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*(
- (muf + lambdaf)*(fvc::snGrad(DU)&(I - n*n))
+ lambdaf*tr(shearGradDU&(I - n*n))*n
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
- fvc::laplacian(mu + lambda, DU, "laplacian(DDU,DU)")
+ fvc::div
(
mu*gradDU.T()
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << exit(FatalError);
}
mu*gradDU.T()
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << exit(FatalError);
}
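All four branches assemble the same quantity: the part of the incremental stress divergence that is not already treated implicitly by the fvm::laplacian(2*mu + lambda, DU, ...) term of DUEqn. Using the incremental Hooke's law and treating the coefficients as locally constant when they are moved inside the divergence, the split reads

\[
\nabla\!\cdot\Delta\boldsymbol{\sigma}
=\underbrace{\nabla\!\cdot\!\big[(2\mu+\lambda)\,\nabla\Delta\mathbf{U}\big]}_{\text{implicit}}
+\underbrace{\nabla\!\cdot\!\big[\mu(\nabla\Delta\mathbf{U})^{T}
+\lambda\,\operatorname{tr}(\nabla\Delta\mathbf{U})\,\mathbf{I}
-(\mu+\lambda)\,\nabla\Delta\mathbf{U}\big]}_{\texttt{divDSigmaExp}};
\]

the surface and decompose options evaluate the same remainder from face-interpolated gradients, and the laplacian option shifts part of it into an extra explicit Laplacian.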

View file

@ -2,33 +2,33 @@
//- sigma explicit large strain explicit terms
//----------------------------------------------------//
if(divDSigmaLargeStrainExpMethod == "standard")
{
{
divDSigmaLargeStrainExp =
fvc::div
(
mu*(gradDU & gradDU.T())
+ 0.5*lambda*(gradDU && gradDU)*I //- equivalent to 0.5*lambda*(I*tr(gradDU & gradDU.T()))
+ ((sigma + DSigma) & DF.T()),
"div(sigma)"
fvc::div
(
mu*(gradDU & gradDU.T())
+ 0.5*lambda*(gradDU && gradDU)*I //- equivalent to 0.5*lambda*(I*tr(gradDU & gradDU.T()))
+ ((sigma + DSigma) & DF.T()),
"div(sigma)"
);
}
else if(divDSigmaLargeStrainExpMethod == "surface")
{
divDSigmaLargeStrainExp =
fvc::div
(
muf * (mesh.Sf() & fvc::interpolate(gradDU & gradDU.T()))
+ 0.5*lambdaf * (mesh.Sf() & (fvc::interpolate(gradDU && gradDU)*I))
+ (mesh.Sf() & fvc::interpolate( sigma & DF.T() ))
+ (mesh.Sf() & fvc::interpolate(DSigma & DF.T() ))
);
}
else
{
FatalError
<< "divDSigmaLargeStrainExp not found!"
<< exit(FatalError);
}
}
else if(divDSigmaLargeStrainExpMethod == "surface")
{
divDSigmaLargeStrainExp =
fvc::div
(
muf * (mesh.Sf() & fvc::interpolate(gradDU & gradDU.T()))
+ 0.5*lambdaf * (mesh.Sf() & (fvc::interpolate(gradDU && gradDU)*I))
+ (mesh.Sf() & fvc::interpolate( sigma & DF.T() ))
+ (mesh.Sf() & fvc::interpolate(DSigma & DF.T() ))
);
}
else
{
FatalError
<< "divDSigmaLargeStrainExp not found!"
<< exit(FatalError);
}
//- relax large strain component
divDSigmaLargeStrainExp.relax();
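Both branches evaluate the same additional large-strain contribution, either from cell-centred gradients or from face-interpolated values, with DF = gradDU.T() as set in the solver after each momentum solve:

\[
\nabla\!\cdot\Delta\boldsymbol{\sigma}_{LS}
=\nabla\!\cdot\!\Big[
\mu\,\nabla\Delta\mathbf{U}\cdot(\nabla\Delta\mathbf{U})^{T}
+\tfrac{1}{2}\lambda\,(\nabla\Delta\mathbf{U}:\nabla\Delta\mathbf{U})\,\mathbf{I}
+(\boldsymbol{\sigma}+\Delta\boldsymbol{\sigma})\cdot\Delta\mathbf{F}^{T}
\Big],
\]

where, as the inline comment notes, 0.5*lambda*(gradDU && gradDU)*I is the same as 0.5*lambda*tr(gradDU & gradDU.T())*I.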

View file

@ -15,59 +15,58 @@
FieldField<Field, vector> extraVecs(ptc.size());
{
const labelListList& pfaces = mesh.pointFaces();
const labelListList& pfaces = mesh.pointFaces();
const volVectorField& centres = mesh.C();
const volVectorField& centres = mesh.C();
const fvBoundaryMesh& bm = mesh.boundary();
const fvBoundaryMesh& bm = mesh.boundary();
forAll (ptc, pointI)
forAll (ptc, pointI)
{
const label curPoint = ptc[pointI];
const labelList& curFaces = pfaces[curPoint];
// extraVecs.hook(new vectorField(curFaces.size())); //- no hook function
extraVecs.set
(
pointI,
new vectorField(curFaces.size())
);
const label curPoint = ptc[pointI];
vectorField& curExtraVectors = extraVecs[pointI];
label nFacesAroundPoint = 0;
const labelList& curFaces = pfaces[curPoint];
const vector& pointLoc = mesh.points()[curPoint];
// Go through all the faces
forAll (curFaces, faceI)
{
if (!mesh.isInternalFace(curFaces[faceI]))
{
// This is a boundary face. If not in the empty patch
// or coupled, calculate the extrapolation vector
label patchID =
mesh.boundaryMesh().whichPatch(curFaces[faceI]);
if
(
!isA<emptyFvPatch>(bm[patchID])
&& !bm[patchID].coupled()
)
{
// Found a face for extrapolation
curExtraVectors[nFacesAroundPoint] =
pointLoc
- centres.boundaryField()[patchID]
[bm[patchID].patch().whichFace(curFaces[faceI])];
nFacesAroundPoint++;
}
}
}
curExtraVectors.setSize(nFacesAroundPoint);
// extraVecs.hook(new vectorField(curFaces.size())); //- no hook function
extraVecs.set
(
pointI,
new vectorField(curFaces.size())
);
vectorField& curExtraVectors = extraVecs[pointI];
label nFacesAroundPoint = 0;
const vector& pointLoc = mesh.points()[curPoint];
// Go through all the faces
forAll (curFaces, faceI)
{
if (!mesh.isInternalFace(curFaces[faceI]))
{
// This is a boundary face. If not in the empty patch
// or coupled, calculate the extrapolation vector
label patchID =
mesh.boundaryMesh().whichPatch(curFaces[faceI]);
if
(
!isA<emptyFvPatch>(bm[patchID])
&& !bm[patchID].coupled()
)
{
// Found a face for extrapolation
curExtraVectors[nFacesAroundPoint] =
pointLoc
- centres.boundaryField()[patchID]
[bm[patchID].patch().whichFace(curFaces[faceI])];
nFacesAroundPoint++;
}
}
}
curExtraVectors.setSize(nFacesAroundPoint);
}
}

View file

@ -8,114 +8,116 @@
FieldField<Field, scalar> w(ptc.size());
{
const labelListList& pf = mesh.pointFaces();
const labelListList& pf = mesh.pointFaces();
const volVectorField& centres = mesh.C();
const volVectorField& centres = mesh.C();
const fvBoundaryMesh& bm = mesh.boundary();
pointScalarField volPointSumWeights
const fvBoundaryMesh& bm = mesh.boundary();
pointScalarField volPointSumWeights
(
IOobject
(
"volPointSumWeights",
mesh.polyMesh::instance(),
mesh
),
pMesh,
dimensionedScalar("zero", dimless, 0)
);
forAll (ptc, pointI)
{
const label curPoint = ptc[pointI];
const labelList& curFaces = pf[curPoint];
//w.hook(new scalarField(curFaces.size())); //philipc no hook function
w.set
(
pointI,
new scalarField(curFaces.size())
);
scalarField& curWeights = w[pointI];
label nFacesAroundPoint = 0;
const vector& pointLoc = mesh.points()[curPoint];
// Go through all the faces
forAll (curFaces, faceI)
{
if (!mesh.isInternalFace(curFaces[faceI]))
{
// This is a boundary face. If not in the empty patch
// or coupled, calculate the interpolation weight
label patchID =
mesh.boundaryMesh().whichPatch(curFaces[faceI]);
if
(
!isA<emptyFvPatch>(bm[patchID])
&& !(
bm[patchID].coupled()
//&& Pstream::parRun()
//&& !mesh.parallelData().cyclicParallel()
)
)
{
curWeights[nFacesAroundPoint] =
1.0/mag
(
pointLoc
- centres.boundaryField()[patchID]
[
bm[patchID].patch().whichFace(curFaces[faceI])
]
);
nFacesAroundPoint++;
}
}
}
// Reset the sizes of the local weights
curWeights.setSize(nFacesAroundPoint);
// Collect the sum of weights for parallel correction
volPointSumWeights[curPoint] += sum(curWeights);
}
// Do parallel correction of weights
// Update coupled boundaries
// Work-around for cyclic parallels.
/*if (Pstream::parRun() && !mesh.parallelData().cyclicParallel())
{
forAll (volPointSumWeights.boundaryField(), patchI)
{
if (volPointSumWeights.boundaryField()[patchI].coupled())
{
volPointSumWeights.boundaryField()[patchI].initAddField();
}
}
forAll (volPointSumWeights.boundaryField(), patchI)
{
if (volPointSumWeights.boundaryField()[patchI].coupled())
{
volPointSumWeights.boundaryField()[patchI].addField
(
volPointSumWeights.internalField()
IOobject
(
"volPointSumWeights",
mesh.polyMesh::instance(),
mesh
),
pMesh,
dimensionedScalar("zero", dimless, 0)
);
}
}
}*/
// Re-scale the weights for the current point
forAll (ptc, pointI)
forAll (ptc, pointI)
{
w[pointI] /= volPointSumWeights[ptc[pointI]];
const label curPoint = ptc[pointI];
const labelList& curFaces = pf[curPoint];
//w.hook(new scalarField(curFaces.size())); //philipc no hook function
w.set
(
pointI,
new scalarField(curFaces.size())
);
scalarField& curWeights = w[pointI];
label nFacesAroundPoint = 0;
const vector& pointLoc = mesh.points()[curPoint];
// Go through all the faces
forAll (curFaces, faceI)
{
if (!mesh.isInternalFace(curFaces[faceI]))
{
// This is a boundary face. If not in the empty patch
// or coupled, calculate the interpolation weight
label patchID =
mesh.boundaryMesh().whichPatch(curFaces[faceI]);
if
(
!isA<emptyFvPatch>(bm[patchID])
&& !(
bm[patchID].coupled()
//&& Pstream::parRun()
//&& !mesh.parallelData().cyclicParallel()
)
)
{
curWeights[nFacesAroundPoint] =
1.0/mag
(
pointLoc
- centres.boundaryField()[patchID]
[
bm[patchID].patch().whichFace(curFaces[faceI])
]
);
nFacesAroundPoint++;
}
}
}
// Reset the sizes of the local weights
curWeights.setSize(nFacesAroundPoint);
// Collect the sum of weights for parallel correction
volPointSumWeights[curPoint] += sum(curWeights);
}
// Do parallel correction of weights
// Update coupled boundaries
// Work-around for cyclic parallels.
/*
if (Pstream::parRun() && !mesh.parallelData().cyclicParallel())
{
forAll (volPointSumWeights.boundaryField(), patchI)
{
if (volPointSumWeights.boundaryField()[patchI].coupled())
{
volPointSumWeights.boundaryField()[patchI].initAddField();
}
}
forAll (volPointSumWeights.boundaryField(), patchI)
{
if (volPointSumWeights.boundaryField()[patchI].coupled())
{
volPointSumWeights.boundaryField()[patchI].addField
(
volPointSumWeights.internalField()
);
}
}
}
*/
// Re-scale the weights for the current point
forAll (ptc, pointI)
{
w[pointI] /= volPointSumWeights[ptc[pointI]];
}
}
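The weights built above are plain inverse-distance weights from each boundary point to the centres of its non-empty, non-coupled boundary faces, normalised by their sum (the commented-out block would add the parallel correction of that sum across coupled patches):

\[
w_{p,f}=\frac{1/\lVert\mathbf{x}_{p}-\mathbf{C}_{f}\rVert}
{\sum_{f'\in F(p)} 1/\lVert\mathbf{x}_{p}-\mathbf{C}_{f'}\rVert},
\]

with F(p) the faces collected for point p and C_f their boundary-face centres.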

View file

@ -23,138 +23,131 @@ philipc
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
//***** FIX INCORRECT POINT ON PATCHES WITH FACEZONE *****//
contactPatchPairList& contacts = contact;
forAll(contacts, contactI)
{
label masterID = contacts[contactI].masterPatch().index();
label slaveID = contacts[contactI].slavePatch().index();
primitivePatchInterpolation masterInterpolator
(mesh.boundaryMesh()[masterID]);
primitivePatchInterpolation slaveInterpolator
(mesh.boundaryMesh()[slaveID]);
{
label masterID = contacts[contactI].masterPatch().index();
label slaveID = contacts[contactI].slavePatch().index();
//- DU must be interpolated to the vertices, this ignores the faceZone
//- points with no DU (unlike volPointInterpolation)
vectorField correctMasterPointDU =
masterInterpolator.faceToPointInterpolate<vector>
(
DU.boundaryField()[masterID]
);
vectorField correctSlavePointDU =
slaveInterpolator.faceToPointInterpolate<vector>
(
DU.boundaryField()[slaveID]
);
vectorField oldMasterPoints =
mesh.boundaryMesh()[masterID].localPoints();
vectorField oldSlavePoints =
mesh.boundaryMesh()[slaveID].localPoints();
labelList masterPointLabels =
mesh.boundaryMesh()[masterID].meshPoints();
labelList slavePointLabels =
mesh.boundaryMesh()[slaveID].meshPoints();
//- correct the patch newPoints
forAll(masterPointLabels, pointI)
{
label pointGlobalLabel = masterPointLabels[pointI];
newPoints[pointGlobalLabel] =
oldMasterPoints[pointI]
+
correctMasterPointDU[pointI];
}
forAll(slavePointLabels, pointI)
{
label pointGlobalLabel = slavePointLabels[pointI];
newPoints[pointGlobalLabel] =
oldSlavePoints[pointI]
+
correctSlavePointDU[pointI];
}
}
primitivePatchInterpolation masterInterpolator
(
mesh.boundaryMesh()[masterID]
);
primitivePatchInterpolation slaveInterpolator
(
mesh.boundaryMesh()[slaveID]
);
//- DU must be interpolated to the vertices, this ignores the faceZone
//- points with no DU (unlike volPointInterpolation)
vectorField correctMasterPointDU =
masterInterpolator.faceToPointInterpolate<vector>
(
DU.boundaryField()[masterID]
);
vectorField correctSlavePointDU =
slaveInterpolator.faceToPointInterpolate<vector>
(
DU.boundaryField()[slaveID]
);
vectorField oldMasterPoints =
mesh.boundaryMesh()[masterID].localPoints();
vectorField oldSlavePoints =
mesh.boundaryMesh()[slaveID].localPoints();
labelList masterPointLabels =
mesh.boundaryMesh()[masterID].meshPoints();
labelList slavePointLabels =
mesh.boundaryMesh()[slaveID].meshPoints();
//- correct the patch newPoints
forAll(masterPointLabels, pointI)
{
label pointGlobalLabel = masterPointLabels[pointI];
newPoints[pointGlobalLabel] =
oldMasterPoints[pointI] + correctMasterPointDU[pointI];
}
forAll(slavePointLabels, pointI)
{
label pointGlobalLabel = slavePointLabels[pointI];
newPoints[pointGlobalLabel] =
oldSlavePoints[pointI] + correctSlavePointDU[pointI];
}
}
//***** NOW FIX AND SYNCHRONISE ALL THE FACEZONE POINTS *****//
forAll(mesh.faceZones(), faceZoneI)
{
//- find the patch corresponding to this faceZone
//- assuming that the FZ is called <patch_name>FaceZone
string faceZoneName = mesh.faceZones().names()[faceZoneI];
//- remove the string FaceZone from the end of the face zone name to get the patch name
string patchName = faceZoneName.substr(0, (faceZoneName.size()-8));
label patchID = mesh.boundaryMesh().findPatchID(patchName);
if(patchID == -1)
{
FatalError << "Patch " << patchName << " not found corresponding to faceZone "
<< faceZoneName << exit(FatalError);
}
vectorField globalFZpoints =
mesh.faceZones()[faceZoneI]().localPoints();
//- new points for the face zone
vectorField globalFZnewPoints(globalFZpoints.size(), vector::zero);
//- inter-proc points are shared by multiple procs
//- pointNumProc is the number of procs which a point lies on
scalarField pointNumProcs(globalFZpoints.size(), 0.0);
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
//if(localPoint < mesh.boundaryMesh()[patchID].localPoints().size())
if(pointOnLocalProcPatch[faceZoneI][localPoint])
{
label procPoint =
mesh.faceZones()[faceZoneI]().meshPoints()[localPoint];
globalFZnewPoints[globalPointI] =
newPoints[procPoint];
pointNumProcs[globalPointI] = 1;
}
}
reduce(globalFZnewPoints, sumOp<vectorField>());
reduce(pointNumProcs, sumOp<scalarField>());
//- now average the newPoints between all procs
if(min(pointNumProcs) < 1)
{
FatalError << "pointNumProc has not been set for all points" << exit(FatalError);
}
globalFZnewPoints /= pointNumProcs;
//- the globalFZnewPoints now contains the correct FZ new points in
//- a global order, now convert them back into the local proc order
vectorField procFZnewPoints(globalFZpoints.size(), vector::zero);
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
procFZnewPoints[localPoint] =
globalFZnewPoints[globalPointI];
}
//- now fix the newPoints points on the globalFaceZones
labelList procFZmeshPoints =
mesh.faceZones()[faceZoneI]().meshPoints();
forAll(procFZmeshPoints, pointI)
{
label procPoint = procFZmeshPoints[pointI];
newPoints[procPoint] =
procFZnewPoints[pointI];
}
}
}
{
//- find the patch corresponding to this faceZone
//- assuming that the FZ is called <patch_name>FaceZone
string faceZoneName = mesh.faceZones().names()[faceZoneI];
//- remove the string FaceZone from the end of the face zone name to get the patch name
string patchName = faceZoneName.substr(0, (faceZoneName.size()-8));
label patchID = mesh.boundaryMesh().findPatchID(patchName);
if(patchID == -1)
{
FatalError << "Patch " << patchName << " not found corresponding to faceZone "
<< faceZoneName << exit(FatalError);
}
vectorField globalFZpoints =
mesh.faceZones()[faceZoneI]().localPoints();
//- new points for the face zone
vectorField globalFZnewPoints(globalFZpoints.size(), vector::zero);
//- inter-proc points are shared by multiple procs
//- pointNumProc is the number of procs which a point lies on
scalarField pointNumProcs(globalFZpoints.size(), 0.0);
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
//if(localPoint < mesh.boundaryMesh()[patchID].localPoints().size())
if(pointOnLocalProcPatch[faceZoneI][localPoint])
{
label procPoint =
mesh.faceZones()[faceZoneI]().meshPoints()[localPoint];
globalFZnewPoints[globalPointI] = newPoints[procPoint];
pointNumProcs[globalPointI] = 1;
}
}
reduce(globalFZnewPoints, sumOp<vectorField>());
reduce(pointNumProcs, sumOp<scalarField>());
//- now average the newPoints between all procs
if(min(pointNumProcs) < 1)
{
FatalError << "pointNumProc has not been set for all points" << exit(FatalError);
}
globalFZnewPoints /= pointNumProcs;
//- the globalFZnewPoints now contains the correct FZ new points in
//- a global order, now convert them back into the local proc order
vectorField procFZnewPoints(globalFZpoints.size(), vector::zero);
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
procFZnewPoints[localPoint] = globalFZnewPoints[globalPointI];
}
//- now fix the newPoints points on the globalFaceZones
labelList procFZmeshPoints = mesh.faceZones()[faceZoneI]().meshPoints();
forAll(procFZmeshPoints, pointI)
{
label procPoint = procFZmeshPoints[pointI];
newPoints[procPoint] = procFZnewPoints[pointI];
}
}
}

View file

@ -25,7 +25,7 @@
IOobject::AUTO_WRITE
),
mesh,
dimensionedVector("zero", dimLength, vector::zero)
dimensionedVector("zero", dimLength, vector::zero)
);
volSymmTensorField DEpsilon
@ -84,35 +84,35 @@
dimensionedSymmTensor("zero", dimForce/dimArea, symmTensor::zero)
);
volVectorField divDSigmaExp
(
volVectorField divDSigmaExp
(
IOobject
(
"divDSigmaExp",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
"divDSigmaExp",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
mesh,
dimensionedVector("zero", dimensionSet(1,-2,-2,0,0,0,0), vector::zero)
);
);
volVectorField divDSigmaLargeStrainExp
(
volVectorField divDSigmaLargeStrainExp
(
IOobject
(
"divDSigmaLargeStrainExp",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
"divDSigmaLargeStrainExp",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
mesh,
dimensionedVector("zero", dimensionSet(1,-2,-2,0,0,0,0), vector::zero)
);
);
// read rheology properties
// read rheology properties
rheologyModel rheology(sigma);
volScalarField rho = rheology.rho();

View file

@ -19,117 +19,117 @@ philipc
//- these are read if present to allow restarting of contact cases
IOList<labelList> procToGlobalFZmap
(
IOobject
(
"procToGlobalFZmap",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
IOobject
(
"procToGlobalFZmap",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
IOList<labelList> pointOnLocalProcPatch
(
IOobject
(
"pointOnLocalProcPatch",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
IOobject
(
"pointOnLocalProcPatch",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
//- if they have been read then don't recalculate it
bool globalFaceZoneMappingSet = false;
if(gMax(procToGlobalFZmap[0]) > 0 && gMax(pointOnLocalProcPatch[0]) > 0)
{
{
Info << "Reading procToGlobalFZmap and pointOnLocalProcPatch allowing restart of contact cases"
<< endl;
<< endl;
globalFaceZoneMappingSet = true;
}
else
{
Info << "procToGlobalFZmap and pointOnLocalProcPatch will be calculated as they have not been found" << nl
<< "this message should only appear when starting a new analysis" << endl;
}
}
else
{
Info << "procToGlobalFZmap and pointOnLocalProcPatch will be calculated as they have not been found" << nl
<< "this message should only appear when starting a new analysis" << endl;
}
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
if(!globalFaceZoneMappingSet)
{
forAll(mesh.faceZones(), faceZoneI)
{
vectorField globalFZpoints = mesh.faceZones()[faceZoneI]().localPoints();
procToGlobalFZmap[faceZoneI].setSize(globalFZpoints.size(), 0);
//- set all slave points to zero because only the master order is used
if(!Pstream::master())
globalFZpoints *= 0.0;
//- pass points to all procs
reduce(globalFZpoints, sumOp<vectorField>());
//- now every proc has the master's list of FZ points
//- every proc must now find the mapping from their local FZpoints to
//- the globalFZpoints
vectorField procFZpoints = mesh.faceZones()[faceZoneI]().localPoints();
forAll(globalFZpoints, globalPointI)
{
forAll(procFZpoints, procPointI)
{
if(procFZpoints[procPointI] == globalFZpoints[globalPointI])
{
procToGlobalFZmap[faceZoneI][globalPointI] = procPointI;
break;
}
}
}
//- procToGlobalFZmap now contains the local FZpoint label for each
//- global FZ point label - for each faceZone
//- check what points are on the current proc patch
pointOnLocalProcPatch[faceZoneI].setSize(globalFZpoints.size(), 0);
//- find corresponding patch
string faceZoneName = mesh.faceZones().names()[faceZoneI];
//- remove the string FaceZone from the end of the face zone name to get the patch name
string patchName = faceZoneName.substr(0, (faceZoneName.size()-8));
label patchID = mesh.boundaryMesh().findPatchID(patchName);
if(patchID == -1)
{
FatalError << "Patch " << patchName << " not found corresponding to faceZone "
<< faceZoneName << exit(FatalError);
}
forAll(mesh.faceZones()[faceZoneI]().localPoints(), fzpi)
{
forAll(mesh.boundaryMesh()[patchID].localPoints(), pi)
{
if(mesh.faceZones()[faceZoneI]().localPoints()[fzpi] == mesh.boundaryMesh()[patchID].localPoints()[pi])
{
pointOnLocalProcPatch[faceZoneI][fzpi] = 1;
break;
}
}
}
}
} //- end if(!globalFaceZoneMappingSet)
}
{
forAll(mesh.faceZones(), faceZoneI)
{
vectorField globalFZpoints = mesh.faceZones()[faceZoneI]().localPoints();
procToGlobalFZmap[faceZoneI].setSize(globalFZpoints.size(), 0);
//- set all slave points to zero because only the master order is used
if(!Pstream::master())
{
globalFZpoints *= 0.0;
}
//- pass points to all procs
reduce(globalFZpoints, sumOp<vectorField>());
//- now every proc has the master's list of FZ points
//- every proc must now find the mapping from their local FZpoints to
//- the globalFZpoints
vectorField procFZpoints = mesh.faceZones()[faceZoneI]().localPoints();
forAll(globalFZpoints, globalPointI)
{
forAll(procFZpoints, procPointI)
{
if(procFZpoints[procPointI] == globalFZpoints[globalPointI])
{
procToGlobalFZmap[faceZoneI][globalPointI] = procPointI;
break;
}
}
}
//- check what points are on the current proc patch
pointOnLocalProcPatch[faceZoneI].setSize(globalFZpoints.size(), 0);
//- find corresponding patch
string faceZoneName = mesh.faceZones().names()[faceZoneI];
//- remove the string FaceZone from the end of the face zone name to get the patch name
string patchName = faceZoneName.substr(0, (faceZoneName.size()-8));
label patchID = mesh.boundaryMesh().findPatchID(patchName);
if(patchID == -1)
{
FatalError << "Patch " << patchName << " not found corresponding to faceZone "
<< faceZoneName << exit(FatalError);
}
forAll(mesh.faceZones()[faceZoneI]().localPoints(), fzpi)
{
forAll(mesh.boundaryMesh()[patchID].localPoints(), pi)
{
if(mesh.faceZones()[faceZoneI]().localPoints()[fzpi] == mesh.boundaryMesh()[patchID].localPoints()[pi])
{
pointOnLocalProcPatch[faceZoneI][fzpi] = 1;
break;
}
}
}
}
} //- end if(!globalFaceZoneMappingSet)
}
//- write to disk to allow restart of cases
//- because it is not possible to calculate the
//- because it is not possible to calculate the
//- mapping after the meshes have moved
if(!globalFaceZoneMappingSet)
{
{
procToGlobalFZmap.write();
pointOnLocalProcPatch.write();
}
}

View file

@ -66,137 +66,136 @@ Author
int main(int argc, char *argv[])
{
# include "setRootCase.H"
# include "setRootCase.H"
# include "createTime.H"
# include "createTime.H"
# include "createMesh.H"
# include "createMesh.H"
# include "createFields.H"
# include "createFields.H"
# include "readDivDSigmaExpMethod.H"
# include "readDivDSigmaExpMethod.H"
# include "readDivDSigmaLargeStrainMethod.H"
# include "readDivDSigmaLargeStrainMethod.H"
# include "readMoveMeshMethod.H"
# include "readMoveMeshMethod.H"
# include "createGlobalToLocalFaceZonePointMap.H"
# include "createGlobalToLocalFaceZonePointMap.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info<< "\nStarting time loop\n" << endl;
for (runTime++; !runTime.end(); runTime++)
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info<< "\nStarting time loop\n" << endl;
for (runTime++; !runTime.end(); runTime++)
{
Info<< "Time: " << runTime.timeName() << endl;
# include "readContactControls.H"
Info<< "Time: " << runTime.timeName() << endl;
# include "readStressedFoamControls.H"
//-- for moving the mesh and then back again
vectorField oldMeshPoints = mesh.allPoints();
int iCorr = 0;
lduMatrix::solverPerformance solverPerf;
word solverName;
lduMatrix::debug = 0;
scalar residual = GREAT;
scalar initialResidual = 0;
scalar relativeResidual = GREAT;
do //- start of momentum loop
{
DU.storePrevIter();
divDSigmaLargeStrainExp.storePrevIter();
# include "readContactControls.H"
//- correct the contact boundaries
if(iCorr % uEqnContactCorrFreq == 0)
{
Info << "\t\tCorrecting contact in the momentum loop "
<< "iteration: " << iCorr
<< ", residual: " << residual
<< endl;
//# include "moveMeshLeastSquares.H"
# include "moveSolidMeshForContact.H"
contact.correct();
mesh.movePoints(oldMeshPoints);
}
# include "calculateDivDSigmaExp.H"
# include "calculateDivDSigmaExpLargeStrain.H"
# include "readStressedFoamControls.H"
fvVectorMatrix DUEqn
(
fvm::d2dt2(rho, DU)
==
fvm::laplacian(2*mu + lambda, DU, "laplacian(DDU,DU)")
+ divDSigmaExp
+ divDSigmaLargeStrainExp
//-- for moving the mesh and then back again
vectorField oldMeshPoints = mesh.allPoints();
);
solverPerf = DUEqn.solve();
DU.relax();
solverName = solverPerf.solverName();
gradDU = fvc::grad(DU);
DF = gradDU.T();
int iCorr = 0;
lduMatrix::solverPerformance solverPerf;
word solverName;
lduMatrix::debug = 0;
scalar residual = GREAT;
scalar initialResidual = 0;
scalar relativeResidual = GREAT;
# include "calculateDEpsilonDSigma.H"
do //- start of momentum loop
{
DU.storePrevIter();
residual = solverPerf.initialResidual();
if(iCorr == 0)
{
initialResidual = solverPerf.initialResidual();
}
# include "calculateRelativeResidual.H"
Info << "\tTime " << runTime.value()
<< ", Corrector " << iCorr
<< ", Solving for " << DU.name()
<< " using " << solverPerf.solverName()
<< ", residual = " << solverPerf.initialResidual()
<< ", relative residual = " << relativeResidual << endl;
} //- end of momentum loop
while
(
relativeResidual > convergenceTolerance
//residual > convergenceTolerance
&&
++iCorr < nCorr
);
// Print out info per contact iteration
Info << "\t\tSolving for " << DU.name()
<< " using " << solverName
<< ", Initial residual = " << initialResidual
<< ", Final residual = " << solverPerf.initialResidual()
<< ", No outer iterations " << iCorr << endl;
lduMatrix::debug = 1;
# include "rotateFields.H"
divDSigmaLargeStrainExp.storePrevIter();
# include "moveMesh.H"
//- correct the contact boundaries
if(iCorr % uEqnContactCorrFreq == 0)
{
Info << "\t\tCorrecting contact in the momentum loop "
<< "iteration: " << iCorr
<< ", residual: " << residual
<< endl;
//# include "moveMeshLeastSquares.H"
# include "moveSolidMeshForContact.H"
contact.correct();
mesh.movePoints(oldMeshPoints);
}
# include "writeFields.H"
Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
<< " ClockTime = " << runTime.elapsedClockTime() << " s"
<< endl << endl;
# include "calculateDivDSigmaExp.H"
# include "calculateDivDSigmaExpLargeStrain.H"
fvVectorMatrix DUEqn
(
fvm::d2dt2(rho, DU)
==
fvm::laplacian(2*mu + lambda, DU, "laplacian(DDU,DU)")
+ divDSigmaExp
+ divDSigmaLargeStrainExp
);
solverPerf = DUEqn.solve();
DU.relax();
solverName = solverPerf.solverName();
gradDU = fvc::grad(DU);
DF = gradDU.T();
# include "calculateDEpsilonDSigma.H"
residual = solverPerf.initialResidual();
if(iCorr == 0)
{
initialResidual = solverPerf.initialResidual();
}
# include "calculateRelativeResidual.H"
Info << "\tTime " << runTime.value()
<< ", Corrector " << iCorr
<< ", Solving for " << DU.name()
<< " using " << solverPerf.solverName()
<< ", residual = " << solverPerf.initialResidual()
<< ", relative residual = " << relativeResidual << endl;
} //- end of momentum loop
while
(
relativeResidual > convergenceTolerance
//residual > convergenceTolerance
&&
++iCorr < nCorr
);
// Print out info per contact iteration
Info << "\t\tSolving for " << DU.name()
<< " using " << solverName
<< ", Initial residual = " << initialResidual
<< ", Final residual = " << solverPerf.initialResidual()
<< ", No outer iterations " << iCorr << endl;
lduMatrix::debug = 1;
# include "rotateFields.H"
# include "moveMesh.H"
# include "writeFields.H"
Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
<< " ClockTime = " << runTime.elapsedClockTime() << " s"
<< endl << endl;
}
Info<< "End\n" << endl;
return(0);
Info<< "End\n" << endl;
return(0);
}

View file

@ -8,26 +8,26 @@ const fvBoundaryMesh& bm = mesh.boundary();
forAll (bm, patchI)
{
// If the patch is empty, skip it
// If the patch is coupled, and there are no cyclic parallels, skip it
if
// If the patch is empty, skip it
// If the patch is coupled, and there are no cyclic parallels, skip it
if
(
!isA<emptyFvPatch>(bm[patchI])
&& !(
bm[patchI].coupled()
//&& Pstream::parRun()
//&& !mesh.parallelData().cyclicParallel()
)
)
!isA<emptyFvPatch>(bm[patchI])
&& !(
bm[patchI].coupled()
//&& Pstream::parRun()
//&& !mesh.parallelData().cyclicParallel()
)
)
{
const labelList& bp = bm[patchI].patch().boundaryPoints();
const labelList& meshPoints = bm[patchI].patch().meshPoints();
const labelList& bp = bm[patchI].patch().boundaryPoints();
forAll (bp, pointI)
{
pointsCorrectionMap.insert(meshPoints[bp[pointI]]);
}
const labelList& meshPoints = bm[patchI].patch().meshPoints();
forAll (bp, pointI)
{
pointsCorrectionMap.insert(meshPoints[bp[pointI]]);
}
}
}

View file

@ -1,15 +1,15 @@
if(moveMeshMethod == "inverseDistance")
{
{
# include "moveMeshInverseDistance.H"
}
else if(moveMeshMethod == "leastSquares")
{
}
else if(moveMeshMethod == "leastSquares")
{
# include "moveMeshLeastSquares.H"
}
else
{
FatalError << "move mesh method " << moveMeshMethod << " not recognised" << nl
<< "available methods are:" << nl
<< "inverseDistance" << nl
<< "leastSquares" << exit(FatalError);
}
}
else
{
FatalError << "move mesh method " << moveMeshMethod << " not recognised" << nl
<< "available methods are:" << nl
<< "inverseDistance" << nl
<< "leastSquares" << exit(FatalError);
}
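readMoveMeshMethod.H itself is not part of this diff, so only the two accepted values are known from the check above; a hypothetical solution-dictionary entry might look like the following (the keyword name is a guess from the variable name, not confirmed here):

    moveMesh        inverseDistance;    // or: leastSquares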

View file

@ -2,34 +2,34 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Move solid mesh using inverse distance interpolation" << endl;
// Create point mesh
pointMesh pMesh(mesh);
// Create point interpolation
volPointInterpolation pointInterpolation(mesh);
wordList types
(
pMesh.boundary().size(),
//fixedValueFvPatchVectorField::typeName
calculatedFvPatchVectorField::typeName
);
(
pMesh.boundary().size(),
//fixedValueFvPatchVectorField::typeName
calculatedFvPatchVectorField::typeName
);
pointVectorField pointDU
(
IOobject
(
"pointDU",
runTime.timeName(),
mesh
),
pMesh,
dimensionedVector("zero", dimLength, vector::zero),
types
);
(
IOobject
(
"pointDU",
runTime.timeName(),
mesh
),
pMesh,
dimensionedVector("zero", dimLength, vector::zero),
types
);
// Calculate mesh points displacement
pointInterpolation.interpolate(DU, pointDU);
@ -41,26 +41,25 @@ if(min(J.internalField()) > 0)
//pointDU.write();
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
// Move mesh
vectorField newPoints = mesh.allPoints();
forAll (pointDUI, pointI)
{
newPoints[pointI] += pointDUI[pointI];
}
forAll (pointDUI, pointI)
{
newPoints[pointI] += pointDUI[pointI];
}
twoDPointCorrector twoDCorrector(mesh);
twoDCorrector.correctPoints(newPoints);
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}

Some files were not shown because too many files have changed in this diff.