Merge branch 'master' into cvm

graham
2009-05-01 10:11:25 +01:00
26 changed files with 330 additions and 268 deletions

View File

@@ -48,157 +48,6 @@ using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void createBaffles
(
const polyMesh& mesh,
const label faceI,
const label oldPatchI,
const labelList& newPatches,
PackedBoolList& doneFace,
polyTopoChange& meshMod
)
{
const polyBoundaryMesh& patches = mesh.boundaryMesh();
const faceZoneMesh& faceZones = mesh.faceZones();
const face& f = mesh.faces()[faceI];
label zoneID = faceZones.whichZone(faceI);
bool zoneFlip = false;
if (zoneID >= 0)
{
const faceZone& fZone = faceZones[zoneID];
zoneFlip = fZone.flipMap()[fZone.whichFace(faceI)];
}
label nei = -1;
if (oldPatchI == -1)
{
nei = mesh.faceNeighbour()[faceI];
}
face revFace(f.reverseFace());
forAll(newPatches, i)
{
if (oldPatchI == -1)
{
// Internal face
if (doneFace.set(faceI))
{
// First usage of face. Modify.
meshMod.setAction
(
polyModifyFace
(
f, // modified face
faceI, // label of face
mesh.faceOwner()[faceI], // owner
-1, // neighbour
false, // face flip
newPatches[i], // patch for face
false, // remove from zone
zoneID, // zone for face
zoneFlip // face flip in zone
)
);
}
else
{
// Second or more usage of face. Add.
meshMod.setAction
(
polyAddFace
(
f, // modified face
mesh.faceOwner()[faceI], // owner
-1, // neighbour
-1, // master point
-1, // master edge
faceI, // master face
false, // face flip
newPatches[i], // patch for face
zoneID, // zone for face
zoneFlip // face flip in zone
)
);
}
meshMod.setAction
(
polyAddFace
(
revFace, // modified face
nei, // owner
-1, // neighbour
-1, // masterPointID
-1, // masterEdgeID
faceI, // masterFaceID,
true, // face flip
newPatches[i], // patch for face
zoneID, // zone for face
zoneFlip // face flip in zone
)
);
}
else if
(
patches[oldPatchI].coupled()
&& patches[newPatches[i]].coupled()
)
{
// Do not allow coupled patches to be moved to different coupled
// patches.
//WarningIn("createBaffles()")
// << "Not moving face from coupled patch "
// << patches[oldPatchI].name()
// << " to another coupled patch " << patches[newPatches[i]]
// << endl;
}
else
{
if (doneFace.set(faceI))
{
meshMod.setAction
(
polyModifyFace
(
f, // modified face
faceI, // label of face
mesh.faceOwner()[faceI], // owner
-1, // neighbour
false, // face flip
newPatches[i], // patch for face
false, // remove from zone
zoneID, // zone for face
zoneFlip // face flip in zone
)
);
}
else
{
meshMod.setAction
(
polyAddFace
(
f, // modified face
mesh.faceOwner()[faceI], // owner
-1, // neighbour
-1, // master point
-1, // master edge
faceI, // master face
false, // face flip
newPatches[i], // patch for face
zoneID, // zone for face
zoneFlip // face flip in zone
)
);
}
}
}
}
// Main program:
int main(int argc, char *argv[])
@@ -215,15 +64,16 @@ int main(int argc, char *argv[])
const word oldInstance = mesh.pointsInstance();
const polyBoundaryMesh& patches = mesh.boundaryMesh();
+ const faceZoneMesh& faceZones = mesh.faceZones();
// Faces to baffle
word setName(args.additionalArgs()[0]);
- Pout<< "Reading faceSet from " << setName << nl << endl;
+ Info<< "Reading faceSet from " << setName << nl << endl;
faceSet facesToSplit(mesh, setName);
// Make sure set is synchronised across couples
facesToSplit.sync(mesh);
- Pout<< "Read " << facesToSplit.size() << " faces from " << setName
- << nl << endl;
+ Info<< "Read " << returnReduce(facesToSplit.size(), sumOp<label>())
+ << " faces from " << setName << nl << endl;
// Patches to put baffles into
@@ -231,7 +81,7 @@ int main(int argc, char *argv[])
word patchName(args.additionalArgs()[1]);
newPatches[0] = patches.findPatchID(patchName);
- Pout<< "Using patch " << patchName
+ Info<< "Using patch " << patchName
<< " at index " << newPatches[0] << endl;
if (newPatches[0] == -1)
@@ -316,49 +166,182 @@ int main(int argc, char *argv[])
// Do the actual changes
// Note order in which faces are modified/added is so they end up correctly
// for cyclic patches (original faces first and then reversed faces)
// since otherwise it will have trouble matching baffles.
label nBaffled = 0;
- PackedBoolList doneFace(mesh.nFaces());
forAll(newPatches, i)
{
label newPatchI = newPatches[i];
for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
{
if (facesToSplit.found(faceI))
{
- createBaffles
- (
- mesh,
- faceI,
- -1, // oldPatchI,
- newPatches,
- doneFace,
- meshMod
- );
+ const face& f = mesh.faces()[faceI];
+ label zoneID = faceZones.whichZone(faceI);
+ bool zoneFlip = false;
+ if (zoneID >= 0)
+ {
+ const faceZone& fZone = faceZones[zoneID];
+ zoneFlip = fZone.flipMap()[fZone.whichFace(faceI)];
+ }
+ if (i == 0)
+ {
+ // First usage of face. Modify.
+ meshMod.setAction
+ (
+ polyModifyFace
+ (
+ f, // modified face
+ faceI, // label of face
+ mesh.faceOwner()[faceI], // owner
+ -1, // neighbour
+ false, // face flip
+ newPatchI, // patch for face
+ false, // remove from zone
+ zoneID, // zone for face
+ zoneFlip // face flip in zone
+ )
+ );
}
else
{
// Second or more usage of face. Add.
meshMod.setAction
(
polyAddFace
(
f, // modified face
mesh.faceOwner()[faceI], // owner
-1, // neighbour
-1, // master point
-1, // master edge
faceI, // master face
false, // face flip
newPatchI, // patch for face
zoneID, // zone for face
zoneFlip // face flip in zone
)
);
}
nBaffled++;
}
}
// Add the reversed face.
for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
{
if (facesToSplit.found(faceI))
{
const face& f = mesh.faces()[faceI];
label zoneID = faceZones.whichZone(faceI);
bool zoneFlip = false;
if (zoneID >= 0)
{
const faceZone& fZone = faceZones[zoneID];
zoneFlip = fZone.flipMap()[fZone.whichFace(faceI)];
}
label nei = mesh.faceNeighbour()[faceI];
meshMod.setAction
(
polyAddFace
(
f.reverseFace(), // modified face
nei, // owner
-1, // neighbour
-1, // masterPointID
-1, // masterEdgeID
faceI, // masterFaceID,
true, // face flip
newPatchI, // patch for face
zoneID, // zone for face
zoneFlip // face flip in zone
)
);
nBaffled++;
}
}
// Modify any boundary faces
forAll(patches, patchI)
{
const polyPatch& pp = patches[patchI];
if (patches[newPatchI].coupled() && pp.coupled())
{
// Do not allow coupled faces to be moved to different coupled
// patches.
}
else
{
forAll(pp, i)
{
label faceI = pp.start()+i;
if (facesToSplit.found(faceI))
{
- createBaffles
- (
- mesh,
- faceI,
- patchI, // oldPatchI,
- newPatches,
- doneFace,
- meshMod
- );
+ const face& f = mesh.faces()[faceI];
+ label zoneID = faceZones.whichZone(faceI);
+ bool zoneFlip = false;
+ if (zoneID >= 0)
+ {
+ const faceZone& fZone = faceZones[zoneID];
+ zoneFlip = fZone.flipMap()[fZone.whichFace(faceI)];
+ }
+ if (i == 0)
+ {
+ // First usage of face. Modify.
+ meshMod.setAction
+ (
+ polyModifyFace
+ (
+ f, // modified face
+ faceI, // label of face
+ mesh.faceOwner()[faceI],// owner
+ -1, // neighbour
+ false, // face flip
+ newPatchI, // patch for face
+ false, // remove from zone
+ zoneID, // zone for face
+ zoneFlip // face flip in zone
+ )
+ );
}
else
{
// Second or more usage of face. Add.
meshMod.setAction
(
polyAddFace
(
f, // modified face
mesh.faceOwner()[faceI],// owner
-1, // neighbour
-1, // master point
-1, // master edge
faceI, // master face
false, // face flip
newPatchI, // patch for face
zoneID, // zone for face
zoneFlip // face flip in zone
)
);
}
nBaffled++;
}
}
}
}
}
Info<< "Converted " << returnReduce(nBaffled, sumOp<label>()) Info<< "Converted " << returnReduce(nBaffled, sumOp<label>())

View File

@@ -37,6 +37,9 @@ Usage
Write the cell distribution as a labelList for use with 'manual'
decomposition method and as a volScalarField for post-processing.
+ @param -region regionName \n
+ Decompose named region. Does not check for existence of processor*.
@param -copyUniform \n
Copy any @a uniform directories too.
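A minimal usage sketch for the new option (the region name here is hypothetical, not taken from the commit):

    decomposePar -region solidRegion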

View File

@@ -1522,30 +1522,6 @@ void Foam::meshRefinement::baffleAndSplitMesh
if (debug)
{
//- Note: commented out since not properly parallel yet.
//// Dump all these faces to a faceSet.
//faceSet problemGeom(mesh_, "problemFacesGeom", 100);
//
//const labelList facePatchGeom
//(
// markFacesOnProblemCellsGeometric
// (
// motionDict,
// globalToPatch
// )
//);
//forAll(facePatchGeom, faceI)
//{
// if (facePatchGeom[faceI] != -1)
// {
// problemGeom.insert(faceI);
// }
//}
//Pout<< "Dumping " << problemGeom.size()
// << " problem faces to " << problemGeom.objectPath() << endl;
//problemGeom.write();
faceSet problemTopo(mesh_, "problemFacesTopo", 100); faceSet problemTopo(mesh_, "problemFacesTopo", 100);
const labelList facePatchTopo const labelList facePatchTopo

View File

@@ -22,7 +22,40 @@ License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- S Strat=b{job=t,map=t,poli=S,sep=(m{asc=b{bnd=d{pass=40,dif=1,rem=1}f{move=80,pass=-1,bal=0.005},org=f{move=80,pass=-1,bal=0.005},width=3},low=h{pass=10}f{move=80,pass=-1,bal=0.0005},type=h,vert=80,rat=0.8}|m{asc=b{bnd=d{pass=40,dif=1,rem=1}f{move=80,pass=-1,bal=0.005},org=f{move=80,pass=-1,bal=0.005},width=3},low=h{pass=10}f{move=80,pass=-1,bal=0.0005},type=h,vert=80,rat=0.8})}
+ Default strategy:
Strat=b
{
job=t,
map=t,
poli=S,
sep=
(
m
{
asc=b
{
bnd=d{pass=40,dif=1,rem=1}f{move=80,pass=-1,bal=0.005},
org=f{move=80,pass=-1,bal=0.005},width=3
},
low=h{pass=10}f{move=80,pass=-1,bal=0.0005},
type=h,
vert=80,
rat=0.8
}
| m
{
asc=b
{
bnd=d{pass=40,dif=1,rem=1}f{move=80,pass=-1,bal=0.005},
org=f{move=80,pass=-1,bal=0.005},
width=3},
low=h{pass=10}f{move=80,pass=-1,bal=0.0005},
type=h,
vert=80,
rat=0.8
}
)
}
\*---------------------------------------------------------------------------*/
#include "scotchDecomp.H"
@@ -31,6 +64,7 @@ S Strat=b{job=t,map=t,poli=S,sep=(m{asc=b{bnd=d{pass=40,dif=1,rem=1}f{move
#include "IFstream.H"
#include "Time.H"
#include "cyclicPolyPatch.H"
+ #include "OFstream.H"
extern "C"
{
@@ -113,46 +147,102 @@ Foam::label Foam::scotchDecomp::decompose
check(SCOTCH_graphCheck(&grafdat), "SCOTCH_graphCheck");
// Dump graph
if (decompositionDict_.found("scotchCoeffs"))
{
const dictionary& scotchCoeffs =
decompositionDict_.subDict("scotchCoeffs");
//// Architecture Switch writeGraph(scotchCoeffs.lookup("writeGraph"));
//// ~~~~~~~~~~~~
//// (fully connected network topology since using switch) if (writeGraph)
// {
//SCOTCH_Arch archdat; OFstream str(mesh_.time().path() / mesh_.name() + ".grf");
//check(SCOTCH_archInit(&archdat), "SCOTCH_archInit");
//check Info<< "Dumping Scotch graph file to " << str.name() << endl
//( << "Use this in combination with gpart." << endl;
// // SCOTCH_archCmpltw for weighted.
// SCOTCH_archCmplt(&archdat, nProcessors_), label version = 0;
// "SCOTCH_archCmplt" str << version << nl;
//); // Numer of vertices
str << xadj.size()-1 << ' ' << adjncy.size() << nl;
// Numbering starts from 0
label baseval = 0;
// Has weights?
label hasEdgeWeights = 0;
label hasVertexWeights = 0;
label numericflag = 10*hasEdgeWeights+hasVertexWeights;
str << baseval << ' ' << numericflag << nl;
for (label cellI = 0; cellI < xadj.size()-1; cellI++)
{
label start = xadj[cellI];
label end = xadj[cellI+1];
str << end-start;
for (label i = start; i < end; i++)
{
str << ' ' << adjncy[i];
}
str << nl;
}
}
}
// Architecture
// ~~~~~~~~~~~~
// (fully connected network topology since using switch)
SCOTCH_Arch archdat;
check(SCOTCH_archInit(&archdat), "SCOTCH_archInit");
check
(
// SCOTCH_archCmpltw for weighted.
SCOTCH_archCmplt(&archdat, nProcessors_),
"SCOTCH_archCmplt"
);
//SCOTCH_Mapping mapdat;
//SCOTCH_graphMapInit(&grafdat, &mapdat, &archdat, NULL);
//SCOTCH_graphMapCompute(&grafdat, &mapdat, &stradat); /* Perform mapping */
+ //SCOTCH_graphMapExit(&grafdat, &mapdat);
finalDecomp.setSize(xadj.size()-1);
+ finalDecomp = 0;
check
(
- SCOTCH_graphPart
+ SCOTCH_graphMap
(
&grafdat,
- nProcessors_, // partnbr
+ &archdat,
&stradat, // const SCOTCH_Strat *
finalDecomp.begin() // parttab
),
- "SCOTCH_graphPart"
+ "SCOTCH_graphMap"
);
//finalDecomp.setSize(xadj.size()-1);
//check
//(
// SCOTCH_graphPart
// (
// &grafdat,
// nProcessors_, // partnbr
// &stradat, // const SCOTCH_Strat *
// finalDecomp.begin() // parttab
// ),
// "SCOTCH_graphPart"
//);
// Release storage for graph
SCOTCH_graphExit(&grafdat);
+ // Release storage for strategy
SCOTCH_stratExit(&stradat);
- //// Release storage for network topology
- //SCOTCH_archExit(&archdat);
+ // Release storage for network topology
+ SCOTCH_archExit(&archdat);
return 0;
}
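For orientation, the graph dump added above is driven from the scotchCoeffs sub-dictionary of decomposeParDict. A minimal sketch of such a dictionary (only the writeGraph switch is read by the code in this diff; the remaining keywords are the usual decomposeParDict entries, shown here only for context):

    numberOfSubdomains  4;

    method              scotch;

    scotchCoeffs
    {
        // Write the mesh dual graph to <case>/<mesh name>.grf for use
        // with the Scotch gpart tool (see the Info message above)
        writeGraph      true;
    }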

View File

@@ -56,7 +56,7 @@ Foam::FitData<Form, extendedStencil, Polynomial>::FitData
// Check input
if (linearLimitFactor <= SMALL || linearLimitFactor > 3)
{
- FatalErrorIn("FitData<Polynomial>::FitData")
+ FatalErrorIn("FitData<Polynomial>::FitData(..)")
<< "linearLimitFactor requested = " << linearLimitFactor
<< " should be between zero and 3"
<< exit(FatalError);
@@ -264,8 +264,7 @@ void Foam::FitData<FitDataType, ExtendedStencil, Polynomial>::calcFit
// {
WarningIn
(
- "FitData<Polynomial>::calcFit"
- "(const List<point>& C, const label facei"
+ "FitData<Polynomial>::calcFit(..)"
) << "Could not fit face " << facei
<< " Weights = " << coeffsi
<< ", reverting to linear." << nl

View File

@@ -326,7 +326,7 @@ void Foam::cellClassification::markCells
changedFaces, // Labels of changed faces
changedFacesInfo, // Information on changed faces
cellInfoList, // Information on all cells
- mesh_.nCells() // max iterations
+ mesh_.globalData().nTotalCells() // max iterations
);
// Get information out of cellInfoList
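The same substitution appears in the patchDataWave, patchWave and smoothDelta files below: the iteration limit passed to the wave algorithm changes from the processor-local cell count to the global total, since in a parallel run a wave can re-enter a processor several times and the local count may then be too small a bound. A minimal sketch of the pattern (the helper name is hypothetical, not from the commit):

    #include "polyMesh.H"
    #include "globalMeshData.H"

    namespace Foam
    {
        // Upper bound on mesh-wave iterations that stays safe in parallel:
        // bound by the number of cells in the whole mesh, not just the
        // cells held on this processor.
        label maxWaveIterations(const polyMesh& mesh)
        {
            // was: mesh.nCells()   (processor-local count)
            return mesh.globalData().nTotalCells();
        }
    }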

View File

@@ -226,7 +226,7 @@ void Foam::patchDataWave<TransferType>::correct()
mesh(),
changedFaces,
faceDist,
- mesh().nCells() // max iterations
+ mesh().globalData().nTotalCells() // max iterations
);

View File

@@ -28,7 +28,7 @@ License
#include "polyMesh.H"
#include "wallPoint.H"
#include "MeshWave.H"
+ #include "globalMeshData.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
@@ -187,7 +187,7 @@ void Foam::patchWave::correct()
mesh(),
changedFaces,
faceDist,
- mesh().nCells() // max iterations
+ mesh().globalData().nTotalCells() // max iterations
);

View File

@ -1,9 +1,8 @@
EXE_INC = \ EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/basic/lnInclude \ -I$(LIB_SRC)/thermophysicalModels/basic/lnInclude
-I$(LIB_SRC)/thermophysicalModels/combustion/lnInclude \
-I$(LIB_SRC)/OpenFOAM/lnInclude \
-I radiationModel/fvDOM/interpolationLookUpTable
LIB_LIBS = \ LIB_LIBS = \
-lfiniteVolume -lfiniteVolume \
-lbasicThermophysicalModels \
-lspecie

View File

@@ -250,6 +250,15 @@ Foam::radiation::fvDOM::fvDOM(const volScalarField& T)
)
);
}
Info<< "fvDOM : Allocated " << IRay_.size()
<< " rays with average orientation:" << nl;
forAll (IRay_, i)
{
Info<< '\t' << IRay_[i].I().name()
<< '\t' << IRay_[i].dAve() << nl;
}
Info<< endl;
}
@@ -359,7 +368,8 @@ void Foam::radiation::fvDOM::updateG()
{
IRay_[rayI].addIntensity();
G_ += IRay_[rayI].I()*IRay_[rayI].omega();
- Qr_ += IRay_[rayI].Qr();
+ //Qr_ += IRay_[rayI].Qr();
+ Qr_.boundaryField() += IRay_[rayI].Qr().boundaryField();
}
}

View File

@@ -187,7 +187,8 @@ Foam::radiation::radiativeIntensityRay::~radiativeIntensityRay()
Foam::scalar Foam::radiation::radiativeIntensityRay::correct()
{
// reset boundary heat flux to zero
- Qr_ = dimensionedScalar("zero", dimMass/pow3(dimTime), 0.0);
+ //Qr_ = dimensionedScalar("zero", dimMass/pow3(dimTime), 0.0);
+ Qr_.boundaryField() = 0.0;
scalar maxResidual = -GREAT;

View File

@@ -138,7 +138,7 @@ void smoothDelta::calcDelta()
changedFacesInfo,
faceDeltaData,
cellDeltaData,
- mesh_.nCells() // max iterations
+ mesh_.globalData().nTotalCells() // max iterations
);
forAll(delta_, cellI)

View File

@@ -23,25 +23,25 @@ boundaryField
{
floor
{
- type epsilonWallFunction;
+ type compressible::epsilonWallFunction;
value uniform 0;
}
ceiling
{
- type epsilonWallFunction;
+ type compressible::epsilonWallFunction;
value uniform 0;
}
fixedWalls
{
- type epsilonWallFunction;
+ type compressible::epsilonWallFunction;
value uniform 0;
}
box
{
- type epsilonWallFunction;
+ type compressible::epsilonWallFunction;
value uniform 0;
}
}

View File

@@ -23,25 +23,25 @@ boundaryField
{
floor
{
- type kQRWallFunction;
+ type compressible::kQRWallFunction;
value uniform 0;
}
ceiling
{
- type kQRWallFunction;
+ type compressible::kQRWallFunction;
value uniform 0;
}
fixedWalls
{
- type kQRWallFunction;
+ type compressible::kQRWallFunction;
value uniform 0;
}
box
{
- type kQRWallFunction;
+ type compressible::kQRWallFunction;
value uniform 0;
}
}

View File

@@ -1,2 +1,2 @@
cDBUG =
- cOPT = -O3 -fno-gcse
+ cOPT = -O3

View File

@@ -1,2 +1,2 @@
cDBUG =
- cOPT = -O3 -fno-gcse
+ cOPT = -O3

View File

@@ -1,4 +1,4 @@
c++DBUG =
- c++OPT = -march=opteron -O3
+ c++OPT = -O3
#c++OPT = -march=nocona -O3
# -ftree-vectorize -ftree-vectorizer-verbose=3

View File

@@ -1,2 +1,2 @@
cDBUG =
- cOPT = -march=opteron -O3 -fno-gcse
+ cOPT = -O3

View File

@@ -1,5 +1,5 @@
CPP = /lib/cpp $(GFLAGS)
- LD = ld -A64
+ LD = ld
PROJECT_LIBS = -l$(WM_PROJECT) -liberty -ldl

View File

@@ -1,4 +1,4 @@
c++DBUG =
- c++OPT = -march=opteron -O3
+ c++OPT = -O3
#c++OPT = -march=nocona -O3
# -ftree-vectorize -ftree-vectorizer-verbose=3

View File

@@ -1,2 +1,2 @@
cDBUG =
- cOPT = -march=opteron -O3 -fno-gcse
+ cOPT = -O3

View File

@@ -8,7 +8,7 @@ include $(RULES)/c++$(WM_COMPILE_OPTION)
ptFLAGS = -DNoRepository -ftemplate-depth-60
- c++FLAGS = $(GFLAGS) $(c++WARN) $(c++OPT) $(c++DBUG) $(ptFLAGS) $(LIB_HEADER_DIRS) -fPIC -pthread
+ c++FLAGS = $(GFLAGS) $(c++WARN) $(c++OPT) $(c++DBUG) $(ptFLAGS) $(LIB_HEADER_DIRS) -fPIC
Ctoo = $(WM_SCHEDULER) $(CC) $(c++FLAGS) -c $$SOURCE -o $@
cxxtoo = $(Ctoo)

View File

@@ -1,2 +1,2 @@
cDBUG = -ggdb -DFULLDEBUG
- cOPT = -O1 -finline-functions
+ cOPT = -O1 -fdefault-inline -finline-functions

View File

@@ -1,10 +1,11 @@
CPP = /lib/cpp $(GFLAGS)
LD = ld -melf_i386
- PROJECT_LIBS = -l$(WM_PROJECT) -ldl
+ PROJECT_LIBS = -l$(WM_PROJECT) -liberty -ldl
include $(GENERAL_RULES)/standard
include $(RULES)/X
include $(RULES)/c
include $(RULES)/c++
+ include $(GENERAL_RULES)/cint

View File

@@ -1,2 +1,2 @@
cDBUG =
- cOPT = -O3 -fno-gcse
+ cOPT = -O3

View File

@@ -1,2 +1,2 @@
cDBUG =
- cOPT = -O3 -fno-gcse
+ cOPT = -O3