ParaView-5.0.1: Added the source-tree to ThirdParty-dev and patched as described in the README file

Resolves bug-report http://bugs.openfoam.org/view.php?id=2098
Author: Henry Weller
Date:   2016-05-30 21:20:56 +01:00
parent 1cce60aa78
commit eba760a6d6
24640 changed files with 6366069 additions and 0 deletions

@@ -0,0 +1,90 @@
coProcessor = None
usecp = True

def initialize():
    global coProcessor, usecp
    if usecp:
        import paraview
        import vtkParallelCorePython
        import vtk
        from mpi4py import MPI
        import os, sys
        paraview.options.batch = True
        paraview.options.symmetric = True
        import vtkPVClientServerCoreCorePython as CorePython
        try:
            import vtkPVServerManagerApplicationPython as ApplicationPython
        except:
            paraview.print_error("Error: Cannot import vtkPVServerManagerApplicationPython")

        if not CorePython.vtkProcessModule.GetProcessModule():
            pvoptions = None
            if paraview.options.batch:
                pvoptions = CorePython.vtkPVOptions()
                pvoptions.SetProcessType(CorePython.vtkPVOptions.PVBATCH)
                if paraview.options.symmetric:
                    pvoptions.SetSymmetricMPIMode(True)
            ApplicationPython.vtkInitializationHelper.Initialize(sys.executable, CorePython.vtkProcessModule.PROCESS_BATCH, pvoptions)

        import paraview.servermanager as pvsm
        # we need ParaView 4.2 since ParaView 4.1 doesn't properly wrap
        # vtkPVPythonCatalystPython
        if pvsm.vtkSMProxyManager.GetVersionMajor() != 4 or \
           pvsm.vtkSMProxyManager.GetVersionMinor() < 2:
            print 'Must use ParaView v4.2 or greater'
            sys.exit(0)

        import numpy
        import vtkPVCatalystPython as catalyst
        import vtkPVPythonCatalystPython as pythoncatalyst
        import paraview.simple
        import paraview.vtk as vtk
        from paraview import numpy_support
        paraview.options.batch = True
        paraview.options.symmetric = True

        coProcessor = catalyst.vtkCPProcessor()
        pm = paraview.servermanager.vtkProcessModule.GetProcessModule()
        from mpi4py import MPI

def finalize():
    global coProcessor, usecp
    if usecp:
        coProcessor.Finalize()
        import vtkPVServerManagerApplicationPython as ApplicationPython
        ApplicationPython.vtkInitializationHelper.Finalize()

def addscript(name):
    global coProcessor
    import vtkPVPythonCatalystPython as pythoncatalyst
    pipeline = pythoncatalyst.vtkCPPythonScriptPipeline()
    pipeline.Initialize(name)
    coProcessor.AddPipeline(pipeline)

def coprocess(time, timeStep, grid, attributes):
    global coProcessor
    import vtk
    import vtkPVCatalystPython as catalyst
    import paraview
    from paraview import numpy_support
    dataDescription = catalyst.vtkCPDataDescription()
    dataDescription.SetTimeData(time, timeStep)
    dataDescription.AddInput("input")

    if coProcessor.RequestDataDescription(dataDescription):
        import fedatastructures
        imageData = vtk.vtkImageData()
        imageData.SetExtent(grid.XStartPoint, grid.XEndPoint, 0, grid.NumberOfYPoints-1, 0, grid.NumberOfZPoints-1)
        imageData.SetSpacing(grid.Spacing)
        velocity = paraview.numpy_support.numpy_to_vtk(attributes.Velocity)
        velocity.SetName("velocity")
        imageData.GetPointData().AddArray(velocity)

        pressure = numpy_support.numpy_to_vtk(attributes.Pressure)
        pressure.SetName("pressure")
        imageData.GetCellData().AddArray(pressure)
        dataDescription.GetInputDescriptionByName("input").SetGrid(imageData)
        dataDescription.GetInputDescriptionByName("input").SetWholeExtent(0, grid.NumberOfGlobalXPoints-1, 0, grid.NumberOfYPoints-1, 0, grid.NumberOfZPoints-1)
        coProcessor.CoProcess(dataDescription)
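
The adaptor's coprocess() function above hands the simulation's numpy arrays to Catalyst via paraview.numpy_support.numpy_to_vtk, attaching velocity as point data and pressure as cell data. A minimal sketch of that conversion, not part of the commit and assuming ParaView's Python modules are importable (e.g. with PYTHONPATH set as in the driver docstring further below):

import numpy
from paraview import numpy_support

nPoints = 8
a = numpy.zeros((nPoints, 3))                # velocity is an (N, 3) point array
vel = numpy_support.numpy_to_vtk(a)          # converts the numpy array to a vtkDataArray
vel.SetName("velocity")
print(vel.GetNumberOfTuples())               # 8  -> one tuple per grid point
print(vel.GetNumberOfComponents())           # 3  -> x, y, z components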

@@ -0,0 +1,85 @@
try: paraview.simple
except: from paraview.simple import *

from paraview import coprocessing

#--------------------------------------------------------------
# Code generated from cpstate.py to create the CoProcessor.

# ----------------------- CoProcessor definition -----------------------

def CreateCoProcessor():
    def _CreatePipeline(coprocessor, datadescription):
        class Pipeline:
            filename_6_pvti = coprocessor.CreateProducer(datadescription, "input")

            ParallelImageDataWriter1 = coprocessor.CreateWriter(XMLPImageDataWriter, "fullgrid_%t.pvti", 100)

            SetActiveSource(filename_6_pvti)
            Slice1 = Slice(guiName="Slice1", Crinkleslice=0, SliceOffsetValues=[0.0], Triangulatetheslice=1, SliceType="Plane")
            Slice1.SliceType.Offset = 0.0
            Slice1.SliceType.Origin = [9.0, 11.0, 9.0]
            Slice1.SliceType.Normal = [1.0, 0.0, 0.0]

            ParallelPolyDataWriter1 = coprocessor.CreateWriter(XMLPPolyDataWriter, "slice_%t.pvtp", 10)

        return Pipeline()

    class CoProcessor(coprocessing.CoProcessor):
        def CreatePipeline(self, datadescription):
            self.Pipeline = _CreatePipeline(self, datadescription)

    coprocessor = CoProcessor()
    freqs = {'input': [10, 100]}
    coprocessor.SetUpdateFrequencies(freqs)
    return coprocessor

#--------------------------------------------------------------
# Global variable that will hold the pipeline for each timestep.
# Creating the CoProcessor object doesn't actually create the ParaView pipeline;
# it will be set up automatically the first time coprocessor.UpdateProducers()
# is called.
coprocessor = CreateCoProcessor()

#--------------------------------------------------------------
# Enable Live Visualization with ParaView
coprocessor.EnableLiveVisualization(False, 1)

# ---------------------- Data Selection method ----------------------

def RequestDataDescription(datadescription):
    "Callback to populate the request for current timestep"
    global coprocessor
    if datadescription.GetForceOutput() == True:
        # We are just going to request all fields and meshes from the simulation
        # code/adaptor.
        for i in range(datadescription.GetNumberOfInputDescriptions()):
            datadescription.GetInputDescription(i).AllFieldsOn()
            datadescription.GetInputDescription(i).GenerateMeshOn()
        return

    # Set up requests for all inputs based on the requirements of the pipeline.
    coprocessor.LoadRequestedData(datadescription)

# ------------------------ Processing method ------------------------

def DoCoProcessing(datadescription):
    "Callback to do co-processing for current timestep"
    global coprocessor

    # Update the coprocessor by providing it the newly generated simulation data.
    # If the pipeline hasn't been set up yet, this will set up the pipeline.
    coprocessor.UpdateProducers(datadescription)

    # Write output data, if appropriate.
    coprocessor.WriteData(datadescription)

    # Write image capture (last arg: rescale lookup table), if appropriate.
    coprocessor.WriteImages(datadescription, rescale_lookuptable=False)

    # Live Visualization, if enabled.
    coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
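
The two writers above are registered with output frequencies of 100 (full grid) and 10 (slice), matching the SetUpdateFrequencies call. A small sketch, not part of the commit, of which of the driver's 100 timesteps would produce files, assuming the usual timestep-modulo-frequency rule applied by paraview.coprocessing:

# Assumes output is written when timestep % frequency == 0; the driver further
# below runs timesteps 0..99.
slice_steps = [t for t in range(100) if t % 10 == 0]      # -> slice_%t.pvtp
fullgrid_steps = [t for t in range(100) if t % 100 == 0]  # -> fullgrid_%t.pvti
print(slice_steps)     # [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
print(fullgrid_steps)  # [0]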

@@ -0,0 +1,44 @@
import numpy

class GridClass:
    """
    We are working with a uniform grid which will be
    represented as a vtkImageData in Catalyst. It is partitioned
    in the x-direction only.
    """
    def __init__(self, pointDimensions, spacing):
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()

        self.XStartPoint = int(pointDimensions[0]*rank/size)
        self.XEndPoint = int(pointDimensions[0]*(rank+1)/size)
        if rank+1 != size:
            self.XEndPoint += 1
        else:
            self.XEndPoint = pointDimensions[0]-1
        self.NumberOfYPoints = pointDimensions[1]
        self.NumberOfZPoints = pointDimensions[2]
        self.NumberOfGlobalXPoints = pointDimensions[0]

        self.PointDimensions = pointDimensions
        self.Spacing = spacing

    def GetNumberOfPoints(self):
        return (self.XEndPoint-self.XStartPoint+1)*self.PointDimensions[1]*self.PointDimensions[2]

    def GetNumberOfCells(self):
        return (self.XEndPoint-self.XStartPoint)*(self.PointDimensions[1]-1)*(self.PointDimensions[2]-1)

class AttributesClass:
    """
    We have velocity point data and pressure cell data.
    """
    def __init__(self, grid):
        self.Grid = grid

    def Update(self, time):
        self.Velocity = numpy.zeros((self.Grid.GetNumberOfPoints(), 3))
        self.Velocity = self.Velocity + time
        self.Pressure = numpy.zeros(self.Grid.GetNumberOfCells())
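
As its docstring notes, GridClass above partitions the uniform grid along x only, one slab per MPI rank. A standalone sketch, not part of the commit, that replicates the arithmetic of GridClass.__init__ (no MPI needed) for the driver's 10 x-points; the two-rank split is purely illustrative:

nx, size = 10, 2                      # x-points and a hypothetical number of MPI ranks
for rank in range(size):
    start = int(nx*rank/size)         # same formulas as GridClass.__init__
    end = int(nx*(rank+1)/size)
    if rank+1 != size:
        end += 1
    else:
        end = nx - 1
    print((rank, start, end))         # (0, 0, 6) then (1, 5, 9)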

@@ -0,0 +1,35 @@
"""
A simple example of a Python simulation code working with Catalyst.
It depends on numpy and mpi4py being available. The environment
variables need to be set up properly to find Catalyst. For Linux
and Mac machines they should be:
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:<Catalyst build dir>/lib
export PYTHONPATH=<Catalyst build dir>/lib:<Catalyst build dir>/lib/site-packages
"""
import numpy
import sys
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
import fedatastructures
grid = fedatastructures.GridClass([10, 12, 10], [2, 2, 2])
attributes = fedatastructures.AttributesClass(grid)
doCoprocessing = True
if doCoprocessing:
import coprocessor
coprocessor.initialize()
coprocessor.addscript("cpscript.py")
for i in range(100):
attributes.Update(i)
if doCoprocessing:
import coprocessor
coprocessor.coprocess(i, i, grid, attributes)
if doCoprocessing:
import coprocessor
coprocessor.finalize()