ParaView-5.0.1: Added the source-tree to ThirdParty-dev and patched as described in the README file

Resolves bug-report http://bugs.openfoam.org/view.php?id=2098
Henry Weller
2016-05-30 21:20:56 +01:00
parent 1cce60aa78
commit eba760a6d6
24640 changed files with 6366069 additions and 0 deletions


@ -0,0 +1,133 @@
#==========================================================================
#
# Program: ParaView
#
# Copyright (c) 2005-2008 Sandia Corporation, Kitware Inc.
# All rights reserved.
#
# ParaView is a free software; you can redistribute it and/or modify it
# under the terms of the ParaView license version 1.2.
#
# See License_v1.2.txt for the full ParaView license.
# A copy of this license can be obtained by contacting
# Kitware Inc.
# 28 Corporate Drive
# Clifton Park, NY 12065
# USA
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#==========================================================================
# This file manages the building/installing of ParaView's python modules.
# Wrapping of classes is taken care of by VTK itself.
if (NOT PARAVIEW_ENABLE_PYTHON)
return()
endif()
set(PV_PYTHON_MODULE_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/paraview")
set(PV_PYTHON_MODULE_BINARY_DIR "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/site-packages/paraview")
# Copy generated module files from VTK into place in the build tree
set(_vtkpy_modules
# Ninja BUG 760: In the past this depended on vtkpython_pyc. However, due to a
# Ninja bug, the dependency wasn't being set up properly. Hence we depend
# directly on the generated file. Once Ninja or CMake is fixed, we can remove
# this file dependency and leave the target dependency.
${CMAKE_BINARY_DIR}/VTK/Wrapping/Python/vtk_compile_complete
vtkpython_pyc
)
if (TARGET vtkWebPython)
list(APPEND _vtkpy_modules vtkWebPython)
endif()
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/vtk_py_copy_completed"
# Copy into $pydir/paraview/vtk.
# This is what scripts get when they do "import paraview.vtk".
COMMAND ${CMAKE_COMMAND} ARGS -E echo "copying to ${PV_PYTHON_MODULE_BINARY_DIR}"
COMMAND ${CMAKE_COMMAND} ARGS -E copy_directory
"${VTK_BINARY_DIR}/Wrapping/Python/vtk"
"${PV_PYTHON_MODULE_BINARY_DIR}/vtk"
# Overlay that with paraview's customizations.
COMMAND ${CMAKE_COMMAND} ARGS -E echo "overlaying onto ${PV_PYTHON_MODULE_BINARY_DIR}"
COMMAND ${CMAKE_COMMAND} ARGS -E copy_directory
"${CMAKE_CURRENT_SOURCE_DIR}/paraview/vtk"
"${PV_PYTHON_MODULE_BINARY_DIR}/vtk"
# Also copy into $pydir/vtk.
# Scripts that want all of vtk can now "import vtk".
COMMAND ${CMAKE_COMMAND} ARGS -E echo "copying to ${VTK_BUILD_PYTHON_MODULE_DIR}/vtk"
COMMAND ${CMAKE_COMMAND} ARGS -E copy_directory
"${VTK_BINARY_DIR}/Wrapping/Python/vtk"
"${VTK_BUILD_PYTHON_MODULE_DIR}/vtk"
# Touch the OUTPUT file, otherwise this command will always be dirty.
COMMAND ${CMAKE_COMMAND} ARGS -E touch "${CMAKE_CURRENT_BINARY_DIR}/vtk_py_copy_completed"
DEPENDS
${_vtkpy_modules}
"${VTK_BINARY_DIR}/Wrapping/Python/vtk/__init__.py"
)
add_custom_target(copy_vtk_py_files ALL
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/vtk_py_copy_completed"
)
# Copy ParaView specific python files
file (COPY ${CMAKE_CURRENT_SOURCE_DIR}/paraview
DESTINATION ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/site-packages
USE_SOURCE_PERMISSIONS
FILES_MATCHING
PATTERN *.py)
# Copy obsolete ColorMaps.xml for now.
file (COPY ${CMAKE_CURRENT_SOURCE_DIR}/ColorMaps.xml
DESTINATION ${PV_PYTHON_MODULE_BINARY_DIR}
USE_SOURCE_PERMISSIONS)
# Add a couple of vtk python files to "paraview" module.
file (COPY "${ParaView_SOURCE_DIR}/VTK/Wrapping/Python/vtk/util/numpy_support.py"
"${ParaView_SOURCE_DIR}/VTK/Wrapping/Python/vtk/util/vtkConstants.py"
DESTINATION ${PV_PYTHON_MODULE_BINARY_DIR}
USE_SOURCE_PERMISSIONS)
# Byte compile the paraview Python files.
configure_file(${PV_PYTHON_MODULE_SOURCE_DIR}/compile_all_pv.py.in
${PV_PYTHON_MODULE_BINARY_DIR}/compile_all_pv.py
@ONLY IMMEDIATE)
configure_file(${PV_PYTHON_MODULE_SOURCE_DIR}/cpexport.py.in
${PV_PYTHON_MODULE_BINARY_DIR}/cpexport.py
@ONLY IMMEDIATE)
add_custom_command(
WORKING_DIRECTORY ${PV_PYTHON_MODULE_BINARY_DIR}
COMMAND ${PYTHON_EXECUTABLE}
ARGS compile_all_pv.py
DEPENDS ${PV_PYTHON_SOURCE_FILES} ${PV_PYTHON_MODULE_BINARY_DIR}/compile_all_pv.py copy_vtk_py_files
${PV_PYTHON_OUTPUT_FILES}
OUTPUT "${PV_PYTHON_MODULE_BINARY_DIR}/pv_compile_complete"
)
add_custom_target(paraview_pyc ALL
DEPENDS copy_vtk_py_files "${PV_PYTHON_MODULE_BINARY_DIR}/pv_compile_complete")
# Install the paraview module files.
install(DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/site-packages
DESTINATION ${VTK_INSTALL_LIBRARY_DIR}
COMPONENT Runtime
PATTERN *-complete EXCLUDE
PATTERN *_complete EXCLUDE)

File diff suppressed because it is too large

@ -0,0 +1,143 @@
r"""
The paraview package provides modules used to script ParaView. Generally, users
should import the modules of interest directly e.g.::
from paraview.simple import *
However, one may want to import paraview package before importing any of the
ParaView modules to force backwards compatibility to an older version::
# To run scripts written for ParaView 4.0 in newer versions, you can use the
# following.
import paraview
paraview.compatibility.major = 4
paraview.compatibility.minor = 0
# Now, import the modules of interest.
from paraview.simple import *
"""
#==============================================================================
#
# Program: ParaView
# Module: __init__.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
class _version(object):
def __init__(self, major, minor):
self.major = major
self.minor = minor
def GetVersion(self):
"""Return version as a float. Will return None is no version is
specified."""
if self.minor != None and self.major != None:
version = float(self.minor)
while version >= 1.0:
version = version / 10.0
version += float(self.major)
return version
return None
def __lt__(self, other):
"""This will always return False if compatibility is not being forced
to a particular version."""
myversion = self.GetVersion()
if not myversion:
return False
else:
return myversion < other
def __le__(self, other):
"""This will always return False if compatibility is not forced to a
particular version."""
myversion = self.GetVersion()
if not myversion:
return False
else:
return myversion <= other
def __eq__(self, other):
raise RuntimeError("Equal operation not supported.")
def __ne__(self, other):
raise RuntimeError("NotEqual operation not supported.")
def __gt__(self, other):
"""This will always return True if compatibility is not being forced to
a particular version"""
myversion = self.GetVersion()
if not myversion:
return True
else:
return myversion > other
def __ge__(self, other):
"""This will always return True if compatibility is not being forced to
a particular version"""
myversion = self.GetVersion()
if not myversion:
return True
else:
return myversion >= other
def __repr__(self):
myversion = self.GetVersion()
if not myversion:
return "(none)"
return str(myversion)
class compatibility:
"""Class used to check version number and compatibility. Users should only
set the compatibility explicitly to force backwards compatibility to and
older versions.
"""
minor = None
major = None
def GetVersion(cls):
return _version(cls.major, cls.minor)
GetVersion = classmethod(GetVersion)
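# Editor's illustrative sketch (not part of the original ParaView source):
# how the compatibility version defined above is typically exercised. The
# asserted values follow from the float encoding in _version.GetVersion().
def _compatibility_example():
    compatibility.major = 4
    compatibility.minor = 1
    version = compatibility.GetVersion()  # a _version encoding 4.1
    assert version < 4.2
    assert version >= 4.1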
def make_name_valid(name):
"""Make a string into a valid Python variable name."""
if not name:
return None
import string
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
name = str().join([c for c in name if c in valid_chars])
# guard: the name may be empty once invalid characters are stripped
if not name:
return None
if not name[0].isalpha():
name = 'a' + name
return name
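# Editor's illustrative sketch (not part of the original source): expected
# behaviour of make_name_valid on a couple of hypothetical array names.
def _make_name_valid_example():
    assert make_name_valid("Pressure (Pa)") == "PressurePa"
    assert make_name_valid("1st-array") == "a1starray"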
class options:
"""Values set here have any effect, only when importing the paraview module
in python interpretor directly i.e. not through pvpython or pvbatch. In
that case, one should use command line arguments for the two
executables"""
"""When True, act as pvbatch. Default behaviour is to act like pvpython"""
batch = False
"""When True, acts like pvbatch --symmetric. Requires that batch is set to
True to have any effect."""
symmetric = False
def print_warning(text):
"""Print text"""
print text
def print_error(text):
"""Print text"""
print text
def print_debug_info(text):
"""Print text"""
print text
"""This variable is set whenever Python is initialized within a ParaView
Qt-based application. Modules within the 'paraview' package often use this to
taylor their behaviour based on whether the Python environment is embedded
within an application or not."""
fromGUI = False


@ -0,0 +1,165 @@
#==============================================================================
#
# Program: ParaView
# Module: annotation.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
r"""
This module is used by vtkPythonAnnotationFilter.
"""
try:
import numpy as np
except ImportError:
raise RuntimeError, "'numpy' module is not found. numpy is needed for "\
"this functionality to work. Please install numpy and try again."
from paraview import calculator
from vtk import vtkDataObject
from vtk.numpy_interface import dataset_adapter as dsa
def _get_ns(self, do, association):
if association == vtkDataObject.FIELD:
# For FieldData, it gets tricky. In general, one would think we are going
# to look at field data in inputDO directly -- same for composite datasets.
# However, ExodusIIReader likes to put field data on leaf nodes instead.
# So we also check leaf nodes, if the FieldData on the root is empty.
# We explicitly call dsa.DataObject.GetFieldData to ensure that
# when dealing with composite datasets, we get the FieldData on the
# vtkCompositeDataSet itself, not in the leaf nodes.
fieldData = dsa.DataObject.GetFieldData(do)
if len(fieldData.keys()) == 0:
# if this is a composite dataset, use field data from the first block with some
# field data.
if isinstance(do, dsa.CompositeDataSet):
for dataset in do:
fieldData = dataset.GetFieldData()
if (not fieldData is None) and (len(fieldData.keys()) > 0): break
else:
fieldData = do.GetAttributes(association)
arrays = calculator.get_arrays(fieldData)
ns = {}
ns["input"] = do
if self.GetDataTimeValid():
ns["time_value"] = self.GetDataTime()
ns["t_value"] = ns["time_value"]
if self.GetNumberOfTimeSteps() > 0:
ns["time_steps"] = [self.GetTimeStep(x) for x in xrange(self.GetNumberOfTimeSteps())]
ns["t_steps"] = ns["time_steps"]
if self.GetTimeRangeValid():
ns["time_range"] = self.GetTimeRange()
ns["t_range"] = ns["time_range"]
if self.GetDataTimeValid() and self.GetNumberOfTimeSteps() > 0:
try:
ns["time_index"] = ns["time_steps"].index(ns["time_value"])
ns["t_index"] = ns["time_index"]
except ValueError: pass
ns.update(arrays)
return ns
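# Editor's illustrative note (not part of the original module): with the names
# bound above, a typical annotation expression passed to the filter could be
#   "Time: %f of %d steps" % (time_value, len(time_steps))
# assuming the input pipeline publishes time steps.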
def execute(self):
"""Called by vtkPythonAnnotationFilter."""
expression = self.GetExpression()
inputDO = self.GetCurrentInputDataObject()
if not expression or not inputDO:
return True
inputs = [dsa.WrapDataObject(inputDO)]
association = self.GetArrayAssociation()
ns = _get_ns(self, inputs[0], association)
try:
result = calculator.compute(inputs, expression, ns=ns)
except:
from sys import stderr
print >> stderr, "Failed to evaluate expression '%s'. "\
"The following exception stack should provide additional "\
"developer specific information. This typically implies a malformed "\
"expression. Verify that the expression is valid.\n\n" \
"Variables in current scope are %s \n" % (expression, ns.keys())
raise
self.SetComputedAnnotationValue("%s" % result)
return True
def execute_on_global_data(self):
"""Called by vtkAnnotateGlobalDataFilter."""
inputDO = self.GetCurrentInputDataObject()
if not inputDO:
return True
inputs = [dsa.WrapDataObject(inputDO)]
association = self.GetArrayAssociation()
ns = _get_ns(self, inputs[0], association)
if not ns.has_key(self.GetFieldArrayName()):
# bug fix: 'stderr' is not defined at module scope; import it before use.
from sys import stderr
print >> stderr, "Failed to locate global array '%s'." % self.GetFieldArrayName()
raise RuntimeError, "Failed to locate global array"
array = ns[self.GetFieldArrayName()]
chosen_element = array
try:
# if the array has as many elements as the timesteps, pick the element
# matching the current timestep.
if self.GetNumberOfTimeSteps() > 0 and \
array.shape[0] == self.GetNumberOfTimeSteps():
chosen_element = array[ns["time_index"]]
# if the array has as many elements as the `mode_shape_range`, pick the
# element matching the `mode_shape` (BUG #0015322).
elif ns.has_key("mode_shape") and ns.has_key("mode_shape_range") and \
ns["mode_shape_range"].shape[1] == 2 and \
array.shape[0] == (ns["mode_shape_range"].GetValue(1) - ns["mode_shape_range"].GetValue(0) + 1):
chosen_element = array[ns["mode_shape"].GetValue(0) - ns["mode_shape_range"].GetValue(0)]
elif array.shape[0] == 1:
# for single element arrays, just extract the value.
# This avoids the extra () when converting to string
# (see BUG #15321).
chosen_element = array[0]
except AttributeError: pass
try:
# hack for string array.
if chosen_element.IsA("vtkStringArray"):
chosen_element = chosen_element.GetValue(0)
except: pass
expression = self.GetPrefix() if self.GetPrefix() else ""
expression += str(chosen_element)
expression += self.GetPostfix() if self.GetPostfix() else ""
self.SetComputedAnnotationValue(expression)
return True
def execute_on_attribute_data(self, evaluate_locally):
"""Called by vtkAnnotateAttributeDataFilter."""
inputDO = self.GetCurrentInputDataObject()
if not inputDO:
return True
inputs = [dsa.WrapDataObject(inputDO)]
association = self.GetArrayAssociation()
ns = _get_ns(self, inputs[0], association)
if not ns.has_key(self.GetArrayName()):
# bug fix: 'stderr' is not defined at module scope; import it before use.
from sys import stderr
print >> stderr, "Failed to locate array '%s'." % self.GetArrayName()
raise RuntimeError, "Failed to locate array"
if not evaluate_locally:
return True
array = ns[self.GetArrayName()]
chosen_element = array[self.GetElementId()]
expression = self.GetPrefix() if self.GetPrefix() else ""
expression += str(chosen_element)
self.SetComputedAnnotationValue(expression)
return True


@ -0,0 +1,691 @@
"""
This module has utilities to benchmark paraview.
First, when run standalone, this will do a simple rendering benchmark test. The
test renders a sphere with various rendering settings and reports the rendering
rate achieved in triangles/sec. run() is the entrypoint for that usage.
Second, you can set up arbitrary pipelines and this module helps you obtain,
interpret and report the information recorded by ParaView's logs.
Do that like so:
1. optionally, call maximize_logs() first
2. setup and run your visualization pipeline (via GUI or script as you prefer)
3. either
call print_logs() to print out the logs in raw format
call parse_logs() to let the script identify and report on per frame and per filter execution times
::
WARNING: This was meant for server side rendering, but it could work
reasonably well when geometry is delivered to the client and rendered there,
if the script were changed to recognize MPIMoveData as the end of frame and
did something sensible on the server, which has no other end-of-frame knowledge
TODO: builtin mode shouldn't show server info, it is redundant
TODO: this doesn't handle split render/data server mode
TODO: the end of frame markers are heuristic, likely buggy, and have not
been tried since before 3.9's view restructuring
"""
import time
import sys
from paraview.simple import *
try:
import numpy
numpy_loaded = True
except ImportError:
numpy_loaded = False
import re
import paraview
import copy
import pickle
# a regular expression to parse filter execution time
match_filter = re.compile(" *Execute (\w+) id: +(\d+), +(\d*.*\d+) +seconds")
match_vfilter = re.compile(" *Execute (\w+) +, +(\d*.*\d+) +seconds")
# a regular expression to parse overall rendering time
match_still_render = re.compile(" *(Still) Render, +(\d*.*\d+) +seconds")
match_interactive_render = \
re.compile(" *(Interactive) Render, +(\d*.*\d+) +seconds")
match_render = re.compile(" *(\w+|\w+ Dev) Render, +(\d*.*\d+) +seconds")
match_icetrender = re.compile("(IceT Dev) Render, +(\d*.*\d+) +seconds")
# more for parallel composite and delivery time
match_composite = re.compile(" *Compositing, +(\d*.*\d+) +seconds")
match_send = re.compile(" *Sending, +(\d*.*\d+) +seconds")
match_receive = re.compile(" *Receiving, +(\d*.*\d+) +seconds")
match_comp_xmit = \
re.compile(" *TreeComp (Send|Receive) (\d+) " + \
"(to|from) (\d+) uchar (\d+), +(\d*.*\d+) +seconds")
match_comp_comp = re.compile(" *TreeComp composite, *(\d*.*\d+) +seconds")
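# Editor's illustrative note (not part of the original module): example log
# lines these patterns are meant to match (hypothetical timings):
#   "      Execute vtkSphereSource id: 42, 0.0123 seconds"  -> match_filter
#   "    Still Render, 0.0456 seconds"                      -> match_still_render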
showparse = False
#icet composite message comes after the render messages,
#where for bswap and manta it comes before so we have to treat icet differently
icetquirk = False
start_frame = 0
default_log_threshold = dict()
class OneLog :
def __init__(self):
self.runmode = 'batch'
self.servertype = 'unified'
self.component = 0x10
self.rank = 0
self.lines = []
def componentString(self):
ret = ""
if self.component & 0x10:
ret = ret + " CLIENT "
if self.component & 0x4:
ret = ret + " RENDER "
if self.component & 0x1:
ret = ret + " DATA "
return ret
def print_log(self, showlines=False):
print "#RunMode:", self.runmode,
print "ServerType:", self.servertype,
print "Component:", self.componentString(),
print "processor#:", self.rank
if showlines:
for i in self.lines:
print i
def toString(self, showlines=False):
result = "#RunMode: " + self.runmode + " ServerType: " + self.servertype + " Component: " + self.componentString() + " processor#: " + str(self.rank) + "\n"
if showlines:
for i in self.lines:
result += i + "\n"
return result
logs = []
def maximize_logs () :
"""
Convenience method to ask paraview to produce logs with lots of space and
highest resolution.
"""
pm = paraview.servermanager.vtkProcessModule.GetProcessModule()
if pm == None:
return
ss = paraview.servermanager.vtkSMSession
for ptype in [ss.CLIENT_AND_SERVERS, ss.CLIENT, ss.SERVERS,
ss.RENDER_SERVER, ss.DATA_SERVER]:
default_log_threshold[str(ptype)] = 0.0
pxm = paraview.servermanager.ProxyManager()
tl = pxm.NewProxy("misc", "TimerLog")
prop = tl.GetProperty("MaxEntries")
prop.SetElements1(1000000)
tl.UpdateVTKObjects()
def get_memuse() :
session = servermanager.ProxyManager().GetSessionProxyManager().GetSession()
retval = []
infos = servermanager.vtkPVMemoryUseInformation()
session.GatherInformation(session.CLIENT, infos, 0)
procUse = str(infos.GetProcMemoryUse(0))
hostUse = str(infos.GetHostMemoryUse(0))
retval.append("CLIENT " + procUse + " / " + hostUse)
infos = servermanager.vtkPVMemoryUseInformation()
session.GatherInformation(session.DATA_SERVER, infos, 0)
for i in range(0,infos.GetSize()):
rank = str(infos.GetRank(i))
procUse = str(infos.GetProcMemoryUse(i))
hostUse = str(infos.GetHostMemoryUse(i))
retval.append("DS[" + rank + "] " + procUse + " / " + hostUse)
return retval
def dump_logs( filename ) :
"""
This saves off the logs we've gathered.
It allows you to run a benchmark somewhere, save off all of the details in
raw format, then load them somewhere else. You can then do a detailed
analysis and you always have the raw data to go back to.
"""
global logs
f = open(filename, "w")
pickle.dump(logs, f)
f.close()
def import_logs( filename ) :
"""
This is for bringing in a saved log file and parsing it after the fact.
TODO: add an option to load raw paraview logs in text format
"""
global logs
logs = []
f = open(filename, "r")
logs = pickle.load(f)
f.close()
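# Editor's illustrative sketch (not part of the original module): persist the
# gathered logs and reload them later for offline parsing. The file name is
# hypothetical.
#
#   get_logs()
#   dump_logs("run1.logs")
#   ...later, possibly on another machine...
#   import_logs("run1.logs")
#   parse_logs()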
def get_logs() :
"""
This is for bringing in logs at run time to parse while running.
"""
global logs
logs = []
pm = paraview.servermanager.vtkProcessModule.GetProcessModule()
if pm == None:
return
connectionId = paraview.servermanager.ActiveConnection.ID
session = paraview.servermanager.ActiveConnection.Session
is_symmetric_mode = False
if pm.GetProcessTypeAsInt() == pm.PROCESS_BATCH:
runmode = 'batch'
is_symmetric_mode = pm.GetSymmetricMPIMode()
else:
runmode = 'interactive'
if session.GetRenderClientMode() == session.RENDERING_UNIFIED:
servertype = 'unified'
else:
servertype = 'split'
if runmode == 'batch':
# collect information from all processes in one go.
components = [session.CLIENT_AND_SERVERS]
else:
if servertype == 'unified':
# collect information separately for client and servers.
components = [session.CLIENT, session.SERVERS]
else:
# collect information separately for all process types.
components = [session.CLIENT, session.RENDER_SERVER, session.DATA_SERVER]
for component in components:
timerInfo = paraview.servermanager.vtkPVTimerInformation()
if len(default_log_threshold) != 0:
timerInfo.SetLogThreshold(default_log_threshold[str(component)])
session.GatherInformation(component, timerInfo, 0)
for i in range(timerInfo.GetNumberOfLogs()):
alog = OneLog()
alog.runmode = runmode
alog.servertype = servertype
alog.component = component
alog.rank = i
if is_symmetric_mode:
# in Symmetric mode, GatherInformation() only collects
# information from the current node. so the
# vtkPVTimerInformation will only have info for local process.
alog.rank = pm.GetPartitionId()
for line in timerInfo.GetLog(i).split('\n'):
alog.lines.append(line)
logs.append(alog)
def print_logs() :
"""
Print logs on the root node by gathering logs across all the nodes,
regardless of whether the process was started in symmetric mode or not.
"""
global logs
if len(logs) == 0:
get_logs()
# Handle symmetric mode specifically if need be
pm = paraview.servermanager.vtkProcessModule.GetProcessModule()
is_symmetric_mode = False
if pm != None:
is_symmetric_mode = pm.GetSymmetricMPIMode()
if is_symmetric_mode:
# Need to provide extra synchronization
ctrl = pm.GetGlobalController()
proc = pm.GetPartitionId()
nbProc = pm.GetNumberOfLocalPartitions()
if proc == 0:
# Start with my logs
for i in logs:
i.print_log(True)
# Then Print the log of every other rank
for otherProc in range(1, nbProc):
# Max buffer size 999999
logSize = " " * 6
ctrl.Receive(logSize, len(logSize), otherProc, 987455)
logSize = int(logSize)
logTxt = " " * logSize
ctrl.Receive(logTxt, logSize, otherProc, 987456)
print logTxt
else:
# Extract logs text
logTxt = ""
for i in logs:
logTxt += i.toString(True)
logSize = str(len(logTxt))
# Push local logs to process 0
ctrl.Send(logSize, len(logSize), 0, 987455)
ctrl.Send(logTxt, len(logTxt), 0, 987456)
else:
# Regular local print
for i in logs:
i.print_log(True)
def __process_frame() :
global filters
global current_frames_records
global frames
global start_frame
max = len(current_frames_records)
#determine ancestry of each record from order and indent
#subtract only immediate children from each record
#TODO: Make this an option
for x in xrange(max):
indent = current_frames_records[x]['indent']
minindent = 10000
for y in xrange(x+1,max):
indent2 = current_frames_records[y]['indent']
if indent2<=indent:
#found a record which is not a descendant
break
if indent2 < minindent:
minindent = indent2
for y in xrange(x+1,max):
indent2 = current_frames_records[y]['indent']
if indent2 == minindent:
current_frames_records[x]['local_duration'] = \
current_frames_records[x]['local_duration'] -\
current_frames_records[y]['duration']
for x in xrange(max):
#keep global statics per filter
record = current_frames_records[x]
id = record['id']
if id in filters:
srecord = filters[id]
srecord['duration'] = srecord['duration'] + record['duration']
srecord['local_duration'] = srecord['local_duration'] +\
record['local_duration']
srecord['count'] = srecord['count'] + 1
filters[id] = srecord
else:
filters[id] = copy.deepcopy(record)
#save off this frame and begin the next
frames.append(current_frames_records)
current_frames_records = []
def __parse_line (line) :
"""
Examine one line from the logs. If it is a report about a filter's
execution time, parse the relevant information out of the line and
collect those statistics. We record each filter's average execution
time as well as each filter's contribution to each rendered frame.
"""
global filters
global current_frames_records
global cnt
global show_input
global icetquirk
found = False
#find indent
cnt = 0
for c in range(len(line)):
if line[c] == " ":
cnt = cnt + 1
else:
break
#determine if this log comes from icet so we can
#do special case treatement for frame markings
icetline = False
match = match_icetrender.match(line)
if match != None:
icetquirk = True
icetline = True
match = match_filter.match(line)
if match != None:
found = True
if showparse:
print "FILT:", cnt, line
name = match.group(1)
id = match.group(2)
duration = match.group(3)
match = match_vfilter.match(line)
if match != None:
found = True
if showparse:
print "LFLT:", cnt, line
name = match.group(1)
id = name
duration = match.group(2)
match = match_comp_comp.match(line)
if match != None:
found = True
if showparse:
print "TCMP:", cnt, line
name = "tree comp"
id = name
duration = match.group(1)
match = match_comp_xmit.match(line)
if match != None:
found = True
if showparse:
print "TXMT:", cnt, line
name = match.group(1)
id = name
duration = match.group(6)
match = match_composite.match(line)
if match != None:
found = True
if showparse:
print "COMP:", cnt, line
name = 'composite'
id = 'comp'
duration = match.group(1)
match = match_send.match(line)
if match != None:
found = True
if showparse:
print "SEND:", cnt, line
name = 'send'
id = 'send'
duration = match.group(1)
match = match_receive.match(line)
if match != None:
found = True
if showparse:
print "RECV:", cnt, line
name = 'receive'
id = 'recv'
duration = match.group(1)
match = match_still_render.match(line)
if match != None:
found = True
if showparse:
print "STILL:", cnt, line
name = match.group(1)
id = 'still'
duration = match.group(2)
if match == None:
match = match_interactive_render.match(line)
if match != None:
found = True
if showparse:
print "INTER:", cnt, line
name = match.group(1)
id = 'inter'
duration = match.group(2)
if match == None:
match = match_render.match(line)
if match != None:
found = True
if showparse:
print "REND:", cnt, line
name = match.group(1)
id = 'render'
duration = match.group(2)
if found == False:
# we didn't find anything we recognized in this line, ignore it
if showparse:
print "????:", cnt, line
return
record = dict()
record['id'] = id
record['name'] = name
record['duration'] = float(duration)
record['local_duration'] = float(duration)
record['count'] = 1
record['indent'] = cnt
#watch for the beginning of the next frame/end of previous frame
if cnt == 0:
if (id == 'still') or \
(id == 'inter') or \
(icetquirk == False and id == 'comp') or \
(icetquirk == True and icetline == True) :
if showparse:
print "SOF" #start of frame
#decipher parent child information from records in the frame
#and save off newly gathered per filter and per frame statistics
__process_frame()
#keep a record of this execution as part for the current frame
current_frames_records.append(record)
return
def parse_logs(show_parse = False, tabular = False) :
"""
Parse the collected paraview log information.
This prints out per frame, and aggregated per filter statistics.
If show_parse is true, debugging information is shown about the parsing
process that allows you to verify that the derived stats are correct.
This includes an echo of each log line collected, prepended by the token
type and the indent scanned in, or ???? if the line is unrecognized and
ignored. Frame boundaries are denoted by SOF, indicating that the preceding
line was determined to be the start of the next frame.
"""
global filters
global current_frames_records
global frames
global cnt
global showparse
global start_frame
showparse = show_parse
if len(logs) == 0:
get_logs()
for i in logs:
# per filter records
filters = dict()
filters.clear()
# per frame records
frames = []
# components of current frame
current_frames_records = []
cnt = 0
runmode = i.runmode
servertype = i.servertype
component = i.component
rank = i.rank
i.print_log(False)
for line in i.lines:
__parse_line(line)
#collect stats for the current frame in process but not officially ended
__process_frame()
#print out the gathered per frame information
if tabular:
frecs = dict()
line = "#framenum, "
for x in filters:
line += filters[x]['name'] + ":" + filters[x]['id'] + ", "
#print line
for cnt in xrange(start_frame, len(frames)):
line = ""
line += str(cnt) + ", "
printed = dict()
for x in filters:
id = filters[x]['id']
name = filters[x]['name']
found = False
for record in frames[cnt]:
if 'id' in record:
if record['id'] == id and \
record['name'] == name and \
not id in printed:
found = True
printed[id] = 1
line += str(record['local_duration']) + ", "
if not id in frecs:
frecs[id] = []
frecs[id].append(record['local_duration'])
if not found:
line += "0, "
#print line
#print
for x in frecs.keys():
v = frecs[x]
print "# ", x, len(v),
if numpy_loaded:
print numpy.min(v), numpy.mean(v), numpy.max(v),
print numpy.std(v)
else:
print "#FRAME TIMINGS"
print "#filter id, filter type, inclusive duration, local duration"
for cnt in xrange(start_frame, len(frames)):
print "#Frame ", cnt
for record in frames[cnt]:
if 'id' in record:
print record['id'], ",",
print record['name'], ",",
print record['duration'], ",",
print record['local_duration']
#print
#print
if not tabular:
#print out the gathered per filter information
print "#FILTER TIMINGS"
print "#filter id, filter type, count, "+\
"sum inclusive duration, sum local duration"
for x in filters:
record = filters[x]
print record['id'], ",",
print record['name'], ",",
print record['count'], ",",
print record['duration'], ",",
print record['local_duration']
print
def __render(ss, v, title, nframes):
print '============================================================'
print title
res = []
res.append(title)
for phires in (500, 1000):
ss.PhiResolution = phires
c = v.GetActiveCamera()
v.CameraPosition = [-3, 0, 0]
v.CameraFocalPoint = [0, 0, 0]
v.CameraViewUp = [0, 0, 1]
Render()
c1 = time.time()
for i in range(nframes):
c.Elevation(0.5)
Render()
tpr = (time.time() - c1)/nframes
ncells = ss.GetDataInformation().GetNumberOfCells()
print tpr, " secs/frame"
print ncells, " polys"
print ncells/tpr, " polys/sec"
res.append((ncells, ncells/tpr))
return res
def run(filename=None, nframes=60):
""" Runs the benchmark. If a filename is specified, it will write the
results to that file as csv. The number of frames controls how many times
a particular configuration is rendered. Higher numbers lead to more accurate
averages. """
# Turn off progress printing
paraview.servermanager.SetProgressPrintingEnabled(0)
# Create a sphere source to use in the benchmarks
ss = Sphere(ThetaResolution=1000, PhiResolution=500)
rep = Show()
v = Render()
results = []
# Start with these defaults
#v.RemoteRenderThreshold = 0
obj = servermanager.misc.GlobalMapperProperties()
obj.GlobalImmediateModeRendering = 0
# Test different configurations
title = 'display lists, no triangle strips, solid color'
obj.GlobalImmediateModeRendering = 0
results.append(__render(ss, v, title, nframes))
title = 'no display lists, no triangle strips, solid color'
obj.GlobalImmediateModeRendering = 1
results.append(__render(ss, v, title, nframes))
# Color by normals
lt = servermanager.rendering.PVLookupTable()
rep.LookupTable = lt
rep.ColorArrayName = "Normals"
lt.RGBPoints = [-1, 0, 0, 1, 0.0288, 1, 0, 0]
lt.ColorSpace = 'HSV'
lt.VectorComponent = 0
title = 'display lists, no triangle strips, color by array'
obj.GlobalImmediateModeRendering = 0
results.append(__render(ss, v, title, nframes))
title = 'no display lists, no triangle strips, color by array'
obj.GlobalImmediateModeRendering = 1
results.append(__render(ss, v, title, nframes))
if filename:
f = open(filename, "w")
else:
f = sys.stdout
print >>f, 'configuration, %d, %d' % (results[0][1][0], results[0][2][0])
for i in results:
print >>f, '"%s", %g, %g' % (i[0], i[1][1], i[2][1])
def test_module():
"""Simply exercises a few components of the module."""
maximize_logs()
paraview.servermanager.SetProgressPrintingEnabled(0)
ss = Sphere(ThetaResolution=1000, PhiResolution=500)
rep = Show()
v = Render()
print_logs()
if __name__ == "__main__":
if "--test" in sys.argv:
test_module()
else:
run()


@ -0,0 +1,105 @@
r"""This module is used by vtkPythonCalculator. It encapsulates the logic
implemented by the vtkPythonCalculator to operate on datasets to compute
derived quantities.
"""
try:
import numpy as np
except ImportError:
raise RuntimeError, "'numpy' module is not found. numpy is needed for "\
"this functionality to work. Please install numpy and try again."
import paraview
import vtk.numpy_interface.dataset_adapter as dsa
from vtk.numpy_interface.algorithms import *
# -- this will import vtkMultiProcessController and vtkMPI4PyCommunicator
def get_arrays(attribs, controller=None):
"""Returns a 'dict' referring to arrays in dsa.DataSetAttributes or
dsa.CompositeDataSetAttributes instance.
When running in parallel, this method will ensure that array names are
reduced across all ranks and, for any arrays missing on the local process, a
NoneArray will be added to the returned dictionary. This ensures that
expressions evaluate without issues due to missing arrays on certain ranks.
"""
if not isinstance(attribs, dsa.DataSetAttributes) and \
not isinstance(attribs, dsa.CompositeDataSetAttributes):
raise ValueError, \
"Argument must be DataSetAttributes or CompositeDataSetAttributes."
arrays = dict()
for key in attribs.keys():
varname = paraview.make_name_valid(key)
arrays[varname] = attribs[key]
# If running in parallel, ensure that the arrays are synced up so that
# missing arrays get NoneArray assigned to them avoiding any unnecessary
# errors when evaluating expressions.
if controller is None and vtkMultiProcessController is not None:
controller = vtkMultiProcessController.GetGlobalController()
if controller and controller.IsA("vtkMPIController") and controller.GetNumberOfProcesses() > 1:
from mpi4py import MPI
comm = vtkMPI4PyCommunicator.ConvertToPython(controller.GetCommunicator())
rank = comm.Get_rank()
# reduce the array names across processes to ensure arrays missing on
# certain ranks are handled correctly.
arraynames = arrays.keys()
# gather to root and then broadcast
# I couldn't get Allgather/Allreduce to work properly with strings.
gathered_names = comm.gather(arraynames, root=0)
# gathered_names is a list of lists.
if rank == 0:
result = set()
# avoid shadowing the builtin 'list'
for names in gathered_names:
for val in names: result.add(val)
gathered_names = [x for x in result]
arraynames = comm.bcast(gathered_names, root=0)
for name in arraynames:
if not arrays.has_key(name):
arrays[name] = dsa.NoneArray
return arrays
def compute(inputs, expression, ns=None):
# build the locals environment used to eval the expression.
mylocals = dict()
if ns:
mylocals.update(ns)
mylocals["inputs"] = inputs
try:
mylocals["points"] = inputs[0].Points
except AttributeError: pass
retVal = eval(expression, globals(), mylocals)
return retVal
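# Editor's illustrative sketch (not part of the original module): evaluating an
# expression over a wrapped dataset. 'mag' comes from the star-import of
# vtk.numpy_interface.algorithms above; the dataset argument is assumed to
# carry a point array named 'Normals'.
def _compute_example(dataset):
    inputs = [dsa.WrapDataObject(dataset)]
    ns = get_arrays(inputs[0].GetPointData())
    return compute(inputs, "mag(Normals)", ns=ns)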
def execute(self, expression):
"""
**Internal Method**
Called by vtkPythonCalculator in its RequestData(...) method. This is not
intended for use externally except from within
vtkPythonCalculator::RequestData(...).
"""
# Add inputs.
inputs = []
for index in range(self.GetNumberOfInputConnections(0)):
# wrap all input data objects using vtk.numpy_interface.dataset_adapter
inputs.append(dsa.WrapDataObject(self.GetInputDataObject(0, index)))
# Setup output.
output = dsa.WrapDataObject(self.GetOutputDataObject(0))
if self.GetCopyArrays():
output.GetPointData().PassData(inputs[0].GetPointData())
output.GetCellData().PassData(inputs[0].GetCellData())
# get a dictionary for arrays in the dataset attributes. We pass that
# as the variables in the eval namespace for compute.
variables = get_arrays(inputs[0].GetAttributes(self.GetArrayAssociation()))
retVal = compute(inputs, expression, ns=variables)
if retVal is not None:
output.GetAttributes(self.GetArrayAssociation()).append(\
retVal, self.GetArrayName())


@ -0,0 +1,136 @@
import OpenEXR as oe
import Imath as im
import numpy as np
class OexrCompression:
NONE = 0
RLE = 1
ZIPS = 2
ZIP = 3
PIZ = 4
PXR24 = 5
def save_rgb(image, filePath, comp = OexrCompression.ZIP):
'''Saves the rgb (image) in OpenEXR format. Expects a 3-channel uint32 image.'''
if len(image.shape) != 3:
raise Exception("Incorrect dimensions!")
h, w, c = image.shape # expects Numpy convention (row, col) -> (height, width)
if c != 3:
raise Exception("Incorrect number of channels!")
if image.dtype != "uint32":
raise Exception("Incorrect type!, expected uint32")
try:
header = oe.Header(w, h)
header["channels"] = {"R" : im.Channel(im.PixelType(oe.UINT)),
"G" : im.Channel(im.PixelType(oe.UINT)),
"B" : im.Channel(im.PixelType(oe.UINT))}
header['compression'] = im.Compression(comp)
of = oe.OutputFile(filePath, header)
r_data = image[:, :, 0].tostring()
g_data = image[:, :, 1].tostring()
b_data = image[:, :, 2].tostring()
of.writePixels({"R" : r_data, "G" : g_data, "B" : b_data})
of.close()
except:
raise
def save_depth(image, filePath, comp = OexrCompression.ZIP):
'''Saves the zBuffer (image) in OpenEXR format. Expects a 1-channel float32 image.'''
if len(image.shape) != 2:
raise Exception("Incorrect dimensions!")
if image.dtype != "float32":
raise Exception("Incorrect type!, expected float32")
try:
h, w = image.shape # expects Numpy convention (row, col) -> (height, width)
header = oe.Header(w, h)
header["channels"] = {"Z" : im.Channel(im.PixelType(oe.FLOAT))}
header['compression'] = im.Compression(comp)
of = oe.OutputFile(filePath, header)
image_data = image.tostring()
of.writePixels({"Z" : image_data})
of.close()
except:
raise
def load_rgb(filePath):
''' Loads an rgb OpenEXR image.'''
if oe.isOpenExrFile(filePath) is not True:
raise Exception("File ", filePath, " does not exist!")
try:
ifi = oe.InputFile(filePath)
# Compute size
header = ifi.header()
dw = header["dataWindow"]
w, h = dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1
# Read the three channels
ifiType = header["channels"]["R"].type.v
if ifiType is not im.PixelType.UINT:
raise Exception("Only uint32 supported! (file is type ", ifiType)
R = ifi.channel("R", im.PixelType(ifiType))
G = ifi.channel("G", im.PixelType(ifiType))
B = ifi.channel("B", im.PixelType(ifiType))
ifi.close()
image = np.zeros((h, w, 3), dtype = np.uint32) # order = "C"
image[:, :, 0] = np.core.multiarray.fromstring(R, dtype = np.uint32).reshape(h, w)
image[:, :, 1] = np.core.multiarray.fromstring(G, dtype = np.uint32).reshape(h, w)
image[:, :, 2] = np.core.multiarray.fromstring(B, dtype = np.uint32).reshape(h, w)
except:
raise
return image
def load_depth(filePath):
''' Loads a depth OpenEXR image.'''
if oe.isOpenExrFile(filePath) is not True:
raise Exception("File ", filePath, " does not exist!")
try:
ifi = oe.InputFile(filePath)
# Compute size
header = ifi.header()
dw = header["dataWindow"]
w, h = dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1
# Read the depth channel
ifiType = header["channels"]["Z"].type.v
if ifiType is not im.PixelType.FLOAT:
raise Exception("Only float32 supported! (file is type ", ifiType)
Z = ifi.channel("Z", im.PixelType(ifiType))
ifi.close()
image = np.zeros((h, w), dtype = np.float32) # order = "C"
image[:, :] = np.core.multiarray.fromstring(Z, dtype = np.float32).reshape(h, w)
except:
raise
return image
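# Editor's illustrative sketch (not part of the original module): a save/load
# round trip for a depth buffer; the file path is hypothetical.
def _depth_roundtrip_example(filePath="/tmp/depth.exr"):
    z = np.zeros((4, 8), dtype=np.float32)  # 1-channel float32, as save_depth expects
    save_depth(z, filePath)
    z2 = load_depth(filePath)
    assert z2.shape == z.shape and z2.dtype == np.float32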


@ -0,0 +1,979 @@
#==============================================================================
# Copyright (c) 2015, Kitware Inc., Los Alamos National Laboratory
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
"""
Module defining classes and methods for managing cinema data storage.
"""
import sys
import json
import os.path
import re
import itertools
import weakref
import numpy as np
import copy
import raster_wrangler
class Document(object):
"""
This refers to a document in the cinema data storage. A document is
uniquely identified by a 'descriptor'. A descriptor is a dictionary
with key-value pairs, where key is a parameter name and value is the
value for that particular parameter.
TODO:
A document can have arbitrary data (as 'data') and meta-data (as
'attributes') associated with it. At the moment we are assuming
stored images and are ignoring the attributes.
"""
def __init__(self, descriptor, data=None):
self.__descriptor = descriptor
self.__data = data
self.__attributes = None
@property
def descriptor(self):
"""A document descriptor is a unique
identifier for the document. It is a dict with key value pairs. The
descriptor cannot be changed once the document has been instantiated."""
return self.__descriptor
@property
def data(self):
"""Data associated with the document."""
return self.__data
@data.setter
def data(self, val):
self.__data = val
@property
def attributes(self):
"""Attributes are arbitrary meta-data associated with the document.
If no attributes are present, it is set to None. When present,
attributes are a dict with arbitrary meta-data relevant to the application.
"""
return self.__attributes
@attributes.setter
def attributes(self, attrs):
self.__attributes = attrs
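# Editor's illustrative sketch (not part of the original module): a Document is
# keyed by its descriptor; data and attributes can be attached afterwards.
#
#   doc = Document({"theta": 0, "phi": 90})
#   doc.data = image_bytes                  # hypothetical payload
#   doc.attributes = {"note": "frontal view"}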
class Store(object):
"""
API for cinema stores. A store is a collection of Documents,
with API to add, find, and access them. This class is an abstract class
defining the API and storage independent logic. Storage specific
subclasses handle the 'database' access.
The design of cinema store is based on the following principles:
The store comprises documents (Document instances). Each document has a
unique set of parameters, aka a "descriptor" associated with it. This
can be thought of as the 'unique key' in database terminology.
One defines the parameters (contents of the descriptor) for documents
on the store itself. The set of them is referred to as 'parameter_list'.
One uses 'add_parameter()' calls to add new parameter definitions for a new
store instance.
Users insert documents in the store using 'insert'. One can find
document(s) using 'find', which returns a generator (or cursor) allowing
users to iterate over all matching documents.
"""
def __init__(self):
self.__metadata = None
self.__parameter_list = {}
self.__loaded = False
self.__parameter_associations = {}
self.__view_associations = {}
self.__type_specs = {}
self.cached_searches = {}
self.raster_wrangler = raster_wrangler.RasterWrangler()
@property
def parameter_list(self):
"""
The parameter list is the set of variables and their values that the
documents in the store vary over. """
return self.__parameter_list
def _parse_parameter_type(self, name, properties):
#look for hints about document type relations
if 'Z' in self.__type_specs:
Zs = self.__type_specs['Z']
else:
Zs = []
if 'LUMINANCE' in self.__type_specs:
Ls = self.__type_specs['LUMINANCE']
else:
Ls = []
if 'VALUE' in self.__type_specs:
Vs = self.__type_specs['VALUE']
else:
Vs = []
if 'types' in properties:
for x in range(0, len(properties['types'])):
if properties['types'][x] == 'depth':
value = properties['values'][x]
newentry = [name, value]
Zs.append(newentry)
if properties['types'][x] == 'luminance':
value = properties['values'][x]
newentry = [name, value]
Ls.append(newentry)
# Mark value renders
if properties['types'][x] == 'value':
value = properties['values'][x]
newentry = [name, value]
Vs.append(newentry)
if len(Zs) > 0:
self.__type_specs['Z'] = Zs
if len(Ls) > 0:
self.__type_specs['LUMINANCE'] = Ls
if len(Vs) > 0:
self.__type_specs['VALUE'] = Vs
def _set_parameter_list(self, val):
"""For use by subclasses alone"""
self.__parameter_list = val
for name in self.__parameter_list:
self._parse_parameter_type(name, self.__parameter_list[name])
def add_parameter(self, name, properties):
"""Add a parameter.
:param name: Name for the parameter.
:param properties: Keyword arguments can be used to associate miscellaneous
meta-data with this parameter.
"""
#if self.__loaded:
# raise RuntimeError("Updating parameters after loading/creating a store is not supported.")
# TODO: Err, except when it is, in the important case of adding new time steps to a collection.
# I postulate it is always OK to add safely to outermost parameter (loop).
self.__parameter_list[name] = properties
self._parse_parameter_type(name, properties)
def get_parameter(self, name):
return self.__parameter_list[name]
def get_complete_descriptor(self, partial_desc):
"""
Convenience method that expands an incomplete list of parameters into
the full set using default values for the missing variables.
TODO: doesn't make sense with bifurcation (dependencies), when SFS supports them remove
"""
full_desc = dict()
for name, properties in self.parameter_list.items():
if properties.has_key("default"):
full_desc[name] = properties["default"]
full_desc.update(partial_desc)
return full_desc
def get_default_type(self):
""" subclasses override this if they know more """
return "RGB"
def determine_type(self, desc):
#try any assigned mappings (for example color='depth' then 'Z')
for typename, checks in self.__type_specs.items():
for check in checks:
name = check[0]
conditions = check[1]
if name in desc and desc[name] in conditions:
return typename
#no takers, use the default for this store
typename = self.get_default_type()
return typename
@property
def parameter_associations(self):
return self.__parameter_associations
def _set_parameter_associations(self, val):
"""For use by subclasses alone"""
self.__parameter_associations = val
@property
def view_associations(self):
return self.__view_associations
def _set_view_associations(self, val):
"""For use by subclasses alone"""
self.__view_associations = val
@property
def metadata(self):
"""
Auxiliary data about the store itself. An example is hints that help the
viewer app know how to interpret this particular store.
"""
return self.__metadata
@metadata.setter
def metadata(self, val):
self.__metadata = val
def add_metadata(self, keyval):
if not self.__metadata:
self.__metadata = {}
self.__metadata.update(keyval)
def create(self):
"""
Creates an empty store.
Subclasses must extend this.
"""
assert not self.__loaded
self.__loaded = True
def load(self):
"""
Loads contents on the store (but not the documents).
Subclasses must extend this.
"""
assert not self.__loaded
self.__loaded = True
def find(self, q=None):
"""
Return iterator to all documents that match query q.
Should support empty query or direct values queries e.g.
for doc in store.find({'phi': 0}):
print doc.data
for doc in store.find({'phi': 0, 'theta': 100}):
print doc.data
"""
raise RuntimeError("Subclasses must define this method")
def insert(self, document):
"""
Inserts a new document.
Subclasses must extend this.
"""
if not self.__loaded:
self.create()
def assign_parameter_dependence(self, dep_param, param, on_values):
"""
mark a particular parameter as being explorable only for a subset
of the possible values of another.
For example given parameter 'appendage type' which might have
value 'foot' or 'flipper', a dependent parameter might be 'shoe type'
which only makes sense for 'feet'. More to the point we use this
for 'layers' and 'fields' in composite rendering of objects in a scene
and the color settings that each object is allowed to take.
"""
self.__parameter_associations.setdefault(dep_param, {}).update(
{param: on_values})
def assign_view_dependence(self, dep_param, param, on_values):
"""
mark a particular parameter as being explorable only for a subset
of the possible values of another.
For example given parameter 'appendage type' which might have
value 'foot' or 'flipper', a dependent parameter might be 'shoe type'
which only makes sense for 'feet'. More to the point we use this
for 'layers' and 'fields' in composite rendering of objects in a scene
and the color settings that each object is allowed to take.
"""
self.__view_associations.setdefault(dep_param, {}).update(
{param: on_values})
def isdepender(self, name):
""" check if the named parameter depends on any others """
if name in self.parameter_associations.keys():
return True
return False
def isdependee(self, name):
""" check if the named parameter has others that depend on it """
for depender, dependees in self.parameter_associations.iteritems():
if name in dependees:
return True
return False
def getDependeeValue(self, depender, dependee):
""" Return the required value of a dependee to fulfill a dependency. """
try:
value = self.parameter_associations[depender][dependee]
except KeyError:
raise KeyError("Invalid dependency! ", depender, ", ", dependee)
return value
def isviewdepender(self, name):
""" check if the named parameter depends on any others """
if name in self.view_associations.keys():
return True
return False
def isviewdependee(self, name):
""" check if the named parameter has others that depend on it """
for depender, dependees in self.view_associations.iteritems():
if name in dependees:
return True
return False
def getdependers(self, name):
""" return a list of all the parameters that depend on the given one """
result = []
for depender, dependees in self.parameter_associations.iteritems():
if name in dependees["vis"]:
result.append(depender)
return result
def getdependees(self, depender):
""" return a list of all the parameters that 'depender' depends on """
try:
result = self.parameter_associations[depender]
except KeyError:
#This is a valid state, it only means there are no dependees
result = {}
return result
def getRelatedField(self, parameter):
''' Returns the 'field' argument related to a 'parameter'. '''
for depender, dependees in self.parameter_associations.iteritems():
if parameter in dependees["vis"] and \
self.isfield(depender):
return depender
return None
def hasRelatedParameter(self, fieldName):
''' Predicate to know if a field has a related 'parameter' argument. '''
paramName = self.parameter_associations[fieldName]["vis"][0]
return (paramName in self.parameter_list)
def getviewdependers(self, name):
""" return a list of all the parameters that depend on the given one """
result = []
for depender, dependees in self.view_associations.iteritems():
if name in dependees:
result.append(depender)
return result
def dependencies_satisfied(self, dep_param, descriptor):
"""
Check if the values in descriptor satisfy all of the dependencies
of dep_param.
Return true if no dependencies to satisfy.
Return false if dependency of dependency fails.
"""
if not dep_param in self.__parameter_associations:
return True
for dep in self.__parameter_associations[dep_param]:
if not dep in descriptor:
#something dep_param needs is not in the descriptor at all
return False
if not descriptor[dep] in self.__parameter_associations[dep_param][dep]:
#something dep_param needs doesn't have an accepted value in the descriptor
return False
if not self.dependencies_satisfied(dep, descriptor):
#recurse to check deps of dep_param themselves
return False
return True
def view_dependencies_satisfied(self, dep_param, descriptor):
"""
Check if the values in descriptor satisfy all of the dependencies
of dep_param.
Return true if no dependencies to satisfy.
Return false if dependency of dependency fails.
"""
if not dep_param in self.__view_associations:
return True
for dep in self.__view_associations[dep_param]:
if not dep in descriptor:
#something dep_param needs is not in the descriptor at all
return False
if not descriptor[dep] in self.__view_associations[dep_param][dep]:# and not ('layer' in dep_param and 'layer' in dep):
#something dep_param needs doesn't have an accepted value in the descriptor
return False
if not self.view_dependencies_satisfied(dep, descriptor):
#recurse to check deps of dep_param themselves
return False
return True
def add_layer(self, name, properties):
"""
A Layer boils down to an image of something in the scene, and only
that thing, along with the depth at each pixel. Layers (note the
plural) can be composited back together by a viewer.
"""
properties['type'] = 'option'
properties['role'] = 'layer'
self.add_parameter(name, properties)
def islayer(self, name):
return (self.parameter_list[name]['role'] == 'layer') if (name in self.parameter_list and 'role' in self.parameter_list[name]) else False
def add_sublayer(self, name, properties, parent_layer, parents_value):
"""
An example of a layer is an isocontour display. An example of a sublayer
is the particular isovalues for the isocontour.
"""
self.add_layer(name, properties)
self.assign_parameter_dependence(name, parent_layer, parents_value)
def add_field(self, name, properties, parent_layer, parents_values):
"""
A field is a component of the final color for a layer. Examples include:
depth, normal, color, scalar values.
"""
properties['type'] = 'hidden'
properties['role'] = 'field'
self.add_parameter(name, properties)
self.assign_parameter_dependence(name, parent_layer, parents_values)
def isfield(self, name):
return (self.parameter_list[name]['role'] == 'field') if (name in self.parameter_list and 'role' in self.parameter_list[name]) else False
def add_control(self, name, properties):
"""
A control is a togglable parameter for a filter. Examples include:
isovalue, offset.
"""
properties['role'] = 'control'
self.add_parameter(name, properties)
def iscontrol(self, name):
return (self.parameter_list[name]['role'] == 'control') if (name in self.parameter_list and 'role' in self.parameter_list[name]) else False
def parameters_for_object(self, obj):
"""
Given <obj>, an element of the layer <vis>, this method returns:
1. the names of independent parameters (t, theta, etc) that affect it
2. the name of its associated field
3. the names of the controls that affect it
"""
independent_parameters = [par for par in self.__parameter_list.keys()
if 'role' not in self.__parameter_list[par]]
fields = [x for x in self.parameter_associations.keys()
if obj in self.parameter_associations[x]['vis'] and
self.isfield(x)]
field = fields[0] if fields else None
controls = [x for x in self.parameter_associations.keys()
if obj in self.parameter_associations[x]['vis'] and
self.iscontrol(x)]
return (independent_parameters,field,controls)
def iterate(self, parameters=None, fixedargs=None, forGUI=False):
"""
Run through all combinations of parameter/value pairs without visiting
any combinations that do not satisfy dependencies among them.
The 'parameters' argument, if supplied, is a list of parameter names to enforce an ordering.
Fixed arguments, if supplied, are parameter/value pairs that we want
to hold constant in the exploration.
"""
#optimization - cache and reuse to avoid expensive search
argstr = json.dumps((parameters,fixedargs,forGUI), sort_keys=True)
#todo: good for viewer but breaks exploration
if argstr in self.cached_searches:
for x in self.cached_searches[argstr]:
yield x
return
#prepare to iterate through all the possibilities, in order if one is given
#param_names = parameters if parameters else sorted(self.parameter_list.keys())
param_names = parameters if parameters else self.parameter_list.keys()
#print "PARAMETERS", param_names
params = []
values = []
dep_params = []
for name in param_names:
vals = self.get_parameter(name)['values']
if fixedargs and name in fixedargs:
continue
params.append(name)
values.append(vals)
#the algorithm is to iterate through all combinations, and remove
#the impossible ones. I use a set to avoid redundant combinations.
#In order to use the set I serialize to make something hashable.
#Then I insert into a list to preserve the (hopefully optimized) order.
ok_descs = set()
ordered_descs = []
for element in itertools.product(*values):
descriptor = dict(itertools.izip(params, element))
if fixedargs != None:
descriptor.update(fixedargs)
ok_params = []
ok_vals = []
ok_desc = {}
for param, value in descriptor.iteritems():
if forGUI:
if self.view_dependencies_satisfied(param, descriptor):
ok_desc.update({param:value})
else:
if self.dependencies_satisfied(param, descriptor):
ok_desc.update({param:value})
OK = True
if fixedargs:
for k,v in fixedargs.iteritems():
if not (k in ok_desc and ok_desc[k] == v):
OK = False
if OK:
strval = "{ "
for name in sorted(ok_desc.keys()):
strval = strval + '"' + name + '": "' + str(ok_desc[name]) + '", '
strval = strval[0:-2] + "}"
#strval = json.dumps(ok_desc, sort_keys=True) #slower than hand rolled above
if not strval in ok_descs:
ok_descs.add(strval)
ordered_descs.append(ok_desc)
yield ok_desc
self.cached_searches[argstr] = ordered_descs
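#Usage sketch for iterate() (hypothetical parameter names and values):
#  for desc in store.iterate(fixedargs={'time': '0'}):
#      print desc  #e.g. {'time': '0', 'phi': 0, 'theta': 90}
#Each yielded descriptor is one dependency-satisfying combination.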
class FileStore(Store):
"""Implementation of a store based on named files and directories."""
def __init__(self, dbfilename=None):
super(FileStore, self).__init__()
self.__filename_pattern = None
tmpfname = dbfilename if dbfilename \
else os.path.join(os.getcwd(), "info.json")
if not tmpfname.endswith("info.json"):
tmpfname = os.path.join(tmpfname, "info.json")
self.__dbfilename = tmpfname
self.cached_searches = {}
self.cached_files = {}
def create(self):
"""creates a new file store"""
super(FileStore, self).create()
self.save()
def load(self):
"""loads an existing filestore"""
super(FileStore, self).load()
with open(self.__dbfilename, mode="rb") as file:
info_json = json.load(file)
#for legacy reasons, the parameters are called
#"arguments" in the files
self._set_parameter_list(info_json['arguments'])
self.metadata = info_json['metadata']
self.filename_pattern = info_json['name_pattern']
a = {}
if 'associations' in info_json:
a = info_json['associations']
self._set_parameter_associations(a)
va = {}
if 'view_associations' in info_json:
va = info_json['view_associations']
if not va:
va = copy.deepcopy(a)
self._set_view_associations(va)
def save(self):
""" writes out a modified file store """
info_json = dict(
arguments = self.parameter_list,
name_pattern = self.filename_pattern,
metadata = self.metadata,
associations = self.parameter_associations,
view_associations = self.view_associations
)
dirname = os.path.dirname(self.__dbfilename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(self.__dbfilename, mode="wb") as file:
json.dump(info_json, file)
@property
def filename_pattern(self):
"""
Files corresponding to Documents are arranged on disk
according to the directory and filename structure described
by the filename_pattern. The format is a regular expression
consisting of parameter names enclosed in '{' and '}' and
separated by spacers. "/" spacer characters produce sub
directories.
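For example, the (hypothetical) pattern
"{time}/{phi}_{theta}.png"
produces one sub-directory per time value, containing one image
file per phi/theta combination.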
"""
return self.__filename_pattern
@filename_pattern.setter
def filename_pattern(self, val):
self.__filename_pattern = val
#choose default data type in the store based on file extension
self._default_type = 'RGB'
if val[val.rfind("."):] == '.txt':
self._default_type = 'TXT'
def get_default_type(self):
""" overridden to use the filename pattern to determine default type """
return self._default_type
def _get_filename(self, desc):
dirname = os.path.dirname(self.__dbfilename)
#print self.__dbfilename
#print desc
#print self.filename_pattern
#find filename modulo any dependent parameters
fixed = self.filename_pattern.format(**desc)
base, ext = os.path.splitext(fixed)
# #add any dependent parameters
# for dep in sorted(self.parameter_associations.keys()):
# if dep in desc:
# #print " ->>> base /// dep: ", base, " /// ", dep
# base = base + "/" + dep + "=" + str(desc[dep])
#a more intuitive layout than the above alphanumeric sort
#this one follows the graph and thus keeps related things, like
#all the rasters (fields) for a particular object (layer), close
#to one another
#TODO: optimize this
keys = [k for k in sorted(desc)]
ordered_keys = []
while len(keys):
k = keys.pop(0)
if not self.isdepender(k) and not self.isdependee(k):
continue
parents = self.getdependees(k)
ready = True
for p in parents:
if not (p in ordered_keys):
ready = False
#this is the crux - haven't seen a parent yet, so try again later
keys.append(k)
break
if ready:
ordered_keys.append(k)
for k in ordered_keys:
base = base + "/" + k + "=" + str(desc[k])
#determine file type for this document
doctype = self.determine_type(desc)
if doctype == "Z":
ext = self.raster_wrangler.zfileextension()
fullpath = os.path.join(dirname, base+ext)
return fullpath
def insert(self, document):
super(FileStore, self).insert(document)
fname = self._get_filename(document.descriptor)
dirname = os.path.dirname(fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
if document.data is not None:
doctype = self.determine_type(document.descriptor)
if doctype == 'RGB' or doctype == 'VALUE' or doctype == 'LUMINANCE':
self.raster_wrangler.rgbwriter(document.data, fname)
elif doctype == 'Z':
self.raster_wrangler.zwriter(document.data, fname)
else:
self.raster_wrangler.genericwriter(document.data, fname)
def _load_data(self, doc_file, descriptor):
doctype = self.determine_type(descriptor)
try:
if doctype == 'RGB' or doctype == 'VALUE':
data = self.raster_wrangler.rgbreader(doc_file)
elif doctype == 'LUMINANCE':
data = self.raster_wrangler.rgbreader(doc_file)
elif doctype == 'Z':
data = self.raster_wrangler.zreader(doc_file)
else:
data = self.raster_wrangler.genericreader(doc_file)
except IOError:
data = None
raise
doc = Document(descriptor, data)
doc.attributes = None
return doc
def find(self, q=None, forGUI=False):
q = q if q else dict()
target_desc = q
#print "->>> store::find(): target_desc-> ", target_desc
for possible_desc in self.iterate(fixedargs=target_desc, forGUI=forGUI):
if possible_desc == {}:
yield None
continue #nothing to load for an empty descriptor
#print "->>> store::find() possible_desc: ", possible_desc
filename = self._get_filename(possible_desc)
#optimization - cache and reuse to avoid file load
if filename in self.cached_files:
yield self.cached_files[filename]
continue #was 'return', which ended the search after one cached hit
fcontent = self._load_data(filename, possible_desc)
#todo: shouldn't be unbounded size
self.cached_files[filename] = fcontent
yield fcontent
class SingleFileStore(Store):
"""Implementation of a store based on a single volume file (image stack)."""
def __init__(self, dbfilename=None):
super(SingleFileStore, self).__init__()
self.__dbfilename = dbfilename if dbfilename \
else os.path.join(os.getcwd(), "info.json")
self._volume = None
self._needWrite = False
self.add_metadata({"store_type" : "SFS"})
def __del__(self):
if self._needWrite:
import vtk
vw = vtk.vtkXMLImageDataWriter()
vw.SetFileName(self._vol_file)
vw.SetInputData(self._volume)
vw.Write()
def create(self):
"""creates a new file store"""
super(SingleFileStore, self).create()
self.save()
def load(self):
"""loads an existing filestore"""
super(SingleFileStore, self).load()
with open(self.__dbfilename, mode="rb") as file:
info_json = json.load(file)
self._set_parameter_list(info_json['arguments'])
self.metadata = info_json['metadata']
def save(self):
""" writes out a modified file store """
info_json = dict(
arguments = self.parameter_list,
metadata = self.metadata
)
dirname = os.path.dirname(self.__dbfilename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(self.__dbfilename, mode="wb") as file:
json.dump(info_json, file)
def _get_numslices(self):
slices = 0
for name in sorted(self.parameter_list.keys()):
numvals = len(self.get_parameter(name)['values'])
if slices == 0:
slices = numvals
else:
slices = slices * numvals
return slices
def compute_sliceindex(self, descriptor):
#find position of descriptor within the set of slices
#TODO: algorithm is dumb, but consistent with find (which is also dumb)
args = []
values = []
ordered = sorted(self.parameter_list.keys())
for name in ordered:
vals = self.get_parameter(name)['values']
args.append(name)
values.append(vals)
index = 0
for element in itertools.product(*values):
desc = dict(itertools.izip(args, element))
fail = False
for k,v in descriptor.items():
if desc[k] != v:
fail = True
if not fail:
return index
index = index + 1
def get_sliceindex(self, document):
desc = self.get_complete_descriptor(document.descriptor)
index = self.compute_sliceindex(desc)
return index
def _insertslice(self, vol_file, index, document):
volume = self._volume
width = document.data.shape[0]
height = document.data.shape[1]
if not volume:
import vtk
slices = self._get_numslices()
volume = vtk.vtkImageData()
volume.SetExtent(0, width-1, 0, height-1, 0, slices-1)
volume.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
self._volume = volume
self._vol_file = vol_file
imageslice = document.data
imageslice = imageslice.reshape(width*height, 3)
from vtk.numpy_interface import dataset_adapter as dsa
image = dsa.WrapDataObject(volume)
oid = volume.ComputePointId([0,0,index])
nparray = image.PointData[0]
nparray[oid:oid+(width*height)] = imageslice
self._needWrite = True
def insert(self, document):
super(SingleFileStore, self).insert(document)
index = self.get_sliceindex(document)
if document.data is not None:
dirname = os.path.dirname(self.__dbfilename)
if not os.path.exists(dirname):
os.makedirs(dirname)
vol_file = os.path.join(dirname, "cinema.vti")
self._insertslice(vol_file, index, document)
def _load_slice(self, q, index, desc):
if not self._volume:
import vtk
dirname = os.path.dirname(self.__dbfilename)
vol_file = os.path.join(dirname, "cinema.vti")
vr = vtk.vtkXMLImageDataReader()
vr.SetFileName(vol_file)
vr.Update()
volume = vr.GetOutput()
self._volume = volume
self._vol_file = vol_file
else:
volume = self._volume
ext = volume.GetExtent()
width = ext[1]-ext[0]
height = ext[3]-ext[2]
from vtk.numpy_interface import dataset_adapter as dsa
image = dsa.WrapDataObject(volume)
oid = volume.ComputePointId([0, 0, index])
nparray = image.PointData[0]
imageslice = np.reshape(nparray[oid:oid+width*height], (width,height,3))
doc = Document(desc, imageslice)
doc.attributes = None
return doc
def find(self, q=None):
#TODO: algorithm is dumb, but consistent with compute_sliceindex (which is also dumb)
q = q if q else dict()
args = []
values = []
ordered = sorted(self.parameter_list.keys())
for name in ordered:
vals = self.get_parameter(name)['values']
args.append(name)
values.append(vals)
index = 0
for element in itertools.product(*values):
desc = dict(itertools.izip(args, element))
fail = False
for k,v in q.items():
if desc[k] != v:
fail = True
if not fail:
yield self._load_slice(q, index, desc)
index = index + 1
def make_parameter(name, values, **kwargs):
default = kwargs['default'] if 'default' in kwargs else values[0]
if not default in values:
raise RuntimeError, "Invalid default, must be one of %s" % str(values)
typechoice = kwargs['typechoice'] if 'typechoice' in kwargs else 'range'
valid_types = ['list','range','option','hidden']
if not typechoice in valid_types:
raise RuntimeError, "Invalid typechoice, must be one of %s" % str(valid_types)
label = kwargs['label'] if 'label' in kwargs else name
properties = dict()
properties['type'] = typechoice
properties['label'] = label
properties['values'] = values
properties['default'] = default
return properties
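#Example use (hypothetical values):
#  phi = make_parameter('phi', [0, 90, 180], default=90, typechoice='list')
#  cs.add_parameter('phi', phi)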
def make_field(name, _values, **kwargs):
#specialization of make_parameters for parameters that define fields
#in this case the values is a list of name, type pairs
values = _values.keys()
img_types = _values.values()
valid_itypes = ['rgb','depth','value','luminance','normals']
for i in img_types:
if i not in valid_itypes:
raise RuntimeError, "Invalid typechoice, must be one of %s" % str(valid_itypes)
default = kwargs['default'] if 'default' in kwargs else values[0]
if not default in values:
raise RuntimeError, "Invalid default, must be one of %s" % str(values)
typechoice = 'hidden'
valid_types = ['list','range','option','hidden']
if not typechoice in valid_types:
raise RuntimeError, "Invalid typechoice, must be one of %s" % str(valid_types)
label = kwargs['label'] if 'label' in kwargs else name
properties = dict()
properties['type'] = typechoice
properties['label'] = label
properties['values'] = values
properties['default'] = default
properties['types'] = img_types
if 'valueRanges' in kwargs:
properties['valueRanges'] = kwargs['valueRanges']
return properties
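#Example use (hypothetical array names); the _values dict maps each
#field name to its image type:
#  f = make_field('colorContour1',
#                 {'temperature_0': 'value', 'depth': 'depth'},
#                 default='temperature_0',
#                 valueRanges={'temperature_0': [0.0, 100.0]})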

View File

@ -0,0 +1,156 @@
#==============================================================================
# Copyright (c) 2015, Kitware Inc., Los Alamos National Laboratory
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
import cinema_store
import itertools
import json
class Explorer(object):
"""
Middleman that connects arbitrary producing code to the CinemaStore.
The purpose of this class is to run through the parameter sets, and tell a
set of tracks (in order) to do something with the parameter values
each cares about.
"""
def __init__(self,
cinema_store,
parameters, #these are the things that this explorer is responsible for and their ranges
tracks #the things we pass off values to in order to do the work
):
self.__cinema_store = cinema_store
self.parameters = parameters
self.tracks = tracks
@property
def cinema_store(self):
return self.__cinema_store
def list_parameters(self):
"""
parameters is an ordered list of parameters that the Explorer varies over
"""
return self.parameters
def prepare(self):
""" Give tracks a chance to get ready for a run """
if self.tracks:
for e in self.tracks:
res = e.prepare(self)
def execute(self, desc):
# Create the document/data product for this sample.
doc = cinema_store.Document(desc)
for e in self.tracks:
#print "EXECUTING track ", e, doc.descriptor
e.execute(doc)
self.insert(doc)
def explore(self, fixedargs=None, forGUI=False):
"""
Explore the problem space to populate the store, being careful not to hit combinations
where dependencies are not satisfied.
Fixed arguments are the parameters that we want to hold constant in the exploration.
"""
self.prepare()
for descriptor in self.cinema_store.iterate(self.list_parameters(), fixedargs, forGUI):
self.execute(descriptor)
self.finish()
def finish(self):
""" Give tracks a chance to clean up after a run """
if self.tracks:
for e in self.tracks:
res = e.finish()
def insert(self, doc):
self.cinema_store.insert(doc)
class Track(object):
"""
abstract interface for things that can produce data
to use this:
caller should set up some visualization
then tie a particular set of parameters to an action with a track
"""
def __init__(self):
pass
def prepare(self, explorer):
""" subclasses get ready to run here """
pass
def finish(self):
""" subclasses cleanup after running here """
pass
def execute(self, document):
""" subclasses operate on parameters here"""
pass
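#Minimal usage sketch (hypothetical store and parameter names):
#  class PrintTrack(Track):
#      def execute(self, doc):
#          print doc.descriptor
#  e = Explorer(store, ['phi', 'theta'], [PrintTrack()])
#  e.explore()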
class LayerControl(object):
"""
Prototype for something that Layer track can control
"""
def __init__(self, name, showFunc, hideFunc):
self.name = name
self.callShow = showFunc #todo, determine if function now and convert instead of try/except below
self.callHide = hideFunc
class Layer(Track):
"""
A track that connects a layer to the set of objects in the scene that it controls.
"""
def __init__(self, layer, objectlist):
super(Layer, self).__init__()
self.parameter = layer
#objectlist is an array of class instances; they must have a name and
#show and hide methods. Use LayerControl to make them.
self.objectlist = objectlist
def execute(self, doc):
o = None
if self.parameter in doc.descriptor:
o = doc.descriptor[self.parameter]
for obj in self.objectlist:
if obj.name == o:
try:
obj.callShow() #method
except TypeError:
obj.callShow(obj) #function
else:
try:
obj.callHide()
except TypeError:
obj.callHide(obj)
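#Example wiring (hypothetical representation 'rep'):
#  ctl = LayerControl('contour',
#                     lambda: setattr(rep, 'Visibility', 1),
#                     lambda: setattr(rep, 'Visibility', 0))
#  track = Layer('vis', [ctl])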

View File

@ -0,0 +1,402 @@
#==============================================================================
# Copyright (c) 2015, Kitware Inc., Los Alamos National Laboratory
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
"""
Module consisting of explorers and tracks that connect arbitrary paraview
pipelines to cinema stores.
"""
import explorers
import paraview.simple as simple
import numpy as np
import paraview
from paraview import numpy_support as numpy_support
class ImageExplorer(explorers.Explorer):
"""
An explorer that connects a paraview script's views to a store
and makes it save new images into the store.
"""
def __init__(self,
cinema_store, parameters, tracks,
view=None,
iSave=True):
super(ImageExplorer, self).__init__(cinema_store, parameters, tracks)
self.view = view
self.CaptureDepth = False
self.CaptureLuminance = False
self.iSave = iSave
self.UsingGL2 = False
if self.view:
try:
rw=self.view.GetRenderWindow()
if rw.GetRenderingBackend()==2:
self.UsingGL2 = True
except AttributeError:
pass
if self.UsingGL2:
def rgb2grey(rgb, height, width):
as_grey = np.dot(rgb[...,:3], [0.0, 1.0, 0.0]) #pass through Diffuse lum term
res = as_grey.reshape(height,width).astype('uint8')
return res
self.rgb2grey = rgb2grey
else:
def rgb2grey(rgb, height, width):
as_grey = np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) #ITU-R BT.601 luma weights; 0.144 was a transposition typo
res = as_grey.reshape(height,width).astype('uint8')
return res
self.rgb2grey = rgb2grey
def insert(self, document):
if not self.view:
return
if self.CaptureDepth:
simple.Render()
image = self.view.CaptureDepthBuffer()
idata = numpy_support.vtk_to_numpy(image) * 256
rw = self.view.GetRenderWindow()
width,height = rw.GetSize()
try:
imageslice = idata.reshape(height,width)
except ValueError:
imageslice = None
#import Image
#img = Image.fromarray(imageslice)
#img.show()
#try:
# input("Press enter to continue ")
#except NameError:
# pass
document.data = imageslice
self.CaptureDepth = False
else:
imageslice = None
if self.CaptureLuminance and not self.UsingGL2:
try:
rep = simple.GetRepresentation()
if rep != None:
rep.DiffuseColor = [1,1,1]
rep.ColorArrayName = None
except ValueError:
pass
image = self.view.CaptureWindow(1)
ext = image.GetExtent()
width = ext[1] - ext[0] + 1
height = ext[3] - ext[2] + 1
imagescalars = image.GetPointData().GetScalars()
idata = numpy_support.vtk_to_numpy(imagescalars)
idata = self.rgb2grey(idata, height, width)
imageslice = np.dstack((idata,idata,idata))
image.UnRegister(None)
else:
image = self.view.CaptureWindow(1)
ext = image.GetExtent()
width = ext[1] - ext[0] + 1
height = ext[3] - ext[2] + 1
imagescalars = image.GetPointData().GetScalars()
idata = numpy_support.vtk_to_numpy(imagescalars)
imageslice = idata.reshape(height,width,3)
image.UnRegister(None)
#import Image
#img = Image.fromarray(imageslice)
#img.show()
#try:
# input("Press enter to continue ")
#except NameError:
# pass
document.data = imageslice
if self.iSave:
super(ImageExplorer, self).insert(document)
def setDrawMode(self, choice, **kwargs):
if choice == 'color':
self.view.StopCaptureValues()
if self.UsingGL2:
self.view.StopCaptureLuminance()
self.CaptureDepth = False
self.CaptureLuminance = False
if choice == 'luminance':
self.view.StopCaptureValues()
if self.UsingGL2:
self.view.StartCaptureLuminance()
self.CaptureDepth = False
self.CaptureLuminance = True
if choice == 'depth':
self.view.StopCaptureValues()
if self.UsingGL2:
self.view.StopCaptureLuminance()
self.CaptureDepth=True
self.CaptureLuminance = False
if choice == 'value':
if self.UsingGL2:
self.view.StopCaptureLuminance()
self.view.DrawCells = kwargs['field']
self.view.ArrayNameToDraw = kwargs['name']
self.view.ArrayComponentToDraw = kwargs['component']
self.view.ScalarRange = kwargs['range']
self.view.StartCaptureValues()
self.CaptureDepth = False
self.CaptureLuminance = False
def finish(self):
super(ImageExplorer, self).finish()
#TODO: actually record state in init and restore here, for now just
#make an assumption
self.view.StopCaptureValues()
if self.UsingGL2:
self.view.StopCaptureLuminance()
try:
simple.Show()
simple.Render()
except RuntimeError:
pass
class Camera(explorers.Track):
"""
A track that connects a paraview script's camera to the phi and theta tracks.
This allows the creation of spherical camera stores where the user can
view the data from many points around it.
"""
def __init__(self, center, axis, distance, view):
super(Camera, self).__init__()
try:
# Z => 0 | Y => 2 | X => 1
self.offset = (axis.index(1) + 1 ) % 3
except ValueError:
raise Exception("Rotation axis not supported", axis)
self.center = center
self.distance = distance
self.view = view
def execute(self, document):
import math
theta = document.descriptor['theta']
phi = document.descriptor['phi']
theta_rad = float(theta) / 180.0 * math.pi
phi_rad = float(phi) / 180.0 * math.pi
pos = [
float(self.center[0]) - math.cos(phi_rad) * self.distance * math.cos(theta_rad),
float(self.center[1]) + math.sin(phi_rad) * self.distance * math.cos(theta_rad),
float(self.center[2]) + math.sin(theta_rad) * self.distance
]
up = [
+ math.cos(phi_rad) * math.sin(theta_rad),
- math.sin(phi_rad) * math.sin(theta_rad),
+ math.cos(theta_rad)
]
self.view.CameraPosition = pos
self.view.CameraViewUp = up
self.view.CameraFocalPoint = self.center
@staticmethod
def obtain_angles(angular_steps=[10,15]):
thetas = []
phis = []
theta_offset = 90 % angular_steps[1]
if theta_offset == 0:
theta_offset += angular_steps[1]
for theta in range(-90 + theta_offset,
90 - theta_offset + 1, angular_steps[1]):
for phi in range(0, 360, angular_steps[0]):
thetas.append(theta)
phis.append(phi)
return thetas, phis
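#For example, Camera.obtain_angles([90, 45]) returns thetas and phis in
#lock step, one pair per camera position: theta sampled at -45, 0 and 45
#degrees and phi at 0, 90, 180 and 270 degrees, 12 pairs in all.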
class Slice(explorers.Track):
"""
A track that connects slice filters to a scalar valued parameter.
"""
def __init__(self, parameter, filt):
super(Slice, self).__init__()
self.parameter = parameter
self.slice = filt
def prepare(self, explorer):
super(Slice, self).prepare(explorer)
def execute(self, doc):
if self.parameter in doc.descriptor:
o = doc.descriptor[self.parameter]
self.slice.SliceOffsetValues=[o]
class Contour(explorers.Track):
"""
A track that connects contour filters to a scalar valued parameter.
"""
def __init__(self, parameter, filt):
super(Contour, self).__init__()
self.parameter = parameter
self.contour = filt
self.control = 'Isosurfaces'
def prepare(self, explorer):
super(Contour, self).prepare(explorer)
def execute(self, doc):
if self.parameter in doc.descriptor:
o = doc.descriptor[self.parameter]
self.contour.SetPropertyWithName(self.control,[o])
class Clip(explorers.Track):
"""
A track that connects clip filters to a scalar valued parameter.
"""
def __init__(self, argument, clip):
super(Clip, self).__init__()
self.argument = argument
self.clip = clip
def prepare(self, explorer):
super(Clip, self).prepare(explorer)
def execute(self, doc):
if self.argument in doc.descriptor:
o = doc.descriptor[self.argument]
self.clip.UseValueAsOffset = True
self.clip.Value = o
class Templated(explorers.Track):
"""
A track that connects any type of filter to a scalar valued
'control' parameter.
"""
def __init__(self, parameter, filt, control):
explorers.Track.__init__(self)
self.parameter = parameter
self.filt = filt
self.control = control
def execute(self, doc):
o = doc.descriptor[self.parameter]
self.filt.SetPropertyWithName(self.control,[o])
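#Example (hypothetical filter variable): driving a slice filter's
#offsets through the generic track
#  t = Templated('offset', sliceFilter, 'SliceOffsetValues')
#is equivalent to the dedicated Slice track above.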
class ColorList():
"""
A helper that creates a dictionary of color controls for ParaView. The Color track takes in
a color name from the Explorer and looks up into a ColorList to determine exactly what
needs to be set to apply the color.
"""
def __init__(self):
self._dict = {}
def AddSolidColor(self, name, RGB):
self._dict[name] = {'type':'rgb','content':RGB}
def AddLUT(self, name, lut):
self._dict[name] = {'type':'lut','content':lut}
def AddDepth(self, name):
self._dict[name] = {'type':'depth'}
def AddLuminance(self, name):
self._dict[name] = {'type':'luminance'}
def AddValueRender(self, name, field, arrayname, component, range):
self._dict[name] = {'type':'value',
'field':field,
'arrayname':arrayname,
'component':component,
'range':range}
def getColor(self, name):
return self._dict[name]
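#Example (hypothetical names and range):
#  cl = ColorList()
#  cl.AddSolidColor('white', [1, 1, 1])
#  cl.AddDepth('depth')
#  cl.AddValueRender('temperature_0', False, 'temperature', 0, [0.0, 100.0])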
class Color(explorers.Track):
"""
A track that connects a parameter to a choice of surface rendered color maps.
"""
def __init__(self, parameter, colorlist, rep):
super(Color, self).__init__()
self.parameter = parameter
self.colorlist = colorlist
self.rep = rep
self.imageExplorer = None
def execute(self, doc):
if self.parameter not in doc.descriptor:
return
if self.rep is None:
#TODO: probably a bad sign
return
o = doc.descriptor[self.parameter]
spec = self.colorlist.getColor(o)
found = False
if spec['type'] == 'rgb':
found = True
self.rep.DiffuseColor = spec['content']
self.rep.ColorArrayName = None
if self.imageExplorer:
self.imageExplorer.setDrawMode('color')
if spec['type'] == 'lut':
found = True
self.rep.LookupTable = spec['content']
self.rep.ColorArrayName = o
if self.imageExplorer:
self.imageExplorer.setDrawMode('color')
if spec['type'] == 'depth':
found = True
if self.imageExplorer:
self.imageExplorer.setDrawMode('depth')
if spec['type'] == 'luminance':
found = True
if self.imageExplorer:
self.imageExplorer.setDrawMode('luminance')
if spec['type'] == 'value':
found = True
if self.imageExplorer:
self.imageExplorer.setDrawMode("value",
field=spec['field'],
name=spec['arrayname'],
component=spec['component'],
range=spec['range'])
class SourceProxyInLayer(explorers.LayerControl):
"""
A LayerControl that turns a source proxy in a layer on and off
"""
def showme(self):
self.representation.Visibility = 1
def hideme(self):
self.representation.Visibility = 0
def __init__(self, parameter, representation):
super(SourceProxyInLayer, self).__init__(parameter, self.showme, self.hideme)
self.representation = representation

View File

@ -0,0 +1,597 @@
#==============================================================================
# Copyright (c) 2015, Kitware Inc., Los Alamos National Laboratory
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
import cinema_store
import paraview
import pv_explorers
from itertools import imap
import numpy as np
def record_visibility():
proxies = []
view_info = {}
view_proxy = paraview.simple.GetActiveView()
view_info['proxy'] = "__view_info"
view_info[
'orientation_axis_visibility'] = view_proxy.OrientationAxesVisibility
camera = view_proxy.GetActiveCamera()
view_info['position'] = camera.GetPosition()
view_info['view_up'] = camera.GetViewUp()
view_info['focal_point'] = camera.GetFocalPoint()
proxies.append(view_info)
source_proxies = paraview.servermanager.ProxyManager().GetProxiesInGroup(
"sources")
for key in source_proxies:
listElt = {}
proxy = source_proxies[key]
listElt['proxy'] = proxy
listElt['visibility'] = None
listElt['scalar_bar_visibility'] = False
listElt['color_array_name'] = None
listElt['color_array_association'] = None
rep = paraview.simple.GetDisplayProperties(proxy)
if rep != None:
listElt['visibility'] = rep.Visibility
listElt['scalar_bar_visibility'] = rep.IsScalarBarVisible(view_proxy)
listElt['color_array_name'] = rep.ColorArrayName.GetArrayName()
listElt['color_array_association'] = rep.ColorArrayName.GetAssociation()
proxies.append(listElt)
return proxies
def restore_visibility(proxies):
view_proxy = paraview.simple.GetActiveView()
for listElt in proxies:
if listElt['proxy'] == "__view_info":
view_proxy.OrientationAxesVisibility = listElt[
'orientation_axis_visibility']
camera = view_proxy.GetActiveCamera()
camera.SetPosition(listElt['position'])
camera.SetViewUp(listElt['view_up'])
camera.SetFocalPoint(listElt['focal_point'])
else:
proxy = listElt['proxy']
vis = listElt['visibility']
if vis != None:
rep = paraview.simple.GetDisplayProperties(proxy)
if rep != None:
rep.Visibility = listElt['visibility']
if listElt['color_array_association']:
rep.SetScalarColoring(
listElt['color_array_name'],
paraview.servermanager.GetAssociationFromString(
listElt['color_array_association']))
if listElt['scalar_bar_visibility']:
rep.SetScalarBarVisibility(view_proxy,
listElt['scalar_bar_visibility'])
def inspect(skip_invisible=True):
"""
Produces a representation of the pipeline that is easier to work with.
Thanks Scott Wittenburg and the pv mailing list for this gem
"""
source_proxies = paraview.servermanager.ProxyManager().GetProxiesInGroup("sources")
proxies = []
proxybyId = {}
for key in source_proxies:
listElt = {}
listElt['name'] = key[0]
listElt['id'] = key[1]
proxy = source_proxies[key]
#skip the invisible
rep = paraview.simple.GetDisplayProperties(proxy)
if skip_invisible:
if rep == None:
#for example, writers in catalyst pipeline
#todo: is it possible for these to have descendants that are visible?
continue
listElt['visibility'] = 0 if (rep == None) else rep.Visibility
parentId = '0'
try:
if hasattr(proxy, 'Input'):
parentId = proxy.Input.GetGlobalIDAsString()
except AttributeError:
parentId = '0'
listElt['parent'] = parentId
proxies.append(listElt)
proxybyId[key[1]] = listElt
if skip_invisible:
#reparent upward over invisible parents
for l in proxies:
pid = l['parent']
while pid != '0' and proxybyId[pid]['visibility'] == 0:
pid = proxybyId[pid]['parent']
l['parent'] = pid
#remove invisible proxies themselves
pxies = []
for l in proxies:
if l['visibility'] != 0:
pxies.append(l)
else:
pxies = proxies
return pxies
def get_pipeline():
proxies = inspect(skip_invisible=False)
for proxy in proxies:
source = paraview.simple.FindSource(proxy['name'])
numberOfProducers = source.GetNumberOfProducers()
#use '==' here; the original 'is' relied on string interning
if proxy['parent'] == '0' and numberOfProducers > 0:
# this proxy is the result of a merge
parents = []
for i in xrange(numberOfProducers):
parents.append(source.GetProducerProxy(i).GetGlobalIDAsString())
proxy['parents'] = parents
else:
proxy['parents'] = [proxy['parent']]
del proxy['parent']
for proxy in proxies:
proxy['children'] = [p['id'] for p in proxies
if proxy['id'] in p['parents']]
return proxies
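#The result is a list of dicts, one per pipeline proxy, shaped like
#(hypothetical ids):
#  [{'name': 'Contour1', 'id': '457', 'visibility': 1,
#    'parents': ['321'], 'children': []}, ...]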
def float_limiter(x):
#a shame, but needed to make sure python, java and (directory/file)name agree
if isinstance(x, (float)):
#return '%6f' % x #arbitrarily chose 6 decimal places
return '%.6e' % x #arbitrarily chose 6 significant digits
else:
return x
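#e.g. float_limiter(0.5) returns '5.000000e-01', while
#float_limiter('a') returns 'a' unchanged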
# Keeps a link between a filter and its explorer-track. Populated in
# add_filter_value() and queried in explore()
explorerDir = {}
def add_filter_value(name, cs, userDefinedValues):
source = paraview.simple.FindSource(name)
# plane offset generator (for Slice or Clip)
def generate_offset_values():
bounds = source.Input.GetDataInformation().DataInformation.GetBounds()
minPoint = np.array([bounds[0], bounds[2], bounds[4]])
maxPoint = np.array([bounds[1], bounds[3], bounds[5]])
scaleVec = maxPoint - minPoint
# adjust offset size depending on the plane orientation
if hasattr(source, 'SliceType'):
n = source.SliceType.Normal
elif hasattr(source, 'ClipType'):
n = source.ClipType.Normal
sNormal = np.array([n[0] * scaleVec[0], n[1] * scaleVec[1], n[2] * scaleVec[2]])
steps = 3 # generate N slice offsets
offsetStep = np.linalg.norm(sNormal) / steps
values = np.arange(-(steps//2), steps//2 + 1) * offsetStep #inclusive upper bound so all N offsets are generated
return values.tolist()
# generate values depending on the type of filter
if isinstance(source, paraview.simple.servermanager.filters.Clip):
# grab values from ui or generate defaults
values = userDefinedValues[name] if (name in userDefinedValues) else generate_offset_values()
if len(values) == 0: values = generate_offset_values()
# add a control and create the appropriate track
cs.add_control(name, cinema_store.make_parameter(name, values, typechoice='hidden'))
explorerDir[name] = pv_explorers.Clip(name, source)
elif isinstance(source, paraview.simple.servermanager.filters.Slice):
# grab values from ui or generate defaults
values = userDefinedValues[name] if (name in userDefinedValues) else generate_offset_values()
if len(values) == 0: values = generate_offset_values()
# add a control and create the appropriate track
cs.add_control(name, cinema_store.make_parameter(name, values, typechoice='hidden'))
explorerDir[name] = pv_explorers.Slice(name, source)
elif isinstance(source, paraview.simple.servermanager.filters.Contour):
def generate_contour_values():
# grab values from ui or generate defaults
vRange = source.Input.GetDataInformation().DataInformation.GetPointDataInformation().GetArrayInformation(0).GetComponentRange(0)
return np.linspace(vRange[0], vRange[1], 5).tolist() # generate 5 contour values
values = userDefinedValues[name] if (name in userDefinedValues) else generate_contour_values()
if len(values) == 0: values = generate_contour_values()
# add a control and create the appropriate track
cs.add_control(name, cinema_store.make_parameter(name, values, typechoice='hidden'))
explorerDir[name] = pv_explorers.Contour(name, source)
def filter_has_parameters(name):
source = paraview.simple.FindSource(name)
return any(imap(lambda filter: isinstance(source, filter),
[paraview.simple.servermanager.filters.Clip,
paraview.simple.servermanager.filters.Slice,
paraview.simple.servermanager.filters.Contour]))
def add_control_and_colors(name, cs):
source = paraview.simple.FindSource(name)
#make up list of color options
fields = {'depth':'depth','luminance':'luminance'}
ranges = {}
defaultName = None
view_proxy = paraview.simple.GetActiveView()
rep = paraview.simple.GetRepresentation(source, view_proxy)
if rep.Representation != 'Outline':
cda = source.GetCellDataInformation()
for a in range(0, cda.GetNumberOfArrays()):
arr = cda.GetArray(a)
arrName = arr.GetName()
if not arrName == "Normals":
for i in range(0, arr.GetNumberOfComponents()):
fName = arrName+"_"+str(i)
fields[fName] = 'value'
ranges[fName] = arr.GetRange(i)
if defaultName == None:
defaultName = fName
pda = source.GetPointDataInformation()
for a in range(0, pda.GetNumberOfArrays()):
arr = pda.GetArray(a)
arrName = arr.GetName()
if not arrName == "Normals":
for i in range(0, arr.GetNumberOfComponents()):
fName = arrName+"_"+str(i)
fields[fName] = 'value'
ranges[fName] = arr.GetRange(i)
if defaultName == None:
defaultName = fName
if defaultName == None:
fields['white']='rgb'
defaultName='white'
cparam = cinema_store.make_field("color"+name, fields, default=defaultName, valueRanges=ranges)
cs.add_field("color"+name,cparam,'vis',[name])
def make_cinema_store(proxies, ocsfname, forcetime=False, _userDefinedValues={}):
"""
Takes in the pipeline, structured as a tree, and makes a cinema store definition
containing all the parameters we might vary.
"""
if "phi" in _userDefinedValues:
phis = _userDefinedValues["phi"]
else:
#phis = [0,45,90,135,180,225,270,315,360]
phis = [0,180,360]
if "theta" in _userDefinedValues:
thetas = _userDefinedValues["theta"]
else:
#thetas = [0,20,40,60,80,100,120,140,160,180]
thetas = [0,90,180]
tvalues = []
cs = cinema_store.FileStore(ocsfname)
try:
cs.load()
tprop = cs.get_parameter('time')
tvalues = tprop['values']
#start with clean slate, other than time
cs = cinema_store.FileStore(ocsfname)
except (IOError, KeyError): #parenthesized: the old form bound KeyError as the exception variable
pass
cs.add_metadata({'type':'composite-image-stack'})
cs.add_metadata({'store_type':'FS'})
cs.add_metadata({'version':'0.0'})
pipeline = get_pipeline()
cs.add_metadata({'pipeline':pipeline})
vis = [proxy['name'] for proxy in proxies]
cs.add_layer("vis",cinema_store.make_parameter('vis', vis))
for proxy in proxies:
proxy_name = proxy['name']
add_filter_value(proxy_name,cs,_userDefinedValues)
dependency_set = set([proxy['id']])
repeat = True
while repeat:
repeat = False
deps = set(proxy['id'] for proxy in proxies if proxy['parent'] in dependency_set)
if deps - dependency_set:
dependency_set = dependency_set.union(deps)
repeat = True
dependency_list = [proxy['name'] for proxy in proxies if proxy['id'] in dependency_set]
cs.assign_parameter_dependence(proxy_name,'vis',dependency_list)
add_control_and_colors(proxy_name,cs)
cs.assign_parameter_dependence("color"+proxy_name,'vis',[proxy_name])
fnp = ""
if forcetime:
#time specified, use it, being careful to append if already a list
tvalues.append(forcetime)
tprop = cinema_store.make_parameter('time', tvalues)
cs.add_parameter('time', tprop)
fnp = fnp+"{time}_"
else:
#time not specified, try to discover timesteps automatically
times = paraview.simple.GetAnimationScene().TimeKeeper.TimestepValues
if times:
prettytimes = [float_limiter(t) for t in times]
cs.add_parameter("time", cinema_store.make_parameter('time', prettytimes))
fnp = fnp+"{time}_"
cs.add_parameter("phi", cinema_store.make_parameter('phi', phis))
cs.add_parameter("theta", cinema_store.make_parameter('theta', thetas))
fnp = fnp+"{phi}_{theta}.png"
cs.filename_pattern = fnp
return cs
def testexplore(cs):
"""
For debugging, takes in the cinema store and prints out everything that we'll take snapshots of
"""
import explorers
import copy
class printer(explorers.Explorer):
def execute(self, desc):
p = copy.deepcopy(desc)
x = 'phi'
if x in p.keys():
print x, ":", desc[x], ",",
del p[x]
x = 'theta'
if x in p.keys():
print x, ":", desc[x], ",",
del p[x]
for x in sorted(p.keys()):
print x, ":", p[x], ",",
print
params = cs.parameter_list.keys()
e = printer(cs, params, [])
e.explore()
def explore(cs, proxies, iSave=True, currentTime=None):
"""
Takes in the store, which contains only the list of parameters, and
explores the parameter space to fill the store with data products.
"""
# import pv_explorers
import explorers
view_proxy = paraview.simple.GetActiveView()
dist = paraview.simple.GetActiveCamera().GetDistance()
#associate control points with parameters of the data store
cam = pv_explorers.Camera([0,0,0], [0,1,0], dist, view_proxy)
params = cs.parameter_list.keys()
tracks = []
tracks.append(cam)
cols = []
ctime_float=None
if currentTime:
ctime_float = float(currentTime['time'])
#hide all annotations
view_proxy.OrientationAxesVisibility = 0
for x in proxies:
name = x['name']
for y in params:
if (y in explorerDir) and (name == y):
#print "name in ExplorerDir: ", y, ", ", explorerDir[y]
tracks.append(explorerDir[y])
if name in y:
#print "N", name
#print "X", x
#print "Y", y
#visibility of the layer
sp = paraview.simple.FindSource(name)
rep = paraview.simple.GetRepresentation(sp, view_proxy)
#hide this layer's scalar bar annotation
if rep.LookupTable:
rep.SetScalarBarVisibility(view_proxy, False)
tc1 = pv_explorers.SourceProxyInLayer(name, rep)
lt = explorers.Layer('vis', [tc1])
tracks.append(lt)
#fields for the layer
cC = pv_explorers.ColorList()
cC.AddDepth('depth')
cC.AddLuminance('luminance')
sp.UpdatePipeline(ctime_float)
cda = sp.GetCellDataInformation()
numVals = 0
if rep.Representation != 'Outline':
for a in range(0, cda.GetNumberOfArrays()):
arr = cda.GetArray(a)
arrName = arr.GetName()
if not arrName == "Normals":
for i in range(0,arr.GetNumberOfComponents()):
numVals+=1
cC.AddValueRender(arrName+"_"+str(i),
True,
arrName,
i, arr.GetRange(i))
pda = sp.GetPointDataInformation()
for a in range(0, pda.GetNumberOfArrays()):
arr = pda.GetArray(a)
arrName = arr.GetName()
if not arrName == "Normals":
for i in range(0,arr.GetNumberOfComponents()):
numVals+=1
cC.AddValueRender(arrName+"_"+str(i),
False,
arrName,
i, arr.GetRange(i))
if numVals == 0:
cC.AddSolidColor('white', [1,1,1])
col = pv_explorers.Color("color"+name, cC, rep)
tracks.append(col)
cols.append(col)
e = pv_explorers.ImageExplorer(cs, params,
tracks,
view_proxy,
iSave)
for c in cols:
c.imageExplorer = e
times = paraview.simple.GetAnimationScene().TimeKeeper.TimestepValues
if not times:
e.explore(currentTime)
else:
for t in times:
view_proxy.ViewTime=t
e.explore({'time':float_limiter(t)})
def record(csname="/tmp/test_pv/info.json"):
paraview.simple.Render()
view = paraview.simple.GetActiveView()
camera = view.GetActiveCamera()
pxystate = record_visibility()
view.LockBounds = 1
p = inspect()
cs = make_cinema_store(p, csname)
#if test:
# testexplore(cs)
#else:
explore(cs, p)
view.LockBounds = 0
restore_visibility(pxystate)
cs.save()
def export_scene(baseDirName, viewSelection, trackSelection):
'''This explores a set of user-defined views and tracks. export_scene is
called from vtkCinemaExport. The expected order of parameters is as follows:
- viewSelection (following the format defined in Wrapping/Python/paraview/cpstate.py):
Dictionary of the form {'ViewName' : [parameters], ...}, with parameters defined in the
order: Image filename, freq, fittoscreen, magnification, width, height, cinema.
- trackSelection:
Dictionary of the form {'TrackName' : [v1, v2, v3], ...}
Note: baseDirName is used as the parent directory of the database generated for
each view in viewSelection. 'Image filename' is used as the database directory name.
'''
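# Hypothetical example of the expected inputs:
#   viewSelection = {'RenderView1': ['image_%t.png', 1, 1, 1, 800, 600,
#                                    {'phi': [0, 180], 'theta': [0, 90]}]}
#   trackSelection = {'Contour1': [0.1, 0.5, 0.9]}
#   export_scene('/tmp/cinema', viewSelection, trackSelection)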
import paraview.simple as pvs
# save initial state
initialView = pvs.GetActiveView()
pvstate = record_visibility()
atLeastOneViewExported = False
for viewName, viewParams in viewSelection.iteritems():
# check if this view was selected to export as spec b
cinemaParams = viewParams[6]
if len(cinemaParams) == 0:
print "Skipping view: Not selected to export as cinema spherical."
continue
# get the view and save the initial status
view = pvs.FindView(viewName)
pvs.SetActiveView(view)
view.ViewSize = [viewParams[4], viewParams[5]]
pvs.Render() # fully renders the scene (if not, some faces might be culled)
view.LockBounds = 1
#writeFreq = viewParams[1] # TODO where to get the timestamp in this case?
#if (writeFreq and timestamp % writeFreq == 0):
#magnification = viewParams[3] # Not used in cinema (TODO hide in UI)
fitToScreen = viewParams[2]
if fitToScreen != 0:
if view.IsA("vtkSMRenderViewProxy") == True:
view.ResetCamera()
elif view.IsA("vtkSMContextViewProxy") == True:
view.ResetDisplay()
else:
print ' do not know what to do with a ', view.GetClassName()
userDefValues = {}
if "theta" in cinemaParams:
userDefValues["theta"] = cinemaParams["theta"]
if "phi" in cinemaParams:
userDefValues["phi"] = cinemaParams["phi"]
userDefValues.update(trackSelection)
# generate file path
import os.path
viewFileName = viewParams[0]
viewDirName = viewFileName[0:viewFileName.rfind("_")] #strip _num.ext
filePath = os.path.join(baseDirName, viewDirName, "info.json")
p = inspect()
cs = make_cinema_store(p, filePath, forcetime = False,
_userDefinedValues = userDefValues)
explore(cs, p)
view.LockBounds = 0
cs.save()
atLeastOneViewExported = True
if not atLeastOneViewExported:
print "No view was selected to export as cinema spherical."
return
# restore initial state
pvs.SetActiveView(initialView)
restore_visibility(pvstate)

View File

@ -0,0 +1,295 @@
#==============================================================================
# Copyright (c) 2015, Kitware Inc., Los Alamos National Laboratory
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
"""
Module that uses one of the available back end libraries to write out image
files for cinema's file store class.
"""
import numpy
import os
import warnings
exrEnabled = False
try:
import OexrHelper as exr
exrEnabled = True
print "Imported OpenEXR, will default to *.exr in z-buffer images."
except ImportError:
pass
pilEnabled = False
try:
import PIL.Image
import PIL.ImImagePlugin
pilEnabled = True
except ImportError:
pass
vtkEnabled = False
try:
import sys
if "paraview" in sys.modules:
import paraview.vtk
import paraview.vtk.vtkIOImage
from paraview.vtk.vtkIOImage import (vtkPNGReader,
vtkBMPReader,
vtkPNMReader,
vtkTIFFReader,
vtkJPEGReader,
vtkPNGWriter,
vtkBMPWriter,
vtkPNMWriter,
vtkTIFFWriter,
vtkJPEGWriter)
from paraview.vtk.vtkCommonDataModel import vtkImageData
from paraview import numpy_support as n2v
else:
import vtk
from vtk import (vtkPNGReader,
vtkBMPReader,
vtkPNMReader,
vtkTIFFReader,
vtkJPEGReader,
vtkPNGWriter,
vtkBMPWriter,
vtkPNMWriter,
vtkTIFFWriter,
vtkJPEGWriter,
vtkImageData)
from vtk.util import numpy_support as n2v
vtkEnabled = True
except ImportError:
pass
class RasterWrangler(object):
"""
Isolates the specifics of raster file formats from the cinema store.
In particular this delegates the task to one or more subsidiary modules.
The choice of which is open to facilitate packaging in different
places, e.g. PIL for desktop and small packages, VTK for HPC contexts.
"""
def __init__(self):
self.backends = set()
if exrEnabled:
self.backends.add("OpenEXR")
elif pilEnabled:
self.backends.add("PIL")
elif vtkEnabled:
self.backends.add("VTK")
def enableOpenEXR(self):
if exrEnabled:
self.backends.add("OpenEXR")
else:
warnings.warn("OpenEXR module not found", ImportWarning)
def enablePIL(self):
if pilEnabled:
self.backends.add("PIL")
else:
warnings.warn("PIL module not found", ImportWarning)
def enableVTK(self):
if vtkEnabled:
self.backends.add("VTK")
else:
warnings.warn("VTK module not found", ImportWarning)
def _make_writer(self,filename):
"Internal function."
extension = None
parts = filename.split('.')
if len(parts) > 1:
extension = parts[-1]
else:
raise RuntimeError, "Filename has no extension, cannot guess writer to use"
if extension == 'png':
return vtkPNGWriter()
elif extension == 'bmp':
return vtkBMPWriter()
elif extension == 'ppm':
return vtkPNMWriter()
elif extension == 'tif' or extension == 'tiff':
return vtkTIFFWriter()
elif extension == 'jpg' or extension == 'jpeg':
return vtkJPEGWriter()
elif extension == 'vti':
#note: vtkXMLImageDataWriter is not in the import lists above and
#must be supplied by the importing environment (vtkIOXML)
return vtkXMLImageDataWriter()
else:
raise RuntimeError, "Cannot infer filetype from extension:", extension
def _make_reader(self,filename):
"Internal function."
extension = None
parts = filename.split('.')
if len(parts) > 1:
extension = parts[-1]
else:
raise RuntimeError, "Filename has no extension, please guess reader to use"
if extension == 'png':
return vtkPNGReader()
elif extension == 'bmp':
return vtkBMPReader()
elif extension == 'ppm':
return vtkPNMReader()
elif extension == 'tif' or extension == 'tiff':
return vtkTIFFReader()
elif extension == 'jpg' or extension == 'jpeg':
return vtkJPEGReader()
elif extension == 'vti':
#note: vtkXMLImageDataReader must likewise be supplied by the
#importing environment (vtkIOXML)
return vtkXMLImageDataReader()
else:
raise RuntimeError, "Cannot infer filetype from extension:", extension
def genericreader(self, fname):
with open(fname, "r") as file:
return file.read()
def genericwriter(self, imageslice, fname):
with open(fname, "w") as file:
file.write(imageslice)
def rgbreader(self, fname):
if "VTK" in self.backends:
#reconstructed read path: the original block repeated the writer
#logic here and referenced an undefined 'imageslice'
reader = self._make_reader(fname)
reader.SetFileName(fname)
reader.Update()
image = reader.GetOutput()
ext = image.GetExtent()
width = ext[1]-ext[0]+1
height = ext[3]-ext[2]+1
imagescalars = image.GetPointData().GetScalars()
return n2v.vtk_to_numpy(imagescalars).reshape(height, width, 3)
elif "PIL" in self.backends:
im = PIL.Image.open(fname)
return numpy.array(im, numpy.uint8).reshape(im.size[1],im.size[0],3)
else:
print "Warning: need PIL or VTK to read from " + fname
def rgbwriter(self, imageslice, fname):
if "VTK" in self.backends:
height = imageslice.shape[1]
width = imageslice.shape[0]
contig = imageslice.reshape(height*width,3)
vtkarray = n2v.numpy_to_vtk(contig)
id = vtkImageData()
id.SetExtent(0, height-1, 0, width-1, 0, 0)
id.GetPointData().SetScalars(vtkarray)
writer = self._make_writer(fname)
writer.SetInputData(id)
writer.SetFileName(fname)
writer.Write()
elif "PIL" in self.backends:
imageslice = numpy.flipud(imageslice)
pimg = PIL.Image.fromarray(imageslice)
pimg.save(fname)
else:
print "Warning: need PIL or VTK to write to " + fname
def zfileextension(self):
if "OpenEXR" in self.backends:
return ".exr"
else:
return ".im"
def zreader(self, fname):
if "OpenEXR" in self.backends:
return exr.load_depth(fname)
elif "PIL" in self.backends:
im = PIL.Image.open(fname)
return numpy.array(im, numpy.float32).reshape(im.size[1],im.size[0])
else:
print "Warning: need OpenEXR or PIL to read from " + fname
def zwriter(self, imageslice, fname):
if "OpenEXR" in self.backends:
imageslice = numpy.flipud(imageslice)
exr.save_depth(imageslice, fname)
elif "VTK" in self.backends:
height = imageslice.shape[1]
width = imageslice.shape[0]
file = open(fname, mode='w')
file.write("Image type: L 32F image\r\n")
file.write("Name: A cinema depth image\r\n")
file.write("Image size (x*y): "+str(height) + "*" + str(width) + "\r\n")
file.write("File size (no of images): 1\r\n")
file.write(chr(26))
imageslice.tofile(file)
file.close()
elif "PIL" in self.backends:
imageslice = numpy.flipud(imageslice)
pimg = PIL.Image.fromarray(imageslice)
#TODO:
# don't let ImImagePlugin.py insert the Name: filename in line two
# why? because ImImagePlugin.py reader has a 100 character limit
pimg.save(fname)
else:
print "Warning: need OpenEXR or PIL or VTK to write to " + fname
def assertvalidimage(self, filename):
if not os.path.isfile(filename):
raise IOError(filename + " does not exist.")
if "OpenEXR" in self.backends:
if not exr.isOpenExrFile(filename):
raise IOError(filename + " cannot be opened using OpenEXR.")
elif "VTK" in self.backends:
reader = self._make_reader(filename)
if not reader.CanReadFile(filename):
raise IOError("VTK Cannot open file " + filename)
elif "PIL" in self.backends:
try:
PIL.Image.open(filename)
except IOError:
raise
else:
raise RuntimeError(
"Warning: need OpenEXR or PIL or VTK to validate file store")

View File

@ -0,0 +1,41 @@
r"""collaboration is a helper module useful for handling synchronisation
in multi-client configuration as well as providing other collaboration
related methods
A simple example:
from paraview import collaboration
collaboration.processServerEvents()
"""
#==============================================================================
#
# Program: ParaView
# Module: collaboration.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
import paraview, re, new, sys, vtk
import simple
import servermanager
from paraview.vtk import vtkPVClientServerCoreCore
from paraview.vtk import vtkPVServerManagerCore
from paraview.vtk import vtkPVServerImplementationCore
from paraview.vtk import vtkCommonCore
def processServerEvents():
"""Update the local state based on the notifications received from the server
that have been generated by other clients."""
if servermanager.ActiveConnection:
session = servermanager.ActiveConnection.Session
if session.IsMultiClients() and session.IsNotBusy():
while vtkPVClientServerCoreCore.vtkProcessModule.GetProcessModule().GetNetworkAccessManager().ProcessEvents(100):
pass
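# A minimal usage sketch (hypothetical application loop, not part of this
# module): a client in a multi-client session can poll for other clients'
# changes whenever it is otherwise idle.
#
#   from paraview import collaboration
#   while True:  # e.g. an application's idle loop
#       collaboration.processServerEvents()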

View File

@ -0,0 +1,4 @@
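# Byte-compile the ParaView python modules copied into the build tree and
# mark completion. '@PV_PYTHON_MODULE_BINARY_DIR@' is a placeholder that the
# build system (CMake) replaces with the actual module output directory when
# this file is configured.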
import compileall
compileall.compile_dir('@PV_PYTHON_MODULE_BINARY_DIR@')
file = open('@PV_PYTHON_MODULE_BINARY_DIR@/pv_compile_complete', 'w')
file.write('Done')
file.close()

View File

@ -0,0 +1,716 @@
r"""
This module is designed for use in co-processing Python scripts. It provides a
class, Pipeline, which is designed to be used as the base class for Python
pipelines. Additionally, this module has several other utility functions that
are appropriate for co-processing.
"""
from paraview import simple, servermanager
from vtkPVVTKExtensionsCorePython import *
import math
# -----------------------------------------------------------------------------
def IsInModulo(timestep, frequencyArray):
"""
Return True if the given timestep is in one of the provided frequencies.
This can be interpreted as follows::
isFM = IsInModulo(timestep, [2,3,7])
is similar to::
isFM = (timestep % 2 == 0) or (timestep % 3 == 0) or (timestep % 7 == 0)
"""
for frequency in frequencyArray:
if frequency > 0 and (timestep % frequency == 0):
return True
return False
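# Example of the semantics (a quick sketch):
#   IsInModulo(8, [2, 3, 7])    # True, since 8 % 2 == 0
#   IsInModulo(11, [2, 3, 7])   # False, 11 is a multiple of none of them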
class CoProcessor(object):
"""Base class for co-processing Pipelines. paraview.cpstate Module can
be used to dump out ParaView states as co-processing pipelines. Those are
typically subclasses of this. The subclasses must provide an
implementation for the CreatePipeline() method."""
def __init__(self):
self.__PipelineCreated = False
self.__ProducersMap = {}
self.__WritersList = []
self.__ViewsList = []
self.__EnableLiveVisualization = False
self.__LiveVisualizationFrequency = 1;
self.__LiveVisualizationLink = None
self.__CinemaTracksList = []
self.__UserDefinedValues = {}
self.__InitialFrequencies = {}
def SetUpdateFrequencies(self, frequencies):
"""Set the frequencies at which the pipeline needs to be updated.
Typically, this is called by the subclass once it has determined at which
timesteps co-processing needs to be done.
frequencies is a map, with key -> string name of the simulation
input, and value -> a list of frequencies.
"""
if type(frequencies) != dict:
raise RuntimeError,\
"Incorrect argument type: %s, must be a dict" % type(frequencies)
self.__InitialFrequencies = frequencies
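# Example (a sketch; 'input' is whatever name the simulation adaptor
# gave the input): process every 10th timestep and also every 100th:
#   coprocessor.SetUpdateFrequencies({'input': [10, 100]})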
def EnableLiveVisualization(self, enable, frequency = 1):
"""Call this method to enable live-visualization. When enabled,
DoLiveVisualization() will communicate with ParaView server if possible
for live visualization. frequency specifies how often (in timesteps)
the communication happens (the default is every timestep)."""
self.__EnableLiveVisualization = enable
self.__LiveVisualizationFrequency = frequency
def CreatePipeline(self, datadescription):
"""This methods must be overridden by subclasses to create the
visualization pipeline."""
raise RuntimeError, "Subclasses must override this method."
def LoadRequestedData(self, datadescription):
"""Call this method in RequestDataDescription co-processing pass to mark
the datadescription with information about what fields and grids are
required for this pipeline for the given timestep, if any.
Default implementation uses the update-frequencies set using
SetUpdateFrequencies() to determine if the current timestep needs to
be processed and then requests all fields. Subclasses can override
this method to provide additional customizations."""
timestep = datadescription.GetTimeStep()
# if this is a time step to do live then all of the inputs
# must be made available. note that we want the pipeline built
# before we do the actual first live connection.
if self.__EnableLiveVisualization and timestep % self.__LiveVisualizationFrequency == 0 \
and self.__LiveVisualizationLink:
if self.__LiveVisualizationLink.Initialize(servermanager.ActiveConnection.Session.GetSessionProxyManager()):
num_inputs = datadescription.GetNumberOfInputDescriptions()
for cc in range(num_inputs):
input_name = datadescription.GetInputDescriptionName(cc)
datadescription.GetInputDescription(cc).AllFieldsOn()
datadescription.GetInputDescription(cc).GenerateMeshOn()
return
# if we haven't processed the pipeline yet in DoCoProcessing() we
# must use the initial frequencies to figure out if there's
# work to do this time/timestep. If Live is enabled we mark
# all inputs as needed (this is only done if the Live connection
# hasn't been set up yet). If we don't have live enabled
# we know that the output frequencies aren't changed and can
# just use the initial frequencies.
if self.__InitialFrequencies or not self.__EnableLiveVisualization:
num_inputs = datadescription.GetNumberOfInputDescriptions()
for cc in range(num_inputs):
input_name = datadescription.GetInputDescriptionName(cc)
freqs = self.__InitialFrequencies.get(input_name, [])
if self.__EnableLiveVisualization or IsInModulo(timestep, freqs):
datadescription.GetInputDescription(cc).AllFieldsOn()
datadescription.GetInputDescription(cc).GenerateMeshOn()
else:
# the catalyst pipeline may have been changed by a live connection
# so we need to regenerate the frequencies
import cpstate
frequencies = {}
for writer in self.__WritersList:
frequency = writer.parameters.GetProperty(
"WriteFrequency").GetElement(0)
if (timestep % frequency) == 0 or \
datadescription.GetForceOutput() == True:
writerinputs = cpstate.locate_simulation_inputs(writer)
for writerinput in writerinputs:
datadescription.GetInputDescriptionByName(writerinput).AllFieldsOn()
datadescription.GetInputDescriptionByName(writerinput).GenerateMeshOn()
for view in self.__ViewsList:
if (view.cpFrequency and timestep % view.cpFrequency == 0) or \
datadescription.GetForceOutput() == True:
viewinputs = cpstate.locate_simulation_inputs(view)
for viewinput in viewinputs:
datadescription.GetInputDescriptionByName(viewinput).AllFieldsOn()
datadescription.GetInputDescriptionByName(viewinput).GenerateMeshOn()
def UpdateProducers(self, datadescription):
"""This method will update the producers in the pipeline. If the
pipeline is not created, it will be created using
self.CreatePipeline().
"""
if not self.__PipelineCreated:
self.CreatePipeline(datadescription)
self.__PipelineCreated = True
if self.__EnableLiveVisualization:
# we don't want to use __InitialFrequencies any more with live viz
self.__InitialFrequencies = None
else:
simtime = datadescription.GetTime()
for name, producer in self.__ProducersMap.iteritems():
producer.GetClientSideObject().SetOutput(
datadescription.GetInputDescriptionByName(name).GetGrid(),
simtime)
def WriteData(self, datadescription):
"""This method will update all writes present in the pipeline, as
needed, to generate the output data files, respecting the
write-frequencies set on the writers."""
timestep = datadescription.GetTimeStep()
for writer in self.__WritersList:
frequency = writer.parameters.GetProperty(
"WriteFrequency").GetElement(0)
if (timestep % frequency) == 0 or \
datadescription.GetForceOutput() == True:
fileName = writer.parameters.GetProperty("FileName").GetElement(0)
writer.FileName = fileName.replace("%t", str(timestep))
writer.UpdatePipeline(datadescription.GetTime())
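# For example, a writer registered with filename='data_%t.pvti' and
# freq=10 produces data_0.pvti, data_10.pvti, data_20.pvti, ... as the
# simulation advances and WriteData() is called for each timestep.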
def WriteImages(self, datadescription, rescale_lookuptable=False):
"""This method will update all views, if present and write output
images, as needed."""
timestep = datadescription.GetTimeStep()
cinema_dirs = []
for view in self.__ViewsList:
if (view.cpFrequency and timestep % view.cpFrequency == 0) or \
datadescription.GetForceOutput() == True:
fname = view.cpFileName
fname = fname.replace("%t", str(timestep))
if view.cpFitToScreen != 0:
if view.IsA("vtkSMRenderViewProxy") == True:
view.ResetCamera()
elif view.IsA("vtkSMContextViewProxy") == True:
view.ResetDisplay()
else:
print ' do not know what to do with a ', view.GetClassName()
view.ViewTime = datadescription.GetTime()
if rescale_lookuptable:
self.RescaleDataRange(view, datadescription.GetTime())
cinemaOptions = view.cpCinemaOptions
if cinemaOptions and 'camera' in cinemaOptions:
dirname = None
if 'composite' in cinemaOptions:
dirname = self.UpdateCinemaComposite(view, datadescription)
else:
dirname = self.UpdateCinema(view, datadescription)
if dirname:
cinema_dirs.append(dirname)
else:
simple.WriteImage(fname, view, Magnification=view.cpMagnification)
if len(cinema_dirs) > 1:
workspace = open('cinema/info.json', 'w')
workspace.write('{\n')
workspace.write(' "metadata": {\n')
workspace.write(' "type": "workbench"\n')
workspace.write(' },\n')
workspace.write(' "runs": [\n')
for i in range(0,len(cinema_dirs)):
workspace.write(' {\n')
workspace.write(' "title": "%s",\n' % cinema_dirs[i])
workspace.write(' "description": "%s",\n' % cinema_dirs[i])
workspace.write(' "path": "%s"\n' % cinema_dirs[i])
if i+1 < len(cinema_dirs):
workspace.write(' },\n')
else:
workspace.write(' }\n')
workspace.write(' ]\n')
workspace.write('}\n')
workspace.close()
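# With cinema_dirs = ['view_0', 'view_1'], the workbench info.json written
# above looks like (a sketch, whitespace aside):
#   { "metadata": { "type": "workbench" },
#     "runs": [
#       { "title": "view_0", "description": "view_0", "path": "view_0" },
#       { "title": "view_1", "description": "view_1", "path": "view_1" } ] }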
def DoLiveVisualization(self, datadescription, hostname, port):
"""This method execute the code-stub needed to communicate with ParaView
for live-visualization. Call this method only if you want to support
live-visualization with your co-processing module."""
if not self.__EnableLiveVisualization:
return
if not self.__LiveVisualizationLink and self.__EnableLiveVisualization:
# Create the vtkLiveInsituLink i.e. the "link" to the visualization processes.
self.__LiveVisualizationLink = servermanager.vtkLiveInsituLink()
# Tell vtkLiveInsituLink which host/port it must connect to
# for the visualization process.
self.__LiveVisualizationLink.SetHostname(hostname)
self.__LiveVisualizationLink.SetInsituPort(int(port))
# Initialize the "link"
self.__LiveVisualizationLink.Initialize(servermanager.ActiveConnection.Session.GetSessionProxyManager())
timeStep = datadescription.GetTimeStep()
if self.__EnableLiveVisualization and timeStep % self.__LiveVisualizationFrequency == 0:
if not self.__LiveVisualizationLink.Initialize(servermanager.ActiveConnection.Session.GetSessionProxyManager()):
return
time = datadescription.GetTime()
# stay in the loop while the simulation is paused
while True:
# Update the simulation state, extracts and simulationPaused
# from ParaView Live
self.__LiveVisualizationLink.InsituUpdate(time, timeStep)
# sources need to be updated by insitu
# code. vtkLiveInsituLink never updates the pipeline, it
# simply uses the data available at the end of the
# pipeline, if any.
from paraview import simple
for source in simple.GetSources().values():
source.UpdatePipeline(time)
# push extracts to the visualization process.
self.__LiveVisualizationLink.InsituPostProcess(time, timeStep)
if (self.__LiveVisualizationLink.GetSimulationPaused()):
# This blocks until something changes on ParaView Live
# and then it continues the loop. Returns != 0 if LIVE side
# disconnects
if (self.__LiveVisualizationLink.WaitForLiveChange()):
break;
else:
break
def CreateProducer(self, datadescription, inputname):
"""Creates a producer proxy for the grid. This method is generally used in
CreatePipeline() call to create producers."""
# Check that the producer name for the input given is valid for the
# current setup.
if not datadescription.GetInputDescriptionByName(inputname):
raise RuntimeError, "Simulation input name '%s' does not exist" % inputname
grid = datadescription.GetInputDescriptionByName(inputname).GetGrid()
producer = simple.PVTrivialProducer(guiName=inputname)
producer.add_attribute("cpSimulationInput", inputname)
# mark this as an input proxy so we can use cpstate.locate_simulation_inputs()
# to find it
producer.SMProxy.cpSimulationInput = inputname
# we purposefully don't set the time for the PVTrivialProducer here.
# when we update the pipeline we will do it then.
producer.GetClientSideObject().SetOutput(grid, datadescription.GetTime())
if grid.IsA("vtkImageData") == True or \
grid.IsA("vtkStructuredGrid") == True or \
grid.IsA("vtkRectilinearGrid") == True:
extent = datadescription.GetInputDescriptionByName(inputname).GetWholeExtent()
producer.WholeExtent= [ extent[0], extent[1], extent[2], extent[3], extent[4], extent[5] ]
# Save the producer for easy access in UpdateProducers() call.
self.__ProducersMap[inputname] = producer
producer.UpdatePipeline(datadescription.GetTime())
return producer
def RegisterWriter(self, writer, filename, freq):
"""Registers a writer proxy. This method is generally used in
CreatePipeline() to register writers. All writers registered this way
will write their output files appropriately when WriteData() is called."""
writerParametersProxy = self.WriterParametersProxy(
writer, filename, freq)
writer.FileName = filename
writer.add_attribute("parameters", writerParametersProxy)
self.__WritersList.append(writer)
return writer
def WriterParametersProxy(self, writer, filename, freq):
"""Creates a client only proxy that will be synchronized with ParaView
Live, allowing a user to set the filename and frequency.
"""
controller = servermanager.ParaViewPipelineController()
# assume that a client only proxy with the same name as a writer
# is available in "insitu_writer_paramters"
# Since coprocessor sometimes pass writer as a custom object and not
# a proxy, we need to handle that. Just creating any arbitrary writer
# proxy to store the parameters it acceptable. So let's just do that
# when the writer is not a proxy.
writerIsProxy = isinstance(writer, servermanager.Proxy)
helperName = writer.GetXMLName() if writerIsProxy else "XMLPImageDataWriter"
proxy = servermanager.ProxyManager().NewProxy(
"insitu_writer_parameters", helperName)
controller.PreInitializeProxy(proxy)
if writerIsProxy:
# it's possible that the writer can take in multiple input connections
# so we need to go through all of them. the try/except block seems
# to be the best way to figure out if there are multiple input connections
try:
length = len(writer.Input)
for i in range(length):
proxy.GetProperty("Input").AddInputConnection(
writer.Input[i].SMProxy, 0)
except:
proxy.GetProperty("Input").SetInputConnection(
0, writer.Input.SMProxy, 0)
proxy.GetProperty("FileName").SetElement(0, filename)
proxy.GetProperty("WriteFrequency").SetElement(0, freq)
controller.PostInitializeProxy(proxy)
controller.RegisterPipelineProxy(proxy)
return proxy
def UpdateFilterValues(self, name, proxy, values):
if (isinstance(proxy, simple.servermanager.filters.Slice) or
isinstance(proxy, simple.servermanager.filters.Clip) or
isinstance(proxy, simple.servermanager.filters.Contour)):
self.__UserDefinedValues[name] = values
def RegisterCinemaTrack(self, name, proxy, smproperty, valrange):
"""
Register a point of control (filter's property) that will be varied over in a cinema export.
"""
if not isinstance(proxy, servermanager.Proxy):
raise RuntimeError, "Invalid 'proxy' argument passed to RegisterCinemaTrack."
self.__CinemaTracksList.append({"name":name, "proxy":proxy, "smproperty":smproperty, "valrange":valrange})
self.UpdateFilterValues(name, proxy, valrange)
return proxy
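# Example (a sketch; slice0 is a hypothetical Slice filter created in
# CreatePipeline()): vary the slice offset over three values per export:
#   coprocessor.RegisterCinemaTrack('slice', slice0, 'SliceOffsetValues',
#                                   [0.0, 0.5, 1.0])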
def RegisterView(self, view, filename, freq, fittoscreen, magnification, width, height, cinema=None):
"""Register a view for image capture with extra meta-data such
as magnification, size and frequency."""
if not isinstance(view, servermanager.Proxy):
raise RuntimeError, "Invalid 'view' argument passed to RegisterView."
view.add_attribute("cpFileName", filename)
view.add_attribute("cpFrequency", freq)
view.add_attribute("cpFileName", filename)
view.add_attribute("cpFitToScreen", fittoscreen)
view.add_attribute("cpMagnification", magnification)
view.add_attribute("cpCinemaOptions", cinema)
view.ViewSize = [ width, height ]
self.__ViewsList.append(view)
return view
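# Example (a sketch mirroring what generated co-processing scripts emit):
#   coprocessor.RegisterView(renderView1, filename='image_%t.png', freq=10,
#                            fittoscreen=0, magnification=1, width=800,
#                            height=800, cinema=None)
#   renderView1.ViewTime = datadescription.GetTime()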
def CreateWriter(self, proxy_ctor, filename, freq):
""" **** DEPRECATED!!! Use RegisterWriter instead ****
Creates a writer proxy. This method is generally used in
CreatePipeline() to create writers. All writers created this way will
write their output files appropriately when WriteData() is called."""
writer = proxy_ctor()
return self.RegisterWriter(writer, filename, freq)
def CreateView(self, proxy_ctor, filename, freq, fittoscreen, magnification, width, height):
""" **** DEPRECATED!!! Use RegisterView instead ****
Create a CoProcessing view for image capture with extra meta-data
such as magnification, size and frequency."""
view = proxy_ctor()
return self.RegisterView(view, filename, freq, fittoscreen, magnification, width, height, None)
def Finalize(self):
for writer in self.__WritersList:
if hasattr(writer, 'Finalize'):
writer.Finalize()
for view in self.__ViewsList:
if hasattr(view, 'Finalize'):
view.Finalize()
def RescaleDataRange(self, view, time):
"""DataRange can change across time, sometime we want to rescale the
color map to match to the closer actual data range."""
reps = view.Representations
for rep in reps:
if not hasattr(rep, 'Visibility') or \
not rep.Visibility or \
not hasattr(rep, 'MapScalars') or \
not rep.MapScalars or \
not rep.LookupTable:
# rep is either not visible or not mapping scalars using a LUT.
continue
input = rep.Input
input.UpdatePipeline(time) #make sure range is up-to-date
lut = rep.LookupTable
colorArrayInfo = rep.GetArrayInformationForColorArray()
if not colorArrayInfo:
continue
if lut.VectorMode != 'Magnitude' or \
colorArrayInfo.GetNumberOfComponents() == 1:
datarange = colorArrayInfo.GetComponentRange(lut.VectorComponent)
else:
# -1 corresponds to the magnitude.
datarange = colorArrayInfo.GetComponentRange(-1)
import vtkParallelCorePython
import paraview.vtk as vtk
import paraview.servermanager
pm = paraview.servermanager.vtkProcessModule.GetProcessModule()
globalController = pm.GetGlobalController()
localarray = vtk.vtkDoubleArray()
localarray.SetNumberOfTuples(2)
localarray.SetValue(0, -datarange[0]) # negate so that MPI_MAX gets min instead of doing a MPI_MIN and MPI_MAX
localarray.SetValue(1, datarange[1])
globalarray = vtk.vtkDoubleArray()
globalarray.SetNumberOfTuples(2)
globalController.AllReduce(localarray, globalarray, 0)
globaldatarange = [-globalarray.GetValue(0), globalarray.GetValue(1)]
rgbpoints = lut.RGBPoints.GetData()
numpts = len(rgbpoints)/4
if globaldatarange[0] != rgbpoints[0] or globaldatarange[1] != rgbpoints[(numpts-1)*4]:
# rescale all of the points
oldrange = rgbpoints[(numpts-1)*4] - rgbpoints[0]
newrange = globaldatarange[1] - globaldatarange[0]
# only readjust if the new range isn't zero.
if newrange != 0:
newrgbpoints = list(rgbpoints)
# if the old range isn't 0 then we use that ranges distribution
if oldrange != 0:
for v in range(numpts-1):
newrgbpoints[v*4] = globaldatarange[0]+(rgbpoints[v*4] - rgbpoints[0])*newrange/oldrange
# avoid numerical round-off, at least with the last point
newrgbpoints[(numpts-1)*4] = globaldatarange[1]
else: # the old range is 0 so the best we can do is to space the new points evenly
for v in range(numpts):
newrgbpoints[v*4] = globaldatarange[0]+v*newrange/(numpts-1.0)
lut.RGBPoints.SetData(newrgbpoints)
def UpdateCinema(self, view, datadescription):
""" called from catalyst at each timestep to add to the cinema "SPEC A" database """
if not view.IsA("vtkSMRenderViewProxy") == True:
return
try:
import paraview.cinemaIO.cinema_store as CS
import paraview.cinemaIO.explorers as explorers
import paraview.cinemaIO.pv_explorers as pv_explorers
except ImportError as e:
paraview.print_error("Cannot import cinema")
paraview.print_error(e)
return
def get_nearest(eye, at, up, phis, thetas):
""" returns phi and theta settings that most closely match current view """
#todo: derive it instead of this brute force search
best_phi = None
best_theta = None
best_dist = None
best_up = None
dist1 = math.sqrt(sum(math.pow(eye[x]-at[x],2) for x in [0,1,2]))
for t,p in ((x,y) for x in thetas for y in phis):
theta_rad = (float(t)) / 180.0 * math.pi
phi_rad = float(p) / 180.0 * math.pi
pos = [
float(at[0]) - math.cos(phi_rad) * dist1 * math.cos(theta_rad),
float(at[1]) + math.sin(phi_rad) * dist1 * math.cos(theta_rad),
float(at[2]) + math.sin(theta_rad) * dist1
]
nup = [
+ math.cos(phi_rad) * math.sin(theta_rad),
- math.sin(phi_rad) * math.sin(theta_rad),
+ math.cos(theta_rad)
]
dist = math.sqrt(sum(math.pow(eye[x]-pos[x],2) for x in [0,1,2]))
updiff = math.sqrt(sum(math.pow(up[x]-nup[x],2) for x in [0,1,2]))
if best_dist == None or (dist<best_dist and updiff<1.0):
best_phi = p
best_theta = t
best_dist = dist
best_up = updiff
return best_phi, best_theta
pm = servermanager.vtkProcessModule.GetProcessModule()
pid = pm.GetPartitionId()
#load or create the cinema store for this view
import os.path
vfname = view.cpFileName
vfname = vfname[0:vfname.rfind("_")] #strip _num.ext
fname = os.path.join(os.path.dirname(vfname),
"cinema",
os.path.basename(vfname),
"info.json")
fs = CS.FileStore(fname)
try:
fs.load()
except IOError:
pass
fs.add_metadata({'type':'parametric-image-stack'})
def float_limiter(x):
#a shame, but needed to make sure python, javascript and (directory/file)name agree
if isinstance(x, (float)):
#return '%6f' % x #arbitrarily chose 6 decimal places
return '%.6e' % x #arbitrarily chose 6 significant digits
else:
return x
#add record of current time to the store
timestep = datadescription.GetTimeStep()
time = datadescription.GetTime()
view.ViewTime = time
formatted_time = float_limiter(time)
try:
tprop = fs.get_parameter('time')
tprop['values'].append(formatted_time)
except KeyError:
tprop = CS.make_parameter('time', [formatted_time])
fs.add_parameter('time', tprop)
parameters = []
tracks = []
#fixed track for time
fnpattern = "{time}/"
#make up track for each variable
vals = []
names = []
for track in self.__CinemaTracksList:
proxy = track['proxy']
#rep = servermanager.GetRepresentation(proxy, view)
#if not rep or rep.Visibility == 0:
# #skip track if not visible in this view
# continue
name = track['name']
#make unique
idx = 0
while name in names:
name = track['name'] + str(idx)
idx = idx+1
names.append(name)
fnpattern = fnpattern + "{"+name+"}/"
proxy = track['proxy']
smproperty = track['smproperty']
valrange = list(float_limiter(x for x in track['valrange']))
fs.add_parameter(name, CS.make_parameter(name, valrange))
parameters.append(name)
tracks.append(pv_explorers.Templated(name, proxy, smproperty))
#save off current value for later restoration
vals.append([proxy, smproperty, list(proxy.GetPropertyValue(smproperty))])
#make track for the camera rotation
cinemaOptions = view.cpCinemaOptions
if cinemaOptions and cinemaOptions.get('camera') == 'Spherical':
fnpattern = fnpattern + "{phi}/{theta}/"
if 'initial' in cinemaOptions:
eye = cinemaOptions['initial']['eye']
at = cinemaOptions['initial']['at']
up = cinemaOptions['initial']['up']
phis = list(float_limiter(x for x in cinemaOptions['phi']))
thetas = list(float_limiter(x for x in cinemaOptions['theta']))
best_phi, best_theta = get_nearest(eye, at, up, phis, thetas)
fs.add_parameter("phi", CS.make_parameter('phi', phis, default=best_phi))
fs.add_parameter("theta", CS.make_parameter('theta', thetas, default=best_theta))
else:
eye = view.CameraPosition
at = view.CameraFocalPoint
phis = list(float_limiter(x for x in cinemaOptions['phi']))
thetas = list(float_limiter(x for x in cinemaOptions['theta']))
fs.add_parameter("phi", CS.make_parameter('phi', phis))
fs.add_parameter("theta", CS.make_parameter('theta', thetas))
dist = math.sqrt(sum(math.pow(eye[x]-at[x],2) for x in [0,1,2]))
#rectify for cinema exporter
up = [math.fabs(x) for x in view.CameraViewUp]
uppest = 0;
if up[1]>up[uppest]: uppest = 1
if up[2]>up[uppest]: uppest = 2
cinup = [0,0,0]
cinup[uppest]=1
parameters.append("phi")
parameters.append("theta")
tracks.append(pv_explorers.Camera(at, cinup, dist, view))
#save off current value for later restoration
vals.append([view, 'CameraPosition', list(eye)])
vals.append([view, 'CameraFocalPoint', list(at)])
vals.append([view, 'CameraViewUp', list(up)])
fnpattern = fnpattern[:-1] #strip trailing /
imgext = view.cpFileName[view.cpFileName.rfind("."):]
fnpattern = fnpattern + imgext
fs.filename_pattern = fnpattern
#at current time, run through parameters and dump files
e = pv_explorers.ImageExplorer(fs, parameters, tracks, view=view, iSave=(pid==0))
e.explore({'time':formatted_time})
if pid == 0:
fs.save()
#restore values to what they were at beginning for next view
for proxy, property, value in vals:
proxy.SetPropertyWithName(property, value)
return os.path.basename(vfname)
def UpdateCinemaComposite(self, view, datadescription):
""" called from catalyst at each timestep to add to the cinema "SPEC B" database """
if not view.IsA("vtkSMRenderViewProxy") == True:
return
try:
import paraview.cinemaIO.cinema_store as CS
import paraview.cinemaIO.explorers as explorers
import paraview.cinemaIO.pv_explorers as pv_explorers
import paraview.cinemaIO.pv_introspect as pv_introspect
import paraview.simple as simple
except ImportError as e:
paraview.print_error("Cannot import cinema")
paraview.print_error(e)
return
#figure out where to put this store
import os.path
vfname = view.cpFileName
vfname = vfname[0:vfname.rfind("_")] #strip _num.ext
fname = os.path.join(os.path.dirname(vfname),
"cinema",
os.path.basename(vfname),
"info.json")
def float_limiter(x):
#a shame, but needed to make sure python, javascript and (directory/file)name agree
if isinstance(x, (float)):
return '%.6e' % x #arbitrarily chose 6 significant digits
else:
return x
#what time?
timestep = datadescription.GetTimeStep()
time = datadescription.GetTime()
view.ViewTime = time
formatted_time = float_limiter(time)
#pass down user provided parameters
co = view.cpCinemaOptions
if "phi" in co:
self.__UserDefinedValues["phi"] = co["phi"]
if "theta" in co:
self.__UserDefinedValues["theta"] = co["theta"]
simple.Render(view)
#figure out what we show now
pxystate= pv_introspect.record_visibility()
#make sure depth rasters are consistent
view.LockBounds = 1
p = pv_introspect.inspect()
fs = pv_introspect.make_cinema_store(p, fname,
forcetime=formatted_time,
_userDefinedValues = self.__UserDefinedValues)
#all nodes participate, but only root writes out the files
pm = servermanager.vtkProcessModule.GetProcessModule()
pid = pm.GetPartitionId()
pv_introspect.explore(fs, p, iSave=(pid==0), currentTime={'time':formatted_time})
if pid == 0:
fs.save()
view.LockBounds = 0
#restore what we showed
pv_introspect.restore_visibility(pxystate)

View File

@ -0,0 +1,130 @@
r"""This module is used to export complete CoProcessing Python scripts that
can be used in a vtkCPPythonScriptPipeline.
This module uses the paraview.cpstate module to dump the ParaView session state
as a Python class description that can then be used in the CoProcessor.
The exported script can be used in a vtkCPPythonScriptPipeline instance for
CoProcessing."""
# -----------------------------------------------------------------------------
# The __output_contents is the template script that accepts 3 arguments:
# 1) The CoProcessor class definition
# 2) The boolean to know if we want to enable live-visualization
# 3) The boolean to know if we need to rescale the data range
# -----------------------------------------------------------------------------
__output_contents = """
from paraview.simple import *
from paraview import coprocessing
#--------------------------------------------------------------
# Code generated from cpstate.py to create the CoProcessor.
# ParaView @PARAVIEW_VERSION_FULL@ @PARAVIEW_BUILD_ARCHITECTURE@ bits
%s
#--------------------------------------------------------------
# Global variables that will hold the pipeline for each timestep
# Creating the CoProcessor object doesn't actually create the ParaView pipeline.
# It will be automatically setup when coprocessor.UpdateProducers() is called the
# first time.
coprocessor = CreateCoProcessor()
#--------------------------------------------------------------
# Enable Live-Visualization with ParaView
coprocessor.EnableLiveVisualization(%s, %s)
# ---------------------- Data Selection method ----------------------
def RequestDataDescription(datadescription):
"Callback to populate the request for current timestep"
global coprocessor
if datadescription.GetForceOutput() == True:
# We are just going to request all fields and meshes from the simulation
# code/adaptor.
for i in range(datadescription.GetNumberOfInputDescriptions()):
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
# setup requests for all inputs based on the requirements of the
# pipeline.
coprocessor.LoadRequestedData(datadescription)
# ------------------------ Processing method ------------------------
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
global coprocessor
# Update the coprocessor by providing it the newly generated simulation data.
# If the pipeline hasn't been setup yet, this will setup the pipeline.
coprocessor.UpdateProducers(datadescription)
# Write output data, if appropriate.
coprocessor.WriteData(datadescription);
# Write image capture (Last arg: rescale lookup table), if appropriate.
coprocessor.WriteImages(datadescription, rescale_lookuptable=%s)
# Live Visualization, if enabled.
coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
"""
from paraview import cpstate
def DumpCoProcessingScript(export_rendering, simulation_input_map, screenshot_info,
rescale_data_range, enable_live_viz, live_viz_frequency,
cinema_tracks,
filename=None):
"""Returns a string with the generated CoProcessing script based on the
options specified.
The first three arguments are the same as those expected by the
cpstate.DumpPipeline() function.
rescale_data_range :- boolean set to true if the LUTs must be rescaled on
each timestep
enable_live_viz :- boolean set to true if the generated script should
handle live-visualization.
live_viz_frequency :- integer specifying how often the coprocessor
should send the live data
cinema_tracks :- cinema offline visualizer parameters
filename :- if specified, the script is written to the file.
"""
pipeline_script = cpstate.DumpPipeline(\
export_rendering, simulation_input_map, screenshot_info, cinema_tracks)
script = __output_contents % (pipeline_script,
enable_live_viz, live_viz_frequency,
rescale_data_range)
if filename:
outFile = open(filename, "w")
outFile.write(script)
outFile.close()
return script
def run(filename=None):
"""Create a dummy pipeline and save the coprocessing state in the filename
specified, if any, else dumps it out on stdout."""
from paraview import simple, servermanager
wavelet = simple.Wavelet(registrationName="Wavelet1")
contour = simple.Contour()
script = DumpCoProcessingScript(export_rendering=False,
simulation_input_map={"Wavelet1" : "input"},
screenshot_info={},
rescale_data_range=True,
enable_live_viz=True,
live_viz_frequency=1,
cinema_tracks={},
filename=filename)
if not filename:
print "# *** Generated Script Begin ***"
print script
print "# *** Generated Script End ***"
if __name__ == "__main__":
run()
# ---- end ----

View File

@ -0,0 +1,381 @@
r"""This module is used to by the CoProcessingScriptGenerator plugin to aid in
capturing ParaView state as CoProcessing python script.
This can capture the ParaView state in a Pipeline object that can then be used
in CoProcessing scripts. The entry point into this module is the function
DumpPipeline() which returns the Python trace script. Most of the other
functions can be considered internal.
Also refer to paraview.cpexport Module which is used to generate a complete
Python CoProcessing script that can be used with in a vtkCPPythonScriptPipeline.
"""
from paraview import smtrace, smstate, servermanager
class cpstate_globals: pass
def reset_cpstate_globals():
cpstate_globals.write_frequencies = {}
cpstate_globals.simulation_input_map = {}
cpstate_globals.view_proxies = []
cpstate_globals.screenshot_info = {}
cpstate_globals.export_rendering = False
cpstate_globals.cinema_tracks = {}
reset_cpstate_globals()
# -----------------------------------------------------------------------------
def locate_simulation_inputs(proxy):
"""Given any sink/filter proxy, returns a list of upstream proxies that have
been flagged as 'simulation input' in the state exporting wizard."""
if hasattr(proxy, "cpSimulationInput"):
return [ proxy.cpSimulationInput ]
input_proxies = []
for property in servermanager.PropertyIterator(proxy):
if property.IsA("vtkSMInputProperty"):
ip = servermanager.InputProperty(proxy, property)
input_proxies = input_proxies + ip[:]
simulation_inputs = []
for input in input_proxies:
cur_si = locate_simulation_inputs(input.SMProxy)
for cur in cur_si:
if not cur in simulation_inputs:
simulation_inputs.append(cur)
return simulation_inputs
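# Example (a sketch): for a pipeline input -> Slice -> writer, calling
# locate_simulation_inputs(writer_proxy) walks the input properties upstream
# and collects the simulation input names flagged on the producers,
# e.g. ['input'].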
# -----------------------------------------------------------------------------
def locate_simulation_inputs_for_view(view_proxy):
"""Given a view proxy, retruns a list of source proxies that have been
flagged as the 'simulation input' in the state exporting wizard."""
reprProp = servermanager.ProxyProperty(view_proxy, view_proxy.GetProperty("Representations"))
reprs = reprProp[:]
all_sim_inputs = []
for repr in reprs:
sim_inputs = locate_simulation_inputs(repr)
all_sim_inputs = all_sim_inputs + sim_inputs
return all_sim_inputs
# -----------------------------------------------------------------------------
class ProducerAccessor(smtrace.RealProxyAccessor):
"""This accessor is created instead of the standard one for proxies that
have been marked as simulation inputs. This accessor overrides the
trace_ctor() method to trace the constructor as the CreateProducer() call,
since the proxy is a dummy in this case.
"""
def __init__(self, varname, proxy, simname):
self.SimulationInputName = simname
smtrace.RealProxyAccessor.__init__(self, varname, proxy)
# this cpSimulationInput attribute is used to locate the proxy later on.
proxy.SMProxy.cpSimulationInput = simname
def trace_ctor(self, ctor, filter, ctor_args=None, skip_assignment=False):
trace = smtrace.TraceOutput()
trace.append("# create a producer from a simulation input")
trace.append("%s = coprocessor.CreateProducer(datadescription, '%s')" % \
(self, self.SimulationInputName))
return trace.raw_data()
# TODO: Make Slice, Contour & Clip Accessors to share an interface to reduce code duplication
# -----------------------------------------------------------------------------
class SliceAccessor(smtrace.RealProxyAccessor):
"""
augments traces of slice filters with information to explore the
parameter space for cinema playback (if enabled)
"""
def __init__(self, varname, proxy):
smtrace.RealProxyAccessor.__init__(self, varname, proxy)
self.varname = varname
def trace_ctor(self, ctor, filter, ctor_args=None, skip_assignment=False):
original_trace = smtrace.RealProxyAccessor.trace_ctor(\
self, ctor, filter, ctor_args, skip_assignment)
trace = smtrace.TraceOutput(original_trace)
if cpstate_globals.cinema_tracks and self.varname in cpstate_globals.cinema_tracks:
valrange = cpstate_globals.cinema_tracks[self.varname]
trace.append_separated(["# register the filter with the coprocessor's cinema generator"])
trace.append(["coprocessor.RegisterCinemaTrack('slice', %s, 'SliceOffsetValues', %s)" % (self, valrange)])
trace.append_separator()
return trace.raw_data()
# -----------------------------------------------------------------------------
class ContourAccessor(smtrace.RealProxyAccessor):
"""
augments traces of contour filters with information to explore the
parameter space for cinema playback (if enabled)
"""
def __init__(self, varname, proxy):
smtrace.RealProxyAccessor.__init__(self, varname, proxy)
self.varname = varname
def trace_ctor(self, ctor, filter, ctor_args=None, skip_assignment=False):
original_trace = smtrace.RealProxyAccessor.trace_ctor(\
self, ctor, filter, ctor_args, skip_assignment)
trace = smtrace.TraceOutput(original_trace)
if cpstate_globals.cinema_tracks and self.varname in cpstate_globals.cinema_tracks:
valrange = cpstate_globals.cinema_tracks[self.varname]
trace.append_separated(["# register the filter with the coprocessor's cinema generator"])
trace.append(["coprocessor.RegisterCinemaTrack('contour', %s, 'Isosurfaces', %s)" % (self, valrange)])
trace.append_separator()
return trace.raw_data()
# -----------------------------------------------------------------------------
class ClipAccessor(smtrace.RealProxyAccessor):
"""
augments traces of clip filters with information to explore the
parameter space for cinema playback (if enabled)
"""
def __init__(self, varname, proxy):
smtrace.RealProxyAccessor.__init__(self, varname, proxy)
self.varname = varname
def trace_ctor(self, ctor, filter, ctor_args = None, skip_assignment = False):
original_trace = smtrace.RealProxyAccessor.trace_ctor( \
self, ctor, filter, ctor_args, skip_assignment)
trace = smtrace.TraceOutput(original_trace)
if cpstate_globals.cinema_tracks and self.varname in cpstate_globals.cinema_tracks:
valrange = cpstate_globals.cinema_tracks[self.varname]
trace.append_separated(["# register the filter with the coprocessor's cinema generator"])
trace.append(["coprocessor.RegisterCinemaTrack('clip', %s, 'OffsetValues', %s)" % (self, valrange)])
trace.append_separator()
return trace.raw_data()
# -----------------------------------------------------------------------------
class ViewAccessor(smtrace.RealProxyAccessor):
"""Accessor for views. Overrides trace_ctor() to trace registering of the
view with the coprocessor. (I wonder if this registering should be moved to
the end of the state for better readability of the generated state files.
"""
def __init__(self, varname, proxy, proxyname):
smtrace.RealProxyAccessor.__init__(self, varname, proxy)
self.ProxyName = proxyname
def trace_ctor(self, ctor, filter, ctor_args=None, skip_assignment=False):
original_trace = smtrace.RealProxyAccessor.trace_ctor(\
self, ctor, filter, ctor_args, skip_assignment)
trace = smtrace.TraceOutput(original_trace)
if self.ProxyName in cpstate_globals.screenshot_info:
trace.append_separated(["# register the view with coprocessor",
"# and provide it with information such as the filename to use,",
"# how frequently to write the images, etc."])
params = cpstate_globals.screenshot_info[self.ProxyName]
assert len(params) == 7
trace.append([
"coprocessor.RegisterView(%s," % self,
" filename='%s', freq=%s, fittoscreen=%s, magnification=%s, width=%s, height=%s, cinema=%s)" %\
(params[0], params[1], params[2], params[3], params[4], params[5], params[6]),
"%s.ViewTime = datadescription.GetTime()" % self])
trace.append_separator()
return trace.raw_data()
# -----------------------------------------------------------------------------
class WriterFilter(smtrace.PipelineProxyFilter):
def should_never_trace(self, prop):
"""overridden to never trace 'WriteFrequency' and 'FileName' properties
on writers."""
if prop.get_property_name() in ["WriteFrequency", "FileName"]: return True
return super(WriterFilter, self).should_never_trace(prop)
# -----------------------------------------------------------------------------
class WriterAccessor(smtrace.RealProxyAccessor):
"""Accessor for writers. Overrides trace_ctor() to use the actual writer
proxy name instead of the dummy-writer proxy's name. Also updates the
write_frequencies maintained in cpstate_globals with the write frequencies
for the writer.
"""
def __init__(self, varname, proxy):
smtrace.RealProxyAccessor.__init__(self, varname, proxy)
write_frequency = proxy.GetProperty("WriteFrequency").GetElement(0)
# Locate which simulation input this writer is connected to, if any. If so,
# we update the write_frequencies datastructure accordingly.
sim_inputs = locate_simulation_inputs(proxy)
for sim_input_name in sim_inputs:
if not write_frequency in cpstate_globals.write_frequencies[sim_input_name]:
cpstate_globals.write_frequencies[sim_input_name].append(write_frequency)
cpstate_globals.write_frequencies[sim_input_name].sort()
def get_proxy_label(self, xmlgroup, xmlname):
pxm = servermanager.ProxyManager()
prototype = pxm.GetPrototypeProxy(xmlgroup, xmlname)
if not prototype:
# a bit of a hack but we assume that there's a stub of some
# writer that's not available in this build but is available
# with the build used by the simulation code (probably through a plugin)
# this stub must have the proper name in the coprocessing hints
print "WARNING: Could not find", xmlname, "writer in", xmlgroup, \
"XML group. This is not a problem as long as the writer is available with " \
"the ParaView build used by the simulation code."
ctor = servermanager._make_name_valid(xmlname)
else:
ctor = servermanager._make_name_valid(prototype.GetXMLLabel())
# TODO: use servermanager.ProxyManager().NewProxy() instead
# we create the writer proxy such that it is not registered with the
# ParaViewPipelineController, so its state is not sent to ParaView Live.
return "servermanager.%s.%s" % (xmlgroup, ctor)
def trace_ctor(self, ctor, filter, ctor_args=None, skip_assignment=False):
xmlElement = self.get_object().GetHints().FindNestedElementByName("WriterProxy")
xmlgroup = xmlElement.GetAttribute("group")
xmlname = xmlElement.GetAttribute("name")
write_frequency = self.get_object().GetProperty("WriteFrequency").GetElement(0)
filename = self.get_object().GetProperty("FileName").GetElement(0)
ctor = self.get_proxy_label(xmlgroup, xmlname)
original_trace = smtrace.RealProxyAccessor.trace_ctor(\
self, ctor, WriterFilter(), ctor_args, skip_assignment)
trace = smtrace.TraceOutput(original_trace)
trace.append_separated(["# register the writer with coprocessor",
"# and provide it with information such as the filename to use,",
"# how frequently to write the data, etc."])
trace.append("coprocessor.RegisterWriter(%s, filename='%s', freq=%s)" % \
(self, filename, write_frequency))
trace.append_separator()
return trace.raw_data()
def cp_hook(varname, proxy):
"""callback to create our special accessors instead of the standard ones."""
pname = smtrace.Trace.get_registered_name(proxy, "sources")
if pname:
if pname in cpstate_globals.simulation_input_map:
return ProducerAccessor(varname, proxy, cpstate_globals.simulation_input_map[pname])
if proxy.GetHints() and proxy.GetHints().FindNestedElementByName("WriterProxy"):
return WriterAccessor(varname, proxy)
if ("servermanager.Slice" in proxy.__class__().__str__() and
"Plane object" in proxy.__getattribute__("SliceType").__str__()):
return SliceAccessor(varname, proxy)
if ("servermanager.Clip" in proxy.__class__().__str__() and
"Plane object" in proxy.__getattribute__("ClipType").__str__()):
return ClipAccessor(varname, proxy)
if "servermanager.Contour" in proxy.__class__().__str__():
return ContourAccessor(varname, proxy)
pname = smtrace.Trace.get_registered_name(proxy, "views")
if pname:
cpstate_globals.view_proxies.append(proxy)
return ViewAccessor(varname, proxy, pname)
raise NotImplementedError
class cpstate_filter_proxies_to_serialize(object):
"""filter used to skip views and representations a when export_rendering is
disabled."""
def __call__(self, proxy):
if not smstate.visible_representations()(proxy): return False
if (not cpstate_globals.export_rendering) and \
(proxy.GetXMLGroup() in ["views", "representations"]): return False
return True
# -----------------------------------------------------------------------------
def DumpPipeline(export_rendering, simulation_input_map, screenshot_info, cinema_tracks):
"""
Method that will dump the current pipeline and return it as a string trace
- export_rendering : boolean telling if we want to export rendering
- simulation_input_map: string->string map with key being the proxyname
while value being the simulation input name.
- screenshot_info : map with information about screenshots
key -> view proxy name
value -> [filename, writefreq, fitToScreen,
magnification, width, height,
cinemacamera options]
- cinema_tracks : map with information about cinema tracks to record
key -> proxy name
value -> argument ranges
"""
# reset the global variables.
reset_cpstate_globals()
cpstate_globals.export_rendering = export_rendering
cpstate_globals.simulation_input_map = simulation_input_map
cpstate_globals.screenshot_info = screenshot_info
cpstate_globals.cinema_tracks = cinema_tracks
# Initialize the write frequency map
for sim_input_name in cpstate_globals.simulation_input_map.values():
cpstate_globals.write_frequencies[sim_input_name] = []
# Start trace
filter = cpstate_filter_proxies_to_serialize()
smtrace.RealProxyAccessor.register_create_callback(cp_hook)
state = smstate.get_state(filter=filter, raw=True)
smtrace.RealProxyAccessor.unregister_create_callback(cp_hook)
# iterate over all views that were saved in state and update write frequencies
if export_rendering:
pxm = servermanager.ProxyManager()
for key, vtuple in screenshot_info.iteritems():
view = pxm.GetProxy("views", key)
if not view: continue
image_write_frequency = int(vtuple[1])
# Locate which simulation input this view is connected to, if any. If so,
# we update the write_frequencies datastructure accordingly.
sim_inputs = locate_simulation_inputs_for_view(view)
for sim_input_name in sim_inputs:
if not image_write_frequency in cpstate_globals.write_frequencies[sim_input_name]:
cpstate_globals.write_frequencies[sim_input_name].append(image_write_frequency)
cpstate_globals.write_frequencies[sim_input_name].sort()
# Create global fields values
pipelineClassDef = "\n"
pipelineClassDef += "# ----------------------- CoProcessor definition -----------------------\n\n"
# Create the resulting string that will contains the pipeline definition
pipelineClassDef += "def CreateCoProcessor():\n"
pipelineClassDef += " def _CreatePipeline(coprocessor, datadescription):\n"
pipelineClassDef += " class Pipeline:\n";
# add the traced code.
for original_line in state:
for line in original_line.split("\n"):
if line.find("import *") != -1 or \
line.find("#### import the simple") != -1:
continue
if line:
pipelineClassDef += " " + line + "\n"
else:
pipelineClassDef += "\n"
pipelineClassDef += " return Pipeline()\n";
pipelineClassDef += "\n"
pipelineClassDef += " class CoProcessor(coprocessing.CoProcessor):\n"
pipelineClassDef += " def CreatePipeline(self, datadescription):\n"
pipelineClassDef += " self.Pipeline = _CreatePipeline(self, datadescription)\n"
pipelineClassDef += "\n"
pipelineClassDef += " coprocessor = CoProcessor()\n";
pipelineClassDef += " # these are the frequencies at which the coprocessor updates.\n"
pipelineClassDef += " freqs = " + str(cpstate_globals.write_frequencies) + "\n"
pipelineClassDef += " coprocessor.SetUpdateFrequencies(freqs)\n"
pipelineClassDef += " return coprocessor\n"
return pipelineClassDef
#------------------------------------------------------------------------------
def run(filename=None):
"""Create a dummy pipeline and save the coprocessing state in the filename
specified, if any, else dumps it out on stdout."""
from paraview import simple, servermanager
simple.LoadDistributedPlugin("CatalystScriptGeneratorPlugin")
wavelet = simple.Wavelet(registrationName="Wavelet1")
contour = simple.Contour()
display = simple.Show()
view = simple.Render()
# create a new 'Parallel PolyData Writer'
parallelPolyDataWriter0 = simple.ParallelPolyDataWriter()
viewname = servermanager.ProxyManager().GetProxyName("views", view.SMProxy)
script = DumpPipeline(export_rendering=True,
simulation_input_map={"Wavelet1" : "input"},
screenshot_info={viewname : [ 'image.png', '1', '1', '2', '400', '400', 'None']},
cinema_tracks={})
if filename:
f = open(filename, "w")
f.write(script)
f.close()
else:
print "# *** Generated Script Begin ***"
print script
print "# *** Generated Script End ***"
if __name__ == "__main__":
run()

File diff suppressed because it is too large

View File

@ -0,0 +1,5 @@
Sphere()
Shrink()
Show()
Render()

View File

@ -0,0 +1,11 @@
# Macro for BUG #11065. This makes it possible to show the grid for a dataset in
# the background.
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
spcth_0 = GetActiveSource()
ExtractSurface2 = ExtractSurface()
DataRepresentation5 = Show()
DataRepresentation5.Representation = 'Wireframe'
DataRepresentation5.BackfaceRepresentation = 'Cull Frontface'

View File

@ -0,0 +1,115 @@
r"""This module is used by vtkPythonExtractSelection to extract query-based
selections. It relies on the python-calculator (vtkPythonCalculator),
specifically, the Python code used by that class, to compute a mask array from
the query expression. Once the mask array is obtained, this filter will either
extract the selected ids, or mark those elements as requested.
"""
try:
import numpy as np
except ImportError:
raise RuntimeError, "'numpy' module is not found. numpy is needed for "\
"this functionality to work. Please install numpy and try again."
import re
import vtk
import vtk.numpy_interface.dataset_adapter as dsa
import vtk.numpy_interface.algorithms as algos
from paraview import calculator
def _create_id_array(dataobject, attributeType):
"""Returns a VTKArray or VTKCompositeDataArray for the ids"""
if not dataobject:
raise RuntimeError, "dataobject cannot be None"
if dataobject.IsA("vtkCompositeDataSet"):
ids = []
for ds in dataobject:
ids.append(_create_id_array(ds, attributeType))
return dsa.VTKCompositeDataArray(ids)
else:
return dsa.VTKArray(\
np.arange(dataobject.GetNumberOfElements(attributeType)))
def maskarray_is_valid(maskArray):
"""Validates that the maskArray is either a VTKArray or a
VTKCompositeDataArrays or a NoneArray other returns false."""
return maskArray is dsa.NoneArray or \
isinstance(maskArray, dsa.VTKArray) or \
isinstance(maskArray, dsa.VTKCompositeDataArray)
def execute(self):
inputDO = self.GetInputDataObject(0, 0)
inputSEL = self.GetInputDataObject(1, 0)
outputDO = self.GetOutputDataObject(0)
assert inputSEL.GetNumberOfNodes() >= 1
selectionNode = inputSEL.GetNode(0)
field_type = selectionNode.GetFieldType()
if field_type == selectionNode.CELL:
attributeType = vtk.vtkDataObject.CELL
elif field_type == selectionNode.POINT:
attributeType = vtk.vtkDataObject.POINT
elif field_type == selectionNode.ROW:
attributeType = vtk.vtkDataObject.ROW
else:
raise RuntimeError, "Unsupported field attributeType %r" % field_type
# evaluate expression on the inputDO.
# this is equivalent to executing the Python Calculator on the input dataset
# to produce a mask array.
inputs = []
inputs.append(dsa.WrapDataObject(inputDO))
query = selectionNode.GetQueryString()
# get a dictionary for arrays in the dataset attributes. We pass that
# as the variables in the eval namespace for calculator.compute().
elocals = calculator.get_arrays(inputs[0].GetAttributes(attributeType))
if not elocals.has_key("id") and re.search(r'\bid\b', query):
# add "id" array if the query string refers to id.
# This is a temporary fix. We should look into
# accelerating id-based selections in the future.
elocals["id"] = _create_id_array(inputs[0], attributeType)
try:
maskArray = calculator.compute(inputs, query, ns=elocals)
except:
from sys import stderr
print >> stderr, "Error: Failed to evaluate Expression '%s'. "\
"The following exception stack should provide additional developer "\
"specific information. This typically implies a malformed "\
"expression. Verify that the expression is valid.\n" % query
raise
if not maskarray_is_valid(maskArray):
raise RuntimeError,\
"Expression '%s' did not produce a valid mask array. The value "\
"produced is of the type '%s'. This typically implies a malformed "\
"expression. Verify that the expression is valid." % \
(query, type(maskArray))
# if inverse selection is requested, just logical_not the mask array.
if selectionNode.GetProperties().Has(selectionNode.INVERSE()) and \
selectionNode.GetProperties().Get(selectionNode.INVERSE()) == 1:
maskArray = algos.logical_not(maskArray)
output = dsa.WrapDataObject(outputDO)
if self.GetPreserveTopology():
# when preserving topology, just add the mask array as
# vtkSignedCharArray to the output. vtkPythonExtractSelection should
# have already ensured that the input is shallow copied over properly
# before this method gets called.
# note: since mask array is a bool-array, we multiply it by int8(1) to
# make it a type of array that can be represented as vtkSignedCharArray.
output.GetAttributes(attributeType).append(maskArray * np.int8(1), "vtkInsidedness")
else:
# handle extraction.
# flatnonzero() will give us an array of indices where the array is
# non-zero (or non-False in our case). We then pass that to
# vtkPythonExtractSelection to extract the selected ids.
nonzero_indices = algos.flatnonzero(maskArray)
output.FieldData.append(nonzero_indices, "vtkSelectedIds");
#print output.FieldData["vtkSelectedIds"]
self.ExtractElements(attributeType, inputDO, outputDO)
del nonzero_indices
del maskArray

View File

@ -0,0 +1,329 @@
r"""Utility module for easy manipultions of lookup tables.
This module is intended for use with by simple.py."""
#==============================================================================
#
# Program: ParaView
# Module: lookuptable.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
import paraview.simple
import servermanager
import os
from math import sqrt
# -----------------------------------------------------------------------------
class _vtkPVLUTData:
"""
Internal container for ParaView lookup table data.
Don't use this directly. Use vtkPVLUTReader.
"""
def __init__(self):
self.Name=""
self.Space=""
self.Values=[]
self.Coords=[]
def SetName(self,aName):
self.Name=aName
def GetName(self):
return self.Name
def SetColorSpace(self,aSpace):
self.Space=aSpace
def GetColorSpace(self):
return self.Space
def SetRGBValues(self,aValues):
self.Values=aValues
def GetRGBValues(self):
return self.Values
def SetMapCoordinates(self,aCoords):
self.Coords=aCoords
# normalize the coordinates
# in preparation to map onto
# an arbitrary scalar range
nCoords = len(self.Coords)
minCoord = min(self.Coords)
maxCoord = max(self.Coords)
deltaCoord = maxCoord - minCoord
if (minCoord>=maxCoord):
print 'ERROR: in coordinate values'
return
i=0
while i<nCoords:
self.Coords[i] -= minCoord
self.Coords[i] /= deltaCoord
i+=1
return
def GetMapCoordinates(self):
return self.Coords;
def PrintSelf(self):
print self.Name
print self.Space
print self.Values
print self.Coords
# -----------------------------------------------------------------------------
class vtkPVLUTReader:
"""
Reader and container for ParaView's XML based lookup tables.
Once lookup tables are loaded you access them by name. When
accessing you must provide the array instance, which you may
get from a pvpython 'Source' type object.
This reader makes use of ParaView's XML LUT file format with
one exception - the XML document must be root'ed by an element
named "ColorMaps". Within the "ColorMaps" element an arbitrary
number of ParaView's "ColorMap" elements define LUT entries.
Usage:
# at the top of your script
# create the reader and load LUT's
lr = lookuptable.vtkPVLUTReader()
lr.Read('/path/to/luts.xml')
lr.Print()
# after you have a pvpython source object, get
# one of it's arrays.
srcObj = GetActiveSource()
array = srcObj.PointData.GetArray('arrayName')
# create a LUT for the array.
lut = lr.GetLUT(array,'lutName')
# set the active array and assign the LUT
srcObjRep = Show(srcObj)
srcObjRep.ColorArrayName = 'arrayName'
srcObjRep.LookupTable = lut
# finally render to display the result
Render()
File Format:
<ColorMaps>
...
<ColorMap name="LUTName" space="Lab,RGB,HSV" indexedLookup="true,false">
<Point x="val" o="val" r="val" g="val" b="val"/>
...
<Point x="val" o="val" r="val" g="val" b="val"/>
<NaN r="val" g="val" b="val"/>
</ColorMap>
...
<ColorMaps>
...
</ColorMap>
...
</ColorMaps>
"""
def __init__(self,ns=None):
self.LUTS={}
self.DefaultLUT=None
self.Globals=ns
baseDir=os.path.dirname(paraview.simple.__file__)
defaultLUTFile=os.path.join(baseDir,'ColorMaps.xml')
if (os.path.exists(defaultLUTFile)):
self.Read(defaultLUTFile)
else:
print 'WARNING: default LUTs not found at %s'%(defaultLUTFile)
return
def Clear(self):
"""
Clear internal data structures.
"""
self.LUTS={}
self.DefaultLUT=None
return
def Read(self, aFileName):
"""
Read in the LUT's defined in the named file. Each
call to read extends the internal list of LUTs.
"""
parser=servermanager.vtkPVXMLParser()
parser.SetFileName(aFileName)
if (not parser.Parse()):
print 'ERROR: parsing lut file %s'%(aFileName)
return
root=parser.GetRootElement()
if root.GetName()!='ColorMaps':
print 'ERROR: parsing LUT file %s'%(aFileName)
print 'ERROR: root element must be <ColorMaps>'
return
nElems=root.GetNumberOfNestedElements()
i=0
nFound=0
while (i<nElems):
cmapElem=root.GetNestedElement(i)
if (cmapElem.GetName()=='ColorMap'):
nFound+=1
lut=_vtkPVLUTData()
lut.SetName(cmapElem.GetAttribute('name'))
lut.SetColorSpace(cmapElem.GetAttribute('space'))
coords=[]
values=[]
nRGB=cmapElem.GetNumberOfNestedElements()
j=0
while (j<nRGB):
rgbElem=cmapElem.GetNestedElement(j)
if (rgbElem.GetName()=='Point'):
coord=float(rgbElem.GetAttribute('x'))
coords.append(coord)
val=[float(rgbElem.GetAttribute('r')),
float(rgbElem.GetAttribute('g')),
float(rgbElem.GetAttribute('b'))]
values.append(val)
j=j+1
lut.SetMapCoordinates(coords)
lut.SetRGBValues(values)
#lut.PrintSelf()
self.LUTS[lut.GetName()]=lut
i=i+1
if nFound==0:
print 'ERROR: No ColorMaps were found in %s'%(aFileName)
else:
if self.DefaultLUT is None:
names=self.LUTS.keys()
if len(names)>0:
self.DefaultLUT=names[0]
return nFound
def GetLUT(self,aArray,aLutName,aRangeOveride=[]):
"""
Given an array and a lookup table name, assign the LUT
to the given array and return the LUT. If aRangeOveride
is specified then the LUT will be mapped through that
range rather than the array's actual range.
"""
try:
self.LUTS[aLutName]
except KeyError:
if self.DefaultLUT is not None:
print 'ERROR: No LUT named %s, using %s.'%(aLutName,self.DefaultLUT)
aLutName = self.DefaultLUT
else:
print 'ERROR: No LUT named %s and no default available.'%(aLutName)
return None
range = self.__GetRange(aArray,aRangeOveride)
return self.__GetLookupTableForArray(aArray,
RGBPoints=self.__MapRGB(aLutName,range),
ColorSpace=self.__GetColorSpace(aLutName),
VectorMode='Magnitude',
ScalarRangeInitialized=1.0)
def GetLUTNames(self):
"""
Return a list of the names of the currently available LUTs.
"""
return sorted(self.LUTS.iterkeys(),cmp=lambda x,y: cmp(x.lower(), y.lower()))
def Print(self):
"""
Print the list of available LUTs.
"""
names=""
i=0
for k in sorted(self.LUTS.iterkeys(),cmp=lambda x,y: cmp(x.lower(), y.lower())):
lut=self.LUTS[k]
names+=lut.GetName()
names+=", "
if ((i%6)==5):
names+="\n"
i+=1
print names
return
# end of public interface
def __GetColorSpace(self,aName):
"""
Return the color space from the lookup table object.
"""
return self.LUTS[aName].GetColorSpace()
def __GetRGB(self,aName):
"""
Return the rgb values for the named lut
"""
return self.LUTS[aName].GetRGBValues()
def __MapRGB(self,aName,aRange):
"""
Map the rgb values onto a scalar range
results are an array of [x r g b] values
"""
colors=self.LUTS[aName].GetRGBValues()
mapCoords=self.LUTS[aName].GetMapCoordinates()
nColors=len(colors)
coord0=float(aRange[0])
coordDelta=float(aRange[1])-float(aRange[0])
mappedColors=[]
i=0
while(i<nColors):
x=coord0+coordDelta*mapCoords[i]
val=[x]+colors[i]
mappedColors+=val
i=i+1
return mappedColors
def __GetRange(self,aArray,aRangeOveride):
"""
Get the range from an array proxy object or, if
an override is provided, use that.
"""
nComps = aArray.GetNumberOfComponents()
range = [0.0, 1.0]
if (len(aRangeOveride) == 0):
if (nComps == 1):
range = aArray.GetRange()
else:
# TODO - this could be larger than the range of the magnitude of aArray
rx = aArray.GetRange(0)
ry = aArray.GetRange(1)
rz = aArray.GetRange(2)
range = [0.0,
sqrt(rx[1]*rx[1]+ry[1]*ry[1]+rz[1]*rz[1])]
else:
range = aRangeOveride
return range
def __GetLookupTableForArray(self,aArray,**kwargs):
"""
Set the lookup table for the given array and assign
the named properties.
"""
proxyName='%d.%s.PVLookupTable'%(aArray.GetNumberOfComponents(),aArray.GetName())
lut = servermanager.ProxyManager().GetProxy('lookup_tables',proxyName)
if not lut:
lut = servermanager.rendering.PVLookupTable(ColorSpace="HSV",RGBPoints=[0,0,0,1, 1,1,0,0])
servermanager.Register(lut, registrationName=proxyName)
for arg in kwargs.keys():
if not hasattr(lut, arg):
raise AttributeError("LUT has no property %s"%(arg))
setattr(lut,arg,kwargs[arg])
return lut
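# A hedged, self-contained sketch (values made up) of what SetMapCoordinates
# and __MapRGB above do together: control points are normalized to [0,1],
# then stretched onto the target scalar range, giving the flat
# [x,r,g,b, x,r,g,b, ...] list handed to the PVLookupTable proxy.
#
#   coords = [0.0, 5.0, 10.0]                 # raw 'x' values from the XML
#   colors = [[0,0,1], [0,1,0], [1,0,0]]      # matching r,g,b triples
#   minC, maxC = min(coords), max(coords)
#   coords = [(c - minC)/(maxC - minC) for c in coords]   # -> [0.0, 0.5, 1.0]
#   lo, hi = 2.0, 4.0                         # an arbitrary scalar range
#   rgbPoints = []
#   for x, rgb in zip(coords, colors):
#       rgbPoints += [lo + (hi - lo)*x] + rgb
#   print rgbPoints   # [2.0, 0, 0, 1, 3.0, 0, 1, 0, 4.0, 1, 0, 0]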

View File

@ -0,0 +1,72 @@
#==============================================================================
#
# Program: ParaView
# Module: numeric.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
r"""
This module provides functions to convert VTK data arrays to NumPy arrays.
"""
__num_py_available__ = False
try:
import numpy
__num_py_available__ = True
except ImportError:
raise ImportError('NumPy module "numpy" is not accessible. Please make sure '
'that NumPy is installed correctly.')
# These types are returned by GetDataType to indicate data type.
VTK_VOID = 0
VTK_BIT = 1
VTK_CHAR = 2
VTK_UNSIGNED_CHAR = 3
VTK_SHORT = 4
VTK_UNSIGNED_SHORT = 5
VTK_INT = 6
VTK_UNSIGNED_INT = 7
VTK_LONG = 8
VTK_UNSIGNED_LONG = 9
VTK_FLOAT =10
VTK_DOUBLE =11
VTK_ID_TYPE =12
__typeDict = { VTK_CHAR:numpy.int8,
VTK_UNSIGNED_CHAR:numpy.uint8,
VTK_SHORT:numpy.int16,
VTK_UNSIGNED_SHORT:numpy.uint16,
VTK_INT:numpy.int32,
VTK_FLOAT:numpy.float32,
VTK_DOUBLE:numpy.float64 }
def fromvtkarray(vtkarray):
"""This function takes a vtkDataArray of any type and converts it to a
NumPy array of appropriate type and dimensions."""
global __typeDict
global __num_py_available__
if not __num_py_available__:
raise "NumPy module is not available."
#create a numpy array of the correct type.
vtktype = vtkarray.GetDataType()
if not __typeDict.has_key(vtktype):
raise "Cannot convert data arrays of the type %s" \
% vtkarray.GetDataTypeAsString()
# size = num_comps * num_tuples
# imArray = numpy.empty((size,), type)
# vtkarray.ExportToVoidPointer(imArray)
type = __typeDict[vtktype]
pyarray = numpy.frombuffer(vtkarray, dtype=type)
# re-shape the array to current number of rows and columns.
num_tuples = vtkarray.GetNumberOfTuples()
num_comps = vtkarray.GetNumberOfComponents()
pyarray = numpy.reshape(pyarray, (num_tuples, num_comps))
return pyarray
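# A hedged usage sketch for fromvtkarray(); the array contents are made up.
# Inside a ParaView Python session this module and the vtk package are
# importable as paraview.numeric and paraview.vtk respectively.
#
#   from paraview import vtk, numeric
#   arr = vtk.vtkDoubleArray()
#   arr.SetNumberOfComponents(2)
#   for x, y in [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]:
#       arr.InsertNextTuple2(x, y)
#   npArr = numeric.fromvtkarray(arr)
#   print npArr.shape   # (3, 2)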

View File

@ -0,0 +1,2 @@
# deprecated, use import pvvtkextensions instead.
from pvvtkextensions import *

View File

@ -0,0 +1,4 @@
from paraview import vtk
from vtkPVVTKExtensionsCorePython import *
from vtkPVVTKExtensionsDefaultPython import *
from vtkPVVTKExtensionsRenderingPython import *

View File

@ -0,0 +1,187 @@
r"""python_view is a module providing access to a PythonView. It is
possible to use the PythonView API directly, but this module provides
convenience classes in Python.
"""
#==============================================================================
#
# Program: ParaView
# Module: python_view.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
import paraview
import vtk
from vtkPVServerImplementationCorePython import *
from vtkPVClientServerCoreCorePython import *
from vtkPVServerManagerCorePython import *
try:
from vtkPVServerManagerDefaultPython import *
except:
paraview.print_error("Error: Cannot import vtkPVServerManagerDefaultPython")
try:
from vtkPVServerManagerRenderingPython import *
except:
paraview.print_error("Error: Cannot import vtkPVServerManagerRenderingPython")
try:
from vtkPVServerManagerApplicationPython import *
except:
paraview.print_error("Error: Cannot import vtkPVServerManagerApplicationPython")
from vtkPVCommonPython import *
def numpy_to_image(numpy_array):
"""
@brief Convert a numpy 2D or 3D array to a vtkImageData object
@param numpy_array 2D or 3D numpy array containing image data
@return vtkImageData with the numpy_array content
"""
try:
import numpy
except:
paraview.print_error("Error: Cannot import numpy")
shape = numpy_array.shape
if len(shape) < 2:
raise Exception('numpy array must have dimensionality of at least 2')
h, w = shape[0], shape[1]
c = 1
if len(shape) == 3:
c = shape[2]
# Reshape 2D image to 1D array suitable for conversion to a
# vtkArray with numpy_support.numpy_to_vtk()
linear_array = numpy.reshape(numpy_array, (w*h, c))
try:
from vtk.util import numpy_support
except:
paraview.print_error("Error: Cannot import vtk.util.numpy_support")
vtk_array = numpy_support.numpy_to_vtk(linear_array)
image = vtk.vtkImageData()
image.SetDimensions(w, h, 1)
image.AllocateScalars(vtk_array.GetDataType(), 4)
image.GetPointData().GetScalars().DeepCopy(vtk_array)
return image
def figure_to_data(figure):
"""
@brief Convert a Matplotlib figure to a numpy 2D array with RGBA uint8 channels and return it.
@param figure A matplotlib figure.
@return A numpy 2D array of RGBA values.
"""
# Draw the renderer
try:
import matplotlib
except:
paraview.print_error("Error: Cannot import matplotlib")
figure.canvas.draw()
# Get the RGBA buffer from the figure
w, h = figure.canvas.get_width_height()
try:
import numpy
except:
paraview.print_error("Error: Cannot import numpy")
buf = numpy.fromstring(figure.canvas.tostring_argb(), dtype=numpy.uint8)
buf.shape = (h, w, 4)
# canvas.tostring_argb gives pixmap in ARGB mode. Roll the alpha channel to have it in RGBA mode
buf = numpy.roll(buf, 3, axis=2)
return buf
def figure_to_image(figure):
"""
@brief Convert a Matplotlib figure to a vtkImageData with RGBA unsigned char channels
@param figure A matplotlib figure.
@return a vtkImageData with the Matplotlib figure content
"""
buf = figure_to_data(figure)
# Flip rows to be suitable for vtkImageData.
buf = buf[::-1,:,:].copy()
return numpy_to_image(buf)
def matplotlib_figure(width, height):
"""
@brief Create a Matplotlib figure with specified width and height for rendering
@param width Width of desired plot
@param height Height of desired plot
@return A Matplotlib figure
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg
except:
paraview.print_error("Error: Cannot import matplotlib.backends.backend_agg.FigureCanvasAgg")
try:
from matplotlib.figure import Figure
except:
paraview.print_error("Error: Cannot import matplotlib.figure.Figure")
figure = Figure()
figureCanvas = FigureCanvasAgg(figure)
figure.set_dpi(72)
figure.set_size_inches(float(width)/72.0, float(height)/72.0)
return figure
def call_setup_data(setup_data_function, view):
"""
@brief Utility function to call the user-defined setup_data function. This is
meant to be called by the C++ side of the vtkPythonView class.
@param view vtkPythonView object
"""
if setup_data_function is None:
return
setup_data_function(view)
def call_render(render_function, view, width, height):
"""
@brief Utility function to call the user-defined render function. This is
called by the C++ side of the vtkPythonView class.
@param view vtkPythonView object
@param width Width of view
@param height Height of view
"""
if render_function is None:
return
# Check how many parameters it takes.
num_args = render_function.__code__.co_argcount
image = None
if (num_args == 3):
# Current-style render() function
image = render_function(view, width, height)
elif (num_args == 2):
# Old-style render() function introduced in ParaView 4.1
figure = matplotlib_figure(width, height)
render_function(view, figure)
image = figure_to_image(figure)
view.SetImageData(image)
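# A hedged sketch of a current-style (3-argument) render callback as
# dispatched by call_render() above; the plotted data are placeholders.
#
#   def render(view, width, height):
#       figure = matplotlib_figure(width, height)
#       ax = figure.add_subplot(1, 1, 1)
#       ax.plot([0, 1, 2, 3], [0, 1, 4, 9], 'k-')
#       ax.set_title('demo')
#       return figure_to_image(figure)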

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,257 @@
r"""Module for generating a Python state for ParaView.
This module uses paraview.smtrace to generate a trace for a selected set of
proxies by mimicking the creation of various pipeline components in sequence.
Typical usage of this module is as follows::
from paraview import smstate
state = smstate.get_state()
print state
Note, this cannot be called when Python tracing is active.
"""
from paraview import servermanager as sm
from paraview import smtrace
from paraview import simple
class supported_proxies(object):
"""filter object used to hide proxies that are currently not supported by
the state saving mechanism or those that are generally skipped in state e.g.
animation proxies and time keeper."""
def __call__(self, proxy):
return proxy and \
not proxy.GetXMLGroup() == "animation" and \
not proxy.GetXMLName() == "TimeKeeper"
class visible_representations(object):
"""filter object to skip hidden representations from being saved in state file"""
def __call__(self, proxy):
if not supported_proxies()(proxy): return False
try:
return proxy.Visibility
except AttributeError:
pass
return True
def __toposort(input_set):
"""implementation of Tarjan topological sort to sort proxies using consumer
dependencies as graph edges."""
result = []
marked_set = set()
while marked_set != input_set:
unmarked_node = (input_set - marked_set).pop()
__toposort_visit(result, unmarked_node, input_set, marked_set)
result.reverse()
return result
def __toposort_visit(result, proxy, input_set, marked_set, t_marked_set=None):
if t_marked_set is None:
temporarily_marked_set = set()
else:
temporarily_marked_set = t_marked_set
if proxy in temporarily_marked_set:
raise RuntimeError, "Cycle detected in pipeline! %r" % proxy
if not proxy in marked_set:
temporarily_marked_set.add(proxy)
consumers = set()
get_consumers(proxy, lambda x: x in input_set, consumer_set=consumers, recursive=False)
for x in consumers:
__toposort_visit(result, x, input_set, marked_set, temporarily_marked_set)
marked_set.add(proxy)
temporarily_marked_set.discard(proxy)
result.append(proxy)
def get_consumers(proxy, filter, consumer_set, recursive=True):
"""Returns the consumers for a proxy iteratively. If filter is non-None,
filter is used to cull consumers."""
for i in xrange(proxy.GetNumberOfConsumers()):
consumer = proxy.GetConsumerProxy(i)
consumer = consumer.GetTrueParentProxy() if consumer else None
consumer = sm._getPyProxy(consumer)
if not consumer or consumer.IsPrototype() or consumer in consumer_set:
continue
if filter(consumer):
consumer_set.add(consumer)
if recursive: get_consumers(consumer, filter, consumer_set)
def get_producers(proxy, filter, producer_set):
"""Returns the producers for a proxy iteratively. If filter is non-None,
filter is used to cull producers."""
for i in xrange(proxy.GetNumberOfProducers()):
producer = proxy.GetProducerProxy(i)
producer = producer.GetTrueParentProxy() if producer else None
producer = sm._getPyProxy(producer)
if not producer or producer.IsPrototype() or producer in producer_set:
continue
if filter(producer):
producer_set.add(producer)
get_producers(producer, filter, producer_set)
# FIXME: LookupTable is missed :/, darn subproxies!
try:
if proxy.LookupTable and filter(proxy.LookupTable):
producer_set.add(proxy.LookupTable)
get_producers(proxy.LookupTable, filter, producer_set)
except AttributeError: pass
try:
if proxy.ScalarOpacityFunction and filter(proxy.ScalarOpacityFunction):
producer_set.add(proxy.ScalarOpacityFunction)
get_producers(proxy.ScalarOpacityFunction, filter, producer_set)
except AttributeError: pass
def get_state(propertiesToTraceOnCreate=1, # sm.vtkSMTrace.RECORD_MODIFIED_PROPERTIES,
skipHiddenRepresentations=True, source_set=[], filter=None, raw=False):
"""Returns the state string"""
if sm.vtkSMTrace.GetActiveTracer():
raise RuntimeError, "Cannot generate Python state when tracing is active."
if filter is None:
filter = visible_representations() if skipHiddenRepresentations else supported_proxies()
# build a set of proxies of interest
if source_set:
start_set = source_set
else:
# if nothing is specified, we save all views and sources.
start_set = simple.GetSources().values() + simple.GetViews()
start_set = [x for x in start_set if filter(x)]
# now, locate dependencies for the start_set, pruning irrelevant branches
consumers = set(start_set)
for proxy in start_set:
get_consumers(proxy, filter, consumers)
producers = set()
for proxy in consumers:
get_producers(proxy, filter, producers)
# proxies_of_interest is set of all proxies that we should trace.
proxies_of_interest = producers.union(consumers)
#print "proxies_of_interest", proxies_of_interest
trace_config = smtrace.start_trace()
# this ensures that lookup tables/scalar bars etc. are fully traced.
trace_config.SetFullyTraceSupplementalProxies(True)
trace = smtrace.TraceOutput()
trace.append("# state file generated using %s" % simple.GetParaViewSourceVersion())
#--------------------------------------------------------------------------
# First, we trace the views and layouts, if any.
# TODO: add support for layouts.
views = [x for x in proxies_of_interest if smtrace.Trace.get_registered_name(x, "views")]
if views:
# sort views by their names, so the state has some structure to it.
views = sorted(views, cmp=lambda x,y:\
cmp(smtrace.Trace.get_registered_name(x, "views"),
smtrace.Trace.get_registered_name(y, "views")))
trace.append_separated([\
"# ----------------------------------------------------------------",
"# setup views used in the visualization",
"# ----------------------------------------------------------------"])
for view in views:
# FIXME: save view camera positions and size.
traceitem = smtrace.RegisterViewProxy(view)
traceitem.finalize()
del traceitem
trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True))
#--------------------------------------------------------------------------
# Next, trace data processing pipelines.
sorted_proxies_of_interest = __toposort(proxies_of_interest)
sorted_sources = [x for x in sorted_proxies_of_interest \
if smtrace.Trace.get_registered_name(x, "sources")]
if sorted_sources:
trace.append_separated([\
"# ----------------------------------------------------------------",
"# setup the data processing pipelines",
"# ----------------------------------------------------------------"])
for source in sorted_sources:
traceitem = smtrace.RegisterPipelineProxy(source)
traceitem.finalize()
del traceitem
trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True))
#--------------------------------------------------------------------------
# Now, trace the transfer functions (color maps and opacity maps) used.
ctfs = set([x for x in proxies_of_interest \
if smtrace.Trace.get_registered_name(x, "lookup_tables")])
if ctfs:
trace.append_separated([\
"# ----------------------------------------------------------------",
"# setup color maps and opacity mapes used in the visualization",
"# note: the Get..() functions create a new object, if needed",
"# ----------------------------------------------------------------"])
for ctf in ctfs:
smtrace.Trace.get_accessor(ctf)
if ctf.ScalarOpacityFunction in proxies_of_interest:
smtrace.Trace.get_accessor(ctf.ScalarOpacityFunction)
trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True))
#--------------------------------------------------------------------------
# Can't decide if the representations should be saved with the pipeline
# objects or afterwards; opting for afterwards for now since the topological
# sort doesn't guarantee that the representations will follow their sources
# anyway.
sorted_representations = [x for x in sorted_proxies_of_interest \
if smtrace.Trace.get_registered_name(x, "representations")]
scalarbar_representations = [x for x in sorted_proxies_of_interest\
if smtrace.Trace.get_registered_name(x, "scalar_bars")]
# print "sorted_representations", sorted_representations
# print "scalarbar_representations", scalarbar_representations
if sorted_representations or scalarbar_representations:
for view in views:
view_representations = [x for x in view.Representations if x in sorted_representations]
view_scalarbars = [x for x in view.Representations if x in scalarbar_representations]
if view_representations or view_scalarbars:
trace.append_separated([\
"# ----------------------------------------------------------------",
"# setup the visualization in view '%s'" % smtrace.Trace.get_accessor(view),
"# ----------------------------------------------------------------"])
for rep in view_representations:
try:
producer = rep.Input
port = rep.Input.Port
traceitem = smtrace.Show(producer, port, view, rep,
comment="show data from %s" % smtrace.Trace.get_accessor(producer))
traceitem.finalize()
del traceitem
trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True))
if rep.IsScalarBarVisible(view):
# FIXME: this will save this multiple times, right now,
# if two representations use the same LUT.
trace.append_separated([\
"# show color legend",
"%s.SetScalarBarVisibility(%s, True)" % (\
smtrace.Trace.get_accessor(rep),
smtrace.Trace.get_accessor(view))])
except AttributeError: pass
# save the scalar bar properties themselves.
if view_scalarbars:
trace.append_separated("# setup the color legend parameters for each legend in this view")
for rep in view_scalarbars:
smtrace.Trace.get_accessor(rep)
trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True))
del trace_config
smtrace.stop_trace()
#print trace
return str(trace) if not raw else trace.raw_data()
if __name__ == "__main__":
print "Running test"
simple.Mandelbrot()
simple.Show()
simple.Hide()
simple.Shrink().ShrinkFactor = 0.4
simple.UpdatePipeline()
simple.Clip().ClipType.Normal[1] = 1
rep = simple.Show()
view = simple.Render()
view.ViewSize=[500, 500]
rep.SetScalarBarVisibility(view, True)
simple.Render()
# rep.SetScalarBarVisibility(view, False)
print "===================================================================="
print get_state()
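# A hedged usage sketch complementing the test above: capture the current
# pipeline as a Python state string and save it (the output path is made up).
#
#   from paraview import smstate
#   state = smstate.get_state(skipHiddenRepresentations=True)
#   f = open('/tmp/mystate.py', 'w')
#   f.write(state)
#   f.close()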

View File

@ -0,0 +1,157 @@
# This is a module for Server Manager testing using Python.
# This provides several utility functions useful for testing
import os
import re
import sys
import exceptions
from vtkPVServerManagerDefaultPython import *
# we get different behavior based on how we import servermanager
# so we want to import servermanager the same way in this module
# as we do in any module that is importing this
SMModuleName = 'paraview.servermanager'
if sys.modules.has_key('paraview.simple'):
SMModuleName = 'paraview.simple'
sm = __import__(SMModuleName)
servermanager = sm.servermanager
class TestError(exceptions.Exception):
pass
__ProcessedCommandLineArguments__ = False
DataDir = ""
TempDir = ""
BaselineImage = ""
Threshold = 10.0
SMStatesDir = ""
StateXMLFileName = ""
UseSavedStateForRegressionTests = False
def Error(message):
print "ERROR: %s" % message
return False
def ProcessCommandLineArguments():
"""Processes the command line areguments."""
global DataDir
global TempDir
global BaselineImage
global Threshold
global StateXMLFileName
global UseSavedStateForRegressionTests
global SMStatesDir
global __ProcessedCommandLineArguments__
if __ProcessedCommandLineArguments__:
return
__ProcessedCommandLineArguments__ = True
length = len(sys.argv)
index = 1
while index < length:
key = sys.argv[index-1]
value = sys.argv[index]
index += 2
if key == "-D":
DataDir = value
elif key == "-V":
BaselineImage = value
elif key == "-T":
TempDir = value
elif key == "-S":
SMStatesDir = value
elif key == "--threshold":
Threshold = float(value)
elif key == "--state":
StateXMLFileName = value
elif key == "--use_saved_state":
UseSavedStateForRegressionTests = True
index -= 1
else:
index -=1
return
def LoadServerManagerState(filename):
"""This function loads the servermanager state xml/pvsm.
Returns the status of the load."""
global DataDir
ProcessCommandLineArguments()
parser = servermanager.vtkPVXMLParser()
try:
fp = open(filename, "r")
data = fp.read()
fp.close()
except:
return Error("Failed to open state file %s" % filename)
regExp = re.compile("\${DataDir}")
data = regExp.sub(DataDir, data)
if not parser.Parse(data):
return Error("Failed to parse")
loader = servermanager.vtkSMStateLoader()
loader.SetSession(servermanager.ActiveConnection.Session)
root = parser.GetRootElement()
if loader.LoadState(root):
pxm = servermanager.vtkSMProxyManager.GetProxyManager().GetActiveSessionProxyManager()
pxm.UpdateRegisteredProxiesInOrder(0)
pxm.UpdateRegisteredProxies(0)
return True
return Error("Failed to load state file %s" % filename)
def DoRegressionTesting(rmProxy=None):
"""Perform regression testing."""
global TempDir
global BaselineImage
global Threshold
ProcessCommandLineArguments()
testing = vtkSMTesting()
testing.AddArgument("-T")
testing.AddArgument(TempDir)
testing.AddArgument("-V")
testing.AddArgument(BaselineImage)
if not rmProxy:
rmProxy = servermanager.GetRenderView()
if rmProxy:
rmProxy = rmProxy.SMProxy
if not rmProxy:
raise "Failed to locate view to perform regression testing."
#pyProxy(rmProxy).SetRenderWindowSize(300, 300);
#rmProxy.GetProperty("RenderWindowSize").SetElement(0, 300)
#rmProxy.GetProperty("RenderWindowSize").SetElement(1, 300)
#rmProxy.UpdateVTKObjects()
rmProxy.StillRender()
testing.SetRenderViewProxy(rmProxy)
if testing.RegressionTest(Threshold) == 1:
return True
return Error("Regression Test Failed!")
if __name__ == "__main__":
# This script loads the state, saves out a temp state and loads the saved state.
# This saved state is used for testing -- this will ensure load/save SM state
# is working fine.
servermanager.Connect()
ProcessCommandLineArguments()
ret = 1
if StateXMLFileName:
if LoadServerManagerState(StateXMLFileName):
pxm = servermanager.vtkSMProxyManager.GetProxyManager().GetActiveSessionProxyManager()
if UseSavedStateForRegressionTests:
saved_state = os.path.join(TempDir, "temp.pvsm")
pxm.SaveState(saved_state)
pxm.UnRegisterProxies()
LoadServerManagerState(saved_state)
try:
os.remove(saved_state)
except:
pass
if DoRegressionTesting():
ret = 0
else:
Error("No ServerManager state file specified")
if ret:
# This leads to vtkDebugLeaks reporting leaks, hence we do this
# only when the tests failed.
sys.exit(ret)
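# A hedged sketch of a typical test script built on this module; the Sphere
# pipeline is a placeholder. The -T/-V/--threshold arguments are picked up
# from the command line by ProcessCommandLineArguments().
#
#   from paraview import simple, smtesting
#   smtesting.ProcessCommandLineArguments()
#   simple.Sphere()
#   simple.Show()
#   simple.Render()
#   if not smtesting.DoRegressionTesting():
#       raise smtesting.TestError('regression test failed')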

File diff suppressed because it is too large

View File

@ -0,0 +1,116 @@
import sys
import os
import paraview
import paraview.simple as pvsimple
paraview.servermanager.misc.GlobalMapperProperties.GlobalImmediateModeRendering = 1
# trying to import the library where I can specify the global and subcontrollers
import vtkParallelCorePython
paraview.options.batch = True # this may not be necessary
paraview.simple._DisableFirstRenderCameraReset()
def CreateTimeCompartments(globalController, timeCompartmentSize):
if globalController.GetNumberOfProcesses() == 1:
print 'single process'
return
elif globalController.GetNumberOfProcesses() % timeCompartmentSize != 0:
print 'number of processes must be an integer multiple of time compartment size'
return
elif timeCompartmentSize == globalController.GetNumberOfProcesses():
return globalController
gid = globalController.GetLocalProcessId()
timeCompartmentGroupId = int(gid / timeCompartmentSize)
newController = globalController.PartitionController(timeCompartmentGroupId, gid % timeCompartmentSize)
# must unregister if the reference count is greater than 1
if newController.GetReferenceCount() > 1:
newController.UnRegister(None)
#print gid, ' of global comm is ', newController.GetLocalProcessId()
globalController.SetGlobalController(newController)
return newController
def CheckReader(reader):
if hasattr(reader, "FileName") == False:
print "ERROR: Don't know how to set file name for ", reader.SMProxy.GetXMLName()
sys.exit(-1)
if hasattr(reader, "TimestepValues") == False:
print "ERROR: ", reader.SMProxy.GetXMLName(), " doesn't have time information"
sys.exit(-1)
def CreateControllers(timeCompartmentSize):
pm = paraview.servermanager.vtkProcessModule.GetProcessModule()
globalController = pm.GetGlobalController()
if timeCompartmentSize > globalController.GetNumberOfProcesses():
timeCompartmentSize = globalController.GetNumberOfProcesses()
temporalController = CreateTimeCompartments(globalController, timeCompartmentSize)
return globalController, temporalController, timeCompartmentSize
def WriteImages(currentTimeStep, currentTime, views):
for view in views:
filename = view.tpFileName.replace("%t", str(currentTimeStep))
view.ViewTime = currentTime
pvsimple.WriteImage(filename, view, Magnification=view.tpMagnification)
def WriteFiles(currentTimeStep, currentTime, writers):
for writer in writers:
originalfilename = writer.FileName
fname = originalfilename.replace("%t", str(currentTimeStep))
writer.FileName = fname
writer.UpdatePipeline(currentTime)
writer.FileName = originalfilename
def IterateOverTimeSteps(globalController, timeCompartmentSize, timeSteps, writers, views):
numProcs = globalController.GetNumberOfProcesses()
numTimeCompartments = numProcs/timeCompartmentSize
tpp = len(timeSteps)/numTimeCompartments
remainder = len(timeSteps)%numTimeCompartments
timeCompartmentIndex = int(globalController.GetLocalProcessId()/timeCompartmentSize)
myStartTimeStep = tpp*timeCompartmentIndex
myEndTimeStep = myStartTimeStep+tpp
if timeCompartmentIndex < remainder:
myStartTimeStep = myStartTimeStep+timeCompartmentIndex
myEndTimeStep = myStartTimeStep+tpp+1
else:
myStartTimeStep = myStartTimeStep+remainder
myEndTimeStep = myStartTimeStep+tpp
for currentTimeStep in range(myStartTimeStep,myEndTimeStep):
#print globalController.GetLocalProcessId(), " is working on ", currentTimeStep
WriteImages(currentTimeStep, timeSteps[currentTimeStep], views)
WriteFiles(currentTimeStep, timeSteps[currentTimeStep], writers)
def CreateReader(ctor, fileInfo, **kwargs):
"Creates a reader, checks if it can be used, and sets the filenames"
import glob
files = glob.glob(fileInfo)
files.sort() # assume there is a logical ordering of the filenames that corresponds to time ordering
reader = ctor(FileName=files)
CheckReader(reader)
if kwargs:
pvsimple.SetProperties(reader, **kwargs)
return reader
def CreateWriter(ctor, filename, tp_writers):
writer = ctor()
return RegisterWriter(writer, filename, tp_writers)
def RegisterWriter(writer, filename, tp_writers):
writer.FileName = filename
tp_writers.append(writer)
return writer
def CreateView(proxy_ctor, filename, magnification, width, height, tp_views):
view = proxy_ctor()
return RegisterView(view, filename, magnification, width, height, tp_views)
def RegisterView(view, filename, magnification, width, height, tp_views):
view.add_attribute("tpFileName", filename)
view.add_attribute("tpMagnification", magnification)
tp_views.append(view)
view.ViewSize = [width, height]
return view
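# A hedged driver sketch for the helpers above; the reader type, file glob,
# compartment size and view geometry are all assumptions.
#
#   tp_views = []
#   tp_writers = []
#   globalController, temporalController, tcSize = CreateControllers(4)
#   reader = CreateReader(pvsimple.XMLUnstructuredGridReader,
#                         '/path/to/data_*.vtu')
#   view = CreateView(pvsimple.CreateRenderView, 'image_%t.png',
#                     1, 600, 400, tp_views)
#   pvsimple.Show(reader, view)
#   IterateOverTimeSteps(globalController, tcSize, reader.TimestepValues,
#                        tp_writers, tp_views)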

View File

@ -0,0 +1,27 @@
from paraview import vtk
from vtkPVVTKExtensionsDefaultPython import *
def SetOutputWholeExtent(algorithm, extent):
"""
Convenience method to help set the WHOLE_EXTENT() in RequestInformation.
Commonly used by programmable filters. The arguments are the algorithm
and a tuple/list with 6 elements (xmin, xmax, ymin, ymax, zmin, zmax).
Example use:
import paraview.util
# The output will be of dimensions 10, 1, 1
paraview.util.SetOutputWholeExtent(algorithm, (0, 9, 0, 0, 0, 0))
"""
if len(extent) != 6:
raise "Expected a sequence of length 6"
algorithm.GetExecutive().GetOutputInformation(0).Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), extent[0], extent[1], extent[2],extent[3], extent[4], extent[5])
def IntegrateCell(dataset, cellId):
"""
This function uses vtkCellIntegrator's Integrate method, which calculates
the length/area/volume of a 1D/2D/3D cell. The calculation is exact for
lines, polylines, triangles, triangle strips, pixels, voxels, convex
polygons, quads and tetrahedra. All other 3D cells are triangulated
during volume calculation. In such cases, the result may not be exact.
"""
return vtkCellIntegrator.Integrate(dataset, cellId)
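# A hedged usage sketch for IntegrateCell(): sum the length/area/volume over
# all cells of a dataset ('dataset' is assumed to be a vtkDataSet instance).
#
#   import paraview.util
#   total = 0.0
#   for i in xrange(dataset.GetNumberOfCells()):
#       total += paraview.util.IntegrateCell(dataset, i)
#   print total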

View File

@ -0,0 +1,210 @@
"""
Utility functions to mimic the template support functions for vtkVariant
"""
import vtk
import vtkConstants
from vtk import vtkVariant
_variant_type_map = {
'void' : vtkConstants.VTK_VOID,
'char' : vtkConstants.VTK_CHAR,
'unsigned char' : vtkConstants.VTK_UNSIGNED_CHAR,
'signed char' : vtkConstants.VTK_SIGNED_CHAR,
'short' : vtkConstants.VTK_SHORT,
'unsigned short' : vtkConstants.VTK_UNSIGNED_SHORT,
'int' : vtkConstants.VTK_INT,
'unsigned int' : vtkConstants.VTK_UNSIGNED_INT,
'long' : vtkConstants.VTK_LONG,
'unsigned long' : vtkConstants.VTK_UNSIGNED_LONG,
'long long' : vtkConstants.VTK_LONG_LONG,
'unsigned long long' : vtkConstants.VTK_UNSIGNED_LONG_LONG,
'__int64' : vtkConstants.VTK___INT64,
'unsigned __int64' : vtkConstants.VTK_UNSIGNED___INT64,
'float' : vtkConstants.VTK_FLOAT,
'double' : vtkConstants.VTK_DOUBLE,
'string' : vtkConstants.VTK_STRING,
'unicode string' : vtkConstants.VTK_UNICODE_STRING,
'vtkObjectBase' : vtkConstants.VTK_OBJECT,
'vtkObject' : vtkConstants.VTK_OBJECT,
}
_variant_method_map = {
vtkConstants.VTK_VOID : '',
vtkConstants.VTK_CHAR : 'ToChar',
vtkConstants.VTK_UNSIGNED_CHAR : 'ToUnsignedChar',
vtkConstants.VTK_SIGNED_CHAR : 'ToSignedChar',
vtkConstants.VTK_SHORT : 'ToShort',
vtkConstants.VTK_UNSIGNED_SHORT : 'ToUnsignedShort',
vtkConstants.VTK_INT : 'ToInt',
vtkConstants.VTK_UNSIGNED_INT : 'ToUnsignedInt',
vtkConstants.VTK_LONG : 'ToLong',
vtkConstants.VTK_UNSIGNED_LONG : 'ToUnsignedLong',
vtkConstants.VTK_LONG_LONG : 'ToLongLong',
vtkConstants.VTK_UNSIGNED_LONG_LONG : 'ToUnsignedLongLong',
vtkConstants.VTK___INT64 : 'To__Int64',
vtkConstants.VTK_UNSIGNED___INT64 : 'ToUnsigned__Int64',
vtkConstants.VTK_FLOAT : 'ToFloat',
vtkConstants.VTK_DOUBLE : 'ToDouble',
vtkConstants.VTK_STRING : 'ToString',
vtkConstants.VTK_UNICODE_STRING : 'ToUnicodeString',
vtkConstants.VTK_OBJECT : 'ToVTKObject',
}
_variant_check_map = {
vtkConstants.VTK_VOID : 'IsValid',
vtkConstants.VTK_CHAR : 'IsChar',
vtkConstants.VTK_UNSIGNED_CHAR : 'IsUnsignedChar',
vtkConstants.VTK_SIGNED_CHAR : 'IsSignedChar',
vtkConstants.VTK_SHORT : 'IsShort',
vtkConstants.VTK_UNSIGNED_SHORT : 'IsUnsignedShort',
vtkConstants.VTK_INT : 'IsInt',
vtkConstants.VTK_UNSIGNED_INT : 'IsUnsignedInt',
vtkConstants.VTK_LONG : 'IsLong',
vtkConstants.VTK_UNSIGNED_LONG : 'IsUnsignedLong',
vtkConstants.VTK_LONG_LONG : 'IsLongLong',
vtkConstants.VTK_UNSIGNED_LONG_LONG : 'IsUnsignedLongLong',
vtkConstants.VTK___INT64 : 'Is__Int64',
vtkConstants.VTK_UNSIGNED___INT64 : 'IsUnsigned__Int64',
vtkConstants.VTK_FLOAT : 'IsFloat',
vtkConstants.VTK_DOUBLE : 'IsDouble',
vtkConstants.VTK_STRING : 'IsString',
vtkConstants.VTK_UNICODE_STRING : 'IsUnicodeString',
vtkConstants.VTK_OBJECT : 'IsVTKObject',
}
def vtkVariantCreate(v, t):
"""
Create a vtkVariant of the specified type, where the type is in the
following format: 'int', 'unsigned int', etc. for numeric types,
and 'string' or 'unicode string' for strings. You can also use an
integer VTK type constant for the type.
"""
if not issubclass(type(t), int):
t = _variant_type_map[t]
return vtkVariant(v, t)
def vtkVariantExtract(v, t=None):
"""
Extract the specified value type from the vtkVariant, where the type is
in the following format: 'int', 'unsigned int', etc. for numeric types,
and 'string' or 'unicode string' for strings. You can also use an
integer VTK type constant for the type. Set the type to None to
extract the value in its native type.
"""
v = vtkVariant(v)
if t is None:
t = v.GetType()
elif not issubclass(type(t), int):
t = _variant_type_map[t]
if getattr(v, _variant_check_map[t])():
return getattr(v, _variant_method_map[t])()
else:
return None
def vtkVariantCast(v, t):
"""
Cast the vtkVariant to the specified value type, where the type is
in the following format: 'int', 'unsigned int', etc. for numeric types,
and 'string' or 'unicode string' for strings. You can also use an
integer VTK type constant for the type.
"""
if not issubclass(type(t), int):
t = _variant_type_map[t]
v = vtkVariant(v, t)
if v.IsValid():
return getattr(v, _variant_method_map[t])()
else:
return None
def vtkVariantStrictWeakOrder(s1, s2):
"""
Compare variants by type first, and then by value. The return values
are -1, 0, 1, like the Python cmp() function, for compatibility with the
Python list sort() method. This is in contrast with the C++ version,
which returns true or false.
"""
s1 = vtkVariant(s1)
s2 = vtkVariant(s2)
t1 = s1.GetType()
t2 = s2.GetType()
# check based on type
if t1 != t2:
return cmp(t1,t2)
v1 = s1.IsValid()
v2 = s2.IsValid()
# check based on validity
if (not v1) and (not v2):
return 0
elif v1 != v2:
return cmp(v1,v2)
# extract and compare the values
r1 = getattr(s1, _variant_method_map[t1])()
r2 = getattr(s2, _variant_method_map[t2])()
# compare vtk objects by classname
if t1 == vtk.VTK_OBJECT:
return cmp(r1.GetClassName(), r2.GetClassName())
return cmp(r1, r2)
def vtkVariantStrictEquality(s1, s2):
"""
Check two variants for strict equality of type and value.
"""
s1 = vtkVariant(s1)
s2 = vtkVariant(s2)
t1 = s1.GetType()
t2 = s2.GetType()
# check based on type
if t1 != t2:
return False
v1 = s1.IsValid()
v2 = s2.IsValid()
# check based on validity
if (not v1) and (not v2):
return True
elif v1 != v2:
return False
# extract and compare the values
r1 = getattr(s1, _variant_method_map[t1])()
r2 = getattr(s2, _variant_method_map[t2])()
return (r1 == r2)
def vtkVariantLessThan(s1, s2):
"""
Return true if s1 < s2. This isn't very useful in Python.
"""
return (vtkVariant(s1) < vtkVariant(s2))
def vtkVariantEqual(s1, s2):
"""
Return true if s1 == s2. This isn't very useful in Python.
"""
return (vtkVariant(s1) == vtkVariant(s2))
del vtkConstants
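# A hedged usage sketch for the helpers above; expected results are noted
# in the comments.
#
#   v = vtkVariantCreate(10, 'int')
#   print vtkVariantExtract(v)                # 10
#   print vtkVariantExtract(v, 'double')      # None (v does not hold a double)
#   print vtkVariantCast(v, 'string')         # '10'
#   print vtkVariantStrictEquality(10, 10.0)  # False: int vs. double types
#   print vtkVariantLessThan(5, 12)           # True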

View File

@ -0,0 +1,57 @@
import paraview
try:
from vtkCommonComputationalGeometryPython import *
except ImportError:
paraview.print_error("Error: Could not import vtkCommonComputationalGeometryPython")
from vtkCommonCorePython import *
from vtkCommonDataModelPython import *
from vtkCommonExecutionModelPython import *
try:
from vtkCommonMathPython import *
except ImportError:
paraview.print_error("Error: Could not import vtkCommonMathPython")
try:
from vtkCommonMiscPython import *
except ImportError:
paraview.print_error("Error: Could not import vtkCommonMiscPython")
try:
from vtkCommonSystemPython import *
except ImportError:
paraview.print_error("Error: Could not import vtkCommonSystemPython")
try:
from vtkCommonTransformsPython import *
except ImportError:
paraview.print_error("Error: Could not import vtkCommonTransformsPython")
from vtkFiltersProgrammablePython import *
from vtkParallelCorePython import *
try:
from vtkRenderingCorePython import vtkCamera
except ImportError:
paraview.print_error("Error: Could not import vtkRenderingCorePython")
try:
from vtkFiltersCorePython import *
except ImportError:
paraview.print_error("Error: Could not import vtkFiltersCorePython")
# --------------------------------------
# useful mapping for getting type names
__vtkTypeNameDict = {VTK_VOID:"void",
VTK_DOUBLE:"double",
VTK_FLOAT:"float",
VTK_LONG:"long",
VTK_UNSIGNED_LONG:"unsigned long",
VTK_INT:"int",
VTK_UNSIGNED_INT:"unsigned int",
VTK_SHORT:"short",
VTK_UNSIGNED_SHORT:"unsigned short",
VTK_CHAR:"char",
VTK_UNSIGNED_CHAR:"unsigned char",
VTK_SIGNED_CHAR:"signed char",
VTK_LONG_LONG:"long long",
VTK_UNSIGNED_LONG_LONG:"unsigned long long",
VTK___INT64:"__int64",
VTK_UNSIGNED___INT64:"unsigned __int64",
VTK_ID_TYPE:"vtkIdType",
VTK_BIT:"bit"}